Diffstat (limited to 'drivers')
566 files changed, 23875 insertions, 8850 deletions
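A recurring change in the ACPI portion of this diff is the replacement of direct list_for_each_entry() walks over an acpi_device's children list with the acpi_dev_for_each_child() callback interface (implemented on top of device_for_each_child(), as the bus.c hunk below shows). As a reading aid, here is a minimal sketch of that callback pattern, modeled on check_offline() in the container.c hunk and match_any() in the glue.c hunk; the count_children()/count_one() helpers are hypothetical names for illustration, not part of the diff.

    /* Sketch only: per the bus.c hunk below, the callback contract is that
     * returning 0 continues the walk and any nonzero value stops it and is
     * propagated back as acpi_dev_for_each_child()'s return value. */
    #include <acpi/acpi_bus.h>	/* struct acpi_device, acpi_dev_for_each_child() */

    static int count_one(struct acpi_device *adev, void *data)
    {
    	unsigned int *count = data;

    	(*count)++;		/* visit every child */
    	return 0;		/* 0 == keep walking */
    }

    static unsigned int count_children(struct acpi_device *parent)
    {
    	unsigned int count = 0;

    	acpi_dev_for_each_child(parent, count_one, &count);
    	return count;
    }

The same contract explains acpi_dev_has_children() in the glue.c hunk: its match_any() callback returns 1 for the first child visited, so a positive walk result means at least one child exists.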
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 1e34f846508f..7802d8846a8d 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -210,7 +210,7 @@ config ACPI_TINY_POWER_BUTTON_SIGNAL config ACPI_VIDEO tristate "Video" - depends on X86 && BACKLIGHT_CLASS_DEVICE + depends on BACKLIGHT_CLASS_DEVICE depends on INPUT select THERMAL help @@ -255,7 +255,6 @@ config ACPI_DOCK config ACPI_CPU_FREQ_PSS bool - select THERMAL config ACPI_PROCESSOR_CSTATE def_bool y @@ -287,6 +286,7 @@ config ACPI_PROCESSOR depends on X86 || IA64 || ARM64 || LOONGARCH select ACPI_PROCESSOR_IDLE select ACPI_CPU_FREQ_PSS if X86 || IA64 || LOONGARCH + select THERMAL default y help This driver adds support for the ACPI Processor package. It is required @@ -572,6 +572,21 @@ source "drivers/acpi/pmic/Kconfig" config ACPI_VIOT bool +config ACPI_PRMT + bool "Platform Runtime Mechanism Support" + depends on EFI && (X86_64 || ARM64) + default y + help + Platform Runtime Mechanism (PRM) is a firmware interface exposing a + set of binary executables that can be called from the AML interpreter + or directly from device drivers. + + Say Y to enable the AML interpreter to execute the PRM code. + + While this feature is optional in principle, leaving it out may + substantially increase computational overhead related to the + initialization of some server systems. + endif # ACPI config X86_PM_TIMER @@ -589,18 +604,3 @@ config X86_PM_TIMER You should nearly always say Y here because many modern systems require this timer. - -config ACPI_PRMT - bool "Platform Runtime Mechanism Support" - depends on EFI && X86_64 - default y - help - Platform Runtime Mechanism (PRM) is a firmware interface exposing a - set of binary executables that can be called from the AML interpreter - or directly from device drivers. - - Say Y to enable the AML interpreter to execute the PRM code. - - While this feature is optional in principle, leaving it out may - substantially increase computational overhead related to the - initialization of some server systems. diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index b5a8d3e00a52..0002eecbf870 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -109,10 +109,9 @@ obj-$(CONFIG_ACPI_PPTT) += pptt.o obj-$(CONFIG_ACPI_PFRUT) += pfr_update.o pfr_telemetry.o # processor has its own "processor." 
module_param namespace -processor-y := processor_driver.o +processor-y := processor_driver.o processor_thermal.o processor-$(CONFIG_ACPI_PROCESSOR_IDLE) += processor_idle.o -processor-$(CONFIG_ACPI_CPU_FREQ_PSS) += processor_throttling.o \ - processor_thermal.o +processor-$(CONFIG_ACPI_CPU_FREQ_PSS) += processor_throttling.o processor-$(CONFIG_CPU_FREQ) += processor_perflib.o obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c index 48e5059d67ca..50540d4d4948 100644 --- a/drivers/acpi/acpi_lpit.c +++ b/drivers/acpi/acpi_lpit.c @@ -109,17 +109,11 @@ static void lpit_update_residency(struct lpit_residency_info *info, if (!info->iomem_addr) return; - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) - return; - /* Silently fail, if cpuidle attribute group is not present */ sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj, &dev_attr_low_power_idle_system_residency_us.attr, "cpuidle"); } else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) - return; - /* Silently fail, if cpuidle attribute group is not present */ sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj, &dev_attr_low_power_idle_cpu_residency_us.attr, diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index fbe0756259c5..c4d4d21391d7 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -422,6 +422,9 @@ static int register_device_clock(struct acpi_device *adev, if (!lpss_clk_dev) lpt_register_clock_device(); + if (IS_ERR(lpss_clk_dev)) + return PTR_ERR(lpss_clk_dev); + clk_data = platform_get_drvdata(lpss_clk_dev); if (!clk_data) return -ENODEV; diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index eaea733b368a..5cbe2196176d 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -1150,24 +1150,25 @@ acpi_video_get_device_type(struct acpi_video_bus *video, return 0; } -static int -acpi_video_bus_get_one_device(struct acpi_device *device, - struct acpi_video_bus *video) +static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg) { - unsigned long long device_id; - int status, device_type; - struct acpi_video_device *data; + struct acpi_video_bus *video = arg; struct acpi_video_device_attrib *attribute; + struct acpi_video_device *data; + unsigned long long device_id; + acpi_status status; + int device_type; - status = - acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id); - /* Some device omits _ADR, we skip them instead of fail */ + status = acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id); + /* Skip devices without _ADR instead of failing. 
*/ if (ACPI_FAILURE(status)) - return 0; + goto exit; data = kzalloc(sizeof(struct acpi_video_device), GFP_KERNEL); - if (!data) + if (!data) { + dev_dbg(&device->dev, "Cannot attach\n"); return -ENOMEM; + } strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS); @@ -1230,7 +1231,9 @@ acpi_video_bus_get_one_device(struct acpi_device *device, list_add_tail(&data->entry, &video->video_device_list); mutex_unlock(&video->device_list_lock); - return status; +exit: + video->child_count++; + return 0; } /* @@ -1542,9 +1545,6 @@ static int acpi_video_bus_get_devices(struct acpi_video_bus *video, struct acpi_device *device) { - int status = 0; - struct acpi_device *dev; - /* * There are systems where video module known to work fine regardless * of broken _DOD and ignoring returned value here doesn't cause @@ -1552,16 +1552,7 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video, */ acpi_video_device_enumerate(video); - list_for_each_entry(dev, &device->children, node) { - - status = acpi_video_bus_get_one_device(dev, video); - if (status) { - dev_err(&dev->dev, "Can't attach device\n"); - break; - } - video->child_count++; - } - return status; + return acpi_dev_for_each_child(device, acpi_video_bus_get_one_device, video); } /* acpi_video interface */ diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 33b7fbbeda82..9f49272cad39 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -3,7 +3,7 @@ * apei-base.c - ACPI Platform Error Interface (APEI) supporting * infrastructure * - * APEI allows to report errors (for example from the chipset) to the + * APEI allows to report errors (for example from the chipset) to * the operating system. This improves NMI handling especially. In * addition it supports error serialization and error injection. * diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index 598fd19b65fa..45973aa6e06d 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -29,16 +29,26 @@ #undef pr_fmt #define pr_fmt(fmt) "BERT: " fmt + +#define ACPI_BERT_PRINT_MAX_RECORDS 5 #define ACPI_BERT_PRINT_MAX_LEN 1024 static int bert_disable; +/* + * Print "all" the error records in the BERT table, but avoid huge spam to + * the console if the BIOS included oversize records, or too many records. + * Skipping some records here does not lose anything because the full + * data is available to user tools in: + * /sys/firmware/acpi/tables/data/BERT + */ static void __init bert_print_all(struct acpi_bert_region *region, unsigned int region_len) { struct acpi_hest_generic_status *estatus = (struct acpi_hest_generic_status *)region; int remain = region_len; + int printed = 0, skipped = 0; u32 estatus_len; while (remain >= sizeof(struct acpi_bert_region)) { @@ -46,24 +56,26 @@ static void __init bert_print_all(struct acpi_bert_region *region, if (remain < estatus_len) { pr_err(FW_BUG "Truncated status block (length: %u).\n", estatus_len); - return; + break; } /* No more error records. 
*/ if (!estatus->block_status) - return; + break; if (cper_estatus_check(estatus)) { pr_err(FW_BUG "Invalid error record.\n"); - return; + break; } - pr_info_once("Error records from previous boot:\n"); - if (region_len < ACPI_BERT_PRINT_MAX_LEN) + if (estatus_len < ACPI_BERT_PRINT_MAX_LEN && + printed < ACPI_BERT_PRINT_MAX_RECORDS) { + pr_info_once("Error records from previous boot:\n"); cper_estatus_print(KERN_INFO HW_ERR, estatus); - else - pr_info_once("Max print length exceeded, table data is available at:\n" - "/sys/firmware/acpi/tables/data/BERT"); + printed++; + } else { + skipped++; + } /* * Because the boot error source is "one-time polled" type, @@ -75,6 +87,9 @@ static void __init bert_print_all(struct acpi_bert_region *region, estatus = (void *)estatus + estatus_len; remain -= estatus_len; } + + if (skipped) + pr_info(HW_ERR "Skipped %d error records\n", skipped); } static int __init setup_bert_disable(char *str) diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index d4326ec12d29..6b583373c58a 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -546,6 +546,8 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, != REGION_INTERSECTS) && (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY) != REGION_INTERSECTS) && + (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED) + != REGION_INTERSECTS) && !arch_is_platform_page(base_addr))) return -EINVAL; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index e2db1bdd9dd2..c0d20d997891 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -464,7 +464,6 @@ out_free: static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) { struct acpi_device *adev; - struct acpi_driver *driver; u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; bool hotplug_event = false; @@ -516,10 +515,13 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) if (!adev) goto err; - driver = adev->driver; - if (driver && driver->ops.notify && - (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS)) - driver->ops.notify(adev, type); + if (adev->dev.driver) { + struct acpi_driver *driver = to_acpi_driver(adev->dev.driver); + + if (driver && driver->ops.notify && + (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS)) + driver->ops.notify(adev, type); + } if (!hotplug_event) { acpi_bus_put_acpi_device(adev); @@ -538,8 +540,9 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) static void acpi_notify_device(acpi_handle handle, u32 event, void *data) { struct acpi_device *device = data; + struct acpi_driver *acpi_drv = to_acpi_driver(device->dev.driver); - device->driver->ops.notify(device, event); + acpi_drv->ops.notify(device, event); } static void acpi_notify_device_fixed(void *data) @@ -1032,8 +1035,6 @@ static int acpi_device_probe(struct device *dev) if (ret) return ret; - acpi_dev->driver = acpi_drv; - pr_debug("Driver [%s] successfully bound to device [%s]\n", acpi_drv->name, acpi_dev->pnp.bus_id); @@ -1043,7 +1044,6 @@ static int acpi_device_probe(struct device *dev) if (acpi_drv->ops.remove) acpi_drv->ops.remove(acpi_dev); - acpi_dev->driver = NULL; acpi_dev->driver_data = NULL; return ret; } @@ -1059,15 +1059,14 @@ static int acpi_device_probe(struct device *dev) static void acpi_device_remove(struct device *dev) { struct acpi_device *acpi_dev = to_acpi_device(dev); - struct acpi_driver *acpi_drv = acpi_dev->driver; + struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver); + + if (acpi_drv->ops.notify) 
+ acpi_device_remove_notify_handler(acpi_dev); + + if (acpi_drv->ops.remove) + acpi_drv->ops.remove(acpi_dev); - if (acpi_drv) { - if (acpi_drv->ops.notify) - acpi_device_remove_notify_handler(acpi_dev); - if (acpi_drv->ops.remove) - acpi_drv->ops.remove(acpi_dev); - } - acpi_dev->driver = NULL; acpi_dev->driver_data = NULL; put_device(dev); @@ -1101,6 +1100,7 @@ static int acpi_dev_for_one_check(struct device *dev, void *context) return adwc->fn(to_acpi_device(dev), adwc->data); } +EXPORT_SYMBOL_GPL(acpi_dev_for_each_child); int acpi_dev_for_each_child(struct acpi_device *adev, int (*fn)(struct acpi_device *, void *), void *data) @@ -1113,6 +1113,18 @@ int acpi_dev_for_each_child(struct acpi_device *adev, return device_for_each_child(&adev->dev, &adwc, acpi_dev_for_one_check); } +int acpi_dev_for_each_child_reverse(struct acpi_device *adev, + int (*fn)(struct acpi_device *, void *), + void *data) +{ + struct acpi_dev_walk_context adwc = { + .fn = fn, + .data = data, + }; + + return device_for_each_child_reverse(&adev->dev, &adwc, acpi_dev_for_one_check); +} + /* -------------------------------------------------------------------------- Initialization/Cleanup -------------------------------------------------------------------------- */ @@ -1144,6 +1156,9 @@ static int __init acpi_bus_init_irq(void) case ACPI_IRQ_MODEL_PLATFORM: message = "platform specific model"; break; + case ACPI_IRQ_MODEL_LPIC: + message = "LPIC"; + break; default: pr_info("Unknown interrupt routing model\n"); return -ENODEV; @@ -1399,6 +1414,7 @@ static int __init acpi_init(void) pci_mmcfg_late_init(); acpi_iort_init(); + acpi_viot_early_init(); acpi_hest_init(); acpi_ghes_init(); acpi_scan_init(); diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index ccaa647ac3d4..5b7e3b9ae370 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c @@ -23,17 +23,18 @@ static const struct acpi_device_id container_device_ids[] = { #ifdef CONFIG_ACPI_CONTAINER -static int acpi_container_offline(struct container_dev *cdev) +static int check_offline(struct acpi_device *adev, void *not_used) { - struct acpi_device *adev = ACPI_COMPANION(&cdev->dev); - struct acpi_device *child; + if (acpi_scan_is_offline(adev, false)) + return 0; - /* Check all of the dependent devices' physical companions. */ - list_for_each_entry(child, &adev->children, node) - if (!acpi_scan_is_offline(child, false)) - return -EBUSY; + return -EBUSY; +} - return 0; +static int acpi_container_offline(struct container_dev *cdev) +{ + /* Check all of the dependent devices' physical companions. */ + return acpi_dev_for_each_child(ACPI_COMPANION(&cdev->dev), check_offline, NULL); } static void acpi_container_release(struct device *dev) diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 3c6d4ef87be0..1e15a9f25ae9 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -618,33 +618,6 @@ static int pcc_data_alloc(int pcc_ss_id) return 0; } -/* Check if CPPC revision + num_ent combination is supported */ -static bool is_cppc_supported(int revision, int num_ent) -{ - int expected_num_ent; - - switch (revision) { - case CPPC_V2_REV: - expected_num_ent = CPPC_V2_NUM_ENT; - break; - case CPPC_V3_REV: - expected_num_ent = CPPC_V3_NUM_ENT; - break; - default: - pr_debug("Firmware exports unsupported CPPC revision: %d\n", - revision); - return false; - } - - if (expected_num_ent != num_ent) { - pr_debug("Firmware exports %d entries. 
Expected: %d for CPPC rev:%d\n", - num_ent, expected_num_ent, revision); - return false; - } - - return true; -} - /* * An example CPC table looks like the following. * @@ -733,7 +706,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) cpc_obj->type, pr->id); goto out_free; } - cpc_ptr->num_entries = num_ent; /* Second entry should be revision. */ cpc_obj = &out_obj->package.elements[1]; @@ -744,10 +716,32 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) cpc_obj->type, pr->id); goto out_free; } - cpc_ptr->version = cpc_rev; - if (!is_cppc_supported(cpc_rev, num_ent)) + if (cpc_rev < CPPC_V2_REV) { + pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev, + pr->id); + goto out_free; + } + + /* + * Disregard _CPC if the number of entries in the return pachage is not + * as expected, but support future revisions being proper supersets of + * the v3 and only causing more entries to be returned by _CPC. + */ + if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) || + (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) || + (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) { + pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n", + num_ent, pr->id); goto out_free; + } + if (cpc_rev > CPPC_V3_REV) { + num_ent = CPPC_V3_NUM_ENT; + cpc_rev = CPPC_V3_REV; + } + + cpc_ptr->num_entries = num_ent; + cpc_ptr->version = cpc_rev; /* Iterate through remaining entries in _CPC */ for (i = 2; i < num_ent; i++) { diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 130b5f4a50a3..9dce1245689c 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -369,6 +369,28 @@ int acpi_device_fix_up_power(struct acpi_device *device) } EXPORT_SYMBOL_GPL(acpi_device_fix_up_power); +static int fix_up_power_if_applicable(struct acpi_device *adev, void *not_used) +{ + if (adev->status.present && adev->status.enabled) + acpi_device_fix_up_power(adev); + + return 0; +} + +/** + * acpi_device_fix_up_power_extended - Force device and its children into D0. + * @adev: Parent device object whose power state is to be fixed up. + * + * Call acpi_device_fix_up_power() for @adev and its children so long as they + * are reported as present and enabled. 
+ */ +void acpi_device_fix_up_power_extended(struct acpi_device *adev) +{ + acpi_device_fix_up_power(adev); + acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL); +} +EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended); + int acpi_device_update_power(struct acpi_device *device, int *state_p) { int state; diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index d5d6403ba07b..120873dad2cc 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -376,7 +376,7 @@ eject_store(struct device *d, struct device_attribute *attr, return -EINVAL; if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled) - && !acpi_device->driver) + && !d->driver) return -ENODEV; status = acpi_get_type(acpi_device->handle, ¬_used); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index a1b871a418f8..c95e535035a0 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -180,7 +180,6 @@ static struct workqueue_struct *ec_wq; static struct workqueue_struct *ec_query_wq; static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */ -static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */ static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ @@ -1407,24 +1406,16 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval) if (ec->data_addr == 0 || ec->command_addr == 0) return AE_OK; - if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) { - /* - * Always inherit the GPE number setting from the ECDT - * EC. - */ - ec->gpe = boot_ec->gpe; - } else { - /* Get GPE bit assignment (EC events). */ - /* TODO: Add support for _GPE returning a package */ - status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); - if (ACPI_SUCCESS(status)) - ec->gpe = tmp; + /* Get GPE bit assignment (EC events). */ + /* TODO: Add support for _GPE returning a package */ + status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); + if (ACPI_SUCCESS(status)) + ec->gpe = tmp; + /* + * Errors are non-fatal, allowing for ACPI Reduced Hardware + * platforms which use GpioInt instead of GPE. + */ - /* - * Errors are non-fatal, allowing for ACPI Reduced Hardware - * platforms which use GpioInt instead of GPE. - */ - } /* Use the global lock for all EC transactions? */ tmp = 0; acpi_evaluate_integer(handle, "_GLK", NULL, &tmp); @@ -1626,15 +1617,18 @@ static int acpi_ec_add(struct acpi_device *device) } if (boot_ec && ec->command_addr == boot_ec->command_addr && - ec->data_addr == boot_ec->data_addr && - !EC_FLAGS_TRUST_DSDT_GPE) { + ec->data_addr == boot_ec->data_addr) { /* - * Trust PNP0C09 namespace location rather than - * ECDT ID. But trust ECDT GPE rather than _GPE - * because of ASUS quirks, so do not change - * boot_ec->gpe to ec->gpe. + * Trust PNP0C09 namespace location rather than ECDT ID. + * But trust ECDT GPE rather than _GPE because of ASUS + * quirks. So do not change boot_ec->gpe to ec->gpe, + * except when the TRUST_DSDT_GPE quirk is set. */ boot_ec->handle = ec->handle; + + if (EC_FLAGS_TRUST_DSDT_GPE) + boot_ec->gpe = ec->gpe; + acpi_handle_debug(ec->handle, "duplicated.\n"); acpi_ec_free(ec); ec = boot_ec; @@ -1862,68 +1856,40 @@ static int ec_honor_dsdt_gpe(const struct dmi_system_id *id) return 0; } -/* - * Some DSDTs contain wrong GPE setting. 
- * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD - * https://bugzilla.kernel.org/show_bug.cgi?id=195651 - */ -static int ec_honor_ecdt_gpe(const struct dmi_system_id *id) -{ - pr_debug("Detected system needing ignore DSDT GPE setting.\n"); - EC_FLAGS_IGNORE_DSDT_GPE = 1; - return 0; -} - static const struct dmi_system_id ec_dmi_table[] __initconst = { { - ec_correct_ecdt, "MSI MS-171F", { - DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), - DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUS FX502VD", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUS FX502VE", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUS GL702VMK", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUS X550VXK", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUS X580VD", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL}, + /* + * MSI MS-171F + * https://bugzilla.kernel.org/show_bug.cgi?id=12461 + */ + .callback = ec_correct_ecdt, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"), + }, + }, { - /* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */ - ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),}, NULL}, + /* + * HP Pavilion Gaming Laptop 15-cx0xxx + * https://bugzilla.kernel.org/show_bug.cgi?id=209989 + */ + .callback = ec_honor_dsdt_gpe, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"), + }, + }, { - ec_clear_on_resume, "Samsung hardware", { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, - {}, + /* + * Samsung hardware + * https://bugzilla.kernel.org/show_bug.cgi?id=44161 + */ + .callback = ec_clear_on_resume, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + }, + }, + {} }; void __init acpi_ec_ecdt_probe(void) @@ -2201,28 +2167,18 @@ static int acpi_ec_init_workqueues(void) static const struct dmi_system_id acpi_ec_no_wakeup[] = { { - .ident = "Thinkpad X1 Carbon 6th", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"), }, }, { - .ident = "ThinkPad X1 Carbon 6th", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"), - }, - }, - { - .ident = "ThinkPad X1 Yoga 3rd", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 
3rd"), }, }, { - .ident = "HP ZHAN 66 Pro", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"), diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 8d769114a048..204fe94c7e45 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -77,12 +77,22 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev) #define FIND_CHILD_MIN_SCORE 1 #define FIND_CHILD_MAX_SCORE 2 +static int match_any(struct acpi_device *adev, void *not_used) +{ + return 1; +} + +static bool acpi_dev_has_children(struct acpi_device *adev) +{ + return acpi_dev_for_each_child(adev, match_any, NULL) > 0; +} + static int find_child_checks(struct acpi_device *adev, bool check_children) { unsigned long long sta; acpi_status status; - if (check_children && list_empty(&adev->children)) + if (check_children && !acpi_dev_has_children(adev)) return -ENODEV; status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta); @@ -105,54 +115,97 @@ static int find_child_checks(struct acpi_device *adev, bool check_children) return FIND_CHILD_MAX_SCORE; } -struct acpi_device *acpi_find_child_device(struct acpi_device *parent, - u64 address, bool check_children) -{ - struct acpi_device *adev, *ret = NULL; - int ret_score = 0; - - if (!parent) - return NULL; +struct find_child_walk_data { + struct acpi_device *adev; + u64 address; + int score; + bool check_sta; + bool check_children; +}; - list_for_each_entry(adev, &parent->children, node) { - acpi_bus_address addr = acpi_device_adr(adev); - int score; +static int check_one_child(struct acpi_device *adev, void *data) +{ + struct find_child_walk_data *wd = data; + int score; - if (!adev->pnp.type.bus_address || addr != address) - continue; + if (!adev->pnp.type.bus_address || acpi_device_adr(adev) != wd->address) + return 0; - if (!ret) { - /* This is the first matching object. Save it. */ - ret = adev; - continue; - } + if (!wd->adev) { /* - * There is more than one matching device object with the same - * _ADR value. That really is unexpected, so we are kind of - * beyond the scope of the spec here. We have to choose which - * one to return, though. - * - * First, check if the previously found object is good enough - * and return it if so. Second, do the same for the object that - * we've just found. + * This is the first matching object, so save it. If it is not + * necessary to look for any other matching objects, stop the + * search. */ - if (!ret_score) { - ret_score = find_child_checks(ret, check_children); - if (ret_score == FIND_CHILD_MAX_SCORE) - return ret; - } - score = find_child_checks(adev, check_children); - if (score == FIND_CHILD_MAX_SCORE) { - return adev; - } else if (score > ret_score) { - ret = adev; - ret_score = score; - } + wd->adev = adev; + return !(wd->check_sta || wd->check_children); } - return ret; + + /* + * There is more than one matching device object with the same _ADR + * value. That really is unexpected, so we are kind of beyond the scope + * of the spec here. We have to choose which one to return, though. + * + * First, get the score for the previously found object and terminate + * the walk if it is maximum. + */ + if (!wd->score) { + score = find_child_checks(wd->adev, wd->check_children); + if (score == FIND_CHILD_MAX_SCORE) + return 1; + + wd->score = score; + } + /* + * Second, if the object that has just been found has a better score, + * replace the previously found one with it and terminate the walk if + * the new score is maximum. 
+ */ + score = find_child_checks(adev, wd->check_children); + if (score > wd->score) { + wd->adev = adev; + if (score == FIND_CHILD_MAX_SCORE) + return 1; + + wd->score = score; + } + + /* Continue, because there may be better matches. */ + return 0; +} + +static struct acpi_device *acpi_find_child(struct acpi_device *parent, + u64 address, bool check_children, + bool check_sta) +{ + struct find_child_walk_data wd = { + .address = address, + .check_children = check_children, + .check_sta = check_sta, + .adev = NULL, + .score = 0, + }; + + if (parent) + acpi_dev_for_each_child(parent, check_one_child, &wd); + + return wd.adev; +} + +struct acpi_device *acpi_find_child_device(struct acpi_device *parent, + u64 address, bool check_children) +{ + return acpi_find_child(parent, address, check_children, true); } EXPORT_SYMBOL_GPL(acpi_find_child_device); +struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev, + acpi_bus_address adr) +{ + return acpi_find_child(adev, adr, false, false); +} +EXPORT_SYMBOL_GPL(acpi_find_child_by_adr); + static void acpi_physnode_link_name(char *buf, unsigned int node_id) { if (node_id > 0) diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index c68e694fca26..dabe45eba055 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -12,7 +12,8 @@ enum acpi_irq_model_id acpi_irq_model; -static struct fwnode_handle *acpi_gsi_domain_id; +static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi); +static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi); /** * acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI @@ -26,14 +27,18 @@ static struct fwnode_handle *acpi_gsi_domain_id; */ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) { - struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, - DOMAIN_BUS_ANY); + struct irq_domain *d; + d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi), + DOMAIN_BUS_ANY); *irq = irq_find_mapping(d, gsi); /* - * *irq == 0 means no mapping, that should - * be reported as a failure + * *irq == 0 means no mapping, that should be reported as a + * failure, unless there is an arch-specific fallback handler. */ + if (!*irq && acpi_gsi_to_irq_fallback) + *irq = acpi_gsi_to_irq_fallback(gsi); + return (*irq > 0) ? 
0 : -EINVAL; } EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); @@ -53,12 +58,12 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, { struct irq_fwspec fwspec; - if (WARN_ON(!acpi_gsi_domain_id)) { + fwspec.fwnode = acpi_get_gsi_domain_id(gsi); + if (WARN_ON(!fwspec.fwnode)) { pr_warn("GSI: No registered irqchip, giving up\n"); return -EINVAL; } - fwspec.fwnode = acpi_gsi_domain_id; fwspec.param[0] = gsi; fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); fwspec.param_count = 2; @@ -73,13 +78,14 @@ EXPORT_SYMBOL_GPL(acpi_register_gsi); */ void acpi_unregister_gsi(u32 gsi) { - struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, - DOMAIN_BUS_ANY); + struct irq_domain *d; int irq; if (WARN_ON(acpi_irq_model == ACPI_IRQ_MODEL_GIC && gsi < 16)) return; + d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi), + DOMAIN_BUS_ANY); irq = irq_find_mapping(d, gsi); irq_dispose_mapping(irq); } @@ -97,7 +103,8 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi); * The referenced device fwhandle or NULL on failure */ static struct fwnode_handle * -acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) +acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source, + u32 gsi) { struct fwnode_handle *result; struct acpi_device *device; @@ -105,7 +112,7 @@ acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) acpi_status status; if (!source->string_length) - return acpi_gsi_domain_id; + return acpi_get_gsi_domain_id(gsi); status = acpi_get_handle(NULL, source->string_ptr, &handle); if (WARN_ON(ACPI_FAILURE(status))) @@ -194,7 +201,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares, ctx->index -= irq->interrupt_count; return AE_OK; } - fwnode = acpi_gsi_domain_id; + fwnode = acpi_get_gsi_domain_id(irq->interrupts[ctx->index]); acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index], irq->triggering, irq->polarity, irq->shareable, ctx); @@ -207,7 +214,8 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares, ctx->index -= eirq->interrupt_count; return AE_OK; } - fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source); + fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source, + eirq->interrupts[ctx->index]); acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index], eirq->triggering, eirq->polarity, eirq->shareable, ctx); @@ -291,10 +299,20 @@ EXPORT_SYMBOL_GPL(acpi_irq_get); * GSI interrupts */ void __init acpi_set_irq_model(enum acpi_irq_model_id model, - struct fwnode_handle *fwnode) + struct fwnode_handle *(*fn)(u32)) { acpi_irq_model = model; - acpi_gsi_domain_id = fwnode; + acpi_get_gsi_domain_id = fn; +} + +/** + * acpi_set_gsi_to_irq_fallback - Register a GSI transfer + * callback to fallback to arch specified implementation. + * @fn: arch-specific fallback handler + */ +void __init acpi_set_gsi_to_irq_fallback(u32 (*fn)(u32)) +{ + acpi_gsi_to_irq_fallback = fn; } /** @@ -312,8 +330,14 @@ struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, const struct irq_domain_ops *ops, void *host_data) { - struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, - DOMAIN_BUS_ANY); + struct irq_domain *d; + + /* This only works for the GIC model... 
*/ + if (acpi_irq_model != ACPI_IRQ_MODEL_GIC) + return NULL; + + d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(0), + DOMAIN_BUS_ANY); if (!d) return NULL; diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 58647051c948..aa1038b8aec4 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -95,7 +95,7 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, case ACPI_RESOURCE_TYPE_IRQ: { struct acpi_resource_irq *p = &resource->data.irq; - if (!p || !p->interrupt_count) { + if (!p->interrupt_count) { acpi_handle_debug(handle, "Blank _PRS IRQ resource\n"); return AE_OK; @@ -121,7 +121,7 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, { struct acpi_resource_extended_irq *p = &resource->data.extended_irq; - if (!p || !p->interrupt_count) { + if (!p->interrupt_count) { acpi_handle_debug(handle, "Blank _PRS EXT IRQ resource\n"); return AE_OK; @@ -182,7 +182,7 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource, case ACPI_RESOURCE_TYPE_IRQ: { struct acpi_resource_irq *p = &resource->data.irq; - if (!p || !p->interrupt_count) { + if (!p->interrupt_count) { /* * IRQ descriptors may have no IRQ# bits set, * particularly those w/ _STA disabled @@ -197,7 +197,7 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource, { struct acpi_resource_extended_irq *p = &resource->data.extended_irq; - if (!p || !p->interrupt_count) { + if (!p->interrupt_count) { /* * extended IRQ descriptors must * return at least 1 IRQ diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c index 4d3a219c67f8..998101cf16e4 100644 --- a/drivers/acpi/prmt.c +++ b/drivers/acpi/prmt.c @@ -53,7 +53,7 @@ static LIST_HEAD(prm_module_list); struct prm_handler_info { guid_t guid; - u64 handler_addr; + void *handler_addr; u64 static_data_buffer_addr; u64 acpi_param_buffer_addr; @@ -148,7 +148,7 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end) th = &tm->handlers[cur_handler]; guid_copy(&th->guid, (guid_t *)handler_info->handler_guid); - th->handler_addr = efi_pa_va_lookup(handler_info->handler_address); + th->handler_addr = (void *)efi_pa_va_lookup(handler_info->handler_address); th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address); th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address); } while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info))); diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 368a9edefd0c..1278969eec1f 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -139,75 +139,17 @@ static int acpi_soft_cpu_dead(unsigned int cpu) } #ifdef CONFIG_ACPI_CPU_FREQ_PSS -static int acpi_pss_perf_init(struct acpi_processor *pr, - struct acpi_device *device) +static void acpi_pss_perf_init(struct acpi_processor *pr) { - int result = 0; - acpi_processor_ppc_has_changed(pr, 0); acpi_processor_get_throttling_info(pr); if (pr->flags.throttling) pr->flags.limit = 1; - - pr->cdev = thermal_cooling_device_register("Processor", device, - &processor_cooling_ops); - if (IS_ERR(pr->cdev)) { - result = PTR_ERR(pr->cdev); - return result; - } - - dev_dbg(&device->dev, "registered as cooling_device%d\n", - pr->cdev->id); - - result = sysfs_create_link(&device->dev.kobj, - &pr->cdev->device.kobj, - "thermal_cooling"); - if (result) { - dev_err(&device->dev, - "Failed to create sysfs link 
'thermal_cooling'\n"); - goto err_thermal_unregister; - } - - result = sysfs_create_link(&pr->cdev->device.kobj, - &device->dev.kobj, - "device"); - if (result) { - dev_err(&pr->cdev->device, - "Failed to create sysfs link 'device'\n"); - goto err_remove_sysfs_thermal; - } - - return 0; - - err_remove_sysfs_thermal: - sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); - err_thermal_unregister: - thermal_cooling_device_unregister(pr->cdev); - - return result; -} - -static void acpi_pss_perf_exit(struct acpi_processor *pr, - struct acpi_device *device) -{ - if (pr->cdev) { - sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); - sysfs_remove_link(&pr->cdev->device.kobj, "device"); - thermal_cooling_device_unregister(pr->cdev); - pr->cdev = NULL; - } } #else -static inline int acpi_pss_perf_init(struct acpi_processor *pr, - struct acpi_device *device) -{ - return 0; -} - -static inline void acpi_pss_perf_exit(struct acpi_processor *pr, - struct acpi_device *device) {} +static inline void acpi_pss_perf_init(struct acpi_processor *pr) {} #endif /* CONFIG_ACPI_CPU_FREQ_PSS */ static int __acpi_processor_start(struct acpi_device *device) @@ -229,7 +171,9 @@ static int __acpi_processor_start(struct acpi_device *device) if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) acpi_processor_power_init(pr); - result = acpi_pss_perf_init(pr, device); + acpi_pss_perf_init(pr); + + result = acpi_processor_thermal_init(pr, device); if (result) goto err_power_exit; @@ -239,7 +183,7 @@ static int __acpi_processor_start(struct acpi_device *device) return 0; result = -ENODEV; - acpi_pss_perf_exit(pr, device); + acpi_processor_thermal_exit(pr, device); err_power_exit: acpi_processor_power_exit(pr); @@ -277,10 +221,10 @@ static int acpi_processor_stop(struct device *dev) return 0; acpi_processor_power_exit(pr); - acpi_pss_perf_exit(pr, device); - acpi_cppc_processor_exit(pr); + acpi_processor_thermal_exit(pr, device); + return 0; } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 6a5572a1a80c..16a1663d02d4 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -23,6 +23,7 @@ #include <linux/minmax.h> #include <linux/perf_event.h> #include <acpi/processor.h> +#include <linux/context_tracking.h> /* * Include the apic definitions for x86 to have the APIC timer related defines @@ -607,7 +608,7 @@ static DEFINE_RAW_SPINLOCK(c3_lock); * @cx: Target state context * @index: index of target state */ -static int acpi_idle_enter_bm(struct cpuidle_driver *drv, +static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv, struct acpi_processor *pr, struct acpi_processor_cx *cx, int index) @@ -647,11 +648,11 @@ static int acpi_idle_enter_bm(struct cpuidle_driver *drv, raw_spin_unlock(&c3_lock); } - rcu_idle_enter(); + ct_idle_enter(); acpi_idle_do_entry(cx); - rcu_idle_exit(); + ct_idle_exit(); /* Re-enable bus master arbitration */ if (dis_bm) { @@ -664,7 +665,7 @@ static int acpi_idle_enter_bm(struct cpuidle_driver *drv, return index; } -static int acpi_idle_enter(struct cpuidle_device *dev, +static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); @@ -693,7 +694,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev, return index; } -static int acpi_idle_enter_s2idle(struct cpuidle_device *dev, +static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { 
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index d8b2dfcd59b5..db6ac540e924 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c @@ -266,3 +266,57 @@ const struct thermal_cooling_device_ops processor_cooling_ops = { .get_cur_state = processor_get_cur_state, .set_cur_state = processor_set_cur_state, }; + +int acpi_processor_thermal_init(struct acpi_processor *pr, + struct acpi_device *device) +{ + int result = 0; + + pr->cdev = thermal_cooling_device_register("Processor", device, + &processor_cooling_ops); + if (IS_ERR(pr->cdev)) { + result = PTR_ERR(pr->cdev); + return result; + } + + dev_dbg(&device->dev, "registered as cooling_device%d\n", + pr->cdev->id); + + result = sysfs_create_link(&device->dev.kobj, + &pr->cdev->device.kobj, + "thermal_cooling"); + if (result) { + dev_err(&device->dev, + "Failed to create sysfs link 'thermal_cooling'\n"); + goto err_thermal_unregister; + } + + result = sysfs_create_link(&pr->cdev->device.kobj, + &device->dev.kobj, + "device"); + if (result) { + dev_err(&pr->cdev->device, + "Failed to create sysfs link 'device'\n"); + goto err_remove_sysfs_thermal; + } + + return 0; + +err_remove_sysfs_thermal: + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); +err_thermal_unregister: + thermal_cooling_device_unregister(pr->cdev); + + return result; +} + +void acpi_processor_thermal_exit(struct acpi_processor *pr, + struct acpi_device *device) +{ + if (pr->cdev) { + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); + sysfs_remove_link(&pr->cdev->device.kobj, "device"); + thermal_cooling_device_unregister(pr->cdev); + pr->cdev = NULL; + } +} diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index d3173811614e..e764f9ac9cf8 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -1012,6 +1012,22 @@ static int acpi_node_prop_read(const struct fwnode_handle *fwnode, propname, proptype, val, nval); } +static int stop_on_next(struct acpi_device *adev, void *data) +{ + struct acpi_device **ret_p = data; + + if (!*ret_p) { + *ret_p = adev; + return 1; + } + + /* Skip until the "previous" object is found. */ + if (*ret_p == adev) + *ret_p = NULL; + + return 0; +} + /** * acpi_get_next_subnode - Return the next child node handle for a fwnode * @fwnode: Firmware node to find the next child node for. 
@@ -1020,35 +1036,22 @@ static int acpi_node_prop_read(const struct fwnode_handle *fwnode, struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child) { - const struct acpi_device *adev = to_acpi_device_node(fwnode); - const struct list_head *head; - struct list_head *next; + struct acpi_device *adev = to_acpi_device_node(fwnode); if ((!child || is_acpi_device_node(child)) && adev) { - struct acpi_device *child_adev; + struct acpi_device *child_adev = to_acpi_device_node(child); - head = &adev->children; - if (list_empty(head)) - goto nondev; + acpi_dev_for_each_child(adev, stop_on_next, &child_adev); + if (child_adev) + return acpi_fwnode_handle(child_adev); - if (child) { - adev = to_acpi_device_node(child); - next = adev->node.next; - if (next == head) { - child = NULL; - goto nondev; - } - child_adev = list_entry(next, struct acpi_device, node); - } else { - child_adev = list_first_entry(head, struct acpi_device, - node); - } - return acpi_fwnode_handle(child_adev); + child = NULL; } - nondev: if (!child || is_acpi_data_node(child)) { const struct acpi_data_node *data = to_acpi_data_node(fwnode); + const struct list_head *head; + struct list_head *next; struct acpi_data_node *dn; /* diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index c2d494784425..510cdec375c4 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -416,6 +416,16 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, { int i; +#ifdef CONFIG_X86 + /* + * IRQ override isn't needed on modern AMD Zen systems and + * this override breaks active low IRQs on AMD Ryzen 6000 and + * newer systems. Skip it. + */ + if (boot_cpu_has(X86_FEATURE_ZEN)) + return false; +#endif + for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) { const struct irq_override_cmp *entry = &skip_override_table[i]; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 762b61f67e6c..b100e6ca9bb4 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -334,10 +334,9 @@ static int acpi_scan_device_check(struct acpi_device *adev) return error; } -static int acpi_scan_bus_check(struct acpi_device *adev) +static int acpi_scan_bus_check(struct acpi_device *adev, void *not_used) { struct acpi_scan_handler *handler = adev->handler; - struct acpi_device *child; int error; acpi_bus_get_status(adev); @@ -353,19 +352,14 @@ static int acpi_scan_bus_check(struct acpi_device *adev) dev_warn(&adev->dev, "Namespace scan failure\n"); return error; } - list_for_each_entry(child, &adev->children, node) { - error = acpi_scan_bus_check(child); - if (error) - return error; - } - return 0; + return acpi_dev_for_each_child(adev, acpi_scan_bus_check, NULL); } static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type) { switch (type) { case ACPI_NOTIFY_BUS_CHECK: - return acpi_scan_bus_check(adev); + return acpi_scan_bus_check(adev, NULL); case ACPI_NOTIFY_DEVICE_CHECK: return acpi_scan_device_check(adev); case ACPI_NOTIFY_EJECT_REQUEST: @@ -471,8 +465,6 @@ static void acpi_device_del(struct acpi_device *device) struct acpi_device_bus_id *acpi_device_bus_id; mutex_lock(&acpi_device_lock); - if (device->parent) - list_del(&device->node); list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) if (!strcmp(acpi_device_bus_id->bus_id, @@ -488,6 +480,7 @@ static void acpi_device_del(struct acpi_device *device) } list_del(&device->wakeup_list); + mutex_unlock(&acpi_device_lock); acpi_power_add_remove_device(device, false); @@ -680,8 +673,6 @@ static 
int __acpi_device_add(struct acpi_device *device, * ------- * Link this device to its parent and siblings. */ - INIT_LIST_HEAD(&device->children); - INIT_LIST_HEAD(&device->node); INIT_LIST_HEAD(&device->wakeup_list); INIT_LIST_HEAD(&device->physical_node_list); INIT_LIST_HEAD(&device->del_list); @@ -721,9 +712,6 @@ static int __acpi_device_add(struct acpi_device *device, list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); } - if (device->parent) - list_add_tail(&device->node, &device->parent->children); - if (device->wakeup.flags.valid) list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list); @@ -752,9 +740,6 @@ static int __acpi_device_add(struct acpi_device *device, err: mutex_lock(&acpi_device_lock); - if (device->parent) - list_del(&device->node); - list_del(&device->wakeup_list); err_unlock: @@ -2187,9 +2172,8 @@ static int acpi_scan_attach_handler(struct acpi_device *device) return ret; } -static void acpi_bus_attach(struct acpi_device *device, bool first_pass) +static int acpi_bus_attach(struct acpi_device *device, void *first_pass) { - struct acpi_device *child; bool skip = !first_pass && device->flags.visited; acpi_handle ejd; int ret; @@ -2206,7 +2190,7 @@ static void acpi_bus_attach(struct acpi_device *device, bool first_pass) device->flags.initialized = false; acpi_device_clear_enumerated(device); device->flags.power_manageable = 0; - return; + return 0; } if (device->handler) goto ok; @@ -2224,7 +2208,7 @@ static void acpi_bus_attach(struct acpi_device *device, bool first_pass) ret = acpi_scan_attach_handler(device); if (ret < 0) - return; + return 0; device->flags.match_driver = true; if (ret > 0 && !device->flags.enumeration_by_parent) { @@ -2234,19 +2218,20 @@ static void acpi_bus_attach(struct acpi_device *device, bool first_pass) ret = device_attach(&device->dev); if (ret < 0) - return; + return 0; if (device->pnp.type.platform_id || device->flags.enumeration_by_parent) acpi_default_enumeration(device); else acpi_device_set_enumerated(device); - ok: - list_for_each_entry(child, &device->children, node) - acpi_bus_attach(child, first_pass); +ok: + acpi_dev_for_each_child(device, acpi_bus_attach, first_pass); if (!skip && device->handler && device->handler->hotplug.notify_online) device->handler->hotplug.notify_online(device); + + return 0; } static int acpi_dev_get_first_consumer_dev_cb(struct acpi_dep_data *dep, void *data) @@ -2274,7 +2259,7 @@ static void acpi_scan_clear_dep_fn(struct work_struct *work) cdw = container_of(work, struct acpi_scan_clear_dep_work, work); acpi_scan_lock_acquire(); - acpi_bus_attach(cdw->adev, true); + acpi_bus_attach(cdw->adev, (void *)true); acpi_scan_lock_release(); acpi_dev_put(cdw->adev); @@ -2432,7 +2417,7 @@ int acpi_bus_scan(acpi_handle handle) if (!device) return -ENODEV; - acpi_bus_attach(device, true); + acpi_bus_attach(device, (void *)true); if (!acpi_bus_scan_second_pass) return 0; @@ -2446,25 +2431,17 @@ int acpi_bus_scan(acpi_handle handle) acpi_bus_check_add_2, NULL, NULL, (void **)&device); - acpi_bus_attach(device, false); + acpi_bus_attach(device, NULL); return 0; } EXPORT_SYMBOL(acpi_bus_scan); -/** - * acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects. - * @adev: Root of the ACPI namespace scope to walk. - * - * Must be called under acpi_scan_lock. 
- */ -void acpi_bus_trim(struct acpi_device *adev) +static int acpi_bus_trim_one(struct acpi_device *adev, void *not_used) { struct acpi_scan_handler *handler = adev->handler; - struct acpi_device *child; - list_for_each_entry_reverse(child, &adev->children, node) - acpi_bus_trim(child); + acpi_dev_for_each_child_reverse(adev, acpi_bus_trim_one, NULL); adev->flags.match_driver = false; if (handler) { @@ -2482,6 +2459,19 @@ void acpi_bus_trim(struct acpi_device *adev) acpi_device_set_power(adev, ACPI_STATE_D3_COLD); adev->flags.initialized = false; acpi_device_clear_enumerated(adev); + + return 0; +} + +/** + * acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects. + * @adev: Root of the ACPI namespace scope to walk. + * + * Must be called under acpi_scan_lock. + */ +void acpi_bus_trim(struct acpi_device *adev) +{ + acpi_bus_trim_one(adev, NULL); } EXPORT_SYMBOL_GPL(acpi_bus_trim); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 04ea1569df78..ad4b2987b3d6 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -360,6 +360,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "80E3"), }, }, + { + .callback = init_nvs_save_s3, + .ident = "Lenovo G40-45", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80E1"), + }, + }, /* * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using * the Low Power S0 Idle firmware interface (see @@ -816,6 +824,9 @@ static const struct platform_s2idle_ops acpi_s2idle_ops = { void __weak acpi_s2idle_setup(void) { + if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) + pr_info("Efficient low-power S0 idle declared\n"); + s2idle_set_ops(&acpi_s2idle_ops); } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index becc198e4c22..5d7f38016a24 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -349,6 +349,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_native, + /* Dell Inspiron N4010 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N4010"), + }, + }, + { + .callback = video_detect_force_native, /* Dell Vostro V131 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), @@ -430,7 +438,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = { .callback = video_detect_force_native, .ident = "Clevo NL5xRU", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), }, }, @@ -438,59 +445,75 @@ static const struct dmi_system_id video_detect_dmi_table[] = { .callback = video_detect_force_native, .ident = "Clevo NL5xRU", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), - DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), }, }, { .callback = video_detect_force_native, .ident = "Clevo NL5xRU", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), - DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), }, }, { .callback = video_detect_force_native, - .ident = "Clevo NL5xRU", + .ident = "Clevo NL5xNU", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), - DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), }, }, + /* + * The TongFang PF5PU1G, PF4NU1F, PF5NU1G, and PF5LUXG/TUXEDO BA15 Gen10, + * Pulse 14/15 Gen1, and Pulse 15 Gen2 have the same problem as the Clevo + * NL5xRU and 
NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description + * above. + */ { .callback = video_detect_force_native, - .ident = "Clevo NL5xRU", + .ident = "TongFang PF5PU1G", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), - DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), + DMI_MATCH(DMI_BOARD_NAME, "PF5PU1G"), }, }, { .callback = video_detect_force_native, - .ident = "Clevo NL5xNU", + .ident = "TongFang PF4NU1F", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PF4NU1F"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang PF4NU1F", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), - DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + DMI_MATCH(DMI_BOARD_NAME, "PULSE1401"), }, }, { .callback = video_detect_force_native, - .ident = "Clevo NL5xNU", + .ident = "TongFang PF5NU1G", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), - DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + DMI_MATCH(DMI_BOARD_NAME, "PF5NU1G"), }, }, { .callback = video_detect_force_native, - .ident = "Clevo NL5xNU", + .ident = "TongFang PF5NU1G", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), - DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "PULSE1501"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang PF5LUXG", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"), }, }, - /* * Desktops which falsely report a backlight and which our heuristics * for this do not catch. diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c index d2256326c73a..647f11cf165d 100644 --- a/drivers/acpi/viot.c +++ b/drivers/acpi/viot.c @@ -249,6 +249,26 @@ err_free: } /** + * acpi_viot_early_init - Test the presence of VIOT and enable ACS + * + * If the VIOT does exist, ACS must be enabled. This cannot be + * done in acpi_viot_init() which is called after the bus scan + */ +void __init acpi_viot_early_init(void) +{ +#ifdef CONFIG_PCI + acpi_status status; + struct acpi_table_header *hdr; + + status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr); + if (ACPI_FAILURE(status)) + return; + pci_request_acs(); + acpi_put_table(hdr); +#endif +} + +/** * acpi_viot_init - Parse the VIOT table * * Parse the VIOT table, prepare the list of endpoints to be used during DMA @@ -319,12 +339,6 @@ static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data) epid = ((domain_nr - ep->segment_start) << 16) + dev_id - ep->bdf_start + ep->endpoint_id; - /* - * If we found a PCI range managed by the viommu, we're - * the one that has to request ACS. 
- */ - pci_request_acs(); - return viot_dev_iommu_init(&pdev->dev, ep->viommu, epid); } diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index 2963229062f8..f9ac12b778e6 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -369,9 +369,6 @@ static int lps0_device_attach(struct acpi_device *adev, if (lps0_device_handle) return 0; - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) - return 0; - if (acpi_s2idle_vendor_amd()) { /* AMD0004, AMD0005, AMDI0005: * - Should use rev_id 0x0 @@ -397,7 +394,9 @@ static int lps0_device_attach(struct acpi_device *adev, lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1; acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n", ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask); - } else if (lps0_dsm_func_mask_microsoft > 0 && !strcmp(hid, "AMDI0007")) { + } else if (lps0_dsm_func_mask_microsoft > 0 && + (!strcmp(hid, "AMDI0007") || + !strcmp(hid, "AMDI0008"))) { lps0_dsm_func_mask_microsoft = -EINVAL; acpi_handle_debug(adev->handle, "_DSM Using AMD method\n"); } @@ -419,11 +418,15 @@ static int lps0_device_attach(struct acpi_device *adev, lpi_device_get_constraints(); /* - * Use suspend-to-idle by default if the default suspend mode was not - * set from the command line. + * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set in + * the FADT and the default suspend mode was not set from the command + * line. */ - if (mem_sleep_default > PM_SUSPEND_MEM && !acpi_sleep_default_s3) + if ((acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) && + mem_sleep_default > PM_SUSPEND_MEM && !acpi_sleep_default_s3) { mem_sleep_current = PM_SUSPEND_TO_IDLE; + pr_info("Low-power S0 idle used by default for system suspend\n"); + } /* * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index bb45a9c00514..1c9f4fb2595d 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -148,7 +148,7 @@ config SATA_AHCI_PLATFORM config AHCI_BRCM tristate "Broadcom AHCI SATA support" depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \ - ARCH_BCM_63XX || COMPILE_TEST + ARCH_BCMBCA || COMPILE_TEST select SATA_HOST help This option enables support for the AHCI SATA3 controller found on diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index 3d345d173556..61b4ccf88bf1 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -480,10 +480,10 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf) * RETURNS: * Determined xfermask. 
*/ -unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev, - const struct ata_acpi_gtm *gtm) +unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, + const struct ata_acpi_gtm *gtm) { - unsigned long xfer_mask = 0; + unsigned int xfer_mask = 0; unsigned int type; int unit; u8 mode; @@ -525,7 +525,7 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm) struct ata_device *dev; ata_for_each_dev(dev, &ap->link, ENABLED) { - unsigned long xfer_mask, udma_mask; + unsigned int xfer_mask, udma_mask; xfer_mask = ata_acpi_gtm_xfermask(dev, gtm); ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 9601fa92950a..826d41f341e4 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -93,7 +93,7 @@ struct ata_force_param { const char *name; u8 cbl; u8 spd_limit; - unsigned long xfer_mask; + unsigned int xfer_mask; unsigned int horkage_on; unsigned int horkage_off; u16 lflags_on; @@ -425,7 +425,7 @@ static void ata_force_xfermask(struct ata_device *dev) for (i = ata_force_tbl_size - 1; i >= 0; i--) { const struct ata_force_ent *fe = &ata_force_tbl[i]; - unsigned long pio_mask, mwdma_mask, udma_mask; + unsigned int pio_mask, mwdma_mask, udma_mask; if (fe->port != -1 && fe->port != dev->link->ap->print_id) continue; @@ -803,11 +803,11 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, * RETURNS: * Packed xfer_mask. */ -unsigned long ata_pack_xfermask(unsigned long pio_mask, - unsigned long mwdma_mask, - unsigned long udma_mask) +unsigned int ata_pack_xfermask(unsigned int pio_mask, + unsigned int mwdma_mask, + unsigned int udma_mask) { - return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | + return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); } @@ -823,8 +823,8 @@ EXPORT_SYMBOL_GPL(ata_pack_xfermask); * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. * Any NULL destination masks will be ignored. */ -void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask, - unsigned long *mwdma_mask, unsigned long *udma_mask) +void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask, + unsigned int *mwdma_mask, unsigned int *udma_mask) { if (pio_mask) *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO; @@ -857,7 +857,7 @@ static const struct ata_xfer_ent { * RETURNS: * Matching XFER_* value, 0xff if no match found. */ -u8 ata_xfer_mask2mode(unsigned long xfer_mask) +u8 ata_xfer_mask2mode(unsigned int xfer_mask) { int highbit = fls(xfer_mask) - 1; const struct ata_xfer_ent *ent; @@ -881,7 +881,7 @@ EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); * RETURNS: * Matching xfer_mask, 0 if no match found. */ -unsigned long ata_xfer_mode2mask(u8 xfer_mode) +unsigned int ata_xfer_mode2mask(u8 xfer_mode) { const struct ata_xfer_ent *ent; @@ -930,7 +930,7 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); * Constant C string representing highest speed listed in * @mode_mask, or the constant C string "<n/a>". 
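For reference, the unsigned long to unsigned int retyping running through this series is safe because a packed transfer mask only needs about 20 bits (7 PIO + 5 MWDMA + 8 UDMA mode bits). A round-trip sketch of the two helpers above, assuming the usual <linux/libata.h> context:

	unsigned int pio, mwdma, udma;
	unsigned int xfer_mask;

	/* Pack PIO0-4, MWDMA0-2 and UDMA0-5 into a single mask... */
	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA5);

	/* ...and split it apart again. */
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == ATA_PIO4, mwdma == ATA_MWDMA2, udma == ATA_UDMA5 */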
*/ -const char *ata_mode_string(unsigned long xfer_mask) +const char *ata_mode_string(unsigned int xfer_mask) { static const char * const xfer_mode_str[] = { "PIO0", @@ -1103,16 +1103,16 @@ static u64 ata_id_n_sectors(const u16 *id) if (ata_id_has_lba(id)) { if (ata_id_has_lba48(id)) return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); - else - return ata_id_u32(id, ATA_ID_LBA_CAPACITY); - } else { - if (ata_id_current_chs_valid(id)) - return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] * - id[ATA_ID_CUR_SECTORS]; - else - return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * - id[ATA_ID_SECTORS]; + + return ata_id_u32(id, ATA_ID_LBA_CAPACITY); } + + if (ata_id_current_chs_valid(id)) + return (u32)id[ATA_ID_CUR_CYLS] * (u32)id[ATA_ID_CUR_HEADS] * + (u32)id[ATA_ID_CUR_SECTORS]; + + return (u32)id[ATA_ID_CYLS] * (u32)id[ATA_ID_HEADS] * + (u32)id[ATA_ID_SECTORS]; } u64 ata_tf_to_lba48(const struct ata_taskfile *tf) @@ -1383,9 +1383,9 @@ static inline void ata_dump_id(struct ata_device *dev, const u16 *id) * RETURNS: * Computed xfermask */ -unsigned long ata_id_xfermask(const u16 *id) +unsigned int ata_id_xfermask(const u16 *id) { - unsigned long pio_mask, mwdma_mask, udma_mask; + unsigned int pio_mask, mwdma_mask, udma_mask; /* Usual case. Word 53 indicates word 64 is valid */ if (id[ATA_ID_FIELD_VALID] & (1 << 1)) { @@ -1467,10 +1467,10 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc) * RETURNS: * Zero on success, AC_ERR_* mask on failure */ -unsigned ata_exec_internal_sg(struct ata_device *dev, - struct ata_taskfile *tf, const u8 *cdb, - int dma_dir, struct scatterlist *sgl, - unsigned int n_elem, unsigned long timeout) +static unsigned ata_exec_internal_sg(struct ata_device *dev, + struct ata_taskfile *tf, const u8 *cdb, + int dma_dir, struct scatterlist *sgl, + unsigned int n_elem, unsigned int timeout) { struct ata_link *link = dev->link; struct ata_port *ap = link->ap; @@ -1645,7 +1645,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, unsigned ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf, const u8 *cdb, int dma_dir, void *buf, unsigned int buflen, - unsigned long timeout) + unsigned int timeout) { struct scatterlist *psg = NULL, sg; unsigned int n_elem = 0; @@ -2534,7 +2534,7 @@ int ata_dev_configure(struct ata_device *dev) struct ata_port *ap = dev->link->ap; bool print_info = ata_dev_print_info(dev); const u16 *id = dev->id; - unsigned long xfer_mask; + unsigned int xfer_mask; unsigned int err_mask; char revbuf[7]; /* XYZ-99\0 */ char fwrevbuf[ATA_ID_FW_REV_LEN+1]; @@ -3202,8 +3202,8 @@ u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) { char buf[32]; - unsigned long orig_mask, xfer_mask; - unsigned long pio_mask, mwdma_mask, udma_mask; + unsigned int orig_mask, xfer_mask; + unsigned int pio_mask, mwdma_mask, udma_mask; int quiet, highbit; quiet = !!(sel & ATA_DNXFER_QUIET); @@ -3381,7 +3381,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) /* step 1: calculate xfer_mask */ ata_for_each_dev(dev, link, ENABLED) { - unsigned long pio_mask, dma_mask; + unsigned int pio_mask, dma_mask; unsigned int mode_mask; mode_mask = ATA_DMA_MASK_ATA; @@ -4217,7 +4217,7 @@ static void ata_dev_xfermask(struct ata_device *dev) struct ata_link *link = dev->link; struct ata_port *ap = link->ap; struct ata_host *host = ap->host; - unsigned long xfer_mask; + unsigned int xfer_mask; /* controller modes available */ xfer_mask = ata_pack_xfermask(ap->pio_mask, @@ -4342,7 
+4342,7 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) { struct ata_taskfile tf; unsigned int err_mask; - unsigned long timeout = 0; + unsigned int timeout = 0; /* set up set-features taskfile */ ata_dev_dbg(dev, "set features - SATA features\n"); @@ -5776,7 +5776,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) /* set cable, sata_spd_limit and report */ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; - unsigned long xfer_mask; + unsigned int xfer_mask; /* set SATA cable type if still unset */ if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 3307ed45fe4d..ef4508d72c02 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -86,36 +86,36 @@ static const unsigned long ata_eh_reset_timeouts[] = { ULONG_MAX, /* > 1 min has elapsed, give up */ }; -static const unsigned long ata_eh_identify_timeouts[] = { +static const unsigned int ata_eh_identify_timeouts[] = { 5000, /* covers > 99% of successes and not too boring on failures */ 10000, /* combined time till here is enough even for media access */ 30000, /* for true idiots */ - ULONG_MAX, + UINT_MAX, }; -static const unsigned long ata_eh_revalidate_timeouts[] = { +static const unsigned int ata_eh_revalidate_timeouts[] = { 15000, /* Some drives are slow to read log pages when waking-up */ 15000, /* combined time till here is enough even for media access */ - ULONG_MAX, + UINT_MAX, }; -static const unsigned long ata_eh_flush_timeouts[] = { +static const unsigned int ata_eh_flush_timeouts[] = { 15000, /* be generous with flush */ 15000, /* ditto */ 30000, /* and even more generous */ - ULONG_MAX, + UINT_MAX, }; -static const unsigned long ata_eh_other_timeouts[] = { +static const unsigned int ata_eh_other_timeouts[] = { 5000, /* same rationale as identify timeout */ 10000, /* ditto */ /* but no merciful 30sec for other commands, it just isn't worth it */ - ULONG_MAX, + UINT_MAX, }; struct ata_eh_cmd_timeout_ent { const u8 *commands; - const unsigned long *timeouts; + const unsigned int *timeouts; }; /* The following table determines timeouts to use for EH internal @@ -326,7 +326,7 @@ static int ata_lookup_timeout_table(u8 cmd) * RETURNS: * Determined timeout. 
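The UINT_MAX entries act as end-of-table sentinels for timeout escalation: every time an internal command times out, the per-device index advances one slot until the next slot is the sentinel. A condensed sketch of that walk (simplified from the two helpers, not the verbatim kernel code):

static const unsigned int example_timeouts[] = {
	5000, 10000, 30000, UINT_MAX,
};

static unsigned int example_timeout_and_escalate(int *idx)
{
	unsigned int timeout = example_timeouts[*idx];

	/* Stop escalating once the next entry is the sentinel. */
	if (example_timeouts[*idx + 1] != UINT_MAX)
		(*idx)++;
	return timeout;
}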
*/ -unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd) +unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd) { struct ata_eh_context *ehc = &dev->link->eh_context; int ent = ata_lookup_timeout_table(cmd); @@ -361,7 +361,7 @@ void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd) return; idx = ehc->cmd_timeout_idx[dev->devno][ent]; - if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX) + if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != UINT_MAX) ehc->cmd_timeout_idx[dev->devno][ent]++; } @@ -802,11 +802,11 @@ void ata_port_wait_eh(struct ata_port *ap) } EXPORT_SYMBOL_GPL(ata_port_wait_eh); -static int ata_eh_nr_in_flight(struct ata_port *ap) +static unsigned int ata_eh_nr_in_flight(struct ata_port *ap) { struct ata_queued_cmd *qc; unsigned int tag; - int nr = 0; + unsigned int nr = 0; /* count only non-internal commands */ ata_qc_for_each(ap, qc, tag) { @@ -821,7 +821,7 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t) { struct ata_port *ap = from_timer(ap, t, fastdrain_timer); unsigned long flags; - int cnt; + unsigned int cnt; spin_lock_irqsave(ap->lock, flags); @@ -870,7 +870,7 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t) */ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) { - int cnt; + unsigned int cnt; /* already scheduled? */ if (ap->pflags & ATA_PFLAG_EH_PENDING) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 86dbb1cdfabd..9b999c0e8c37 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -539,13 +539,13 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) return rc; } -static int ata_ioc32(struct ata_port *ap) +static bool ata_ioc32(struct ata_port *ap) { if (ap->flags & ATA_FLAG_PIO_DMA) - return 1; + return true; if (ap->pflags & ATA_PFLAG_PIO32) - return 1; - return 0; + return true; + return false; } /* diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index c38027887499..a7e9a75410a3 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -9,7 +9,7 @@ * and various sysfs attributes to expose these topologies and management * interfaces to user-space. 
* - * There are 3 objects defined in in this class: + * There are 3 objects defined in this class: * - ata_port * - ata_link * - ata_device diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 926a7f41303d..98bc8649c63f 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -52,11 +52,7 @@ extern u64 ata_tf_read_block(const struct ata_taskfile *tf, extern unsigned ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf, const u8 *cdb, int dma_dir, void *buf, unsigned int buflen, - unsigned long timeout); -extern unsigned ata_exec_internal_sg(struct ata_device *dev, - struct ata_taskfile *tf, const u8 *cdb, - int dma_dir, struct scatterlist *sg, - unsigned int n_elem, unsigned long timeout); + unsigned int timeout); extern int ata_wait_ready(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)); extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, @@ -136,7 +132,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev); int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev); /* libata-eh.c */ -extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd); +extern unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd); extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd); extern void ata_eh_acquire(struct ata_port *ap); extern void ata_eh_release(struct ata_port *ap); diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c index ade4c3eee230..f8706ee427d2 100644 --- a/drivers/ata/pata_acpi.c +++ b/drivers/ata/pata_acpi.c @@ -97,7 +97,7 @@ static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device * this case the list of discovered valid modes obtained by ACPI probing */ -static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask) +static unsigned int pacpi_mode_filter(struct ata_device *adev, unsigned int mask) { struct pata_acpi *acpi = adev->link->ap->private_data; return mask & acpi->mask[adev->devno]; diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index 1b90cda27246..76ad0e73fe2a 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c @@ -115,7 +115,7 @@ static int ali_c2_cable_detect(struct ata_port *ap) * fix that later on. Also ensure we do not do UDMA on WDC drives */ -static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask) +static unsigned int ali_20_filter(struct ata_device *adev, unsigned int mask) { char model_num[ATA_ID_PROD_LEN + 1]; /* No DMA on anything but a disk for now */ diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index 154748cfcc79..f216f9d7b9ec 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -264,8 +264,8 @@ static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev) * cached during driver attach and are consulted to select transfer * mode. 
*/ -static unsigned long nv_mode_filter(struct ata_device *dev, - unsigned long xfer_mask) +static unsigned int nv_mode_filter(struct ata_device *dev, + unsigned int xfer_mask) { static const unsigned int udma_mask_map[] = { ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0, @@ -274,7 +274,7 @@ static unsigned long nv_mode_filter(struct ata_device *dev, char acpi_str[32] = ""; u32 saved_udma, udma; const struct ata_acpi_gtm *gtm; - unsigned long bios_limit = 0, acpi_limit = 0, limit; + unsigned int bios_limit = 0, acpi_limit = 0, limit; /* find out what BIOS configured */ udma = saved_udma = (unsigned long)ap->host->private_data; @@ -310,10 +310,10 @@ static unsigned long nv_mode_filter(struct ata_device *dev, cable detection result */ limit |= ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA2); - ata_port_dbg(ap, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, " - "BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n", - xfer_mask, limit, xfer_mask & limit, bios_limit, - saved_udma, acpi_limit, acpi_str); + ata_port_dbg(ap, + "nv_mode_filter: 0x%x&0x%x->0x%x, BIOS=0x%x (0x%x) ACPI=0x%x%s\n", + xfer_mask, limit, xfer_mask & limit, bios_limit, + saved_udma, acpi_limit, acpi_str); return xfer_mask & limit; } diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index c99e8f0708b3..7e441fb304d3 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -194,7 +194,7 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, * Block UDMA on devices that cause trouble with this controller. */ -static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask) +static unsigned int hpt366_filter(struct ata_device *adev, unsigned int mask) { if (adev->class == ATA_DEV_ATA) { if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 156f304ef051..ce3c5eaa7e76 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -23,7 +23,7 @@ #include <linux/libata.h> #define DRV_NAME "pata_hpt37x" -#define DRV_VERSION "0.6.25" +#define DRV_VERSION "0.6.30" struct hpt_clock { u8 xfer_speed; @@ -278,7 +278,7 @@ static const char * const bad_ata100_5[] = { * Block UDMA on devices that cause trouble with this controller. */ -static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask) +static unsigned int hpt370_filter(struct ata_device *adev, unsigned int mask) { if (adev->class == ATA_DEV_ATA) { if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) @@ -297,7 +297,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask) * Block UDMA on devices that cause trouble with this controller. */ -static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask) +static unsigned int hpt370a_filter(struct ata_device *adev, unsigned int mask) { if (adev->class == ATA_DEV_ATA) { if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) @@ -314,7 +314,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask) * The Marvell bridge chips used on the HighPoint SATA cards do not seem * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes... 
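All of these ->mode_filter hooks share the same contract, which the retyping keeps intact: take the proposed mask, clear the bits this controller/device pairing cannot handle, and return the remainder. A hedged sketch of the shape, with a hypothetical driver but real ata_id_is_sata()/ATA_MASK_MWDMA:

static unsigned int example_mode_filter(struct ata_device *adev,
					unsigned int mask)
{
	/* e.g. a bridge that cannot do MWDMA against SATA devices */
	if (ata_id_is_sata(adev->id))
		mask &= ~ATA_MASK_MWDMA;

	return mask;
}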
*/ -static unsigned long hpt372_filter(struct ata_device *adev, unsigned long mask) +static unsigned int hpt372_filter(struct ata_device *adev, unsigned int mask) { if (ata_id_is_sata(adev->id)) mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA); @@ -592,21 +592,19 @@ static struct ata_port_operations hpt374_fn1_port_ops = { /** * hpt37x_clock_slot - Turn timing to PC clock entry - * @freq: Reported frequency timing - * @base: Base timing + * @freq: Reported frequency in MHz * - * Turn the timing data intoa clock slot (0 for 33, 1 for 40, 2 for 50 + * Turn the timing data into a clock slot (0 for 33, 1 for 40, 2 for 50 * and 3 for 66Mhz) */ -static int hpt37x_clock_slot(unsigned int freq, unsigned int base) +static int hpt37x_clock_slot(unsigned int freq) { - unsigned int f = (base * freq) / 192; /* Mhz */ - if (f < 40) + if (freq < 40) return 0; /* 33Mhz slot */ - if (f < 45) + if (freq < 45) return 1; /* 40Mhz slot */ - if (f < 55) + if (freq < 55) return 2; /* 50Mhz slot */ return 3; /* 60Mhz slot */ } @@ -646,24 +644,57 @@ static int hpt37x_calibrate_dpll(struct pci_dev *dev) return 0; } -static u32 hpt374_read_freq(struct pci_dev *pdev) +static int hpt37x_pci_clock(struct pci_dev *pdev, unsigned int base) { - u32 freq; - unsigned long io_base = pci_resource_start(pdev, 4); + unsigned int freq; + u32 fcnt; - if (PCI_FUNC(pdev->devfn) & 1) { - struct pci_dev *pdev_0; + /* + * Some devices do not let this value be accessed via PCI space + * according to the old driver. In addition we must use the value + * from FN 0 on the HPT374. + */ + if (pdev->device == PCI_DEVICE_ID_TTI_HPT374 && + (PCI_FUNC(pdev->devfn) & 1)) { + struct pci_dev *pdev_fn0; - pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1); - /* Someone hot plugged the controller on us ? */ - if (pdev_0 == NULL) + pdev_fn0 = pci_get_slot(pdev->bus, pdev->devfn - 1); + /* Someone hot plugged the controller on us? */ + if (!pdev_fn0) return 0; - io_base = pci_resource_start(pdev_0, 4); - freq = inl(io_base + 0x90); - pci_dev_put(pdev_0); - } else - freq = inl(io_base + 0x90); - return freq; + fcnt = inl(pci_resource_start(pdev_fn0, 4) + 0x90); + pci_dev_put(pdev_fn0); + } else { + fcnt = inl(pci_resource_start(pdev, 4) + 0x90); + } + + if ((fcnt >> 12) != 0xABCDE) { + u32 total = 0; + int i; + u16 sr; + + dev_warn(&pdev->dev, "BIOS clock data not set\n"); + + /* This is the process the HPT371 BIOS is reported to use */ + for (i = 0; i < 128; i++) { + pci_read_config_word(pdev, 0x78, &sr); + total += sr & 0x1FF; + udelay(15); + } + fcnt = total / 128; + } + fcnt &= 0x1FF; + + freq = (fcnt * base) / 192; /* in MHz */ + + /* Clamp to bands */ + if (freq < 40) + return 33; + if (freq < 45) + return 40; + if (freq < 55) + return 50; + return 66; } /** @@ -770,7 +801,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) u8 rev = dev->revision; u8 irqmask; u8 mcr1; - u32 freq; + unsigned int freq; /* MHz */ int prefer_dpll = 1; unsigned long iobase = pci_resource_start(dev, 4); @@ -896,42 +927,16 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) if (chip_table == &hpt372a) outb(0x0e, iobase + 0x9c); - /* - * Some devices do not let this value be accessed via PCI space - * according to the old driver. In addition we must use the value - * from FN 0 on the HPT374. 
- */ - - if (chip_table == &hpt374) { - freq = hpt374_read_freq(dev); - if (freq == 0) - return -ENODEV; - } else - freq = inl(iobase + 0x90); - - if ((freq >> 12) != 0xABCDE) { - int i; - u16 sr; - u32 total = 0; - - dev_warn(&dev->dev, "BIOS has not set timing clocks\n"); - - /* This is the process the HPT371 BIOS is reported to use */ - for (i = 0; i < 128; i++) { - pci_read_config_word(dev, 0x78, &sr); - total += sr & 0x1FF; - udelay(15); - } - freq = total / 128; - } - freq &= 0x1FF; + freq = hpt37x_pci_clock(dev, chip_table->base); + if (!freq) + return -ENODEV; /* * Turn the frequency check into a band and then find a timing * table to match it. */ - clock_slot = hpt37x_clock_slot(freq, chip_table->base); + clock_slot = hpt37x_clock_slot(freq); if (chip_table->clocks[clock_slot] == NULL || prefer_dpll) { /* * We need to try PLL mode instead diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c index 1f6afd8ee29b..617c95522f43 100644 --- a/drivers/ata/pata_hpt3x2n.c +++ b/drivers/ata/pata_hpt3x2n.c @@ -24,7 +24,7 @@ #include <linux/libata.h> #define DRV_NAME "pata_hpt3x2n" -#define DRV_VERSION "0.3.18" +#define DRV_VERSION "0.3.19" enum { PCI66 = (1 << 1), @@ -113,7 +113,7 @@ static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed) * The Marvell bridge chips used on the HighPoint SATA cards do not seem * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes... */ -static unsigned long hpt372n_filter(struct ata_device *adev, unsigned long mask) +static unsigned int hpt372n_filter(struct ata_device *adev, unsigned int mask) { if (ata_id_is_sata(adev->id)) mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA); @@ -403,17 +403,20 @@ static int hpt3xn_calibrate_dpll(struct pci_dev *dev) return 0; } -static int hpt3x2n_pci_clock(struct pci_dev *pdev) +static int hpt3x2n_pci_clock(struct pci_dev *pdev, unsigned int base) { - unsigned long freq; + unsigned int freq; u32 fcnt; - unsigned long iobase = pci_resource_start(pdev, 4); - fcnt = inl(iobase + 0x90); /* Not PCI readable for some chips */ + /* + * Some devices do not let this value be accessed via PCI space + * according to the old driver. + */ + fcnt = inl(pci_resource_start(pdev, 4) + 0x90); if ((fcnt >> 12) != 0xABCDE) { + u32 total = 0; int i; u16 sr; - u32 total = 0; dev_warn(&pdev->dev, "BIOS clock data not set\n"); @@ -427,7 +430,7 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev) } fcnt &= 0x1FF; - freq = (fcnt * 77) / 192; + freq = (fcnt * base) / 192; /* in MHz */ /* Clamp to bands */ if (freq < 40) @@ -559,7 +562,7 @@ hpt372n: * 50 for UDMA100. 
Right now we always use 66 */ - pci_mhz = hpt3x2n_pci_clock(dev); + pci_mhz = hpt3x2n_pci_clock(dev, 77); f_low = (pci_mhz * 48) / 66; /* PCI Mhz for 66Mhz DPLL */ f_high = f_low + 2; /* Tolerance */ diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 42798402cf63..bfea2be2959a 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -1028,7 +1028,7 @@ static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv, } i++; } - dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n", + dev_dbg(priv->dev, "Supported masks: PIO=%x, MWDMA=%x, UDMA=%x\n", pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask); } diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index 4fbb3eed8b0b..4191aa61c8e4 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c @@ -57,7 +57,7 @@ static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline); static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev); static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev); static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc); -static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask); +static unsigned int pdc2027x_mode_filter(struct ata_device *adev, unsigned int mask); static int pdc2027x_cable_detect(struct ata_port *ap); static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed); @@ -251,7 +251,7 @@ static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline) * Block UDMA on devices that cause trouble with this controller. */ -static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask) +static unsigned int pdc2027x_mode_filter(struct ata_device *adev, unsigned int mask) { unsigned char model_num[ATA_ID_PROD_LEN + 1]; struct ata_device *pair = ata_dev_pair(adev); diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index e410fe44177f..c0bc4af0d196 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c @@ -150,7 +150,7 @@ static u8 serverworks_is_csb(struct pci_dev *pdev) * bug we hit. */ -static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned long mask) +static unsigned int serverworks_osb4_filter(struct ata_device *adev, unsigned int mask) { if (adev->class == ATA_DEV_ATA) mask &= ~ATA_MASK_UDMA; @@ -166,7 +166,7 @@ static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned l * Check the blacklist and disable UDMA5 if matched */ -static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned long mask) +static unsigned int serverworks_csb_filter(struct ata_device *adev, unsigned int mask) { const char *p; char model_num[ATA_ID_PROD_LEN + 1]; diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index b5b764e18adf..92e4cf05de2c 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c @@ -525,7 +525,7 @@ static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev) * Block UDMA6 on devices that do not support it. 
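For concreteness, the DPLL window computed just above works out as follows when the controller is tuned for a 66 MHz DPLL (a worked example only, not additional driver code):

	/* pci_mhz comes back clamped to 33/40/50/66 by *_pci_clock() */
	unsigned int pci_mhz = 66;
	unsigned int f_low  = (pci_mhz * 48) / 66;	/* == 48 */
	unsigned int f_high = f_low + 2;		/* == 50, the tolerance window */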
*/ -static unsigned long sis_133_mode_filter(struct ata_device *adev, unsigned long mask) +static unsigned int sis_133_mode_filter(struct ata_device *adev, unsigned int mask) { struct ata_port *ap = adev->link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 215c02d4056a..34f00f389932 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -352,7 +352,7 @@ static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev) * one breed of Transcend SSD. Return the updated mask. */ -static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask) +static unsigned int via_mode_filter(struct ata_device *dev, unsigned int mask) { struct ata_host *host = dev->link->ap->host; const struct via_isa_bridge *config = host->private_data; diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index de5bd02cad44..e3cff01201b8 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -4057,7 +4057,7 @@ static int mv_platform_probe(struct platform_device *pdev) /* * Simple resource validation .. */ - if (unlikely(pdev->num_resources != 2)) { + if (unlikely(pdev->num_resources != 1)) { dev_err(&pdev->dev, "invalid number of resources\n"); return -EINVAL; } diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 739e52cd4aba..55a10e6d4e2a 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -222,6 +222,9 @@ static void genpd_debug_remove(struct generic_pm_domain *genpd) { struct dentry *d; + if (!genpd_debugfs_dir) + return; + d = debugfs_lookup(genpd->name, genpd_debugfs_dir); debugfs_remove(d); } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 949907e2e242..997be3ac20a7 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1862,10 +1862,13 @@ int pm_runtime_force_suspend(struct device *dev) callback = RPM_GET_CALLBACK(dev, runtime_suspend); + dev_pm_enable_wake_irq_check(dev, true); ret = callback ? callback(dev) : 0; if (ret) goto err; + dev_pm_enable_wake_irq_complete(dev); + /* * If the device can stay in suspend after the system-wide transition * to the working state that will follow, drop the children counter of @@ -1882,6 +1885,7 @@ int pm_runtime_force_suspend(struct device *dev) return 0; err: + dev_pm_disable_wake_irq_check(dev, true); pm_runtime_enable(dev); return ret; } @@ -1915,9 +1919,11 @@ int pm_runtime_force_resume(struct device *dev) callback = RPM_GET_CALLBACK(dev, runtime_resume); + dev_pm_disable_wake_irq_check(dev, false); ret = callback ? callback(dev) : 0; if (ret) { pm_runtime_set_suspended(dev); + dev_pm_enable_wake_irq_check(dev, false); goto out; } diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 11a4ffe91367..e3befa2c1b66 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -501,36 +501,6 @@ void device_set_wakeup_capable(struct device *dev, bool capable) EXPORT_SYMBOL_GPL(device_set_wakeup_capable); /** - * device_init_wakeup - Device wakeup initialization. - * @dev: Device to handle. - * @enable: Whether or not to enable @dev as a wakeup device. - * - * By default, most devices should leave wakeup disabled. The exceptions are - * devices that everyone expects to be wakeup sources: keyboards, power buttons, - * possibly network interfaces, etc. 
Also, devices that don't generate their - * own wakeup requests but merely forward requests from one bus to another - * (like PCI bridges) should have wakeup enabled by default. - */ -int device_init_wakeup(struct device *dev, bool enable) -{ - int ret = 0; - - if (!dev) - return -EINVAL; - - if (enable) { - device_set_wakeup_capable(dev, true); - ret = device_wakeup_enable(dev); - } else { - device_wakeup_disable(dev); - device_set_wakeup_capable(dev, false); - } - - return ret; -} -EXPORT_SYMBOL_GPL(device_init_wakeup); - -/** * device_set_wakeup_enable - Enable or disable a device to wake up the system. * @dev: Device to handle. * @enable: enable/disable flag diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index d0f5bc827978..362e043e26d8 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -133,6 +133,12 @@ int regcache_init(struct regmap *map, const struct regmap_config *config) return -EINVAL; } + if (config->num_reg_defaults && !config->reg_defaults) { + dev_err(map->dev, + "num_reg_defaults is set but reg_defaults is NULL!\n"); + return -EINVAL; + } + for (i = 0; i < config->num_reg_defaults; i++) if (config->reg_defaults[i].reg % map->reg_stride) return -EINVAL; @@ -495,7 +501,8 @@ EXPORT_SYMBOL_GPL(regcache_drop_region); void regcache_cache_only(struct regmap *map, bool enable) { map->lock(map->lock_arg); - WARN_ON(map->cache_bypass && enable); + WARN_ON(map->cache_type != REGCACHE_NONE && + map->cache_bypass && enable); map->cache_only = enable; trace_regmap_cache_only(map, enable); map->unlock(map->lock_arg); @@ -531,7 +538,7 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty); * @enable: flag if changes should not be written to the cache * * When a register map is marked with the cache bypass option, writes - * to the register map API will only update the hardware and not the + * to the register map API will only update the hardware and not * the cache directly. This is useful when syncing the cache back to * the hardware.
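The cache-only and cache-bypass flags touched by these regcache fixes bracket opposite situations: cache_only while the device is unreachable, bypass while the cache must be left untouched. A minimal usage sketch against the real regcache API, assuming a live struct regmap *map:

	/* Device is powered down: writes land in the cache only. */
	regcache_cache_only(map, true);
	regmap_write(map, 0x10, 0xabcd);	/* cached, not sent to hardware */

	/* Device is back: replay the cached values to the hardware. */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	regcache_sync(map);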
*/ diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index a6db605707b0..4ef9488d05cd 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -30,6 +30,9 @@ struct regmap_irq_chip_data { int irq; int wake_count; + unsigned int mask_base; + unsigned int unmask_base; + void *status_reg_buf; unsigned int *main_status_buf; unsigned int *status_buf; @@ -39,33 +42,15 @@ struct regmap_irq_chip_data { unsigned int *type_buf; unsigned int *type_buf_def; unsigned int **virt_buf; + unsigned int **config_buf; unsigned int irq_reg_stride; - unsigned int type_reg_stride; - bool clear_status:1; -}; + unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data, + unsigned int base, int index); -static int sub_irq_reg(struct regmap_irq_chip_data *data, - unsigned int base_reg, int i) -{ - const struct regmap_irq_chip *chip = data->chip; - struct regmap *map = data->map; - struct regmap_irq_sub_irq_map *subreg; - unsigned int offset; - int reg = 0; - - if (!chip->sub_reg_offsets || !chip->not_fixed_stride) { - /* Assume linear mapping */ - reg = base_reg + (i * map->reg_stride * data->irq_reg_stride); - } else { - subreg = &chip->sub_reg_offsets[i]; - offset = subreg->offset[0]; - reg = base_reg + offset; - } - - return reg; -} + unsigned int clear_status:1; +}; static inline const struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data, @@ -74,21 +59,25 @@ struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data, return &data->chip->irqs[irq]; } -static void regmap_irq_lock(struct irq_data *data) +static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data) { - struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + struct regmap *map = data->map; - mutex_lock(&d->lock); + /* + * While possible that a user-defined ->get_irq_reg() callback might + * be linear enough to support bulk reads, most of the time it won't. + * Therefore only allow them if the default callback is being used. + */ + return data->irq_reg_stride == 1 && map->reg_stride == 1 && + data->get_irq_reg == regmap_irq_get_irq_reg_linear && + !map->use_single_read; } -static int regmap_irq_update_bits(struct regmap_irq_chip_data *d, - unsigned int reg, unsigned int mask, - unsigned int val) +static void regmap_irq_lock(struct irq_data *data) { - if (d->chip->mask_writeonly) - return regmap_write_bits(d->map, reg, mask, val); - else - return regmap_update_bits(d->map, reg, mask, val); + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + + mutex_lock(&d->lock); } static void regmap_irq_sync_unlock(struct irq_data *data) @@ -97,7 +86,6 @@ static void regmap_irq_sync_unlock(struct irq_data *data) struct regmap *map = d->map; int i, j, ret; u32 reg; - u32 unmask_offset; u32 val; if (d->chip->runtime_pm) { @@ -109,7 +97,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data) if (d->clear_status) { for (i = 0; i < d->chip->num_regs; i++) { - reg = sub_irq_reg(d, d->chip->status_base, i); + reg = d->get_irq_reg(d, d->chip->status_base, i); ret = regmap_read(map, reg, &val); if (ret) @@ -126,44 +114,32 @@ static void regmap_irq_sync_unlock(struct irq_data *data) * suppress pointless writes. 
*/ for (i = 0; i < d->chip->num_regs; i++) { - if (!d->chip->mask_base) - continue; + if (d->mask_base) { + reg = d->get_irq_reg(d, d->mask_base, i); + ret = regmap_update_bits(d->map, reg, + d->mask_buf_def[i], d->mask_buf[i]); + if (ret) + dev_err(d->map->dev, "Failed to sync masks in %x\n", + reg); + } - reg = sub_irq_reg(d, d->chip->mask_base, i); - if (d->chip->mask_invert) { - ret = regmap_irq_update_bits(d, reg, - d->mask_buf_def[i], ~d->mask_buf[i]); - } else if (d->chip->unmask_base) { - /* set mask with mask_base register */ - ret = regmap_irq_update_bits(d, reg, + if (d->unmask_base) { + reg = d->get_irq_reg(d, d->unmask_base, i); + ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], ~d->mask_buf[i]); - if (ret < 0) - dev_err(d->map->dev, - "Failed to sync unmasks in %x\n", + if (ret) + dev_err(d->map->dev, "Failed to sync masks in %x\n", reg); - unmask_offset = d->chip->unmask_base - - d->chip->mask_base; - /* clear mask with unmask_base register */ - ret = regmap_irq_update_bits(d, - reg + unmask_offset, - d->mask_buf_def[i], - d->mask_buf[i]); - } else { - ret = regmap_irq_update_bits(d, reg, - d->mask_buf_def[i], d->mask_buf[i]); } - if (ret != 0) - dev_err(d->map->dev, "Failed to sync masks in %x\n", - reg); - reg = sub_irq_reg(d, d->chip->wake_base, i); + reg = d->get_irq_reg(d, d->chip->wake_base, i); if (d->wake_buf) { if (d->chip->wake_invert) - ret = regmap_irq_update_bits(d, reg, + ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], ~d->wake_buf[i]); else - ret = regmap_irq_update_bits(d, reg, + ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], d->wake_buf[i]); if (ret != 0) @@ -180,7 +156,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data) * it'll be ignored in irq handler, then may introduce irq storm */ if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) { - reg = sub_irq_reg(d, d->chip->ack_base, i); + reg = d->get_irq_reg(d, d->chip->ack_base, i); /* some chips ack by write 0 */ if (d->chip->ack_invert) @@ -204,12 +180,12 @@ static void regmap_irq_sync_unlock(struct irq_data *data) for (i = 0; i < d->chip->num_type_reg; i++) { if (!d->type_buf_def[i]) continue; - reg = sub_irq_reg(d, d->chip->type_base, i); + reg = d->get_irq_reg(d, d->chip->type_base, i); if (d->chip->type_invert) - ret = regmap_irq_update_bits(d, reg, + ret = regmap_update_bits(d->map, reg, d->type_buf_def[i], ~d->type_buf[i]); else - ret = regmap_irq_update_bits(d, reg, + ret = regmap_update_bits(d->map, reg, d->type_buf_def[i], d->type_buf[i]); if (ret != 0) dev_err(d->map->dev, "Failed to sync type in %x\n", @@ -220,8 +196,8 @@ static void regmap_irq_sync_unlock(struct irq_data *data) if (d->chip->num_virt_regs) { for (i = 0; i < d->chip->num_virt_regs; i++) { for (j = 0; j < d->chip->num_regs; j++) { - reg = sub_irq_reg(d, d->chip->virt_reg_base[i], - j); + reg = d->get_irq_reg(d, d->chip->virt_reg_base[i], + j); ret = regmap_write(map, reg, d->virt_buf[i][j]); if (ret != 0) dev_err(d->map->dev, @@ -231,6 +207,17 @@ static void regmap_irq_sync_unlock(struct irq_data *data) } } + for (i = 0; i < d->chip->num_config_bases; i++) { + for (j = 0; j < d->chip->num_config_regs; j++) { + reg = d->get_irq_reg(d, d->chip->config_base[i], j); + ret = regmap_write(map, reg, d->config_buf[i][j]); + if (ret) + dev_err(d->map->dev, + "Failed to write config %x: %d\n", + reg, ret); + } + } + if (d->chip->runtime_pm) pm_runtime_put(map->dev); @@ -253,22 +240,19 @@ static void regmap_irq_enable(struct irq_data *data) struct regmap *map = d->map; const struct 
regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); unsigned int reg = irq_data->reg_offset / map->reg_stride; - unsigned int mask, type; - - type = irq_data->type.type_falling_val | irq_data->type.type_rising_val; + unsigned int mask; /* * The type_in_mask flag means that the underlying hardware uses - * separate mask bits for rising and falling edge interrupts, but - * we want to make them into a single virtual interrupt with - * configurable edge. + * separate mask bits for each interrupt trigger type, but we want + * to have a single logical interrupt with a configurable type. * - * If the interrupt we're enabling defines the falling or rising - * masks then instead of using the regular mask bits for this - * interrupt, use the value previously written to the type buffer - * at the corresponding offset in regmap_irq_set_type(). + * If the interrupt we're enabling defines any supported types + * then instead of using the regular mask bits for this interrupt, + * use the value previously written to the type buffer at the + * corresponding offset in regmap_irq_set_type(). */ - if (d->chip->type_in_mask && type) + if (d->chip->type_in_mask && irq_data->type.types_supported) mask = d->type_buf[reg] & irq_data->mask; else mask = irq_data->mask; @@ -293,7 +277,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type) struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); struct regmap *map = d->map; const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); - int reg; + int reg, ret; const struct regmap_irq_type *t = &irq_data->type; if ((t->types_supported & type) != type) @@ -333,9 +317,19 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type) return -EINVAL; } - if (d->chip->set_type_virt) - return d->chip->set_type_virt(d->virt_buf, type, data->hwirq, - reg); + if (d->chip->set_type_virt) { + ret = d->chip->set_type_virt(d->virt_buf, type, data->hwirq, + reg); + if (ret) + return ret; + } + + if (d->chip->set_type_config) { + ret = d->chip->set_type_config(d->config_buf, type, + irq_data, reg); + if (ret) + return ret; + } return 0; } @@ -376,14 +370,17 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, const struct regmap_irq_chip *chip = data->chip; struct regmap *map = data->map; struct regmap_irq_sub_irq_map *subreg; + unsigned int reg; int i, ret = 0; if (!chip->sub_reg_offsets) { - /* Assume linear mapping */ - ret = regmap_read(map, chip->status_base + - (b * map->reg_stride * data->irq_reg_stride), - &data->status_buf[b]); + reg = data->get_irq_reg(data, chip->status_base, b); + ret = regmap_read(map, reg, &data->status_buf[b]); } else { + /* + * Note we can't use ->get_irq_reg() here because the offsets + * in 'subreg' are *not* interchangeable with indices. + */ subreg = &chip->sub_reg_offsets[b]; for (i = 0; i < subreg->num_regs; i++) { unsigned int offset = subreg->offset[i]; @@ -449,10 +446,18 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) * sake of simplicity. and add bulk reads only if needed */ for (i = 0; i < chip->num_main_regs; i++) { - ret = regmap_read(map, chip->main_status + - (i * map->reg_stride - * data->irq_reg_stride), - &data->main_status_buf[i]); + /* + * For not_fixed_stride, don't use ->get_irq_reg(). + * It would produce an incorrect result. 
+ */ + if (data->chip->not_fixed_stride) + reg = chip->main_status + + i * map->reg_stride * data->irq_reg_stride; + else + reg = data->get_irq_reg(data, + chip->main_status, i); + + ret = regmap_read(map, reg, &data->main_status_buf[i]); if (ret) { dev_err(map->dev, "Failed to read IRQ status %d\n", @@ -481,8 +486,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) } } - } else if (!map->use_single_read && map->reg_stride == 1 && - data->irq_reg_stride == 1) { + } else if (regmap_irq_can_bulk_read_status(data)) { u8 *buf8 = data->status_reg_buf; u16 *buf16 = data->status_reg_buf; @@ -518,7 +522,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) } else { for (i = 0; i < data->chip->num_regs; i++) { - unsigned int reg = sub_irq_reg(data, + unsigned int reg = data->get_irq_reg(data, data->chip->status_base, i); ret = regmap_read(map, reg, &data->status_buf[i]); @@ -546,7 +550,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) data->status_buf[i] &= ~data->mask_buf[i]; if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) { - reg = sub_irq_reg(data, data->chip->ack_base, i); + reg = data->get_irq_reg(data, data->chip->ack_base, i); if (chip->ack_invert) ret = regmap_write(map, reg, @@ -607,6 +611,91 @@ static const struct irq_domain_ops regmap_domain_ops = { }; /** + * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback. + * @data: Data for the &struct regmap_irq_chip + * @base: Base register + * @index: Register index + * + * Returns the register address corresponding to the given @base and @index + * by the formula ``base + index * regmap_stride * irq_reg_stride``. + */ +unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data, + unsigned int base, int index) +{ + const struct regmap_irq_chip *chip = data->chip; + struct regmap *map = data->map; + + /* + * FIXME: This is for backward compatibility and should be removed + * when not_fixed_stride is dropped (it's only used by qcom-pm8008). + */ + if (chip->not_fixed_stride && chip->sub_reg_offsets) { + struct regmap_irq_sub_irq_map *subreg; + + subreg = &chip->sub_reg_offsets[0]; + return base + subreg->offset[0]; + } + + return base + index * map->reg_stride * data->irq_reg_stride; +} +EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear); + +/** + * regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback. + * @buf: Buffer containing configuration register values, this is a 2D array of + * `num_config_bases` rows, each of `num_config_regs` elements. + * @type: The requested IRQ type. + * @irq_data: The IRQ being configured. + * @idx: Index of the irq's config registers within each array `buf[i]` + * + * This is a &struct regmap_irq_chip->set_type_config callback suitable for + * chips with one config register. Register values are updated according to + * the &struct regmap_irq_type data associated with an IRQ. 
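Chips whose IRQ registers are not laid out linearly can now supply their own callback instead of the deprecated not_fixed_stride. A hedged sketch for a hypothetical layout with banks 0x40 apart and an extra 0x10 gap past bank 1 (the callback signature is the one introduced here):

static unsigned int example_get_irq_reg(struct regmap_irq_chip_data *data,
					unsigned int base, int index)
{
	/* banks sit 0x40 apart, with an extra 0x10 hole after bank 1 */
	return base + index * 0x40 + (index > 1 ? 0x10 : 0);
}

/* wired up in the chip description via: .get_irq_reg = example_get_irq_reg */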
+ */ +int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type, + const struct regmap_irq *irq_data, int idx) +{ + const struct regmap_irq_type *t = &irq_data->type; + + if (t->type_reg_mask) + buf[0][idx] &= ~t->type_reg_mask; + else + buf[0][idx] &= ~(t->type_falling_val | + t->type_rising_val | + t->type_level_low_val | + t->type_level_high_val); + + switch (type) { + case IRQ_TYPE_EDGE_FALLING: + buf[0][idx] |= t->type_falling_val; + break; + + case IRQ_TYPE_EDGE_RISING: + buf[0][idx] |= t->type_rising_val; + break; + + case IRQ_TYPE_EDGE_BOTH: + buf[0][idx] |= (t->type_falling_val | + t->type_rising_val); + break; + + case IRQ_TYPE_LEVEL_HIGH: + buf[0][idx] |= t->type_level_high_val; + break; + + case IRQ_TYPE_LEVEL_LOW: + buf[0][idx] |= t->type_level_low_val; + break; + + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple); + +/** * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling * * @fwnode: The firmware node where the IRQ domain should be added to. @@ -634,7 +723,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, int ret = -ENOMEM; int num_type_reg; u32 reg; - u32 unmask_offset; if (chip->num_regs <= 0) return -EINVAL; @@ -651,11 +739,19 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, } if (chip->not_fixed_stride) { + dev_warn(map->dev, "not_fixed_stride is deprecated; use ->get_irq_reg() instead"); + for (i = 0; i < chip->num_regs; i++) if (chip->sub_reg_offsets[i].num_regs != 1) return -EINVAL; } + if (chip->num_type_reg) + dev_warn(map->dev, "type registers are deprecated; use config registers instead"); + + if (chip->num_virt_regs || chip->virt_reg_base || chip->set_type_virt) + dev_warn(map->dev, "virtual registers are deprecated; use config registers instead"); + if (irq_base) { irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0); if (irq_base < 0) { @@ -671,30 +767,30 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (chip->num_main_regs) { d->main_status_buf = kcalloc(chip->num_main_regs, - sizeof(unsigned int), + sizeof(*d->main_status_buf), GFP_KERNEL); if (!d->main_status_buf) goto err_alloc; } - d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int), + d->status_buf = kcalloc(chip->num_regs, sizeof(*d->status_buf), GFP_KERNEL); if (!d->status_buf) goto err_alloc; - d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int), + d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf), GFP_KERNEL); if (!d->mask_buf) goto err_alloc; - d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int), + d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def), GFP_KERNEL); if (!d->mask_buf_def) goto err_alloc; if (chip->wake_base) { - d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int), + d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf), GFP_KERNEL); if (!d->wake_buf) goto err_alloc; @@ -703,11 +799,11 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, num_type_reg = chip->type_in_mask ? 
chip->num_regs : chip->num_type_reg; if (num_type_reg) { d->type_buf_def = kcalloc(num_type_reg, - sizeof(unsigned int), GFP_KERNEL); + sizeof(*d->type_buf_def), GFP_KERNEL); if (!d->type_buf_def) goto err_alloc; - d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int), + d->type_buf = kcalloc(num_type_reg, sizeof(*d->type_buf), GFP_KERNEL); if (!d->type_buf) goto err_alloc; @@ -724,13 +820,31 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, for (i = 0; i < chip->num_virt_regs; i++) { d->virt_buf[i] = kcalloc(chip->num_regs, - sizeof(unsigned int), + sizeof(**d->virt_buf), GFP_KERNEL); if (!d->virt_buf[i]) goto err_alloc; } } + if (chip->num_config_bases && chip->num_config_regs) { + /* + * Create config_buf[num_config_bases][num_config_regs] + */ + d->config_buf = kcalloc(chip->num_config_bases, + sizeof(*d->config_buf), GFP_KERNEL); + if (!d->config_buf) + goto err_alloc; + + for (i = 0; i < chip->num_config_bases; i++) { + d->config_buf[i] = kcalloc(chip->num_config_regs, + sizeof(**d->config_buf), + GFP_KERNEL); + if (!d->config_buf[i]) + goto err_alloc; + } + } + d->irq_chip = regmap_irq_chip; d->irq_chip.name = chip->name; d->irq = irq; @@ -738,18 +852,53 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->chip = chip; d->irq_base = irq_base; + if (chip->mask_base && chip->unmask_base && + !chip->mask_unmask_non_inverted) { + /* + * Chips that specify both mask_base and unmask_base used to + * get inverted mask behavior by default, with no way to ask + * for the normal, non-inverted behavior. This "inverted by + * default" behavior is deprecated, but we have to support it + * until existing drivers have been fixed. + * + * Existing drivers should be updated by swapping mask_base + * and unmask_base and setting mask_unmask_non_inverted=true. + * New drivers should always set the flag. + */ + dev_warn(map->dev, "mask_base and unmask_base are inverted, please fix it"); + + /* Might as well warn about mask_invert while we're at it... */ + if (chip->mask_invert) + dev_warn(map->dev, "mask_invert=true ignored"); + + d->mask_base = chip->unmask_base; + d->unmask_base = chip->mask_base; + } else if (chip->mask_invert) { + /* + * Swap the roles of mask_base and unmask_base if the bits are + * inverted. This is deprecated, drivers should use unmask_base + * directly.
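What the deprecation warning above asks of drivers, sketched for a hypothetical chip (all register offsets invented): let mask_base really mask, let unmask_base really unmask, and opt in with the new flag.

static const struct regmap_irq_chip example_chip = {
	.name = "example",
	.status_base = 0x00,
	.mask_base = 0x10,			/* writing 1 masks */
	.unmask_base = 0x14,			/* writing 1 unmasks */
	.mask_unmask_non_inverted = true,	/* opt in, avoids the warning */
	.num_regs = 2,
	/* .irqs, .num_irqs and the rest omitted for brevity */
};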
+ */ + dev_warn(map->dev, "mask_invert=true is deprecated; please switch to unmask_base"); + + d->mask_base = chip->unmask_base; + d->unmask_base = chip->mask_base; + } else { + d->mask_base = chip->mask_base; + d->unmask_base = chip->unmask_base; + } + if (chip->irq_reg_stride) d->irq_reg_stride = chip->irq_reg_stride; else d->irq_reg_stride = 1; - if (chip->type_reg_stride) - d->type_reg_stride = chip->type_reg_stride; + if (chip->get_irq_reg) + d->get_irq_reg = chip->get_irq_reg; else - d->type_reg_stride = 1; + d->get_irq_reg = regmap_irq_get_irq_reg_linear; - if (!map->use_single_read && map->reg_stride == 1 && - d->irq_reg_stride == 1) { + if (regmap_irq_can_bulk_read_status(d)) { d->status_reg_buf = kmalloc_array(chip->num_regs, map->format.val_bytes, GFP_KERNEL); @@ -766,35 +915,34 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, /* Mask all the interrupts by default */ for (i = 0; i < chip->num_regs; i++) { d->mask_buf[i] = d->mask_buf_def[i]; - if (!chip->mask_base) - continue; - reg = sub_irq_reg(d, d->chip->mask_base, i); + if (d->mask_base) { + reg = d->get_irq_reg(d, d->mask_base, i); + ret = regmap_update_bits(d->map, reg, + d->mask_buf_def[i], d->mask_buf[i]); + if (ret) { + dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", + reg, ret); + goto err_alloc; + } + } - if (chip->mask_invert) - ret = regmap_irq_update_bits(d, reg, - d->mask_buf[i], ~d->mask_buf[i]); - else if (d->chip->unmask_base) { - unmask_offset = d->chip->unmask_base - - d->chip->mask_base; - ret = regmap_irq_update_bits(d, - reg + unmask_offset, - d->mask_buf[i], - d->mask_buf[i]); - } else - ret = regmap_irq_update_bits(d, reg, - d->mask_buf[i], d->mask_buf[i]); - if (ret != 0) { - dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", - reg, ret); - goto err_alloc; + if (d->unmask_base) { + reg = d->get_irq_reg(d, d->unmask_base, i); + ret = regmap_update_bits(d->map, reg, + d->mask_buf_def[i], ~d->mask_buf[i]); + if (ret) { + dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", + reg, ret); + goto err_alloc; + } } if (!chip->init_ack_masked) continue; /* Ack masked but set interrupts */ - reg = sub_irq_reg(d, d->chip->status_base, i); + reg = d->get_irq_reg(d, d->chip->status_base, i); ret = regmap_read(map, reg, &d->status_buf[i]); if (ret != 0) { dev_err(map->dev, "Failed to read IRQ status: %d\n", @@ -806,7 +954,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->status_buf[i] = ~d->status_buf[i]; if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) { - reg = sub_irq_reg(d, d->chip->ack_base, i); + reg = d->get_irq_reg(d, d->chip->ack_base, i); if (chip->ack_invert) ret = regmap_write(map, reg, ~(d->status_buf[i] & d->mask_buf[i])); @@ -831,14 +979,14 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (d->wake_buf) { for (i = 0; i < chip->num_regs; i++) { d->wake_buf[i] = d->mask_buf_def[i]; - reg = sub_irq_reg(d, d->chip->wake_base, i); + reg = d->get_irq_reg(d, d->chip->wake_base, i); if (chip->wake_invert) - ret = regmap_irq_update_bits(d, reg, + ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], 0); else - ret = regmap_irq_update_bits(d, reg, + ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], d->wake_buf[i]); if (ret != 0) { @@ -851,7 +999,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (chip->num_type_reg && !chip->type_in_mask) { for (i = 0; i < chip->num_type_reg; ++i) { - reg = sub_irq_reg(d, d->chip->type_base, i); + reg = d->get_irq_reg(d, d->chip->type_base, i); ret = regmap_read(map, reg, 
&d->type_buf_def[i]); @@ -907,6 +1055,11 @@ err_alloc: kfree(d->virt_buf[i]); kfree(d->virt_buf); } + if (d->config_buf) { + for (i = 0; i < chip->num_config_bases; i++) + kfree(d->config_buf[i]); + kfree(d->config_buf); + } kfree(d); return ret; } @@ -947,7 +1100,7 @@ EXPORT_SYMBOL_GPL(regmap_add_irq_chip); void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) { unsigned int virq; - int hwirq; + int i, hwirq; if (!d) return; @@ -977,6 +1130,11 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) kfree(d->mask_buf); kfree(d->status_reg_buf); kfree(d->status_buf); + if (d->config_buf) { + for (i = 0; i < d->chip->num_config_bases; i++) + kfree(d->config_buf[i]); + kfree(d->config_buf); + } kfree(d); } EXPORT_SYMBOL_GPL(regmap_del_irq_chip); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index c3517ccc3159..fee221c5008c 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -882,6 +882,8 @@ struct regmap *__regmap_init(struct device *dev, if (config && config->read && config->write) { map->reg_read = _regmap_bus_read; + if (config->reg_update_bits) + map->reg_update_bits = config->reg_update_bits; /* Bulk read/write */ map->read = config->read; @@ -1298,6 +1300,9 @@ static void regmap_field_init(struct regmap_field *rm_field, rm_field->reg = reg_field.reg; rm_field->shift = reg_field.lsb; rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb); + + WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n"); + rm_field->id_size = reg_field.id_size; rm_field->id_offset = reg_field.id_offset; } @@ -2219,6 +2224,28 @@ int regmap_field_update_bits_base(struct regmap_field *field, EXPORT_SYMBOL_GPL(regmap_field_update_bits_base); /** + * regmap_field_test_bits() - Check if all specified bits are set in a + * register field. + * + * @field: Register field to operate on + * @bits: Bits to test + * + * Returns a negative error code if the underlying regmap_field_read() fails, + * 0 if at least one of the tested bits is not set and 1 if all tested bits + * are set. + */ +int regmap_field_test_bits(struct regmap_field *field, unsigned int bits) +{ + unsigned int val; + int ret; + + ret = regmap_field_read(field, &val); + if (ret) + return ret; + + return (val & bits) == bits; +} +EXPORT_SYMBOL_GPL(regmap_field_test_bits); + +/** * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a * register field with port ID * diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index fdb81f2794cd..e19fcab016ba 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -408,6 +408,15 @@ config BLK_DEV_RBD If unsure, say N. +config BLK_DEV_UBLK + tristate "Userspace block driver (Experimental)" + select IO_URING + help + io_uring based userspace block driver. Together with the ublk server, ublk + has been working well, but the interface with userspace and the command + data definition aren't finalized yet and might change according to future + requirements, so mark it as experimental for now.
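Returning to regmap_field_test_bits() added above, a short usage sketch; the register layout is invented, while REG_FIELD() and devm_regmap_field_alloc() are existing regmap API, and dev/map are assumed to be in scope:

	struct reg_field ready = REG_FIELD(0x04, 0, 1);	/* bits [1:0] of reg 0x04 */
	struct regmap_field *field;
	int ret;

	field = devm_regmap_field_alloc(dev, map, ready);
	if (IS_ERR(field))
		return PTR_ERR(field);

	ret = regmap_field_test_bits(field, 0x3);
	if (ret < 0)
		return ret;	/* the underlying read failed */
	/* ret == 1: both bits set; ret == 0: at least one bit clear */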
+ source "drivers/block/rnbd/Kconfig" endif # BLK_DEV diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 934a9c7c3a7c..be631352567e 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -39,4 +39,6 @@ obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/ obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/ +obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o + swim_mod-y := swim.o swim_asm.o diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 5a566f2fd533..4c8b2ba579ee 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1802,7 +1802,7 @@ static int fd_alloc_disk(int drive, int system) unit[drive].gendisk[system] = disk; err = add_disk(disk); if (err) - blk_cleanup_disk(disk); + put_disk(disk); return err; } diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 348adf335217..12b3ca8f6f4a 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -427,7 +427,7 @@ aoeblk_gdalloc(void *vp) return; out_disk_cleanup: - blk_cleanup_disk(gd); + put_disk(gd); err_tagset: blk_mq_free_tag_set(set); err_mempool: diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index b381d1c3ef32..3523dd82d7a0 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c @@ -277,7 +277,7 @@ freedev(struct aoedev *d) if (d->gd) { aoedisk_rm_debugfs(d); del_gendisk(d->gd); - blk_cleanup_disk(d->gd); + put_disk(d->gd); blk_mq_free_tag_set(&d->tag_set); } t = d->targets; diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index e232cc4fd444..9deb4df6bdb8 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -2031,7 +2031,7 @@ static void ataflop_probe(dev_t dev) return; cleanup_disk: - blk_cleanup_disk(unit[drive].disk[type]); + put_disk(unit[drive].disk[type]); unit[drive].disk[type] = NULL; } @@ -2045,7 +2045,6 @@ static void atari_floppy_cleanup(void) if (!unit[i].disk[type]) continue; del_gendisk(unit[i].disk[type]); - blk_cleanup_queue(unit[i].disk[type]->queue); put_disk(unit[i].disk[type]); } blk_mq_free_tag_set(&unit[i].tag_set); @@ -2064,7 +2063,7 @@ static void atari_cleanup_floppy_disk(struct atari_floppy_struct *fs) continue; if (fs->registered[type]) del_gendisk(fs->disk[type]); - blk_cleanup_disk(fs->disk[type]); + put_disk(fs->disk[type]); } blk_mq_free_tag_set(&fs->tag_set); } diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 6e3f2f0d2352..859499cd1ff8 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -256,7 +256,7 @@ static void copy_from_brd(void *dst, struct brd_device *brd, * Process a single bvec of a bio. 
*/ static int brd_do_bvec(struct brd_device *brd, struct page *page, - unsigned int len, unsigned int off, unsigned int op, + unsigned int len, unsigned int off, enum req_op op, sector_t sector) { void *mem; @@ -310,7 +310,7 @@ static void brd_submit_bio(struct bio *bio) } static int brd_rw_page(struct block_device *bdev, sector_t sector, - struct page *page, unsigned int op) + struct page *page, enum req_op op) { struct brd_device *brd = bdev->bd_disk->private_data; int err; @@ -419,7 +419,7 @@ static int brd_alloc(int i) return 0; out_cleanup_disk: - blk_cleanup_disk(disk); + put_disk(disk); out_free_dev: list_del(&brd->brd_list); kfree(brd); @@ -439,7 +439,7 @@ static void brd_cleanup(void) list_for_each_entry_safe(brd, next, &brd_devices, brd_list) { del_gendisk(brd->brd_disk); - blk_cleanup_disk(brd->brd_disk); + put_disk(brd->brd_disk); brd_free_pages(brd); list_del(&brd->brd_list); kfree(brd); diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index f5bcded3640d..e27478ae579c 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -124,12 +124,13 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b static int _drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, - sector_t sector, int op) + sector_t sector, enum req_op op) { struct bio *bio; /* we do all our meta data IO in aligned 4k blocks. */ const int size = 4096; - int err, op_flags = 0; + int err; + blk_opf_t op_flags = 0; device->md_io.done = 0; device->md_io.error = -ENODEV; @@ -174,7 +175,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, } int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, - sector_t sector, int op) + sector_t sector, enum req_op op) { int err; D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1); @@ -385,7 +386,7 @@ static int __al_write_transaction(struct drbd_device *device, struct al_transact write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates; rcu_read_unlock(); if (write_al_updates) { - if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) { + if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) { err = -EIO; drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); } else { diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 9e060e49b3f8..603f6828dd79 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -977,7 +977,7 @@ static void drbd_bm_endio(struct bio *bio) static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local) { struct drbd_device *device = ctx->device; - unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE; + enum req_op op = ctx->flags & BM_AIO_READ ? 
REQ_OP_READ : REQ_OP_WRITE; struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, GFP_NOIO, &drbd_md_io_bio_set); struct drbd_bitmap *b = device->bitmap; diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 4d3efaa20b7b..f15f2f041596 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1495,7 +1495,7 @@ extern int drbd_resync_finished(struct drbd_device *device); extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent); extern void drbd_md_put_buffer(struct drbd_device *device); extern int drbd_md_sync_page_io(struct drbd_device *device, - struct drbd_backing_dev *bdev, sector_t sector, int op); + struct drbd_backing_dev *bdev, sector_t sector, enum req_op op); extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int); extern void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev, unsigned int *done); @@ -1547,8 +1547,7 @@ extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device); extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector, bool throttle_if_app_is_waiting); extern int drbd_submit_peer_request(struct drbd_device *, - struct drbd_peer_request *, const unsigned, - const unsigned, const int); + struct drbd_peer_request *, blk_opf_t, int); extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *); extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64, sector_t, unsigned int, diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 2887350ae010..f3e4db16fd07 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2207,7 +2207,7 @@ void drbd_destroy_device(struct kref *kref) if (device->bitmap) /* should no longer be there. */ drbd_bm_cleanup(device); __free_page(device->md_io.page); - blk_cleanup_disk(device->vdisk); + put_disk(device->vdisk); kfree(device->rs_plan_s); /* not for_each_connection(connection, resource): @@ -2807,7 +2807,7 @@ out_no_minor_idr: out_no_bitmap: __free_page(device->md_io.page); out_no_io_page: - blk_cleanup_disk(disk); + put_disk(disk); out_no_disk: kref_put(&resource->kref, drbd_destroy_resource); kfree(device); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 6762be53f409..af4c7d65490b 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1621,8 +1621,7 @@ static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, stru /* TODO allocate from our own bio_set. */ int drbd_submit_peer_request(struct drbd_device *device, struct drbd_peer_request *peer_req, - const unsigned op, const unsigned op_flags, - const int fault_type) + const blk_opf_t opf, const int fault_type) { struct bio *bios = NULL; struct bio *bio; @@ -1668,8 +1667,7 @@ int drbd_submit_peer_request(struct drbd_device *device, * generated bio, but a bio allocated on behalf of the peer. 
*/ next_bio: - bio = bio_alloc(device->ldev->backing_bdev, nr_pages, op | op_flags, - GFP_NOIO); + bio = bio_alloc(device->ldev->backing_bdev, nr_pages, opf, GFP_NOIO); /* > peer_req->i.sector, unless this is the first bio */ bio->bi_iter.bi_sector = sector; bio->bi_private = peer_req; @@ -2060,7 +2058,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto spin_unlock_irq(&device->resource->req_lock); atomic_add(pi->size >> 9, &device->rs_sect_ev); - if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0, + if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, DRBD_FAULT_RS_WR) == 0) return 0; @@ -2383,14 +2381,14 @@ static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, co /* see also bio_flags_to_wire() * DRBD_REQ_*, because we need to semantically map the flags to data packet * flags and back. We may replicate to other kernel versions. */ -static unsigned long wire_flags_to_bio_flags(u32 dpf) +static blk_opf_t wire_flags_to_bio_flags(u32 dpf) { return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | (dpf & DP_FUA ? REQ_FUA : 0) | (dpf & DP_FLUSH ? REQ_PREFLUSH : 0); } -static unsigned long wire_flags_to_bio_op(u32 dpf) +static enum req_op wire_flags_to_bio_op(u32 dpf) { if (dpf & DP_ZEROES) return REQ_OP_WRITE_ZEROES; @@ -2543,7 +2541,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * struct drbd_peer_request *peer_req; struct p_data *p = pi->data; u32 peer_seq = be32_to_cpu(p->seq_num); - int op, op_flags; + enum req_op op; + blk_opf_t op_flags; u32 dp_flags; int err, tp; @@ -2681,7 +2680,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * peer_req->flags |= EE_CALL_AL_COMPLETE_IO; } - err = drbd_submit_peer_request(device, peer_req, op, op_flags, + err = drbd_submit_peer_request(device, peer_req, op | op_flags, DRBD_FAULT_DT_WR); if (!err) return 0; @@ -2979,7 +2978,7 @@ submit_for_resync: submit: update_receiver_timing_details(connection, drbd_submit_peer_request); inc_unacked(device); - if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, + if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, fault_type) == 0) return 0; @@ -4951,7 +4950,7 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac if (get_ldev(device)) { struct drbd_peer_request *peer_req; - const int op = REQ_OP_WRITE_ZEROES; + const enum req_op op = REQ_OP_WRITE_ZEROES; peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector, size, 0, GFP_NOIO); @@ -4969,7 +4968,8 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac spin_unlock_irq(&device->resource->req_lock); atomic_add(pi->size >> 9, &device->rs_sect_ev); - err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR); + err = drbd_submit_peer_request(device, peer_req, op, + DRBD_FAULT_RS_WR); if (err) { spin_lock_irq(&device->resource->req_lock); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index e64bcfba30ef..6d8dd14458c6 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -523,16 +523,14 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) { - char b[BDEVNAME_SIZE]; - if (!__ratelimit(&drbd_ratelimit_state)) return; - drbd_warn(device, "local %s IO error sector %llu+%u on %s\n", + drbd_warn(device, "local %s IO error sector %llu+%u on %pg\n", (req->rq_state & RQ_WRITE) ? 
"WRITE" : "READ", (unsigned long long)req->i.sector, req->i.size >> 9, - bdevname(device->ldev->backing_bdev, b)); + device->ldev->backing_bdev); } /* Helper for HANDED_OVER_TO_NETWORK. diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index af3051dd8912..0bb1a900c2d5 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -405,7 +405,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, spin_unlock_irq(&device->resource->req_lock); atomic_add(size >> 9, &device->rs_sect_ev); - if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, + if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, DRBD_FAULT_RS_RD) == 0) return 0; diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 015841f50f4e..ccad3d7b3ddd 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2859,7 +2859,7 @@ static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx, if (WARN(atomic_read(&usage_count) == 0, "warning: usage count=0, current_req=%p sect=%ld flags=%llx\n", current_req, (long)blk_rq_pos(current_req), - (unsigned long long) current_req->cmd_flags)) + (__force unsigned long long) current_req->cmd_flags)) return BLK_STS_IOERR; if (test_and_set_bit(0, &fdc_busy)) { @@ -4557,7 +4557,7 @@ out: return; cleanup_disk: - blk_cleanup_disk(disks[drive][type]); + put_disk(disks[drive][type]); disks[drive][type] = NULL; mutex_unlock(&floppy_probe_lock); } @@ -4753,7 +4753,7 @@ out_put_disk: if (!disks[drive][0]) break; del_timer_sync(&motor_off_timer[drive]); - blk_cleanup_disk(disks[drive][0]); + put_disk(disks[drive][0]); blk_mq_free_tag_set(&tag_sets[drive]); } return err; @@ -4985,7 +4985,7 @@ static void __exit floppy_module_exit(void) } for (i = 0; i < ARRAY_SIZE(floppy_type); i++) { if (disks[drive][i]) - blk_cleanup_disk(disks[drive][i]); + put_disk(disks[drive][i]); } blk_mq_free_tag_set(&tag_sets[drive]); } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 084f9b8a0ba3..e3c0ba93c1a3 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -2040,7 +2040,7 @@ static int loop_add(int i) return i; out_cleanup_disk: - blk_cleanup_disk(disk); + put_disk(disk); out_cleanup_tags: blk_mq_free_tag_set(&lo->tag_set); out_free_idr: @@ -2057,7 +2057,6 @@ static void loop_remove(struct loop_device *lo) { /* Make this loop device unreachable from pathname. */ del_gendisk(lo->lo_disk); - blk_cleanup_queue(lo->lo_disk->queue); blk_mq_free_tag_set(&lo->tag_set); mutex_lock(&loop_ctl_mutex); diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 27386a572ba4..562725d222a7 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -94,17 +94,12 @@ /* Device instance number, incremented each time a device is probed. */ static int instance; -static LIST_HEAD(online_list); -static LIST_HEAD(removing_list); -static DEFINE_SPINLOCK(dev_lock); - /* * Global variable used to hold the major block device number * allocated in mtip_init(). 
*/ static int mtip_major; static struct dentry *dfs_parent; -static struct dentry *dfs_device_status; static u32 cpu_use[NR_CPUS]; @@ -146,11 +141,8 @@ static bool mtip_check_surprise_removal(struct driver_data *dd) pci_read_config_word(dd->pdev, 0x00, &vendor_id); if (vendor_id == 0xFFFF) { dd->sr = true; - if (dd->queue) - blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue); - else - dev_warn(&dd->pdev->dev, - "%s: dd->queue is NULL\n", __func__); + if (dd->disk) + blk_mark_disk_dead(dd->disk); return true; /* device removed */ } @@ -2170,106 +2162,6 @@ static const struct attribute_group *mtip_disk_attr_groups[] = { NULL, }; -/* debugsfs entries */ - -static ssize_t show_device_status(struct device_driver *drv, char *buf) -{ - int size = 0; - struct driver_data *dd, *tmp; - unsigned long flags; - char id_buf[42]; - u16 status = 0; - - spin_lock_irqsave(&dev_lock, flags); - size += sprintf(&buf[size], "Devices Present:\n"); - list_for_each_entry_safe(dd, tmp, &online_list, online_list) { - if (dd->pdev) { - if (dd->port && - dd->port->identify && - dd->port->identify_valid) { - strlcpy(id_buf, - (char *) (dd->port->identify + 10), 21); - status = *(dd->port->identify + 141); - } else { - memset(id_buf, 0, 42); - status = 0; - } - - if (dd->port && - test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { - size += sprintf(&buf[size], - " device %s %s (ftl rebuild %d %%)\n", - dev_name(&dd->pdev->dev), - id_buf, - status); - } else { - size += sprintf(&buf[size], - " device %s %s\n", - dev_name(&dd->pdev->dev), - id_buf); - } - } - } - - size += sprintf(&buf[size], "Devices Being Removed:\n"); - list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) { - if (dd->pdev) { - if (dd->port && - dd->port->identify && - dd->port->identify_valid) { - strlcpy(id_buf, - (char *) (dd->port->identify+10), 21); - status = *(dd->port->identify + 141); - } else { - memset(id_buf, 0, 42); - status = 0; - } - - if (dd->port && - test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { - size += sprintf(&buf[size], - " device %s %s (ftl rebuild %d %%)\n", - dev_name(&dd->pdev->dev), - id_buf, - status); - } else { - size += sprintf(&buf[size], - " device %s %s\n", - dev_name(&dd->pdev->dev), - id_buf); - } - } - } - spin_unlock_irqrestore(&dev_lock, flags); - - return size; -} - -static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, - size_t len, loff_t *offset) -{ - int size = *offset; - char *buf; - int rv = 0; - - if (!len || *offset) - return 0; - - buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - size += show_device_status(NULL, buf); - - *offset = size <= len ? size : len; - size = copy_to_user(ubuf, buf, *offset); - if (size) - rv = -EFAULT; - - kfree(buf); - return rv ? rv : *offset; -} - static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, size_t len, loff_t *offset) { @@ -2363,13 +2255,6 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, return rv ? 
rv : *offset; } -static const struct file_operations mtip_device_status_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = mtip_hw_read_device_status, - .llseek = no_llseek, -}; - static const struct file_operations mtip_regs_fops = { .owner = THIS_MODULE, .open = simple_open, @@ -2556,7 +2441,7 @@ static void mtip_softirq_done_fn(struct request *rq) blk_mq_end_request(rq, cmd->status); } -static bool mtip_abort_cmd(struct request *req, void *data, bool reserved) +static bool mtip_abort_cmd(struct request *req, void *data) { struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req); struct driver_data *dd = data; @@ -2569,7 +2454,7 @@ static bool mtip_abort_cmd(struct request *req, void *data, bool reserved) return true; } -static bool mtip_queue_cmd(struct request *req, void *data, bool reserved) +static bool mtip_queue_cmd(struct request *req, void *data) { struct driver_data *dd = data; @@ -3297,26 +3182,12 @@ static int mtip_block_getgeo(struct block_device *dev, return 0; } -static int mtip_block_open(struct block_device *dev, fmode_t mode) +static void mtip_block_free_disk(struct gendisk *disk) { - struct driver_data *dd; + struct driver_data *dd = disk->private_data; - if (dev && dev->bd_disk) { - dd = (struct driver_data *) dev->bd_disk->private_data; - - if (dd) { - if (test_bit(MTIP_DDF_REMOVAL_BIT, - &dd->dd_flag)) { - return -ENODEV; - } - return 0; - } - } - return -ENODEV; -} - -static void mtip_block_release(struct gendisk *disk, fmode_t mode) -{ + ida_free(&rssd_index_ida, dd->index); + kfree(dd); } /* @@ -3326,13 +3197,12 @@ static void mtip_block_release(struct gendisk *disk, fmode_t mode) * layer. */ static const struct block_device_operations mtip_block_ops = { - .open = mtip_block_open, - .release = mtip_block_release, .ioctl = mtip_block_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mtip_block_compat_ioctl, #endif .getgeo = mtip_block_getgeo, + .free_disk = mtip_block_free_disk, .owner = THIS_MODULE }; @@ -3487,12 +3357,11 @@ static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq, return 0; } -static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req, - bool reserved) +static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req) { struct driver_data *dd = req->q->queuedata; - if (reserved) { + if (blk_mq_is_reserved_rq(req)) { struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req); cmd->status = BLK_STS_TIMEOUT; @@ -3664,7 +3533,7 @@ init_hw_cmds_error: disk_index_error: ida_free(&rssd_index_ida, index); ida_get_error: - blk_cleanup_disk(dd->disk); + put_disk(dd->disk); block_queue_alloc_init_error: blk_mq_free_tag_set(&dd->tags); block_queue_alloc_tag_error: @@ -3673,72 +3542,6 @@ protocol_init_error: return rv; } -static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv) -{ - struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); - - cmd->status = BLK_STS_IOERR; - blk_mq_complete_request(rq); - return true; -} - -/* - * Block layer deinitialization function. - * - * Called by the PCI layer as each P320 device is removed. - * - * @dd Pointer to the driver data structure. - * - * return value - * 0 - */ -static int mtip_block_remove(struct driver_data *dd) -{ - mtip_hw_debugfs_exit(dd); - - if (dd->mtip_svc_handler) { - set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags); - wake_up_interruptible(&dd->port->svc_wait); - kthread_stop(dd->mtip_svc_handler); - } - - if (!dd->sr) { - /* - * Explicitly wait here for IOs to quiesce, - * as mtip_standby_drive usually won't wait for IOs. 
- */ - if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS)) - mtip_standby_drive(dd); - } - else - dev_info(&dd->pdev->dev, "device %s surprise removal\n", - dd->disk->disk_name); - - blk_freeze_queue_start(dd->queue); - blk_mq_quiesce_queue(dd->queue); - blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd); - blk_mq_unquiesce_queue(dd->queue); - - if (dd->disk) { - if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) - del_gendisk(dd->disk); - if (dd->disk->queue) { - blk_cleanup_queue(dd->queue); - blk_mq_free_tag_set(&dd->tags); - dd->queue = NULL; - } - put_disk(dd->disk); - } - dd->disk = NULL; - - ida_free(&rssd_index_ida, dd->index); - - /* De-initialize the protocol layer. */ - mtip_hw_exit(dd); - - return 0; -} - /* * Function called by the PCI layer when just before the * machine shuts down. @@ -3755,23 +3558,14 @@ static int mtip_block_shutdown(struct driver_data *dd) { mtip_hw_shutdown(dd); - /* Delete our gendisk structure, and cleanup the blk queue. */ - if (dd->disk) { - dev_info(&dd->pdev->dev, - "Shutting down %s ...\n", dd->disk->disk_name); + dev_info(&dd->pdev->dev, + "Shutting down %s ...\n", dd->disk->disk_name); - if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) - del_gendisk(dd->disk); - if (dd->disk->queue) { - blk_cleanup_queue(dd->queue); - blk_mq_free_tag_set(&dd->tags); - } - put_disk(dd->disk); - dd->disk = NULL; - dd->queue = NULL; - } + if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) + del_gendisk(dd->disk); - ida_free(&rssd_index_ida, dd->index); + blk_mq_free_tag_set(&dd->tags); + put_disk(dd->disk); return 0; } @@ -3905,7 +3699,6 @@ static int mtip_pci_probe(struct pci_dev *pdev, const struct cpumask *node_mask; int cpu, i = 0, j = 0; int my_node = NUMA_NO_NODE; - unsigned long flags; /* Allocate memory for this devices private data. */ my_node = pcibus_to_node(pdev->bus); @@ -3952,9 +3745,6 @@ static int mtip_pci_probe(struct pci_dev *pdev, dd->pdev = pdev; dd->numa_node = my_node; - INIT_LIST_HEAD(&dd->online_list); - INIT_LIST_HEAD(&dd->remove_list); - memset(dd->workq_name, 0, 32); snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); @@ -4047,11 +3837,6 @@ static int mtip_pci_probe(struct pci_dev *pdev, else rv = 0; /* device in rebuild state, return 0 from probe */ - /* Add to online list even if in ftl rebuild */ - spin_lock_irqsave(&dev_lock, flags); - list_add(&dd->online_list, &online_list); - spin_unlock_irqrestore(&dev_lock, flags); - goto done; block_initialize_err: @@ -4085,14 +3870,7 @@ done: static void mtip_pci_remove(struct pci_dev *pdev) { struct driver_data *dd = pci_get_drvdata(pdev); - unsigned long flags, to; - - set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag); - - spin_lock_irqsave(&dev_lock, flags); - list_del_init(&dd->online_list); - list_add(&dd->remove_list, &removing_list); - spin_unlock_irqrestore(&dev_lock, flags); + unsigned long to; mtip_check_surprise_removal(dd); synchronize_irq(dd->pdev->irq); @@ -4109,11 +3887,35 @@ static void mtip_pci_remove(struct pci_dev *pdev) "Completion workers still active!\n"); } - blk_mark_disk_dead(dd->disk); set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); - /* Clean up the block layer. 
*/ - mtip_block_remove(dd); + if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) + del_gendisk(dd->disk); + + mtip_hw_debugfs_exit(dd); + + if (dd->mtip_svc_handler) { + set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags); + wake_up_interruptible(&dd->port->svc_wait); + kthread_stop(dd->mtip_svc_handler); + } + + if (!dd->sr) { + /* + * Explicitly wait here for IOs to quiesce, + * as mtip_standby_drive usually won't wait for IOs. + */ + if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS)) + mtip_standby_drive(dd); + } + else + dev_info(&dd->pdev->dev, "device %s surprise removal\n", + dd->disk->disk_name); + + blk_mq_free_tag_set(&dd->tags); + + /* De-initialize the protocol layer. */ + mtip_hw_exit(dd); if (dd->isr_workq) { destroy_workqueue(dd->isr_workq); @@ -4124,14 +3926,10 @@ static void mtip_pci_remove(struct pci_dev *pdev) pci_disable_msi(pdev); - spin_lock_irqsave(&dev_lock, flags); - list_del_init(&dd->remove_list); - spin_unlock_irqrestore(&dev_lock, flags); - - kfree(dd); - pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); pci_set_drvdata(pdev, NULL); + + put_disk(dd->disk); } /* @@ -4250,15 +4048,6 @@ static int __init mtip_init(void) pr_warn("Error creating debugfs parent\n"); dfs_parent = NULL; } - if (dfs_parent) { - dfs_device_status = debugfs_create_file("device_status", - 0444, dfs_parent, NULL, - &mtip_device_status_fops); - if (IS_ERR_OR_NULL(dfs_device_status)) { - pr_err("Error creating device_status node\n"); - dfs_device_status = NULL; - } - } /* Register our PCI operations. */ error = pci_register_driver(&mtip_pci_driver); diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 6816beb45352..f7328f19ac5c 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -149,7 +149,6 @@ enum { MTIP_DDF_RESUME_BIT = 6, MTIP_DDF_INIT_DONE_BIT = 7, MTIP_DDF_REBUILD_FAILED_BIT = 8, - MTIP_DDF_REMOVAL_BIT = 9, MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | (1 << MTIP_DDF_SEC_LOCK_BIT) | @@ -462,10 +461,6 @@ struct driver_data { int isr_binding; - struct list_head online_list; /* linkage for online list */ - - struct list_head remove_list; /* linkage for removing list */ - int unal_qdepth; /* qdepth of unaligned IO queue */ }; diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c index e094d2b8b5a9..d914156db2d8 100644 --- a/drivers/block/n64cart.c +++ b/drivers/block/n64cart.c @@ -157,7 +157,7 @@ static int __init n64cart_probe(struct platform_device *pdev) return 0; out_cleanup_disk: - blk_cleanup_disk(disk); + put_disk(disk); out: return err; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 07f3c139a3d7..f5d098a148cb 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -250,7 +250,7 @@ static void nbd_dev_remove(struct nbd_device *nbd) struct gendisk *disk = nbd->disk; del_gendisk(disk); - blk_cleanup_disk(disk); + put_disk(disk); blk_mq_free_tag_set(&nbd->tag_set); /* @@ -393,8 +393,7 @@ static u32 req_to_nbd_cmd_type(struct request *req) } } -static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, - bool reserved) +static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req) { struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); struct nbd_device *nbd = cmd->nbd; @@ -880,7 +879,7 @@ static void recv_work(struct work_struct *work) kfree(args); } -static bool nbd_clear_req(struct request *req, void *data, bool reserved) +static bool nbd_clear_req(struct request *req, void *data) { struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); @@ -1833,7 +1832,7 @@ static 
struct nbd_device *nbd_dev_add(int index, unsigned int refs) out_free_work: destroy_workqueue(nbd->recv_workq); out_err_disk: - blk_cleanup_disk(disk); + put_disk(disk); out_free_idr: mutex_lock(&nbd_index_mutex); idr_remove(&nbd_index_idr, index); diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 6b67088f4ea7..8b224ede2e33 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1310,7 +1310,7 @@ static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, } static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, - enum req_opf op, + enum req_op op, sector_t sector, sector_t nr_sectors) { @@ -1381,9 +1381,8 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd) } } -blk_status_t null_process_cmd(struct nullb_cmd *cmd, - enum req_opf op, sector_t sector, - unsigned int nr_sectors) +blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op, + sector_t sector, unsigned int nr_sectors) { struct nullb_device *dev = cmd->nq->dev; blk_status_t ret; @@ -1401,7 +1400,7 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, } static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector, - sector_t nr_sectors, enum req_opf op) + sector_t nr_sectors, enum req_op op) { struct nullb_device *dev = cmd->nq->dev; struct nullb *nullb = dev->nullb; @@ -1578,7 +1577,7 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) return nr; } -static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res) +static enum blk_eh_timer_return null_timeout_rq(struct request *rq) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); @@ -1737,7 +1736,7 @@ static void null_del_dev(struct nullb *nullb) null_restart_queue_async(nullb); } - blk_cleanup_disk(nullb->disk); + put_disk(nullb->disk); if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) blk_mq_free_tag_set(nullb->tag_set); @@ -2082,7 +2081,7 @@ static int null_add_dev(struct nullb_device *dev) out_cleanup_zone: null_free_zoned_dev(dev); out_cleanup_disk: - blk_cleanup_disk(nullb->disk); + put_disk(nullb->disk); out_cleanup_tags: if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) blk_mq_free_tag_set(nullb->tag_set); diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h index 8359b43842f2..6fbf0a1b2622 100644 --- a/drivers/block/null_blk/null_blk.h +++ b/drivers/block/null_blk/null_blk.h @@ -136,9 +136,8 @@ struct nullb { blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector, sector_t nr_sectors); -blk_status_t null_process_cmd(struct nullb_cmd *cmd, - enum req_opf op, sector_t sector, - unsigned int nr_sectors); +blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op, + sector_t sector, unsigned int nr_sectors); #ifdef CONFIG_BLK_DEV_ZONED int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q); @@ -146,9 +145,8 @@ int null_register_zoned_dev(struct nullb *nullb); void null_free_zoned_dev(struct nullb_device *dev); int null_report_zones(struct gendisk *disk, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); -blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, - enum req_opf op, sector_t sector, - sector_t nr_sectors); +blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op, + sector_t sector, sector_t nr_sectors); size_t null_zone_valid_read_len(struct nullb *nullb, sector_t sector, unsigned int len); 
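The nbd and null_blk hunks above, like the brd and drbd hunks earlier, are part of one tree-wide conversion: request operations now travel as the dedicated enum req_op type and flag sets as blk_opf_t, rather than both being bare unsigned ints. A sketch of how a driver splits the two halves of cmd_flags, using only the req_op() helper and REQ_OP_MASK that already exist in the block layer (demo_submit is an illustrative name, not from this patch):

	#include <linux/blk-mq.h>

	static blk_status_t demo_submit(struct request *rq)
	{
		const enum req_op op = req_op(rq);	/* REQ_OP_* alone */
		const blk_opf_t flags = rq->cmd_flags & ~REQ_OP_MASK;
						/* REQ_FUA, REQ_SYNC, ... */

		switch (op) {
		case REQ_OP_READ:
		case REQ_OP_WRITE:
			/* data path; honour flags such as REQ_FUA here */
			return BLK_STS_OK;
		case REQ_OP_FLUSH:
			return BLK_STS_OK;
		default:
			return BLK_STS_NOTSUPP;
		}
	}

Because the two halves are now distinct __bitwise types, mixing them up becomes a sparse warning instead of a silent truncation, which is what the (__force ...) casts in the drbd and rnbd hunks are annotating.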
#else @@ -164,7 +162,7 @@ static inline int null_register_zoned_dev(struct nullb *nullb) } static inline void null_free_zoned_dev(struct nullb_device *dev) {} static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, - enum req_opf op, sector_t sector, sector_t nr_sectors) + enum req_op op, sector_t sector, sector_t nr_sectors) { return BLK_STS_NOTSUPP; } diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h index 86d6c12c603c..6b2b370e786f 100644 --- a/drivers/block/null_blk/trace.h +++ b/drivers/block/null_blk/trace.h @@ -36,7 +36,7 @@ TRACE_EVENT(nullb_zone_op, TP_ARGS(cmd, zone_no, zone_cond), TP_STRUCT__entry( __array(char, disk, DISK_NAME_LEN) - __field(enum req_opf, op) + __field(enum req_op, op) __field(unsigned int, zone_no) __field(unsigned int, zone_cond) ), diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c index 2fdd7b20c224..55a69e48ef8b 100644 --- a/drivers/block/null_blk/zoned.c +++ b/drivers/block/null_blk/zoned.c @@ -159,7 +159,7 @@ int null_register_zoned_dev(struct nullb *nullb) struct nullb_device *dev = nullb->dev; struct request_queue *q = nullb->q; - blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM); + disk_set_zoned(nullb->disk, BLK_ZONED_HM); blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE); @@ -170,12 +170,12 @@ int null_register_zoned_dev(struct nullb *nullb) return ret; } else { blk_queue_chunk_sectors(q, dev->zone_size_sects); - q->nr_zones = blkdev_nr_zones(nullb->disk); + nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0); } blk_queue_max_zone_append_sectors(q, dev->zone_size_sects); - blk_queue_max_open_zones(q, dev->zone_max_open); - blk_queue_max_active_zones(q, dev->zone_max_active); + disk_set_max_open_zones(nullb->disk, dev->zone_max_open); + disk_set_max_active_zones(nullb->disk, dev->zone_max_active); return 0; } @@ -600,7 +600,7 @@ static blk_status_t null_reset_zone(struct nullb_device *dev, return BLK_STS_OK; } -static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op, +static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op, sector_t sector) { struct nullb_device *dev = cmd->nq->dev; @@ -653,7 +653,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op, return ret; } -blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op, +blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op, sector_t sector, sector_t nr_sectors) { struct nullb_device *dev; diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index f462ad67931a..a5ab40784119 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -956,7 +956,7 @@ out_unreg_cdrom: out_pi_release: pi_release(cd->pi); out_free_disk: - blk_cleanup_disk(cd->disk); + put_disk(cd->disk); out_free_tag_set: blk_mq_free_tag_set(&cd->tag_set); return ret; @@ -1029,7 +1029,7 @@ static void __exit pcd_exit(void) unregister_cdrom(&cd->info); del_gendisk(cd->disk); pi_release(cd->pi); - blk_cleanup_disk(cd->disk); + put_disk(cd->disk); blk_mq_free_tag_set(&cd->tag_set); } diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 3637c38c72f9..f8a75bc90f70 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -501,6 +501,8 @@ static enum action do_pd_io_start(void) return do_pd_read_start(); else return do_pd_write_start(); + default: + break; } return Fail; } @@ -943,7 +945,7 @@ static int pd_probe_drive(struct pd_unit *disk, 
int autoprobe, int port, goto cleanup_disk; return 0; cleanup_disk: - blk_cleanup_disk(disk->gd); + put_disk(disk->gd); put_disk: put_disk(p); disk->gd = NULL; @@ -1018,7 +1020,7 @@ static void __exit pd_exit(void) if (p) { disk->gd = NULL; del_gendisk(p); - blk_cleanup_disk(p); + put_disk(p); blk_mq_free_tag_set(&disk->tag_set); pi_release(disk->pi); } diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 292e9a4ce1b9..eec1b9fde245 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -975,7 +975,7 @@ static int __init pf_init_unit(struct pf_unit *pf, bool autoprobe, int port, out_pi_release: pi_release(pf->pi); out_free_disk: - blk_cleanup_disk(pf->disk); + put_disk(pf->disk); out_free_tag_set: blk_mq_free_tag_set(&pf->tag_set); return ret; @@ -1044,7 +1044,7 @@ static void __exit pf_exit(void) if (!pf->present) continue; del_gendisk(pf->disk); - blk_cleanup_disk(pf->disk); + put_disk(pf->disk); blk_mq_free_tag_set(&pf->tag_set); pi_release(pf->pi); } diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 789093375344..01a15dbd9cde 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2460,11 +2460,9 @@ static int pkt_seq_show(struct seq_file *m, void *p) { struct pktcdvd_device *pd = m->private; char *msg; - char bdev_buf[BDEVNAME_SIZE]; int states[PACKET_NUM_STATES]; - seq_printf(m, "Writer %s mapped to %s:\n", pd->name, - bdevname(pd->bdev, bdev_buf)); + seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev); seq_printf(m, "\nSettings:\n"); seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); @@ -2521,7 +2519,6 @@ static int pkt_seq_show(struct seq_file *m, void *p) static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) { int i; - char b[BDEVNAME_SIZE]; struct block_device *bdev; struct scsi_device *sdev; @@ -2534,8 +2531,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) if (!pd2) continue; if (pd2->bdev->bd_dev == dev) { - pkt_err(pd, "%s already setup\n", - bdevname(pd2->bdev, b)); + pkt_err(pd, "%pg already setup\n", pd2->bdev); return -EBUSY; } if (pd2->pkt_dev == dev) { @@ -2570,7 +2566,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) } proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd); - pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b)); + pkt_dbg(1, pd, "writer mapped to %pg\n", bdev); return 0; out_mem: @@ -2733,7 +2729,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) return 0; out_mem2: - blk_cleanup_disk(disk); + put_disk(disk); out_mem: mempool_exit(&pd->rb_pool); kfree(pd); @@ -2783,7 +2779,7 @@ static int pkt_remove_dev(dev_t pkt_dev) pkt_dbg(1, pd, "writer unmapped\n"); del_gendisk(pd->disk); - blk_cleanup_disk(pd->disk); + put_disk(pd->disk); mempool_exit(&pd->rb_pool); kfree(pd); diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 3054adf77460..36d7b36c60c7 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -473,7 +473,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) return 0; fail_cleanup_disk: - blk_cleanup_disk(gendisk); + put_disk(gendisk); fail_free_tag_set: blk_mq_free_tag_set(&priv->tag_set); fail_teardown: @@ -500,7 +500,7 @@ static void ps3disk_remove(struct ps3_system_bus_device *_dev) &ps3disk_mask); mutex_unlock(&ps3disk_mask_mutex); del_gendisk(priv->gendisk); - blk_cleanup_disk(priv->gendisk); + put_disk(priv->gendisk); blk_mq_free_tag_set(&priv->tag_set); dev_notice(&dev->sbd.core, "Synchronizing disk cache\n"); ps3disk_sync_cache(dev); diff 
--git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 4f90819e245e..d1e0fefec90b 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -761,7 +761,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev) return 0; out_cleanup_disk: - blk_cleanup_disk(gendisk); + put_disk(gendisk); out_cache_cleanup: remove_proc_entry(DEVICE_NAME, NULL); ps3vram_cache_cleanup(dev); @@ -792,7 +792,7 @@ static void ps3vram_remove(struct ps3_system_bus_device *dev) struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); del_gendisk(priv->gendisk); - blk_cleanup_disk(priv->gendisk); + put_disk(priv->gendisk); remove_proc_entry(DEVICE_NAME, NULL); ps3vram_cache_cleanup(dev); iounmap(priv->reports); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index ef9bc62e9afd..0d8ec2fe5740 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4729,7 +4729,7 @@ static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx, static void rbd_free_disk(struct rbd_device *rbd_dev) { - blk_cleanup_disk(rbd_dev->disk); + put_disk(rbd_dev->disk); blk_mq_free_tag_set(&rbd_dev->tag_set); rbd_dev->disk = NULL; } diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index 409c76b81aed..b8d9e2824d9c 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -1408,7 +1408,7 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx) blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue); err = add_disk(dev->gd); if (err) - blk_cleanup_disk(dev->gd); + put_disk(dev->gd); return err; } @@ -1630,7 +1630,7 @@ put_sess: static void destroy_gen_disk(struct rnbd_clt_dev *dev) { del_gendisk(dev->gd); - blk_cleanup_disk(dev->gd); + put_disk(dev->gd); } static void destroy_sysfs(struct rnbd_clt_dev *dev, @@ -1755,7 +1755,7 @@ static void rnbd_destroy_sessions(void) list_for_each_entry_safe(dev, tn, &sess->devs_list, list) { /* * Here unmap happens in parallel for only one reason: - * blk_cleanup_queue() takes around half a second, so + * del_gendisk() takes around half a second, so * on huge amount of devices the whole module unload * procedure takes minutes. 
*/ diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h index bfb08dd434d1..ea7ac8bca63c 100644 --- a/drivers/block/rnbd/rnbd-proto.h +++ b/drivers/block/rnbd/rnbd-proto.h @@ -229,9 +229,9 @@ static inline bool rnbd_flags_supported(u32 flags) return true; } -static inline u32 rnbd_to_bio_flags(u32 rnbd_opf) +static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf) { - u32 bio_opf; + blk_opf_t bio_opf; switch (rnbd_op(rnbd_opf)) { case RNBD_OP_READ: @@ -286,7 +286,8 @@ static inline u32 rq_to_rnbd_flags(struct request *rq) break; default: WARN(1, "Unknown request type %d (flags %llu)\n", - req_op(rq), (unsigned long long)rq->cmd_flags); + (__force u32)req_op(rq), + (__force unsigned long long)rq->cmd_flags); rnbd_opf = 0; } diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c index c5d0a0391165..c63017f6e421 100644 --- a/drivers/block/rnbd/rnbd-srv-dev.c +++ b/drivers/block/rnbd/rnbd-srv-dev.c @@ -28,7 +28,6 @@ struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags) goto err; dev->blk_open_flags = flags; - bdevname(dev->bdev, dev->name); return dev; diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h index 4309e5252469..8407d12f70af 100644 --- a/drivers/block/rnbd/rnbd-srv-dev.h +++ b/drivers/block/rnbd/rnbd-srv-dev.h @@ -15,7 +15,6 @@ struct rnbd_dev { struct block_device *bdev; fmode_t blk_open_flags; - char name[BDEVNAME_SIZE]; }; /** diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c index feaa76c5a342..297a6924ff4e 100644 --- a/drivers/block/rnbd/rnbd-srv-sysfs.c +++ b/drivers/block/rnbd/rnbd-srv-sysfs.c @@ -38,14 +38,13 @@ static struct kobj_type dev_ktype = { }; int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev, - struct block_device *bdev, - const char *dev_name) + struct block_device *bdev) { struct kobject *bdev_kobj; int ret; ret = kobject_init_and_add(&dev->dev_kobj, &dev_ktype, - rnbd_devs_kobj, dev_name); + rnbd_devs_kobj, "%pg", bdev); if (ret) { kobject_put(&dev->dev_kobj); return ret; diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index beaef43a67b9..0713014bf423 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -419,7 +419,7 @@ static struct rnbd_srv_sess_dev return sess_dev; } -static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id) +static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(struct block_device *bdev) { struct rnbd_srv_dev *dev; @@ -427,7 +427,7 @@ static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id) if (!dev) return ERR_PTR(-ENOMEM); - strscpy(dev->id, id, sizeof(dev->id)); + snprintf(dev->id, sizeof(dev->id), "%pg", bdev); kref_init(&dev->kref); INIT_LIST_HEAD(&dev->sess_dev_list); mutex_init(&dev->lock); @@ -512,7 +512,7 @@ rnbd_srv_get_or_create_srv_dev(struct rnbd_dev *rnbd_dev, int ret; struct rnbd_srv_dev *new_dev, *dev; - new_dev = rnbd_srv_init_srv_dev(rnbd_dev->name); + new_dev = rnbd_srv_init_srv_dev(rnbd_dev->bdev); if (IS_ERR(new_dev)) return new_dev; @@ -758,8 +758,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, */ mutex_lock(&srv_dev->lock); if (!srv_dev->dev_kobj.state_in_sysfs) { - ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev, - rnbd_dev->name); + ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev); if (ret) { mutex_unlock(&srv_dev->lock); rnbd_srv_err(srv_sess_dev, diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h index be2ae486d407..6926f9069dc4 100644 --- 
a/drivers/block/rnbd/rnbd-srv.h +++ b/drivers/block/rnbd/rnbd-srv.h @@ -68,8 +68,7 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev, /* rnbd-srv-sysfs.c */ int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev, - struct block_device *bdev, - const char *dir_name); + struct block_device *bdev); void rnbd_srv_destroy_dev_sysfs(struct rnbd_srv_dev *dev); int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev); void rnbd_srv_destroy_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev); diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index dd0a1a6fed29..fb855da971ee 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -886,7 +886,7 @@ static int probe_disk(struct vdc_port *port) return 0; out_cleanup_disk: - blk_cleanup_disk(g); + put_disk(g); out_free_tag: blk_mq_free_tag_set(&port->tag_set); return err; @@ -1070,7 +1070,7 @@ static void vdc_port_remove(struct vio_dev *vdev) del_timer_sync(&port->vio.timer); del_gendisk(port->disk); - blk_cleanup_disk(port->disk); + put_disk(port->disk); blk_mq_free_tag_set(&port->tag_set); vdc_free_tx_ring(port); diff --git a/drivers/block/swim.c b/drivers/block/swim.c index fef65a18d56f..42b4b6828690 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -783,7 +783,7 @@ static void swim_cleanup_floppy_disk(struct floppy_state *fs) if (fs->registered) del_gendisk(fs->disk); - blk_cleanup_disk(disk); + put_disk(disk); blk_mq_free_tag_set(&fs->tag_set); } diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 6c39f2c9f806..da811a7da03f 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -1238,7 +1238,7 @@ static int swim3_attach(struct macio_dev *mdev, return 0; out_cleanup_disk: - blk_cleanup_disk(disk); + put_disk(disk); out_free_tag_set: blk_mq_free_tag_set(&fs->tag_set); out_unregister: diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index 63b4f6431d2e..0e1a484cab0b 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c @@ -1377,7 +1377,7 @@ static void carm_free_disk(struct carm_host *host, unsigned int port_no) if (host->state > HST_DEV_ACTIVATE) del_gendisk(disk); - blk_cleanup_disk(disk); + put_disk(disk); } static int carm_init_shm(struct carm_host *host) @@ -1536,7 +1536,7 @@ err_out_free_majors: clear_bit(0, &carm_major_alloc); else if (host->major == 161) clear_bit(1, &carm_major_alloc); - blk_cleanup_queue(host->oob_q); + blk_mq_destroy_queue(host->oob_q); blk_mq_free_tag_set(&host->tag_set); err_out_dma_free: dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma); @@ -1570,7 +1570,7 @@ static void carm_remove_one (struct pci_dev *pdev) clear_bit(0, &carm_major_alloc); else if (host->major == 161) clear_bit(1, &carm_major_alloc); - blk_cleanup_queue(host->oob_q); + blk_mq_destroy_queue(host->oob_q); blk_mq_free_tag_set(&host->tag_set); dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma); iounmap(host->mmio); diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c new file mode 100644 index 000000000000..3f1906965ac8 --- /dev/null +++ b/drivers/block/ublk_drv.c @@ -0,0 +1,1545 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Userspace block device - a block device whose IO is handled from userspace + * + * Makes full use of the io_uring passthrough command for communicating with + * the ublk userspace daemon (ublksrvd) to handle basic IO requests.
+ * + * Copyright 2022 Ming Lei <ming.lei@redhat.com> + * + * (part of code stolen from loop.c) + */ +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/pagemap.h> +#include <linux/file.h> +#include <linux/stat.h> +#include <linux/errno.h> +#include <linux/major.h> +#include <linux/wait.h> +#include <linux/blkdev.h> +#include <linux/init.h> +#include <linux/swap.h> +#include <linux/slab.h> +#include <linux/compat.h> +#include <linux/mutex.h> +#include <linux/writeback.h> +#include <linux/completion.h> +#include <linux/highmem.h> +#include <linux/sysfs.h> +#include <linux/miscdevice.h> +#include <linux/falloc.h> +#include <linux/uio.h> +#include <linux/ioprio.h> +#include <linux/sched/mm.h> +#include <linux/uaccess.h> +#include <linux/cdev.h> +#include <linux/io_uring.h> +#include <linux/blk-mq.h> +#include <linux/delay.h> +#include <linux/mm.h> +#include <asm/page.h> +#include <linux/task_work.h> +#include <uapi/linux/ublk_cmd.h> + +#define UBLK_MINORS (1U << MINORBITS) + +/* All UBLK_F_* have to be included into UBLK_F_ALL */ +#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_URING_CMD_COMP_IN_TASK) + +struct ublk_rq_data { + struct callback_head work; +}; + +struct ublk_uring_cmd_pdu { + struct request *req; +}; + +/* + * The io command is active: its sqe has been received and its cqe isn't + * done yet. + * + * If the flag is set, the io command is owned by the ublk driver and is + * waiting for an incoming blk-mq request from the ublk block device. + * + * If the flag is cleared, the io command has been completed and is owned by + * the ublk server. + */ +#define UBLK_IO_FLAG_ACTIVE 0x01 + +/* + * The IO command has been completed via its cqe, is being handled by + * ublksrv, and is not committed yet. + * + * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE, so it can be used + * for cross verification. + */ +#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02 + +/* + * The IO command has been aborted; this flag is set only in case of + * !UBLK_IO_FLAG_ACTIVE.
+ * + * After this flag is observed, any pending or new incoming request + * associated with this io command will be failed immediately + */ +#define UBLK_IO_FLAG_ABORTED 0x04 + +struct ublk_io { + /* userspace buffer address from io cmd */ + __u64 addr; + unsigned int flags; + int res; + + struct io_uring_cmd *cmd; +}; + +struct ublk_queue { + int q_id; + int q_depth; + + unsigned long flags; + struct task_struct *ubq_daemon; + char *io_cmd_buf; + + unsigned long io_addr; /* mapped vm address */ + unsigned int max_io_sz; + bool abort_work_pending; + unsigned short nr_io_ready; /* how many ios setup */ + struct ublk_device *dev; + struct ublk_io ios[0]; +}; + +#define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ) + +struct ublk_device { + struct gendisk *ub_disk; + + char *__queues; + + unsigned short queue_size; + unsigned short bs_shift; + struct ublksrv_ctrl_dev_info dev_info; + + struct blk_mq_tag_set tag_set; + + struct cdev cdev; + struct device cdev_dev; + +#define UB_STATE_OPEN 0 +#define UB_STATE_USED 1 + unsigned long state; + int ub_number; + + struct mutex mutex; + + spinlock_t mm_lock; + struct mm_struct *mm; + + struct completion completion; + unsigned int nr_queues_ready; + atomic_t nr_aborted_queues; + + /* + * Our ubq->daemon may be killed without any notification, so + * monitor each queue's daemon periodically + */ + struct delayed_work monitor_work; + struct work_struct stop_work; +}; + +static dev_t ublk_chr_devt; +static struct class *ublk_chr_class; + +static DEFINE_IDR(ublk_index_idr); +static DEFINE_SPINLOCK(ublk_idr_lock); +static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */ + +static DEFINE_MUTEX(ublk_ctl_mutex); + +static struct miscdevice ublk_misc; + +static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq) +{ + if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) && + !(ubq->flags & UBLK_F_URING_CMD_COMP_IN_TASK)) + return true; + return false; +} + +static struct ublk_device *ublk_get_device(struct ublk_device *ub) +{ + if (kobject_get_unless_zero(&ub->cdev_dev.kobj)) + return ub; + return NULL; +} + +static void ublk_put_device(struct ublk_device *ub) +{ + put_device(&ub->cdev_dev); +} + +static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev, + int qid) +{ + return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]); +} + +static inline bool ublk_rq_has_data(const struct request *rq) +{ + return rq->bio && bio_has_data(rq->bio); +} + +static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq, + int tag) +{ + return (struct ublksrv_io_desc *) + &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]); +} + +static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id) +{ + return ublk_get_queue(ub, q_id)->io_cmd_buf; +} + +static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id) +{ + struct ublk_queue *ubq = ublk_get_queue(ub, q_id); + + return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc), + PAGE_SIZE); +} + +static void ublk_free_disk(struct gendisk *disk) +{ + struct ublk_device *ub = disk->private_data; + + clear_bit(UB_STATE_USED, &ub->state); + put_device(&ub->cdev_dev); +} + +static const struct block_device_operations ub_fops = { + .owner = THIS_MODULE, + .free_disk = ublk_free_disk, +}; + +#define UBLK_MAX_PIN_PAGES 32 + +struct ublk_map_data { + const struct ublk_queue *ubq; + const struct request *rq; + const struct ublk_io *io; + unsigned max_bytes; +}; + +struct ublk_io_iter { + struct page *pages[UBLK_MAX_PIN_PAGES]; + unsigned pg_off; /* offset in the 1st 
page in pages */ + int nr_pages; /* how many page pointers in pages */ + struct bio *bio; + struct bvec_iter iter; +}; + +static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data, + unsigned max_bytes, bool to_vm) +{ + const unsigned total = min_t(unsigned, max_bytes, + PAGE_SIZE - data->pg_off + + ((data->nr_pages - 1) << PAGE_SHIFT)); + unsigned done = 0; + unsigned pg_idx = 0; + + while (done < total) { + struct bio_vec bv = bio_iter_iovec(data->bio, data->iter); + const unsigned int bytes = min3(bv.bv_len, total - done, + (unsigned)(PAGE_SIZE - data->pg_off)); + void *bv_buf = bvec_kmap_local(&bv); + void *pg_buf = kmap_local_page(data->pages[pg_idx]); + + if (to_vm) + memcpy(pg_buf + data->pg_off, bv_buf, bytes); + else + memcpy(bv_buf, pg_buf + data->pg_off, bytes); + + kunmap_local(pg_buf); + kunmap_local(bv_buf); + + /* advance page array */ + data->pg_off += bytes; + if (data->pg_off == PAGE_SIZE) { + pg_idx += 1; + data->pg_off = 0; + } + + done += bytes; + + /* advance bio */ + bio_advance_iter_single(data->bio, &data->iter, bytes); + if (!data->iter.bi_size) { + data->bio = data->bio->bi_next; + if (data->bio == NULL) + break; + data->iter = data->bio->bi_iter; + } + } + + return done; +} + +static inline int ublk_copy_user_pages(struct ublk_map_data *data, + bool to_vm) +{ + const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0; + const unsigned long start_vm = data->io->addr; + unsigned int done = 0; + struct ublk_io_iter iter = { + .pg_off = start_vm & (PAGE_SIZE - 1), + .bio = data->rq->bio, + .iter = data->rq->bio->bi_iter, + }; + const unsigned int nr_pages = round_up(data->max_bytes + + (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT; + + while (done < nr_pages) { + const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES, + nr_pages - done); + unsigned i, len; + + iter.nr_pages = get_user_pages_fast(start_vm + + (done << PAGE_SHIFT), to_pin, gup_flags, + iter.pages); + if (iter.nr_pages <= 0) + return done == 0 ? 
iter.nr_pages : done; + len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm); + for (i = 0; i < iter.nr_pages; i++) { + if (to_vm) + set_page_dirty(iter.pages[i]); + put_page(iter.pages[i]); + } + data->max_bytes -= len; + done += iter.nr_pages; + } + + return done; +} + +static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req, + struct ublk_io *io) +{ + const unsigned int rq_bytes = blk_rq_bytes(req); + /* + * No zero copy: we delay copying WRITE request data into the ublksrv + * context, and the big benefit is that pinning pages in the current + * context is pretty fast; see ublk_copy_user_pages(). + */ + if (req_op(req) != REQ_OP_WRITE && req_op(req) != REQ_OP_FLUSH) + return rq_bytes; + + if (ublk_rq_has_data(req)) { + struct ublk_map_data data = { + .ubq = ubq, + .rq = req, + .io = io, + .max_bytes = rq_bytes, + }; + + ublk_copy_user_pages(&data, true); + + return rq_bytes - data.max_bytes; + } + return rq_bytes; +} + +static int ublk_unmap_io(const struct ublk_queue *ubq, + const struct request *req, + struct ublk_io *io) +{ + const unsigned int rq_bytes = blk_rq_bytes(req); + + if (req_op(req) == REQ_OP_READ && ublk_rq_has_data(req)) { + struct ublk_map_data data = { + .ubq = ubq, + .rq = req, + .io = io, + .max_bytes = io->res, + }; + + WARN_ON_ONCE(io->res > rq_bytes); + + ublk_copy_user_pages(&data, false); + + return io->res - data.max_bytes; + } + return rq_bytes; +} + +static inline unsigned int ublk_req_build_flags(struct request *req) +{ + unsigned flags = 0; + + if (req->cmd_flags & REQ_FAILFAST_DEV) + flags |= UBLK_IO_F_FAILFAST_DEV; + + if (req->cmd_flags & REQ_FAILFAST_TRANSPORT) + flags |= UBLK_IO_F_FAILFAST_TRANSPORT; + + if (req->cmd_flags & REQ_FAILFAST_DRIVER) + flags |= UBLK_IO_F_FAILFAST_DRIVER; + + if (req->cmd_flags & REQ_META) + flags |= UBLK_IO_F_META; + + if (req->cmd_flags & REQ_FUA) + flags |= UBLK_IO_F_FUA; + + if (req->cmd_flags & REQ_NOUNMAP) + flags |= UBLK_IO_F_NOUNMAP; + + if (req->cmd_flags & REQ_SWAP) + flags |= UBLK_IO_F_SWAP; + + return flags; +} + +static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req) +{ + struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); + struct ublk_io *io = &ubq->ios[req->tag]; + u32 ublk_op; + + switch (req_op(req)) { + case REQ_OP_READ: + ublk_op = UBLK_IO_OP_READ; + break; + case REQ_OP_WRITE: + ublk_op = UBLK_IO_OP_WRITE; + break; + case REQ_OP_FLUSH: + ublk_op = UBLK_IO_OP_FLUSH; + break; + case REQ_OP_DISCARD: + ublk_op = UBLK_IO_OP_DISCARD; + break; + case REQ_OP_WRITE_ZEROES: + ublk_op = UBLK_IO_OP_WRITE_ZEROES; + break; + default: + return BLK_STS_IOERR; + } + + /* need to translate since the in-kernel values may change */ + iod->op_flags = ublk_op | ublk_req_build_flags(req); + iod->nr_sectors = blk_rq_sectors(req); + iod->start_sector = blk_rq_pos(req); + iod->addr = io->addr; + + return BLK_STS_OK; +} + +static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu( + struct io_uring_cmd *ioucmd) +{ + return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu; +} + +static bool ubq_daemon_is_dying(struct ublk_queue *ubq) +{ + return ubq->ubq_daemon->flags & PF_EXITING; +} + +/* todo: handle partial completion */ +static void ublk_complete_rq(struct request *req) +{ + struct ublk_queue *ubq = req->mq_hctx->driver_data; + struct ublk_io *io = &ubq->ios[req->tag]; + unsigned int unmapped_bytes; + + /* fail the read IO if nothing was read */ + if (!io->res && req_op(req) == REQ_OP_READ) + io->res = -EIO; + + if (io->res < 0) { + blk_mq_end_request(req, errno_to_blk_status(io->res)); + return; + } + 
+ /* + * FLUSH or DISCARD usually won't return a byte count, so end them + * directly. + * + * Neither of the two needs unmapping. + */ + if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE) { + blk_mq_end_request(req, BLK_STS_OK); + return; + } + + /* for a READ request, write the data at iod->addr back to the rq buffers */ + unmapped_bytes = ublk_unmap_io(ubq, req, io); + + /* + * Should be nearly impossible, since the data was filled in just + * above; simply trim io->res for this unlikely case. + */ + if (unlikely(unmapped_bytes < io->res)) + io->res = unmapped_bytes; + + if (blk_update_request(req, BLK_STS_OK, io->res)) + blk_mq_requeue_request(req, true); + else + __blk_mq_end_request(req, BLK_STS_OK); +} + +/* + * __ublk_fail_req() may be called from the abort context or the ->ubq_daemon + * context while it is exiting, so locking is required. + * + * Also, aborting may not have started yet; keep in mind that one failed + * request may be issued by the block layer again. + */ +static void __ublk_fail_req(struct ublk_io *io, struct request *req) +{ + WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE); + + if (!(io->flags & UBLK_IO_FLAG_ABORTED)) { + io->flags |= UBLK_IO_FLAG_ABORTED; + blk_mq_end_request(req, BLK_STS_IOERR); + } +} + +#define UBLK_REQUEUE_DELAY_MS 3 + +static inline void __ublk_rq_task_work(struct request *req) +{ + struct ublk_queue *ubq = req->mq_hctx->driver_data; + struct ublk_device *ub = ubq->dev; + int tag = req->tag; + struct ublk_io *io = &ubq->ios[tag]; + bool task_exiting = current != ubq->ubq_daemon || + (current->flags & PF_EXITING); + unsigned int mapped_bytes; + + pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n", + __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags, + ublk_get_iod(ubq, req->tag)->addr); + + if (unlikely(task_exiting)) { + blk_mq_end_request(req, BLK_STS_IOERR); + mod_delayed_work(system_wq, &ub->monitor_work, 0); + return; + } + + mapped_bytes = ublk_map_io(ubq, req, io); + + /* partially mapped, update io descriptor */ + if (unlikely(mapped_bytes != blk_rq_bytes(req))) { + /* + * Nothing mapped, retry until we succeed. + * + * We may never succeed in mapping any bytes here because + * of OOM. TODO: reserve one buffer with a single page pinned + * to guarantee forward progress. + */ + if (unlikely(!mapped_bytes)) { + blk_mq_requeue_request(req, false); + blk_mq_delay_kick_requeue_list(req->q, + UBLK_REQUEUE_DELAY_MS); + return; + } + + ublk_get_iod(ubq, req->tag)->nr_sectors = + mapped_bytes >> 9; + } + + /* mark this cmd owned by ublksrv */ + io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV; + + /* + * Clear ACTIVE since we are done with this sqe/cmd slot. + * We can only accept a new io cmd while not active.
+ */ + io->flags &= ~UBLK_IO_FLAG_ACTIVE; + + /* tell ublksrv one io request is coming */ + io_uring_cmd_done(io->cmd, UBLK_IO_RES_OK, 0); +} + +static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd) +{ + struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); + + __ublk_rq_task_work(pdu->req); +} + +static void ublk_rq_task_work_fn(struct callback_head *work) +{ + struct ublk_rq_data *data = container_of(work, + struct ublk_rq_data, work); + struct request *req = blk_mq_rq_from_pdu(data); + + __ublk_rq_task_work(req); +} + +static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct ublk_queue *ubq = hctx->driver_data; + struct request *rq = bd->rq; + blk_status_t res; + + /* fill iod to slot in io cmd buffer */ + res = ublk_setup_iod(ubq, rq); + if (unlikely(res != BLK_STS_OK)) + return BLK_STS_IOERR; + + blk_mq_start_request(bd->rq); + + if (unlikely(ubq_daemon_is_dying(ubq))) { + fail: + mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0); + return BLK_STS_IOERR; + } + + if (ublk_can_use_task_work(ubq)) { + struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq); + enum task_work_notify_mode notify_mode = bd->last ? + TWA_SIGNAL_NO_IPI : TWA_NONE; + + if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode)) + goto fail; + } else { + struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd; + struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); + + pdu->req = rq; + io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb); + } + + return BLK_STS_OK; +} + +static void ublk_commit_rqs(struct blk_mq_hw_ctx *hctx) +{ + struct ublk_queue *ubq = hctx->driver_data; + + if (ublk_can_use_task_work(ubq)) + __set_notify_signal(ubq->ubq_daemon); +} + +static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data, + unsigned int hctx_idx) +{ + struct ublk_device *ub = driver_data; + struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num); + + hctx->driver_data = ubq; + return 0; +} + +static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req, + unsigned int hctx_idx, unsigned int numa_node) +{ + struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + + init_task_work(&data->work, ublk_rq_task_work_fn); + return 0; +} + +static const struct blk_mq_ops ublk_mq_ops = { + .queue_rq = ublk_queue_rq, + .commit_rqs = ublk_commit_rqs, + .init_hctx = ublk_init_hctx, + .init_request = ublk_init_rq, +}; + +static int ublk_ch_open(struct inode *inode, struct file *filp) +{ + struct ublk_device *ub = container_of(inode->i_cdev, + struct ublk_device, cdev); + + if (test_and_set_bit(UB_STATE_OPEN, &ub->state)) + return -EBUSY; + filp->private_data = ub; + return 0; +} + +static int ublk_ch_release(struct inode *inode, struct file *filp) +{ + struct ublk_device *ub = filp->private_data; + + clear_bit(UB_STATE_OPEN, &ub->state); + return 0; +} + +/* map pre-allocated per-queue cmd buffer to ublksrv daemon */ +static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct ublk_device *ub = filp->private_data; + size_t sz = vma->vm_end - vma->vm_start; + unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc); + unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT; + int q_id, ret = 0; + + spin_lock(&ub->mm_lock); + if (!ub->mm) + ub->mm = current->mm; + if (current->mm != ub->mm) + ret = -EINVAL; + spin_unlock(&ub->mm_lock); + + if (ret) + return ret; + + if (vma->vm_flags & VM_WRITE) + return -EPERM; + + end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz; + if 
(phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end) + return -EINVAL; + + q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz; + pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n", + __func__, q_id, current->pid, vma->vm_start, + phys_off, (unsigned long)sz); + + if (sz != ublk_queue_cmd_buf_size(ub, q_id)) + return -EINVAL; + + pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT; + return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); +} + +static void ublk_commit_completion(struct ublk_device *ub, + struct ublksrv_io_cmd *ub_cmd) +{ + u32 qid = ub_cmd->q_id, tag = ub_cmd->tag; + struct ublk_queue *ubq = ublk_get_queue(ub, qid); + struct ublk_io *io = &ubq->ios[tag]; + struct request *req; + + /* now this cmd slot is owned by the ublk driver */ + io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV; + io->res = ub_cmd->result; + + /* find the io request and complete it */ + req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag); + + if (req && likely(!blk_should_fake_timeout(req->q))) + ublk_complete_rq(req); +} + +/* + * When ->ubq_daemon is exiting, any new request is ended immediately, + * and any queued io command is drained, so it is safe to abort the + * queue locklessly + */ +static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq) +{ + int i; + + if (!ublk_get_device(ub)) + return; + + for (i = 0; i < ubq->q_depth; i++) { + struct ublk_io *io = &ubq->ios[i]; + + if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) { + struct request *rq; + + /* + * Either we fail the request or ublk_rq_task_work_fn + * will do it + */ + rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i); + if (rq) + __ublk_fail_req(io, rq); + } + } + ublk_put_device(ub); +} + +static void ublk_daemon_monitor_work(struct work_struct *work) +{ + struct ublk_device *ub = + container_of(work, struct ublk_device, monitor_work.work); + int i; + + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { + struct ublk_queue *ubq = ublk_get_queue(ub, i); + + if (ubq_daemon_is_dying(ubq)) { + schedule_work(&ub->stop_work); + + /* aborting the queue is for making forward progress */ + ublk_abort_queue(ub, ubq); + } + } + + /* + * We can't schedule monitor work after ublk_remove() is started. + * + * No need for ub->mutex: monitor work is canceled after the state is + * marked as DEAD, so the DEAD state is observed reliably.
+ */ + if (ub->dev_info.state != UBLK_S_DEV_DEAD) + schedule_delayed_work(&ub->monitor_work, + UBLK_DAEMON_MONITOR_PERIOD); +} + +static void ublk_cancel_queue(struct ublk_queue *ubq) +{ + int i; + + for (i = 0; i < ubq->q_depth; i++) { + struct ublk_io *io = &ubq->ios[i]; + + if (io->flags & UBLK_IO_FLAG_ACTIVE) + io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0); + } +} + +/* Cancel all pending commands, must be called after del_gendisk() returns */ +static void ublk_cancel_dev(struct ublk_device *ub) +{ + int i; + + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) + ublk_cancel_queue(ublk_get_queue(ub, i)); +} + +static void ublk_stop_dev(struct ublk_device *ub) +{ + mutex_lock(&ub->mutex); + if (ub->dev_info.state != UBLK_S_DEV_LIVE) + goto unlock; + + del_gendisk(ub->ub_disk); + ub->dev_info.state = UBLK_S_DEV_DEAD; + ub->dev_info.ublksrv_pid = -1; + ublk_cancel_dev(ub); + put_disk(ub->ub_disk); + ub->ub_disk = NULL; + unlock: + mutex_unlock(&ub->mutex); + cancel_delayed_work_sync(&ub->monitor_work); +} + +static inline bool ublk_queue_ready(struct ublk_queue *ubq) +{ + return ubq->nr_io_ready == ubq->q_depth; +} + +/* device can only be started after all IOs are ready */ +static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq) +{ + mutex_lock(&ub->mutex); + ubq->nr_io_ready++; + if (ublk_queue_ready(ubq)) { + ubq->ubq_daemon = current; + get_task_struct(ubq->ubq_daemon); + ub->nr_queues_ready++; + } + if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) + complete_all(&ub->completion); + mutex_unlock(&ub->mutex); +} + +static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) +{ + struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd; + struct ublk_device *ub = cmd->file->private_data; + struct ublk_queue *ubq; + struct ublk_io *io; + u32 cmd_op = cmd->cmd_op; + unsigned tag = ub_cmd->tag; + int ret = -EINVAL; + + pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n", + __func__, cmd->cmd_op, ub_cmd->q_id, tag, + ub_cmd->result); + + if (!(issue_flags & IO_URING_F_SQE128)) + goto out; + + if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues) + goto out; + + ubq = ublk_get_queue(ub, ub_cmd->q_id); + if (!ubq || ub_cmd->q_id != ubq->q_id) + goto out; + + if (ubq->ubq_daemon && ubq->ubq_daemon != current) + goto out; + + if (tag >= ubq->q_depth) + goto out; + + io = &ubq->ios[tag]; + + /* there is a pending io cmd, so something must be wrong */ + if (io->flags & UBLK_IO_FLAG_ACTIVE) { + ret = -EBUSY; + goto out; + } + + switch (cmd_op) { + case UBLK_IO_FETCH_REQ: + /* UBLK_IO_FETCH_REQ is only allowed before the queue is set up */ + if (ublk_queue_ready(ubq)) { + ret = -EBUSY; + goto out; + } + /* + * The io is being handled by the server, so COMMIT_AND_FETCH_REQ + * is expected instead of FETCH_REQ + */ + if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) + goto out; + /* FETCH_REQ has to provide an IO buffer */ + if (!ub_cmd->addr) + goto out; + io->cmd = cmd; + io->flags |= UBLK_IO_FLAG_ACTIVE; + io->addr = ub_cmd->addr; + + ublk_mark_io_ready(ub, ubq); + break; + case UBLK_IO_COMMIT_AND_FETCH_REQ: + /* COMMIT_AND_FETCH_REQ has to provide an IO buffer */ + if (!ub_cmd->addr) + goto out; + if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) + goto out; + io->addr = ub_cmd->addr; + io->flags |= UBLK_IO_FLAG_ACTIVE; + io->cmd = cmd; + ublk_commit_completion(ub, ub_cmd); + break; + default: + goto out; + } + return -EIOCBQUEUED; + + out: + io_uring_cmd_done(cmd, ret, 0); + pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n", + __func__, cmd_op, tag, ret, io->flags); + return
-EIOCBQUEUED; +} + +static const struct file_operations ublk_ch_fops = { + .owner = THIS_MODULE, + .open = ublk_ch_open, + .release = ublk_ch_release, + .llseek = no_llseek, + .uring_cmd = ublk_ch_uring_cmd, + .mmap = ublk_ch_mmap, +}; + +static void ublk_deinit_queue(struct ublk_device *ub, int q_id) +{ + int size = ublk_queue_cmd_buf_size(ub, q_id); + struct ublk_queue *ubq = ublk_get_queue(ub, q_id); + + if (ubq->ubq_daemon) + put_task_struct(ubq->ubq_daemon); + if (ubq->io_cmd_buf) + free_pages((unsigned long)ubq->io_cmd_buf, get_order(size)); +} + +static int ublk_init_queue(struct ublk_device *ub, int q_id) +{ + struct ublk_queue *ubq = ublk_get_queue(ub, q_id); + gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO; + void *ptr; + int size; + + ubq->flags = ub->dev_info.flags; + ubq->q_id = q_id; + ubq->q_depth = ub->dev_info.queue_depth; + size = ublk_queue_cmd_buf_size(ub, q_id); + + ptr = (void *) __get_free_pages(gfp_flags, get_order(size)); + if (!ptr) + return -ENOMEM; + + ubq->io_cmd_buf = ptr; + ubq->dev = ub; + return 0; +} + +static void ublk_deinit_queues(struct ublk_device *ub) +{ + int nr_queues = ub->dev_info.nr_hw_queues; + int i; + + if (!ub->__queues) + return; + + for (i = 0; i < nr_queues; i++) + ublk_deinit_queue(ub, i); + kfree(ub->__queues); +} + +static int ublk_init_queues(struct ublk_device *ub) +{ + int nr_queues = ub->dev_info.nr_hw_queues; + int depth = ub->dev_info.queue_depth; + int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io); + int i, ret = -ENOMEM; + + ub->queue_size = ubq_size; + ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL); + if (!ub->__queues) + return ret; + + for (i = 0; i < nr_queues; i++) { + if (ublk_init_queue(ub, i)) + goto fail; + } + + init_completion(&ub->completion); + return 0; + + fail: + ublk_deinit_queues(ub); + return ret; +} + +static int ublk_alloc_dev_number(struct ublk_device *ub, int idx) +{ + int i = idx; + int err; + + spin_lock(&ublk_idr_lock); + /* allocate id, if @id >= 0, we're requesting that specific id */ + if (i >= 0) { + err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT); + if (err == -ENOSPC) + err = -EEXIST; + } else { + err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT); + } + spin_unlock(&ublk_idr_lock); + + if (err >= 0) + ub->ub_number = err; + + return err; +} + +static void ublk_free_dev_number(struct ublk_device *ub) +{ + spin_lock(&ublk_idr_lock); + idr_remove(&ublk_index_idr, ub->ub_number); + wake_up_all(&ublk_idr_wq); + spin_unlock(&ublk_idr_lock); +} + +static void ublk_cdev_rel(struct device *dev) +{ + struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev); + + blk_mq_free_tag_set(&ub->tag_set); + ublk_deinit_queues(ub); + ublk_free_dev_number(ub); + mutex_destroy(&ub->mutex); + kfree(ub); +} + +static int ublk_add_chdev(struct ublk_device *ub) +{ + struct device *dev = &ub->cdev_dev; + int minor = ub->ub_number; + int ret; + + dev->parent = ublk_misc.this_device; + dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor); + dev->class = ublk_chr_class; + dev->release = ublk_cdev_rel; + device_initialize(dev); + + ret = dev_set_name(dev, "ublkc%d", minor); + if (ret) + goto fail; + + cdev_init(&ub->cdev, &ublk_ch_fops); + ret = cdev_device_add(&ub->cdev, dev); + if (ret) + goto fail; + return 0; + fail: + put_device(dev); + return ret; +} + +static void ublk_stop_work_fn(struct work_struct *work) +{ + struct ublk_device *ub = + container_of(work, struct ublk_device, stop_work); + + ublk_stop_dev(ub); +} + +/* align maximum I/O size to PAGE_SIZE */ +static 
void ublk_align_max_io_size(struct ublk_device *ub) +{ + unsigned int max_rq_bytes = ub->dev_info.rq_max_blocks << ub->bs_shift; + + ub->dev_info.rq_max_blocks = + round_down(max_rq_bytes, PAGE_SIZE) >> ub->bs_shift; +} + +static int ublk_add_tag_set(struct ublk_device *ub) +{ + ub->tag_set.ops = &ublk_mq_ops; + ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues; + ub->tag_set.queue_depth = ub->dev_info.queue_depth; + ub->tag_set.numa_node = NUMA_NO_NODE; + ub->tag_set.cmd_size = sizeof(struct ublk_rq_data); + ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + ub->tag_set.driver_data = ub; + return blk_mq_alloc_tag_set(&ub->tag_set); +} + +static void ublk_remove(struct ublk_device *ub) +{ + ublk_stop_dev(ub); + cancel_work_sync(&ub->stop_work); + cdev_device_del(&ub->cdev, &ub->cdev_dev); + put_device(&ub->cdev_dev); +} + +static struct ublk_device *ublk_get_device_from_id(int idx) +{ + struct ublk_device *ub = NULL; + + if (idx < 0) + return NULL; + + spin_lock(&ublk_idr_lock); + ub = idr_find(&ublk_index_idr, idx); + if (ub) + ub = ublk_get_device(ub); + spin_unlock(&ublk_idr_lock); + + return ub; +} + +static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd) +{ + struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd; + int ublksrv_pid = (int)header->data[0]; + unsigned long dev_blocks = header->data[1]; + struct ublk_device *ub; + struct gendisk *disk; + int ret = -EINVAL; + + if (ublksrv_pid <= 0) + return -EINVAL; + + ub = ublk_get_device_from_id(header->dev_id); + if (!ub) + return -EINVAL; + + wait_for_completion_interruptible(&ub->completion); + + schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD); + + mutex_lock(&ub->mutex); + if (ub->dev_info.state == UBLK_S_DEV_LIVE || + test_bit(UB_STATE_USED, &ub->state)) { + ret = -EEXIST; + goto out_unlock; + } + + /* We may get disk size updated */ + if (dev_blocks) + ub->dev_info.dev_blocks = dev_blocks; + + disk = blk_mq_alloc_disk(&ub->tag_set, ub); + if (IS_ERR(disk)) { + ret = PTR_ERR(disk); + goto out_unlock; + } + sprintf(disk->disk_name, "ublkb%d", ub->ub_number); + disk->fops = &ub_fops; + disk->private_data = ub; + + blk_queue_logical_block_size(disk->queue, ub->dev_info.block_size); + blk_queue_physical_block_size(disk->queue, ub->dev_info.block_size); + blk_queue_io_min(disk->queue, ub->dev_info.block_size); + blk_queue_max_hw_sectors(disk->queue, + ub->dev_info.rq_max_blocks << (ub->bs_shift - 9)); + disk->queue->limits.discard_granularity = PAGE_SIZE; + blk_queue_max_discard_sectors(disk->queue, UINT_MAX >> 9); + blk_queue_max_write_zeroes_sectors(disk->queue, UINT_MAX >> 9); + + set_capacity(disk, ub->dev_info.dev_blocks << (ub->bs_shift - 9)); + + ub->dev_info.ublksrv_pid = ublksrv_pid; + ub->ub_disk = disk; + get_device(&ub->cdev_dev); + ret = add_disk(disk); + if (ret) { + put_disk(disk); + goto out_unlock; + } + set_bit(UB_STATE_USED, &ub->state); + ub->dev_info.state = UBLK_S_DEV_LIVE; +out_unlock: + mutex_unlock(&ub->mutex); + ublk_put_device(ub); + return ret; +} + +static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd) +{ + struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd; + void __user *argp = (void __user *)(unsigned long)header->addr; + struct ublk_device *ub; + cpumask_var_t cpumask; + unsigned long queue; + unsigned int retlen; + unsigned int i; + int ret = -EINVAL; + + if (header->len * BITS_PER_BYTE < nr_cpu_ids) + return -EINVAL; + if (header->len & (sizeof(unsigned long)-1)) + return -EINVAL; + if (!header->addr) + return -EINVAL; + + ub = 
ublk_get_device_from_id(header->dev_id); + if (!ub) + return -EINVAL; + + queue = header->data[0]; + if (queue >= ub->dev_info.nr_hw_queues) + goto out_put_device; + + ret = -ENOMEM; + if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) + goto out_put_device; + + for_each_possible_cpu(i) { + if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue) + cpumask_set_cpu(i, cpumask); + } + + ret = -EFAULT; + retlen = min_t(unsigned short, header->len, cpumask_size()); + if (copy_to_user(argp, cpumask, retlen)) + goto out_free_cpumask; + if (retlen != header->len && + clear_user(argp + retlen, header->len - retlen)) + goto out_free_cpumask; + + ret = 0; +out_free_cpumask: + free_cpumask_var(cpumask); +out_put_device: + ublk_put_device(ub); + return ret; +} + +static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info) +{ + pr_devel("%s: dev id %d flags %llx\n", __func__, + info->dev_id, info->flags); + pr_devel("\t nr_hw_queues %d queue_depth %d block size %d dev_capacity %lld\n", + info->nr_hw_queues, info->queue_depth, + info->block_size, info->dev_blocks); +} + +static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) +{ + struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd; + void __user *argp = (void __user *)(unsigned long)header->addr; + struct ublksrv_ctrl_dev_info info; + struct ublk_device *ub; + int ret = -EINVAL; + + if (header->len < sizeof(info) || !header->addr) + return -EINVAL; + if (header->queue_id != (u16)-1) { + pr_warn("%s: queue_id is wrong %x\n", + __func__, header->queue_id); + return -EINVAL; + } + if (copy_from_user(&info, argp, sizeof(info))) + return -EFAULT; + ublk_dump_dev_info(&info); + if (header->dev_id != info.dev_id) { + pr_warn("%s: dev id not match %u %u\n", + __func__, header->dev_id, info.dev_id); + return -EINVAL; + } + + ret = mutex_lock_killable(&ublk_ctl_mutex); + if (ret) + return ret; + + ret = -ENOMEM; + ub = kzalloc(sizeof(*ub), GFP_KERNEL); + if (!ub) + goto out_unlock; + mutex_init(&ub->mutex); + spin_lock_init(&ub->mm_lock); + INIT_WORK(&ub->stop_work, ublk_stop_work_fn); + INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work); + + ret = ublk_alloc_dev_number(ub, header->dev_id); + if (ret < 0) + goto out_free_ub; + + memcpy(&ub->dev_info, &info, sizeof(info)); + + /* update device id */ + ub->dev_info.dev_id = ub->ub_number; + + /* + * The 64-bit flags will be copied back to userspace as the feature + * negotiation result, so we have to clear any flags the driver + * doesn't support yet; userspace then gets the correct flags + * (features) to handle. + */ + ub->dev_info.flags &= UBLK_F_ALL; + + /* We are not ready to support zero copy */ + ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY; + + ub->bs_shift = ilog2(ub->dev_info.block_size); + ub->dev_info.nr_hw_queues = min_t(unsigned int, + ub->dev_info.nr_hw_queues, nr_cpu_ids); + ublk_align_max_io_size(ub); + + ret = ublk_init_queues(ub); + if (ret) + goto out_free_dev_number; + + ret = ublk_add_tag_set(ub); + if (ret) + goto out_deinit_queues; + + ret = -EFAULT; + if (copy_to_user(argp, &ub->dev_info, sizeof(info))) + goto out_free_tag_set; + + /* + * Add the char dev so that the ublksrv daemon can be set up. + * ublk_add_chdev() will clean up everything if it fails.
+ */ + ret = ublk_add_chdev(ub); + goto out_unlock; + +out_free_tag_set: + blk_mq_free_tag_set(&ub->tag_set); +out_deinit_queues: + ublk_deinit_queues(ub); +out_free_dev_number: + ublk_free_dev_number(ub); +out_free_ub: + mutex_destroy(&ub->mutex); + kfree(ub); +out_unlock: + mutex_unlock(&ublk_ctl_mutex); + return ret; +} + +static inline bool ublk_idr_freed(int id) +{ + void *ptr; + + spin_lock(&ublk_idr_lock); + ptr = idr_find(&ublk_index_idr, id); + spin_unlock(&ublk_idr_lock); + + return ptr == NULL; +} + +static int ublk_ctrl_del_dev(int idx) +{ + struct ublk_device *ub; + int ret; + + ret = mutex_lock_killable(&ublk_ctl_mutex); + if (ret) + return ret; + + ub = ublk_get_device_from_id(idx); + if (ub) { + ublk_remove(ub); + ublk_put_device(ub); + ret = 0; + } else { + ret = -ENODEV; + } + + /* + * Wait until the idr is removed, then it can be reused after + * DEL_DEV command is returned. + */ + if (!ret) + wait_event(ublk_idr_wq, ublk_idr_freed(idx)); + mutex_unlock(&ublk_ctl_mutex); + + return ret; +} + +static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd) +{ + struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd; + + pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n", + __func__, cmd->cmd_op, header->dev_id, header->queue_id, + header->data[0], header->addr, header->len); +} + +static int ublk_ctrl_stop_dev(struct io_uring_cmd *cmd) +{ + struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd; + struct ublk_device *ub; + + ub = ublk_get_device_from_id(header->dev_id); + if (!ub) + return -EINVAL; + + ublk_stop_dev(ub); + cancel_work_sync(&ub->stop_work); + + ublk_put_device(ub); + return 0; +} + +static int ublk_ctrl_get_dev_info(struct io_uring_cmd *cmd) +{ + struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd; + void __user *argp = (void __user *)(unsigned long)header->addr; + struct ublk_device *ub; + int ret = 0; + + if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr) + return -EINVAL; + + ub = ublk_get_device_from_id(header->dev_id); + if (!ub) + return -EINVAL; + + if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info))) + ret = -EFAULT; + ublk_put_device(ub); + + return ret; +} + +static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ + struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd; + int ret = -EINVAL; + + ublk_ctrl_cmd_dump(cmd); + + if (!(issue_flags & IO_URING_F_SQE128)) + goto out; + + ret = -EPERM; + if (!capable(CAP_SYS_ADMIN)) + goto out; + + ret = -ENODEV; + switch (cmd->cmd_op) { + case UBLK_CMD_START_DEV: + ret = ublk_ctrl_start_dev(cmd); + break; + case UBLK_CMD_STOP_DEV: + ret = ublk_ctrl_stop_dev(cmd); + break; + case UBLK_CMD_GET_DEV_INFO: + ret = ublk_ctrl_get_dev_info(cmd); + break; + case UBLK_CMD_ADD_DEV: + ret = ublk_ctrl_add_dev(cmd); + break; + case UBLK_CMD_DEL_DEV: + ret = ublk_ctrl_del_dev(header->dev_id); + break; + case UBLK_CMD_GET_QUEUE_AFFINITY: + ret = ublk_ctrl_get_queue_affinity(cmd); + break; + default: + break; + } + out: + io_uring_cmd_done(cmd, ret, 0); + pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n", + __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id); + return -EIOCBQUEUED; +} + +static const struct file_operations ublk_ctl_fops = { + .open = nonseekable_open, + .uring_cmd = ublk_ctrl_uring_cmd, + .owner = THIS_MODULE, + .llseek = noop_llseek, +}; + +static struct miscdevice ublk_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "ublk-control", + 
.fops = &ublk_ctl_fops, +}; + +static int __init ublk_init(void) +{ + int ret; + + init_waitqueue_head(&ublk_idr_wq); + + ret = misc_register(&ublk_misc); + if (ret) + return ret; + + ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char"); + if (ret) + goto unregister_mis; + + ublk_chr_class = class_create(THIS_MODULE, "ublk-char"); + if (IS_ERR(ublk_chr_class)) { + ret = PTR_ERR(ublk_chr_class); + goto free_chrdev_region; + } + return 0; + +free_chrdev_region: + unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS); +unregister_mis: + misc_deregister(&ublk_misc); + return ret; +} + +static void __exit ublk_exit(void) +{ + struct ublk_device *ub; + int id; + + class_destroy(ublk_chr_class); + + misc_deregister(&ublk_misc); + + idr_for_each_entry(&ublk_index_idr, ub, id) + ublk_remove(ub); + + idr_destroy(&ublk_index_idr); + unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS); +} + +module_init(ublk_init); +module_exit(ublk_exit); + +MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 6fc7850c2b0a..d7d72e8f6e55 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -1089,7 +1089,7 @@ static int virtblk_probe(struct virtio_device *vdev) return 0; out_cleanup_disk: - blk_cleanup_disk(vblk->disk); + put_disk(vblk->disk); out_free_tags: blk_mq_free_tag_set(&vblk->tag_set); out_free_vq: @@ -1111,7 +1111,6 @@ static void virtblk_remove(struct virtio_device *vdev) flush_work(&vblk->config_work); del_gendisk(vblk->disk); - blk_cleanup_queue(vblk->disk->queue); blk_mq_free_tag_set(&vblk->tag_set); mutex_lock(&vblk->vdev_mutex); diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index a97f2bf5b01b..a5cf7f1e871c 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -442,7 +442,7 @@ static void free_req(struct xen_blkif_ring *ring, struct pending_req *req) * Routines for managing virtual block devices (vbds). 
*/ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif, - int operation) + enum req_op operation) { struct xen_vbd *vbd = &blkif->vbd; int rc = -EACCES; @@ -1193,8 +1193,8 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, struct bio *bio = NULL; struct bio **biolist = pending_req->biolist; int i, nbio = 0; - int operation; - int operation_flags = 0; + enum req_op operation; + blk_opf_t operation_flags = 0; struct blk_plug plug; bool drain = false; struct grant_page **pages = pending_req->segments; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 3646c0cae672..dc48298225a6 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -2397,7 +2397,7 @@ static void blkfront_connect(struct blkfront_info *info) err = device_add_disk(&info->xbdev->dev, info->gd, NULL); if (err) { - blk_cleanup_disk(info->gd); + put_disk(info->gd); blk_mq_free_tag_set(&info->tag_set); info->rq = NULL; goto fail; @@ -2482,7 +2482,7 @@ static int blkfront_remove(struct xenbus_device *xbdev) blkif_free(info, 0); if (info->gd) { xlbd_release_minors(info->gd->first_minor, info->gd->minors); - blk_cleanup_disk(info->gd); + put_disk(info->gd); blk_mq_free_tag_set(&info->tag_set); } diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 7a6ed83481b8..c1e85f356e4d 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -337,7 +337,7 @@ static int z2ram_register_disk(int minor) z2ram_gendisk[minor] = disk; err = add_disk(disk); if (err) - blk_cleanup_disk(disk); + put_disk(disk); return err; } @@ -384,7 +384,6 @@ static void __exit z2_exit(void) for (i = 0; i < Z2MINOR_COUNT; i++) { del_gendisk(z2ram_gendisk[i]); - blk_cleanup_queue(z2ram_gendisk[i]->queue); put_disk(z2ram_gendisk[i]); } blk_mq_free_tag_set(&tag_set); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index b8549c61ff2c..4abeb261b833 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1523,7 +1523,7 @@ static void zram_bio_discard(struct zram *zram, u32 index, * Returns 1 if IO request was successfully submitted. 
*/ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, - int offset, unsigned int op, struct bio *bio) + int offset, enum req_op op, struct bio *bio) { int ret; @@ -1631,7 +1631,7 @@ static void zram_slot_free_notify(struct block_device *bdev, } static int zram_rw_page(struct block_device *bdev, sector_t sector, - struct page *page, unsigned int op) + struct page *page, enum req_op op) { int offset, ret; u32 index; @@ -1957,7 +1957,7 @@ static int zram_add(void) return device_id; out_cleanup_disk: - blk_cleanup_disk(zram->disk); + put_disk(zram->disk); out_free_idr: idr_remove(&zram_index_idr, device_id); out_free_dev: @@ -2008,7 +2008,7 @@ static int zram_remove(struct zram *zram) */ zram_reset_device(zram); - blk_cleanup_disk(zram->disk); + put_disk(zram->disk); kfree(zram); return 0; } diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c index 378f5d62a991..2e564803e786 100644 --- a/drivers/bus/hisi_lpc.c +++ b/drivers/bus/hisi_lpc.c @@ -379,7 +379,7 @@ static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev, /* * hisi_lpc_acpi_set_io_res - set the resources for a child - * @child: the device node to be updated the I/O resource + * @adev: ACPI companion of the device node to be updated the I/O resource * @hostdev: the device node associated with host controller * @res: double pointer to be set to the address of translated resources * @num_res: pointer to variable to hold the number of translated resources @@ -390,31 +390,24 @@ static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev, * host-relative address resource. This function will return the translated * logical PIO addresses for each child devices resources. */ -static int hisi_lpc_acpi_set_io_res(struct device *child, +static int hisi_lpc_acpi_set_io_res(struct acpi_device *adev, struct device *hostdev, const struct resource **res, int *num_res) { - struct acpi_device *adev; - struct acpi_device *host; + struct acpi_device *host = to_acpi_device(adev->dev.parent); struct resource_entry *rentry; LIST_HEAD(resource_list); struct resource *resources; int count; int i; - if (!child || !hostdev) - return -EINVAL; - - host = to_acpi_device(hostdev); - adev = to_acpi_device(child); - if (!adev->status.present) { - dev_dbg(child, "device is not present\n"); + dev_dbg(&adev->dev, "device is not present\n"); return -EIO; } if (acpi_device_enumerated(adev)) { - dev_dbg(child, "has been enumerated\n"); + dev_dbg(&adev->dev, "has been enumerated\n"); return -EIO; } @@ -425,7 +418,7 @@ static int hisi_lpc_acpi_set_io_res(struct device *child, */ count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); if (count <= 0) { - dev_dbg(child, "failed to get resources\n"); + dev_dbg(&adev->dev, "failed to get resources\n"); return count ? 
count : -EIO; } @@ -454,7 +447,7 @@ static int hisi_lpc_acpi_set_io_res(struct device *child, continue; ret = hisi_lpc_acpi_xlat_io_res(adev, host, &resources[i]); if (ret) { - dev_err(child, "translate IO range %pR failed (%d)\n", + dev_err(&adev->dev, "translate IO range %pR failed (%d)\n", &resources[i], ret); return ret; } @@ -471,6 +464,12 @@ static int hisi_lpc_acpi_remove_subdev(struct device *dev, void *unused) return 0; } +static int hisi_lpc_acpi_clear_enumerated(struct acpi_device *adev, void *not_used) +{ + acpi_device_clear_enumerated(adev); + return 0; +} + struct hisi_lpc_acpi_cell { const char *hid; const char *name; @@ -480,115 +479,114 @@ struct hisi_lpc_acpi_cell { static void hisi_lpc_acpi_remove(struct device *hostdev) { - struct acpi_device *adev = ACPI_COMPANION(hostdev); - struct acpi_device *child; - device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev); - - list_for_each_entry(child, &adev->children, node) - acpi_device_clear_enumerated(child); + acpi_dev_for_each_child(ACPI_COMPANION(hostdev), + hisi_lpc_acpi_clear_enumerated, NULL); } -/* - * hisi_lpc_acpi_probe - probe children for ACPI FW - * @hostdev: LPC host device pointer - * - * Returns 0 when successful, and a negative value for failure. - * - * Create a platform device per child, fixing up the resources - * from bus addresses to Logical PIO addresses. - * - */ -static int hisi_lpc_acpi_probe(struct device *hostdev) +static int hisi_lpc_acpi_add_child(struct acpi_device *child, void *data) { - struct acpi_device *adev = ACPI_COMPANION(hostdev); - struct acpi_device *child; + const char *hid = acpi_device_hid(child); + struct device *hostdev = data; + const struct hisi_lpc_acpi_cell *cell; + struct platform_device *pdev; + const struct resource *res; + bool found = false; + int num_res; int ret; - /* Only consider the children of the host */ - list_for_each_entry(child, &adev->children, node) { - const char *hid = acpi_device_hid(child); - const struct hisi_lpc_acpi_cell *cell; - struct platform_device *pdev; - const struct resource *res; - bool found = false; - int num_res; - - ret = hisi_lpc_acpi_set_io_res(&child->dev, &adev->dev, &res, - &num_res); - if (ret) { - dev_warn(hostdev, "set resource fail (%d)\n", ret); - goto fail; - } + ret = hisi_lpc_acpi_set_io_res(child, hostdev, &res, &num_res); + if (ret) { + dev_warn(hostdev, "set resource fail (%d)\n", ret); + return ret; + } - cell = (struct hisi_lpc_acpi_cell []){ - /* ipmi */ - { - .hid = "IPI0001", - .name = "hisi-lpc-ipmi", - }, - /* 8250-compatible uart */ - { - .hid = "HISI1031", - .name = "serial8250", - .pdata = (struct plat_serial8250_port []) { - { - .iobase = res->start, - .uartclk = 1843200, - .iotype = UPIO_PORT, - .flags = UPF_BOOT_AUTOCONF, - }, - {} + cell = (struct hisi_lpc_acpi_cell []){ + /* ipmi */ + { + .hid = "IPI0001", + .name = "hisi-lpc-ipmi", + }, + /* 8250-compatible uart */ + { + .hid = "HISI1031", + .name = "serial8250", + .pdata = (struct plat_serial8250_port []) { + { + .iobase = res->start, + .uartclk = 1843200, + .iotype = UPIO_PORT, + .flags = UPF_BOOT_AUTOCONF, }, - .pdata_size = 2 * - sizeof(struct plat_serial8250_port), + {} }, - {} - }; - - for (; cell && cell->name; cell++) { - if (!strcmp(cell->hid, hid)) { - found = true; - break; - } - } - - if (!found) { - dev_warn(hostdev, - "could not find cell for child device (%s), discarding\n", - hid); - continue; + .pdata_size = 2 * + sizeof(struct plat_serial8250_port), + }, + {} + }; + + for (; cell && cell->name; cell++) { + if (!strcmp(cell->hid, 
hid)) { + found = true; + break; } + } - pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO); - if (!pdev) { - ret = -ENOMEM; - goto fail; - } + if (!found) { + dev_warn(hostdev, + "could not find cell for child device (%s), discarding\n", + hid); + return 0; + } - pdev->dev.parent = hostdev; - ACPI_COMPANION_SET(&pdev->dev, child); + pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO); + if (!pdev) + return -ENOMEM; - ret = platform_device_add_resources(pdev, res, num_res); - if (ret) - goto fail; + pdev->dev.parent = hostdev; + ACPI_COMPANION_SET(&pdev->dev, child); - ret = platform_device_add_data(pdev, cell->pdata, - cell->pdata_size); - if (ret) - goto fail; + ret = platform_device_add_resources(pdev, res, num_res); + if (ret) + goto fail; - ret = platform_device_add(pdev); - if (ret) - goto fail; + ret = platform_device_add_data(pdev, cell->pdata, cell->pdata_size); + if (ret) + goto fail; - acpi_device_set_enumerated(child); - } + ret = platform_device_add(pdev); + if (ret) + goto fail; + acpi_device_set_enumerated(child); return 0; fail: - hisi_lpc_acpi_remove(hostdev); + platform_device_put(pdev); + return ret; +} + +/* + * hisi_lpc_acpi_probe - probe children for ACPI FW + * @hostdev: LPC host device pointer + * + * Returns 0 when successful, and a negative value for failure. + * + * Create a platform device per child, fixing up the resources + * from bus addresses to Logical PIO addresses. + * + */ +static int hisi_lpc_acpi_probe(struct device *hostdev) +{ + int ret; + + /* Only consider the children of the host */ + ret = acpi_dev_for_each_child(ACPI_COMPANION(hostdev), + hisi_lpc_acpi_add_child, hostdev); + if (ret) + hisi_lpc_acpi_remove(hostdev); + return ret; } diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 8e78b37d0f6a..ceded5772aac 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -817,7 +817,7 @@ probe_fail_free_irqs: free_irq(HW_EVENT_GDROM_DMA, &gd); free_irq(HW_EVENT_GDROM_CMD, &gd); probe_fail_cleanup_disk: - blk_cleanup_disk(gd.disk); + put_disk(gd.disk); probe_fail_free_tag_set: blk_mq_free_tag_set(&gd.tag_set); probe_fail_free_cd_info: @@ -831,7 +831,6 @@ probe_fail_no_mem: static int remove_gdrom(struct platform_device *devptr) { - blk_cleanup_queue(gd.gdrom_rq); blk_mq_free_tag_set(&gd.tag_set); free_irq(HW_EVENT_GDROM_CMD, &gd); free_irq(HW_EVENT_GDROM_DMA, &gd); diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 0b6c03643ddc..30192e123e5f 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -431,7 +431,6 @@ config ADI config RANDOM_TRUST_CPU bool "Initialize RNG using CPU RNG instructions" default y - depends on ARCH_RANDOM help Initialize the RNG using random numbers supplied by the CPU's RNG instructions (e.g. RDRAND), if supported and available. 
These diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index b3f2d55dc551..3da8e85f8aae 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -87,7 +87,7 @@ config HW_RANDOM_BA431 config HW_RANDOM_BCM2835 tristate "Broadcom BCM2835/BCM63xx Random Number Generator support" depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \ - ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST + ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the Random Number diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c index 2beaa35c0d74..488808dc17a2 100644 --- a/drivers/char/hw_random/s390-trng.c +++ b/drivers/char/hw_random/s390-trng.c @@ -108,7 +108,6 @@ static ssize_t trng_counter_show(struct device *dev, { u64 dev_counter = atomic64_read(&trng_dev_counter); u64 hwrng_counter = atomic64_read(&trng_hwrng_counter); -#if IS_ENABLED(CONFIG_ARCH_RANDOM) u64 arch_counter = atomic64_read(&s390_arch_random_counter); return sysfs_emit(buf, @@ -118,14 +117,6 @@ static ssize_t trng_counter_show(struct device *dev, "total: %llu\n", dev_counter, hwrng_counter, arch_counter, dev_counter + hwrng_counter + arch_counter); -#else - return sysfs_emit(buf, - "trng: %llu\n" - "hwrng: %llu\n" - "total: %llu\n", - dev_counter, hwrng_counter, - dev_counter + hwrng_counter); -#endif } static DEVICE_ATTR(byte_counter, 0444, trng_counter_show, NULL); diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index 7444cc146e86..a9a0a3b09c8b 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -145,7 +145,7 @@ static int via_rng_init(struct hwrng *rng) } /* Control the RNG via MSR. Tread lightly and pay very close - * close attention to values written, as the reserved fields + * attention to values written, as the reserved fields * are documented to be "undefined and unpredictable"; but it * does not say to write them as zero, so I make a guess that * we restore the values we find in the register. diff --git a/drivers/char/random.c b/drivers/char/random.c index a1af90bacc9f..d44832e9e709 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -229,7 +229,7 @@ static void crng_reseed(void) /* * This generates a ChaCha block using the provided key, and then - * immediately overwites that key with half the block. It returns + * immediately overwrites that key with half the block. It returns * the resultant ChaCha state to the user, along with the second * half of the block containing 32 bytes of random data that may * be used; random_data_len may not be greater than 32. 
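The extract_entropy() and random_init() hunks below share one new pattern: instead of calling arch_get_random_seed_long()/arch_get_random_long() once per word, they ask the batched *_longs() helpers for as many longs as are still needed, and only fall back to the cycle counter one word at a time. Distilled into a helper, the loop looks like this (a sketch for illustration only; fill_longs is a made-up name, while the three callees are the real in-kernel interfaces the hunks use):

/* Fill words[0..count) from the strongest available source (sketch). */
static void fill_longs(unsigned long *words, size_t count)
{
	size_t i = 0, longs;

	while (i < count) {
		/* 1) true seed material (e.g. x86 RDSEED), possibly many longs at once */
		longs = arch_get_random_seed_longs(&words[i], count - i);
		if (longs) {
			i += longs;
			continue;
		}
		/* 2) ordinary arch RNG output (e.g. RDRAND) */
		longs = arch_get_random_longs(&words[i], count - i);
		if (longs) {
			i += longs;
			continue;
		}
		/* 3) last resort: a timestamp/cycle counter, one word per iteration */
		words[i++] = random_get_entropy();
	}
}

random_init() additionally mixes each chunk into the pool as it is obtained and debits arch_bits for every word that came from the cycle-counter fallback, as the second hunk shows.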
@@ -596,12 +596,20 @@ static void extract_entropy(void *buf, size_t len) unsigned long rdseed[32 / sizeof(long)]; size_t counter; } block; - size_t i; + size_t i, longs; - for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) { - if (!arch_get_random_seed_long(&block.rdseed[i]) && - !arch_get_random_long(&block.rdseed[i])) - block.rdseed[i] = random_get_entropy(); + for (i = 0; i < ARRAY_SIZE(block.rdseed);) { + longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i); + if (longs) { + i += longs; + continue; + } + longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i); + if (longs) { + i += longs; + continue; + } + block.rdseed[i++] = random_get_entropy(); } spin_lock_irqsave(&input_pool.lock, flags); @@ -643,10 +651,10 @@ static void __cold _credit_init_bits(size_t bits) add = min_t(size_t, bits, POOL_BITS); + orig = READ_ONCE(input_pool.init_bits); do { - orig = READ_ONCE(input_pool.init_bits); new = min_t(unsigned int, POOL_BITS, orig + add); - } while (cmpxchg(&input_pool.init_bits, orig, new) != orig); + } while (!try_cmpxchg(&input_pool.init_bits, &orig, new)); if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) { crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */ @@ -776,22 +784,31 @@ static struct notifier_block pm_notifier = { .notifier_call = random_pm_notifica int __init random_init(const char *command_line) { ktime_t now = ktime_get_real(); - unsigned int i, arch_bits; - unsigned long entropy; + size_t i, longs, arch_bits; + unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)]; #if defined(LATENT_ENTROPY_PLUGIN) static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy; _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed)); #endif - for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8; - i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) { - if (!arch_get_random_seed_long_early(&entropy) && - !arch_get_random_long_early(&entropy)) { - entropy = random_get_entropy(); - arch_bits -= sizeof(entropy) * 8; + for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) { + longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i); + if (longs) { + _mix_pool_bytes(entropy, sizeof(*entropy) * longs); + i += longs; + continue; } - _mix_pool_bytes(&entropy, sizeof(entropy)); + longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i); + if (longs) { + _mix_pool_bytes(entropy, sizeof(*entropy) * longs); + i += longs; + continue; + } + entropy[0] = random_get_entropy(); + _mix_pool_bytes(entropy, sizeof(*entropy)); + arch_bits -= sizeof(*entropy) * 8; + ++i; } _mix_pool_bytes(&now, sizeof(now)); _mix_pool_bytes(utsname(), sizeof(*(utsname()))); diff --git a/drivers/clk/.kunitconfig b/drivers/clk/.kunitconfig index cdbc7d7deba9..2fbeb71316f8 100644 --- a/drivers/clk/.kunitconfig +++ b/drivers/clk/.kunitconfig @@ -2,3 +2,4 @@ CONFIG_KUNIT=y CONFIG_COMMON_CLK=y CONFIG_CLK_KUNIT_TEST=y CONFIG_CLK_GATE_KUNIT_TEST=y +CONFIG_UML_PCI_OVER_VIRTIO=n diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig index ec738f74a026..77266afb1c79 100644 --- a/drivers/clk/bcm/Kconfig +++ b/drivers/clk/bcm/Kconfig @@ -22,9 +22,9 @@ config CLK_BCM2835 config CLK_BCM_63XX bool "Broadcom BCM63xx clock support" - depends on ARCH_BCM_63XX || COMPILE_TEST + depends on ARCH_BCMBCA || COMPILE_TEST select COMMON_CLK_IPROC - default ARCH_BCM_63XX + default ARCH_BCMBCA help Enable common clock framework support for Broadcom BCM63xx DSL SoCs based on the ARM architecture diff --git 
a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c index 29a8c710ae06..b7962e5149a5 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c @@ -138,6 +138,7 @@ static struct ccu_common *sun50i_h6_r_ccu_clks[] = { &r_apb2_rsb_clk.common, &r_apb1_ir_clk.common, &r_apb1_w1_clk.common, + &r_apb1_rtc_clk.common, &ir_clk.common, &w1_clk.common, }; diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 3c0ee102fe73..4f2bb7315b67 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -22,7 +22,7 @@ config CLKEVT_I8253 config I8253_LOCK bool -config OMAP_DM_TIMER +config OMAP_DM_SYSTIMER bool select TIMER_OF @@ -56,6 +56,13 @@ config DIGICOLOR_TIMER help Enables the support for the digicolor timer driver. +config OMAP_DM_TIMER + bool "OMAP dual-mode timer driver" if ARCH_K3 || COMPILE_TEST + default y if ARCH_K3 + select TIMER_OF + help + Enables the support for the TI dual-mode timer driver. + config DW_APB_TIMER bool "DW APB timer driver" if COMPILE_TEST help @@ -150,6 +157,14 @@ config TEGRA_TIMER help Enables support for the Tegra driver. +config TEGRA186_TIMER + bool "NVIDIA Tegra186 timer driver" + depends on ARCH_TEGRA || COMPILE_TEST + depends on WATCHDOG && WATCHDOG_CORE + help + Enables support for the timers and watchdogs found on NVIDIA + Tegra186 and later SoCs. + config VT8500_TIMER bool "VT8500 timer driver" if COMPILE_TEST depends on HAS_IOMEM @@ -367,7 +382,7 @@ config ARM_GT_INITIAL_PRESCALER_VAL depends on ARM_GLOBAL_TIMER help When the ARM global timer initializes, its current rate is declared - to the kernel and maintained forever. Should it's parent clock + to the kernel and maintained forever. Should its parent clock change, the driver tries to fix the timer's internal prescaler. On some machs (i.e. 
Zynq) the initial prescaler value thus poses bounds about how much the parent clock is allowed to decrease or diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 6ca640019e10..64ab547de97b 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -18,7 +18,7 @@ obj-$(CONFIG_CLKSRC_MMIO) += mmio.o obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o -obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm-systimer.o +obj-$(CONFIG_OMAP_DM_SYSTIMER) += timer-ti-dm-systimer.o obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o @@ -36,6 +36,7 @@ obj-$(CONFIG_SUN4I_TIMER) += timer-sun4i.o obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o obj-$(CONFIG_MESON6_TIMER) += timer-meson6.o obj-$(CONFIG_TEGRA_TIMER) += timer-tegra.o +obj-$(CONFIG_TEGRA186_TIMER) += timer-tegra186.o obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index dd0956ad969c..64dcb082d4cf 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -981,6 +981,14 @@ static const struct of_device_id sh_cmt_of_table[] __maybe_unused = { .compatible = "renesas,rcar-gen3-cmt1", .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2] }, + { + .compatible = "renesas,rcar-gen4-cmt0", + .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2] + }, + { + .compatible = "renesas,rcar-gen4-cmt1", + .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2] + }, { } }; MODULE_DEVICE_TABLE(of, sh_cmt_of_table); diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c index 7bcb4a3f26fb..d5b29fd03ca2 100644 --- a/drivers/clocksource/timer-mediatek.c +++ b/drivers/clocksource/timer-mediatek.c @@ -22,6 +22,19 @@ #define TIMER_SYNC_TICKS (3) +/* cpux mcusys wrapper */ +#define CPUX_CON_REG 0x0 +#define CPUX_IDX_REG 0x4 + +/* cpux */ +#define CPUX_IDX_GLOBAL_CTRL 0x0 + #define CPUX_ENABLE BIT(0) + #define CPUX_CLK_DIV_MASK GENMASK(10, 8) + #define CPUX_CLK_DIV1 BIT(8) + #define CPUX_CLK_DIV2 BIT(9) + #define CPUX_CLK_DIV4 BIT(10) +#define CPUX_IDX_GLOBAL_IRQ 0x30 + /* gpt */ #define GPT_IRQ_EN_REG 0x00 #define GPT_IRQ_ENABLE(val) BIT((val) - 1) @@ -72,6 +85,52 @@ static void __iomem *gpt_sched_reg __read_mostly; +static u32 mtk_cpux_readl(u32 reg_idx, struct timer_of *to) +{ + writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG); + return readl(timer_of_base(to) + CPUX_CON_REG); +} + +static void mtk_cpux_writel(u32 val, u32 reg_idx, struct timer_of *to) +{ + writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG); + writel(val, timer_of_base(to) + CPUX_CON_REG); +} + +static void mtk_cpux_set_irq(struct timer_of *to, bool enable) +{ + const unsigned long *irq_mask = cpumask_bits(cpu_possible_mask); + u32 val; + + val = mtk_cpux_readl(CPUX_IDX_GLOBAL_IRQ, to); + + if (enable) + val |= *irq_mask; + else + val &= ~(*irq_mask); + + mtk_cpux_writel(val, CPUX_IDX_GLOBAL_IRQ, to); +} + +static int mtk_cpux_clkevt_shutdown(struct clock_event_device *clkevt) +{ + /* Clear any irq */ + mtk_cpux_set_irq(to_timer_of(clkevt), false); + + /* + * Disabling CPUXGPT timer will crash the platform, especially + * if Trusted Firmware is using it (usually, for sleep states), + * so we only mask the IRQ and call it a day. 
+ */ + return 0; +} + +static int mtk_cpux_clkevt_resume(struct clock_event_device *clkevt) +{ + mtk_cpux_set_irq(to_timer_of(clkevt), true); + return 0; +} + static void mtk_syst_ack_irq(struct timer_of *to) { /* Clear and disable interrupt */ @@ -281,6 +340,60 @@ static struct timer_of to = { }, }; +static int __init mtk_cpux_init(struct device_node *node) +{ + static struct timer_of to_cpux; + u32 freq, val; + int ret; + + /* + * There are per-cpu interrupts for the CPUX General Purpose Timer + * but since this timer feeds the AArch64 System Timer we can rely + * on the CPU timer PPIs as well, so we don't declare TIMER_OF_IRQ. + */ + to_cpux.flags = TIMER_OF_BASE | TIMER_OF_CLOCK; + to_cpux.clkevt.name = "mtk-cpuxgpt"; + to_cpux.clkevt.rating = 10; + to_cpux.clkevt.cpumask = cpu_possible_mask; + to_cpux.clkevt.set_state_shutdown = mtk_cpux_clkevt_shutdown; + to_cpux.clkevt.tick_resume = mtk_cpux_clkevt_resume; + + /* If this fails, bad things are about to happen... */ + ret = timer_of_init(node, &to_cpux); + if (ret) { + WARN(1, "Cannot start CPUX timers.\n"); + return ret; + } + + /* + * Check if we're given a clock with the right frequency for this + * timer, otherwise warn but keep going with the setup anyway, as + * that makes it possible to still boot the kernel, even though + * it may not work correctly (random lockups, etc). + * The reason behind this is that having an early UART may not be + * possible for everyone and this gives a chance to retrieve kmsg + * for eventual debugging even on consumer devices. + */ + freq = timer_of_rate(&to_cpux); + if (freq > 13000000) + WARN(1, "Requested unsupported timer frequency %u\n", freq); + + /* Clock input is 26MHz, set DIV2 to achieve 13MHz clock */ + val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to_cpux); + val &= ~CPUX_CLK_DIV_MASK; + val |= CPUX_CLK_DIV2; + mtk_cpux_writel(val, CPUX_IDX_GLOBAL_CTRL, &to_cpux); + + /* Enable all CPUXGPT timers */ + val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to_cpux); + mtk_cpux_writel(val | CPUX_ENABLE, CPUX_IDX_GLOBAL_CTRL, &to_cpux); + + clockevents_config_and_register(&to_cpux.clkevt, timer_of_rate(&to_cpux), + TIMER_SYNC_TICKS, 0xffffffff); + + return 0; +} + static int __init mtk_syst_init(struct device_node *node) { int ret; @@ -339,3 +452,4 @@ static int __init mtk_gpt_init(struct device_node *node) } TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init); TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init); +TIMER_OF_DECLARE(mtk_mt6795, "mediatek,mt6795-systimer", mtk_cpux_init); diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c index abce83d2f00b..d5f1436f33d9 100644 --- a/drivers/clocksource/timer-microchip-pit64b.c +++ b/drivers/clocksource/timer-microchip-pit64b.c @@ -61,7 +61,7 @@ struct mchp_pit64b_timer { }; /** - * mchp_pit64b_clkevt - PIT64B clockevent data structure + * struct mchp_pit64b_clkevt - PIT64B clockevent data structure * @timer: PIT64B timer * @clkevt: clockevent */ @@ -75,7 +75,7 @@ struct mchp_pit64b_clkevt { struct mchp_pit64b_clkevt, clkevt)) /** - * mchp_pit64b_clksrc - PIT64B clocksource data structure + * struct mchp_pit64b_clksrc - PIT64B clocksource data structure * @timer: PIT64B timer * @clksrc: clocksource */ @@ -173,7 +173,8 @@ static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev) { struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev); - writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR); + if 
(!clockevent_state_detached(cedev)) + mchp_pit64b_suspend(timer); return 0; } @@ -182,35 +183,37 @@ static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev) { struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev); + if (clockevent_state_shutdown(cedev)) + mchp_pit64b_resume(timer); + mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_CONT, MCHP_PIT64B_IER_PERIOD); return 0; } -static int mchp_pit64b_clkevt_set_next_event(unsigned long evt, - struct clock_event_device *cedev) +static int mchp_pit64b_clkevt_set_oneshot(struct clock_event_device *cedev) { struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev); - mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT, + if (clockevent_state_shutdown(cedev)) + mchp_pit64b_resume(timer); + + mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_ONE_SHOT, MCHP_PIT64B_IER_PERIOD); return 0; } -static void mchp_pit64b_clkevt_suspend(struct clock_event_device *cedev) +static int mchp_pit64b_clkevt_set_next_event(unsigned long evt, + struct clock_event_device *cedev) { struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev); - mchp_pit64b_suspend(timer); -} - -static void mchp_pit64b_clkevt_resume(struct clock_event_device *cedev) -{ - struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev); + mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT, + MCHP_PIT64B_IER_PERIOD); - mchp_pit64b_resume(timer); + return 0; } static irqreturn_t mchp_pit64b_interrupt(int irq, void *dev_id) @@ -242,8 +245,10 @@ static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate, } /** - * mchp_pit64b_init_mode - prepare PIT64B mode register value to be used at - * runtime; this includes prescaler and SGCLK bit + * mchp_pit64b_init_mode() - prepare PIT64B mode register value to be used at + * runtime; this includes prescaler and SGCLK bit + * @timer: pointer to pit64b timer to init + * @max_rate: maximum rate that timer's clock could use * * PIT64B timer may be fed by gclk or pclk. When gclk is used its rate has to * be at least 3 times lower that pclk's rate. pclk rate is fixed, gclk rate @@ -341,6 +346,7 @@ static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer, if (!cs) return -ENOMEM; + mchp_pit64b_resume(timer); mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0); mchp_pit64b_cs_base = timer->base; @@ -362,8 +368,7 @@ static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer, pr_debug("clksrc: Failed to register PIT64B clocksource!\n"); /* Stop timer. 
*/ - writel_relaxed(MCHP_PIT64B_CR_SWRST, - timer->base + MCHP_PIT64B_CR); + mchp_pit64b_suspend(timer); kfree(cs); return ret; @@ -395,9 +400,8 @@ static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer, ce->clkevt.rating = 150; ce->clkevt.set_state_shutdown = mchp_pit64b_clkevt_shutdown; ce->clkevt.set_state_periodic = mchp_pit64b_clkevt_set_periodic; + ce->clkevt.set_state_oneshot = mchp_pit64b_clkevt_set_oneshot; ce->clkevt.set_next_event = mchp_pit64b_clkevt_set_next_event; - ce->clkevt.suspend = mchp_pit64b_clkevt_suspend; - ce->clkevt.resume = mchp_pit64b_clkevt_resume; ce->clkevt.cpumask = cpumask_of(0); ce->clkevt.irq = irq; @@ -448,19 +452,10 @@ static int __init mchp_pit64b_dt_init_timer(struct device_node *node, if (ret) goto irq_unmap; - ret = clk_prepare_enable(timer.pclk); - if (ret) - goto irq_unmap; - - if (timer.mode & MCHP_PIT64B_MR_SGCLK) { - ret = clk_prepare_enable(timer.gclk); - if (ret) - goto pclk_unprepare; - + if (timer.mode & MCHP_PIT64B_MR_SGCLK) clk_rate = clk_get_rate(timer.gclk); - } else { + else clk_rate = clk_get_rate(timer.pclk); - } clk_rate = clk_rate / (MCHP_PIT64B_MODE_TO_PRES(timer.mode) + 1); if (clkevt) @@ -469,15 +464,10 @@ static int __init mchp_pit64b_dt_init_timer(struct device_node *node, ret = mchp_pit64b_init_clksrc(&timer, clk_rate); if (ret) - goto gclk_unprepare; + goto irq_unmap; return 0; -gclk_unprepare: - if (timer.mode & MCHP_PIT64B_MR_SGCLK) - clk_disable_unprepare(timer.gclk); -pclk_unprepare: - clk_disable_unprepare(timer.pclk); irq_unmap: irq_dispose_mapping(irq); io_unmap: diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c index bb6ea6c19829..94dc6e42e983 100644 --- a/drivers/clocksource/timer-sun4i.c +++ b/drivers/clocksource/timer-sun4i.c @@ -128,7 +128,7 @@ static void sun4i_timer_clear_interrupt(void __iomem *base) static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id) { - struct clock_event_device *evt = (struct clock_event_device *)dev_id; + struct clock_event_device *evt = dev_id; struct timer_of *to = to_timer_of(evt); sun4i_timer_clear_interrupt(timer_of_base(to)); diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 85900f7fc69f..7d5fa9069906 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -142,7 +142,7 @@ static int sun5i_clkevt_next_event(unsigned long evt, static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id) { - struct sun5i_timer_clkevt *ce = (struct sun5i_timer_clkevt *)dev_id; + struct sun5i_timer_clkevt *ce = dev_id; writel(0x1, ce->timer.base + TIMER_IRQ_ST_REG); ce->clkevt.event_handler(&ce->clkevt); diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c new file mode 100644 index 000000000000..ea742889ee06 --- /dev/null +++ b/drivers/clocksource/timer-tegra186.c @@ -0,0 +1,514 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020 NVIDIA Corporation. All rights reserved. 
+ */ + +#include <linux/clocksource.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/watchdog.h> + +/* shared registers */ +#define TKETSC0 0x000 +#define TKETSC1 0x004 +#define TKEUSEC 0x008 +#define TKEOSC 0x00c + +#define TKEIE(x) (0x100 + ((x) * 4)) +#define TKEIE_WDT_MASK(x, y) ((y) << (16 + 4 * (x))) + +/* timer registers */ +#define TMRCR 0x000 +#define TMRCR_ENABLE BIT(31) +#define TMRCR_PERIODIC BIT(30) +#define TMRCR_PTV(x) ((x) & 0x0fffffff) + +#define TMRSR 0x004 +#define TMRSR_INTR_CLR BIT(30) + +#define TMRCSSR 0x008 +#define TMRCSSR_SRC_USEC (0 << 0) + +/* watchdog registers */ +#define WDTCR 0x000 +#define WDTCR_SYSTEM_POR_RESET_ENABLE BIT(16) +#define WDTCR_SYSTEM_DEBUG_RESET_ENABLE BIT(15) +#define WDTCR_REMOTE_INT_ENABLE BIT(14) +#define WDTCR_LOCAL_FIQ_ENABLE BIT(13) +#define WDTCR_LOCAL_INT_ENABLE BIT(12) +#define WDTCR_PERIOD_MASK (0xff << 4) +#define WDTCR_PERIOD(x) (((x) & 0xff) << 4) +#define WDTCR_TIMER_SOURCE_MASK 0xf +#define WDTCR_TIMER_SOURCE(x) ((x) & 0xf) + +#define WDTCMDR 0x008 +#define WDTCMDR_DISABLE_COUNTER BIT(1) +#define WDTCMDR_START_COUNTER BIT(0) + +#define WDTUR 0x00c +#define WDTUR_UNLOCK_PATTERN 0x0000c45a + +struct tegra186_timer_soc { + unsigned int num_timers; + unsigned int num_wdts; +}; + +struct tegra186_tmr { + struct tegra186_timer *parent; + void __iomem *regs; + unsigned int index; + unsigned int hwirq; +}; + +struct tegra186_wdt { + struct watchdog_device base; + + void __iomem *regs; + unsigned int index; + bool locked; + + struct tegra186_tmr *tmr; +}; + +static inline struct tegra186_wdt *to_tegra186_wdt(struct watchdog_device *wdd) +{ + return container_of(wdd, struct tegra186_wdt, base); +} + +struct tegra186_timer { + const struct tegra186_timer_soc *soc; + struct device *dev; + void __iomem *regs; + + struct tegra186_wdt *wdt; + struct clocksource usec; + struct clocksource tsc; + struct clocksource osc; +}; + +static void tmr_writel(struct tegra186_tmr *tmr, u32 value, unsigned int offset) +{ + writel_relaxed(value, tmr->regs + offset); +} + +static void wdt_writel(struct tegra186_wdt *wdt, u32 value, unsigned int offset) +{ + writel_relaxed(value, wdt->regs + offset); +} + +static u32 wdt_readl(struct tegra186_wdt *wdt, unsigned int offset) +{ + return readl_relaxed(wdt->regs + offset); +} + +static struct tegra186_tmr *tegra186_tmr_create(struct tegra186_timer *tegra, + unsigned int index) +{ + unsigned int offset = 0x10000 + index * 0x10000; + struct tegra186_tmr *tmr; + + tmr = devm_kzalloc(tegra->dev, sizeof(*tmr), GFP_KERNEL); + if (!tmr) + return ERR_PTR(-ENOMEM); + + tmr->parent = tegra; + tmr->regs = tegra->regs + offset; + tmr->index = index; + tmr->hwirq = 0; + + return tmr; +} + +static const struct watchdog_info tegra186_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, + .identity = "NVIDIA Tegra186 WDT", +}; + +static void tegra186_wdt_disable(struct tegra186_wdt *wdt) +{ + /* unlock and disable the watchdog */ + wdt_writel(wdt, WDTUR_UNLOCK_PATTERN, WDTUR); + wdt_writel(wdt, WDTCMDR_DISABLE_COUNTER, WDTCMDR); + + /* disable timer */ + tmr_writel(wdt->tmr, 0, TMRCR); +} + +static void tegra186_wdt_enable(struct tegra186_wdt *wdt) +{ + struct tegra186_timer *tegra = wdt->tmr->parent; + u32 value; + + /* unmask hardware IRQ, this may have been lost across powergate */ + value = TKEIE_WDT_MASK(wdt->index, 1); + 
writel(value, tegra->regs + TKEIE(wdt->tmr->hwirq)); + + /* clear interrupt */ + tmr_writel(wdt->tmr, TMRSR_INTR_CLR, TMRSR); + + /* select microsecond source */ + tmr_writel(wdt->tmr, TMRCSSR_SRC_USEC, TMRCSSR); + + /* configure timer (system reset happens on the fifth expiration) */ + value = TMRCR_PTV(wdt->base.timeout * USEC_PER_SEC / 5) | + TMRCR_PERIODIC | TMRCR_ENABLE; + tmr_writel(wdt->tmr, value, TMRCR); + + if (!wdt->locked) { + value = wdt_readl(wdt, WDTCR); + + /* select the proper timer source */ + value &= ~WDTCR_TIMER_SOURCE_MASK; + value |= WDTCR_TIMER_SOURCE(wdt->tmr->index); + + /* single timer period since that's already configured */ + value &= ~WDTCR_PERIOD_MASK; + value |= WDTCR_PERIOD(1); + + /* enable local interrupt for WDT petting */ + value |= WDTCR_LOCAL_INT_ENABLE; + + /* enable local FIQ and remote interrupt for debug dump */ + if (0) + value |= WDTCR_REMOTE_INT_ENABLE | + WDTCR_LOCAL_FIQ_ENABLE; + + /* enable system debug reset (doesn't properly reboot) */ + if (0) + value |= WDTCR_SYSTEM_DEBUG_RESET_ENABLE; + + /* enable system POR reset */ + value |= WDTCR_SYSTEM_POR_RESET_ENABLE; + + wdt_writel(wdt, value, WDTCR); + } + + wdt_writel(wdt, WDTCMDR_START_COUNTER, WDTCMDR); +} + +static int tegra186_wdt_start(struct watchdog_device *wdd) +{ + struct tegra186_wdt *wdt = to_tegra186_wdt(wdd); + + tegra186_wdt_enable(wdt); + + return 0; +} + +static int tegra186_wdt_stop(struct watchdog_device *wdd) +{ + struct tegra186_wdt *wdt = to_tegra186_wdt(wdd); + + tegra186_wdt_disable(wdt); + + return 0; +} + +static int tegra186_wdt_ping(struct watchdog_device *wdd) +{ + struct tegra186_wdt *wdt = to_tegra186_wdt(wdd); + + tegra186_wdt_disable(wdt); + tegra186_wdt_enable(wdt); + + return 0; +} + +static int tegra186_wdt_set_timeout(struct watchdog_device *wdd, + unsigned int timeout) +{ + struct tegra186_wdt *wdt = to_tegra186_wdt(wdd); + + if (watchdog_active(&wdt->base)) + tegra186_wdt_disable(wdt); + + wdt->base.timeout = timeout; + + if (watchdog_active(&wdt->base)) + tegra186_wdt_enable(wdt); + + return 0; +} + +static const struct watchdog_ops tegra186_wdt_ops = { + .owner = THIS_MODULE, + .start = tegra186_wdt_start, + .stop = tegra186_wdt_stop, + .ping = tegra186_wdt_ping, + .set_timeout = tegra186_wdt_set_timeout, +}; + +static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra, + unsigned int index) +{ + unsigned int offset = 0x10000, source; + struct tegra186_wdt *wdt; + u32 value; + int err; + + offset += tegra->soc->num_timers * 0x10000 + index * 0x10000; + + wdt = devm_kzalloc(tegra->dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return ERR_PTR(-ENOMEM); + + wdt->regs = tegra->regs + offset; + wdt->index = index; + + /* read the watchdog configuration since it might be locked down */ + value = wdt_readl(wdt, WDTCR); + + if (value & WDTCR_LOCAL_INT_ENABLE) + wdt->locked = true; + + source = value & WDTCR_TIMER_SOURCE_MASK; + + wdt->tmr = tegra186_tmr_create(tegra, source); + if (IS_ERR(wdt->tmr)) + return ERR_CAST(wdt->tmr); + + wdt->base.info = &tegra186_wdt_info; + wdt->base.ops = &tegra186_wdt_ops; + wdt->base.min_timeout = 1; + wdt->base.max_timeout = 255; + wdt->base.parent = tegra->dev; + + err = watchdog_init_timeout(&wdt->base, 5, tegra->dev); + if (err < 0) { + dev_err(tegra->dev, "failed to initialize timeout: %d\n", err); + return ERR_PTR(err); + } + + err = devm_watchdog_register_device(tegra->dev, &wdt->base); + if (err < 0) { + dev_err(tegra->dev, "failed to register WDT: %d\n", err); + return ERR_PTR(err); + } + + return 
wdt; +} + +static u64 tegra186_timer_tsc_read(struct clocksource *cs) +{ + struct tegra186_timer *tegra = container_of(cs, struct tegra186_timer, + tsc); + u32 hi, lo, ss; + + hi = readl_relaxed(tegra->regs + TKETSC1); + + /* + * The 56-bit value of the TSC is spread across two registers that are + * not synchronized. In order to read them atomically, ensure that the + * high 24 bits match before and after reading the low 32 bits. + */ + do { + /* snapshot the high 24 bits */ + ss = hi; + + lo = readl_relaxed(tegra->regs + TKETSC0); + hi = readl_relaxed(tegra->regs + TKETSC1); + } while (hi != ss); + + return (u64)hi << 32 | lo; +} + +static int tegra186_timer_tsc_init(struct tegra186_timer *tegra) +{ + tegra->tsc.name = "tsc"; + tegra->tsc.rating = 300; + tegra->tsc.read = tegra186_timer_tsc_read; + tegra->tsc.mask = CLOCKSOURCE_MASK(56); + tegra->tsc.flags = CLOCK_SOURCE_IS_CONTINUOUS; + + return clocksource_register_hz(&tegra->tsc, 31250000); +} + +static u64 tegra186_timer_osc_read(struct clocksource *cs) +{ + struct tegra186_timer *tegra = container_of(cs, struct tegra186_timer, + osc); + + return readl_relaxed(tegra->regs + TKEOSC); +} + +static int tegra186_timer_osc_init(struct tegra186_timer *tegra) +{ + tegra->osc.name = "osc"; + tegra->osc.rating = 300; + tegra->osc.read = tegra186_timer_osc_read; + tegra->osc.mask = CLOCKSOURCE_MASK(32); + tegra->osc.flags = CLOCK_SOURCE_IS_CONTINUOUS; + + return clocksource_register_hz(&tegra->osc, 38400000); +} + +static u64 tegra186_timer_usec_read(struct clocksource *cs) +{ + struct tegra186_timer *tegra = container_of(cs, struct tegra186_timer, + usec); + + return readl_relaxed(tegra->regs + TKEUSEC); +} + +static int tegra186_timer_usec_init(struct tegra186_timer *tegra) +{ + tegra->usec.name = "usec"; + tegra->usec.rating = 300; + tegra->usec.read = tegra186_timer_usec_read; + tegra->usec.mask = CLOCKSOURCE_MASK(32); + tegra->usec.flags = CLOCK_SOURCE_IS_CONTINUOUS; + + return clocksource_register_hz(&tegra->usec, USEC_PER_SEC); +} + +static irqreturn_t tegra186_timer_irq(int irq, void *data) +{ + struct tegra186_timer *tegra = data; + + if (watchdog_active(&tegra->wdt->base)) { + tegra186_wdt_disable(tegra->wdt); + tegra186_wdt_enable(tegra->wdt); + } + + return IRQ_HANDLED; +} + +static int tegra186_timer_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct tegra186_timer *tegra; + unsigned int irq; + int err; + + tegra = devm_kzalloc(dev, sizeof(*tegra), GFP_KERNEL); + if (!tegra) + return -ENOMEM; + + tegra->soc = of_device_get_match_data(dev); + dev_set_drvdata(dev, tegra); + tegra->dev = dev; + + tegra->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(tegra->regs)) + return PTR_ERR(tegra->regs); + + err = platform_get_irq(pdev, 0); + if (err < 0) + return err; + + irq = err; + + /* create a watchdog using a preconfigured timer */ + tegra->wdt = tegra186_wdt_create(tegra, 0); + if (IS_ERR(tegra->wdt)) { + err = PTR_ERR(tegra->wdt); + dev_err(dev, "failed to create WDT: %d\n", err); + return err; + } + + err = tegra186_timer_tsc_init(tegra); + if (err < 0) { + dev_err(dev, "failed to register TSC counter: %d\n", err); + return err; + } + + err = tegra186_timer_osc_init(tegra); + if (err < 0) { + dev_err(dev, "failed to register OSC counter: %d\n", err); + goto unregister_tsc; + } + + err = tegra186_timer_usec_init(tegra); + if (err < 0) { + dev_err(dev, "failed to register USEC counter: %d\n", err); + goto unregister_osc; + } + + err = devm_request_irq(dev, irq, tegra186_timer_irq, 0, + 
"tegra186-timer", tegra); + if (err < 0) { + dev_err(dev, "failed to request IRQ#%u: %d\n", irq, err); + goto unregister_usec; + } + + return 0; + +unregister_usec: + clocksource_unregister(&tegra->usec); +unregister_osc: + clocksource_unregister(&tegra->osc); +unregister_tsc: + clocksource_unregister(&tegra->tsc); + return err; +} + +static int tegra186_timer_remove(struct platform_device *pdev) +{ + struct tegra186_timer *tegra = platform_get_drvdata(pdev); + + clocksource_unregister(&tegra->usec); + clocksource_unregister(&tegra->osc); + clocksource_unregister(&tegra->tsc); + + return 0; +} + +static int __maybe_unused tegra186_timer_suspend(struct device *dev) +{ + struct tegra186_timer *tegra = dev_get_drvdata(dev); + + if (watchdog_active(&tegra->wdt->base)) + tegra186_wdt_disable(tegra->wdt); + + return 0; +} + +static int __maybe_unused tegra186_timer_resume(struct device *dev) +{ + struct tegra186_timer *tegra = dev_get_drvdata(dev); + + if (watchdog_active(&tegra->wdt->base)) + tegra186_wdt_enable(tegra->wdt); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(tegra186_timer_pm_ops, tegra186_timer_suspend, + tegra186_timer_resume); + +static const struct tegra186_timer_soc tegra186_timer = { + .num_timers = 10, + .num_wdts = 3, +}; + +static const struct tegra186_timer_soc tegra234_timer = { + .num_timers = 16, + .num_wdts = 3, +}; + +static const struct of_device_id tegra186_timer_of_match[] = { + { .compatible = "nvidia,tegra186-timer", .data = &tegra186_timer }, + { .compatible = "nvidia,tegra234-timer", .data = &tegra234_timer }, + { } +}; +MODULE_DEVICE_TABLE(of, tegra186_timer_of_match); + +static struct platform_driver tegra186_wdt_driver = { + .driver = { + .name = "tegra186-timer", + .pm = &tegra186_timer_pm_ops, + .of_match_table = tegra186_timer_of_match, + }, + .probe = tegra186_timer_probe, + .remove = tegra186_timer_remove, +}; +module_platform_driver(tegra186_wdt_driver); + +MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>"); +MODULE_DESCRIPTION("NVIDIA Tegra186 timers driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index c194e8f74e1d..469f7c91564b 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -44,6 +44,121 @@ enum { REQUEST_BY_NODE, }; +static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg, + int posted) +{ + if (posted) + while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) + cpu_relax(); + + return readl_relaxed(timer->func_base + (reg & 0xff)); +} + +static inline void __omap_dm_timer_write(struct omap_dm_timer *timer, + u32 reg, u32 val, int posted) +{ + if (posted) + while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) + cpu_relax(); + + writel_relaxed(val, timer->func_base + (reg & 0xff)); +} + +static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer) +{ + u32 tidr; + + /* Assume v1 ip if bits [31:16] are zero */ + tidr = readl_relaxed(timer->io_base); + if (!(tidr >> 16)) { + timer->revision = 1; + timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET; + timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; + timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; + timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET; + timer->func_base = timer->io_base; + } else { + timer->revision = 2; + timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS; + timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET; + timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR; 
+ timer->pend = timer->io_base + + _OMAP_TIMER_WRITE_PEND_OFFSET + + OMAP_TIMER_V2_FUNC_OFFSET; + timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET; + } +} + +/* + * __omap_dm_timer_enable_posted - enables write posted mode + * @timer: pointer to timer instance handle + * + * Enables the write posted mode for the timer. When posted mode is enabled + * writes to certain timer registers are immediately acknowledged by the + * internal bus and hence prevents stalling the CPU waiting for the write to + * complete. Enabling this feature can improve performance for writing to the + * timer registers. + */ +static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer) +{ + if (timer->posted) + return; + + if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) { + timer->posted = OMAP_TIMER_NONPOSTED; + __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0); + return; + } + + __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, + OMAP_TIMER_CTRL_POSTED, 0); + timer->context.tsicr = OMAP_TIMER_CTRL_POSTED; + timer->posted = OMAP_TIMER_POSTED; +} + +static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer, + int posted, unsigned long rate) +{ + u32 l; + + l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); + if (l & OMAP_TIMER_CTRL_ST) { + l &= ~0x1; + __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted); +#ifdef CONFIG_ARCH_OMAP2PLUS + /* Readback to make sure write has completed */ + __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); + /* + * Wait for functional clock period x 3.5 to make sure that + * timer is stopped + */ + udelay(3500000 / rate + 1); +#endif + } + + /* Ack possibly pending interrupt */ + writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat); +} + +static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer, + unsigned int value) +{ + writel_relaxed(value, timer->irq_ena); + __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0); +} + +static inline unsigned int +__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted) +{ + return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted); +} + +static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer, + unsigned int value) +{ + writel_relaxed(value, timer->irq_stat); +} + /** * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode * @timer: timer pointer over which read operation to perform @@ -921,6 +1036,10 @@ static const struct dmtimer_platform_data omap3plus_pdata = { .timer_ops = &dmtimer_ops, }; +static const struct dmtimer_platform_data am6_pdata = { + .timer_ops = &dmtimer_ops, +}; + static const struct of_device_id omap_timer_match[] = { { .compatible = "ti,omap2420-timer", @@ -949,6 +1068,10 @@ static const struct of_device_id omap_timer_match[] = { .compatible = "ti,dm816-timer", .data = &omap3plus_pdata, }, + { + .compatible = "ti,am654-timer", + .data = &am6_pdata, + }, {}, }; MODULE_DEVICE_TABLE(of, omap_timer_match); diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index c3038cdc6865..2a84fc63371e 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -268,7 +268,7 @@ config LOONGSON2_CPUFREQ This option adds a CPUFreq driver for loongson processors which support software configurable cpu frequency. - Loongson2F and it's successors support this feature. + Loongson2F and its successors support this feature. If in doubt, say N. 
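The __omap_dm_timer_read()/__omap_dm_timer_write() helpers above show the posted-write handshake: before touching a timer register, the code spins until the hardware clears that register's bit in the write-pending register. A minimal stand-alone C sketch of the same idea follows; struct fake_timer, bus_retire() and posted_write() are illustrative stand-ins invented for this sketch, not kernel API:

#include <stdint.h>
#include <stdio.h>

struct fake_timer {
	uint32_t pend;		/* one pending bit per register index */
	uint32_t regs[4];
};

/* Stand-in for the bus acknowledging (retiring) a posted write. */
static void bus_retire(struct fake_timer *t, unsigned int reg)
{
	t->pend &= ~(1u << reg);
}

/* Write only once any earlier posted write to this register has retired. */
static void posted_write(struct fake_timer *t, unsigned int reg, uint32_t val)
{
	while (t->pend & (1u << reg))
		;	/* the kernel helper uses cpu_relax() here */

	t->regs[reg] = val;
	t->pend |= 1u << reg;	/* this write is now in flight */
}

int main(void)
{
	struct fake_timer t = { 0 };

	posted_write(&t, 2, 0xdeadbeef);
	bus_retire(&t, 2);		/* hardware retires the first write */
	posted_write(&t, 2, 0x1);	/* does not spin: bit 2 is clear again */

	printf("reg[2] = 0x%x\n", t.regs[2]);
	return 0;
}

In the kernel helpers each register constant carries its pending-bit mask in its upper bits (reg >> WPSHIFT) and the busy-wait uses cpu_relax(); the sketch keeps one bit per register index for clarity.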
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 3d514b82d055..1bb2b90ebb21 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -78,6 +78,8 @@ static bool boost_state(unsigned int cpu) switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: + case X86_VENDOR_CENTAUR: + case X86_VENDOR_ZHAOXIN: rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi); msr = lo | ((u64)hi << 32); return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); @@ -97,6 +99,8 @@ static int boost_set_msr(bool enable) switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: + case X86_VENDOR_CENTAUR: + case X86_VENDOR_ZHAOXIN: msr_addr = MSR_IA32_MISC_ENABLE; msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE; break; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 2cad42774164..954eef26685f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -843,12 +843,14 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf) unsigned int cpu; for_each_cpu(cpu, mask) { - if (i) - i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); - i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); + i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu); if (i >= (PAGE_SIZE - 5)) break; } + + /* Remove the extra space at the end */ + i--; + i += sprintf(&buf[i], "\n"); return i; } @@ -971,21 +973,10 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, if (!fattr->store) return -EIO; - /* - * cpus_read_trylock() is used here to work around a circular lock - * dependency problem with respect to the cpufreq_register_driver(). - */ - if (!cpus_read_trylock()) - return -EBUSY; - - if (cpu_online(policy->cpu)) { - down_write(&policy->rwsem); - if (likely(!policy_is_inactive(policy))) - ret = fattr->store(policy, buf, count); - up_write(&policy->rwsem); - } - - cpus_read_unlock(); + down_write(&policy->rwsem); + if (likely(!policy_is_inactive(policy))) + ret = fattr->store(policy, buf, count); + up_write(&policy->rwsem); return ret; } @@ -1282,6 +1273,13 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy) unsigned long flags; int cpu; + /* + * The callers must ensure the policy is inactive by now, to avoid any + * races with show()/store() callbacks. 
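+ * Otherwise a show() or store() still in flight could dereference the + * policy after its memory is freed below.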
+ */ + if (unlikely(!policy_is_inactive(policy))) + pr_warn("%s: Freeing active policy\n", __func__); + /* Remove policy from list */ write_lock_irqsave(&cpufreq_driver_lock, flags); list_del(&policy->policy_list); @@ -1536,8 +1534,6 @@ out_destroy_policy: for_each_cpu(j, policy->real_cpus) remove_cpu_dev_symlink(policy, j, get_cpu_device(j)); - cpumask_clear(policy->cpus); - out_offline_policy: if (cpufreq_driver->offline) cpufreq_driver->offline(policy); @@ -1547,6 +1543,7 @@ out_exit_policy: cpufreq_driver->exit(policy); out_free_policy: + cpumask_clear(policy->cpus); up_write(&policy->rwsem); cpufreq_policy_free(policy); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index e8fbf970ff07..c52d19d67557 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -416,10 +416,13 @@ static struct dbs_governor od_dbs_gov = { static void od_set_powersave_bias(unsigned int powersave_bias) { unsigned int cpu; - cpumask_t done; + cpumask_var_t done; + + if (!alloc_cpumask_var(&done, GFP_KERNEL)) + return; default_powersave_bias = powersave_bias; - cpumask_clear(&done); + cpumask_clear(done); cpus_read_lock(); for_each_online_cpu(cpu) { @@ -428,7 +431,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias) struct dbs_data *dbs_data; struct od_dbs_tuners *od_tuners; - if (cpumask_test_cpu(cpu, &done)) + if (cpumask_test_cpu(cpu, done)) continue; policy = cpufreq_cpu_get_raw(cpu); @@ -439,13 +442,15 @@ static void od_set_powersave_bias(unsigned int powersave_bias) if (!policy_dbs) continue; - cpumask_or(&done, &done, policy->cpus); + cpumask_or(done, done, policy->cpus); dbs_data = policy_dbs->dbs_data; od_tuners = dbs_data->tuners; od_tuners->powersave_bias = default_powersave_bias; } cpus_read_unlock(); + + free_cpumask_var(done); } void od_register_powersave_bias_handler(unsigned int (*f) diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c index 813cccbfe934..f0e0a35c7f21 100644 --- a/drivers/cpufreq/mediatek-cpufreq-hw.c +++ b/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -51,7 +51,7 @@ static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = { }; static int __maybe_unused -mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *mW, +mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW, unsigned long *KHz) { struct mtk_cpufreq_data *data; @@ -71,8 +71,9 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *mW, i--; *KHz = data->table[i].frequency; - *mW = readl_relaxed(data->reg_bases[REG_EM_POWER_TBL] + - i * LUT_ROW_SIZE) / 1000; + /* Provide micro-Watts value to the Energy Model */ + *uW = readl_relaxed(data->reg_bases[REG_EM_POWER_TBL] + + i * LUT_ROW_SIZE); return 0; } diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 6d2a4cf46db7..513a071845c2 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -19,6 +19,7 @@ #include <linux/slab.h> #include <linux/scmi_protocol.h> #include <linux/types.h> +#include <linux/units.h> struct scmi_data { int domain_id; @@ -99,6 +100,7 @@ static int __maybe_unused scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power, unsigned long *KHz) { + enum scmi_power_scale power_scale = perf_ops->power_scale_get(ph); unsigned long Hz; int ret, domain; @@ -112,6 +114,10 @@ scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power, if (ret) return ret; + /* Convert the power to uW if it is mW (ignore bogoW) */ + if (power_scale == 
SCMI_POWER_MILLIWATTS) + *power *= MICROWATT_PER_MILLIWATT; + /* The EM framework specifies the frequency in KHz. */ *KHz = Hz / 1000; @@ -249,8 +255,9 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy) static void scmi_cpufreq_register_em(struct cpufreq_policy *policy) { struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power); - bool power_scale_mw = perf_ops->power_scale_mw_get(ph); + enum scmi_power_scale power_scale = perf_ops->power_scale_get(ph); struct scmi_data *priv = policy->driver_data; + bool em_power_scale = false; /* * This callback will be called for each policy, but we don't need to @@ -262,9 +269,13 @@ static void scmi_cpufreq_register_em(struct cpufreq_policy *policy) if (!priv->nr_opp) return; + if (power_scale == SCMI_POWER_MILLIWATTS + || power_scale == SCMI_POWER_MICROWATTS) + em_power_scale = true; + em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp, &em_cb, priv->opp_shared_cpus, - power_scale_mw); + em_power_scale); } static struct cpufreq_driver scmi_cpufreq_driver = { diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index be7f512109f7..747aa537389b 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -3,7 +3,8 @@ # ARM CPU Idle drivers # config ARM_CPUIDLE - bool "Generic ARM/ARM64 CPU idle Driver" + bool "Generic ARM CPU idle Driver" + depends on ARM select DT_IDLE_STATES select CPU_IDLE_MULTIPLE_DRIVERS help diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index 540105ca0781..57bc3e3ae391 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -69,12 +69,12 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev, return -1; /* Do runtime PM to manage a hierarchical CPU topology. */ - rcu_irq_enter_irqson(); + ct_irq_enter_irqson(); if (s2idle) dev_pm_genpd_suspend(pd_dev); else pm_runtime_put_sync_suspend(pd_dev); - rcu_irq_exit_irqson(); + ct_irq_exit_irqson(); state = psci_get_domain_state(); if (!state) @@ -82,12 +82,12 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev, ret = psci_cpu_suspend_enter(state) ? -1 : idx; - rcu_irq_enter_irqson(); + ct_irq_enter_irqson(); if (s2idle) dev_pm_genpd_resume(pd_dev); else pm_runtime_get_sync(pd_dev); - rcu_irq_exit_irqson(); + ct_irq_exit_irqson(); cpu_pm_exit(); diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c index 1151e5e2ba82..862a2876f1c9 100644 --- a/drivers/cpuidle/cpuidle-riscv-sbi.c +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c @@ -116,12 +116,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev, return -1; /* Do runtime PM to manage a hierarchical CPU topology. */ - rcu_irq_enter_irqson(); + ct_irq_enter_irqson(); if (s2idle) dev_pm_genpd_suspend(pd_dev); else pm_runtime_put_sync_suspend(pd_dev); - rcu_irq_exit_irqson(); + ct_irq_exit_irqson(); if (sbi_is_domain_state_available()) state = sbi_get_domain_state(); @@ -130,12 +130,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev, ret = sbi_suspend(state) ?
-1 : idx; - rcu_irq_enter_irqson(); + ct_irq_enter_irqson(); if (s2idle) dev_pm_genpd_resume(pd_dev); else pm_runtime_get_sync(pd_dev); - rcu_irq_exit_irqson(); + ct_irq_exit_irqson(); cpu_pm_exit(); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index ef2ea1b12cd8..62dd956025f3 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -23,6 +23,7 @@ #include <linux/suspend.h> #include <linux/tick.h> #include <linux/mmu_context.h> +#include <linux/context_tracking.h> #include <trace/events/power.h> #include "cpuidle.h" @@ -150,12 +151,12 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv, */ stop_critical_timings(); if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) - rcu_idle_enter(); + ct_idle_enter(); target_state->enter_s2idle(dev, drv, index); if (WARN_ON_ONCE(!irqs_disabled())) local_irq_disable(); if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) - rcu_idle_exit(); + ct_idle_exit(); tick_unfreeze(); start_critical_timings(); @@ -233,10 +234,10 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, stop_critical_timings(); if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) - rcu_idle_enter(); + ct_idle_enter(); entered_state = target_state->enter(dev, drv, index); if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) - rcu_idle_exit(); + ct_idle_exit(); start_critical_timings(); sched_clock_idle_wakeup_event(); diff --git a/drivers/cpuidle/governors/haltpoll.c b/drivers/cpuidle/governors/haltpoll.c index cb2a96eafc02..1dff3a52917d 100644 --- a/drivers/cpuidle/governors/haltpoll.c +++ b/drivers/cpuidle/governors/haltpoll.c @@ -19,6 +19,7 @@ #include <linux/sched.h> #include <linux/module.h> #include <linux/kvm_para.h> +#include <trace/events/power.h> static unsigned int guest_halt_poll_ns __read_mostly = 200000; module_param(guest_halt_poll_ns, uint, 0644); @@ -90,6 +91,7 @@ static void adjust_poll_limit(struct cpuidle_device *dev, u64 block_ns) if (val > guest_halt_poll_ns) val = guest_halt_poll_ns; + trace_guest_halt_poll_ns_grow(val, dev->poll_limit_ns); dev->poll_limit_ns = val; } else if (block_ns > guest_halt_poll_ns && guest_halt_poll_allow_shrink) { @@ -100,6 +102,7 @@ static void adjust_poll_limit(struct cpuidle_device *dev, u64 block_ns) val = 0; else val /= shrink; + trace_guest_halt_poll_ns_shrink(val, dev->poll_limit_ns); dev->poll_limit_ns = val; } } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 5bb950182026..910d6751644c 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -170,6 +170,7 @@ dma_iv_error: while (i >= 0) { dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); memzero_explicit(sf->iv[i], ivsize); + i--; } return err; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 98593a0cff69..ac2329e2b0e5 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -528,25 +528,33 @@ static int allocate_flows(struct sun8i_ss_dev *ss) ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); - if (!ss->flows[i].biv) + if (!ss->flows[i].biv) { + err = -ENOMEM; goto error_engine; + } for (j = 0; j < MAX_SG; j++) { ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); - if (!ss->flows[i].iv[j]) + if (!ss->flows[i].iv[j]) { + err = -ENOMEM; goto error_engine; 
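+ /* err must be set explicitly on each failure above: the error path returns err as-is, so a stale 0 would be reported as success. */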
+ } } /* the padding could be up to two blocks. */ ss->flows[i].pad = devm_kmalloc(ss->dev, MAX_PAD_SIZE, GFP_KERNEL | GFP_DMA); - if (!ss->flows[i].pad) + if (!ss->flows[i].pad) { + err = -ENOMEM; goto error_engine; + } ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE, GFP_KERNEL | GFP_DMA); - if (!ss->flows[i].result) + if (!ss->flows[i].result) { + err = -ENOMEM; goto error_engine; + } ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true); if (!ss->flows[i].engine) { diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index ac417a6b39e5..36a82b22953c 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -30,8 +30,8 @@ static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key, int ret = 0; xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK); - if (!xtfm) - return -ENOMEM; + if (IS_ERR(xtfm)) + return PTR_ERR(xtfm); len = sizeof(*sdesc) + crypto_shash_descsize(xtfm); sdesc = kmalloc(len, GFP_KERNEL); @@ -586,7 +586,8 @@ retry: rctx->t_dst[k + 1].len = rctx->t_dst[k].len; } addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE); - if (dma_mapping_error(ss->dev, addr_xpad)) { + err = dma_mapping_error(ss->dev, addr_xpad); + if (err) { dev_err(ss->dev, "Fail to create DMA mapping of ipad\n"); goto err_dma_xpad; } @@ -612,7 +613,8 @@ retry: goto err_dma_result; } addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE); - if (dma_mapping_error(ss->dev, addr_xpad)) { + err = dma_mapping_error(ss->dev, addr_xpad); + if (err) { dev_err(ss->dev, "Fail to create DMA mapping of opad\n"); goto err_dma_xpad; } diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 8278d98074e9..280f4b0e7133 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -1378,6 +1378,7 @@ static int crypto4xx_probe(struct platform_device *ofdev) struct resource res; struct device *dev = &ofdev->dev; struct crypto4xx_core_device *core_dev; + struct device_node *np; u32 pvr; bool is_revb = true; @@ -1385,29 +1386,36 @@ static int crypto4xx_probe(struct platform_device *ofdev) if (rc) return -ENODEV; - if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) { + np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto"); + if (np) { mtdcri(SDR0, PPC460EX_SDR0_SRST, mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET); mtdcri(SDR0, PPC460EX_SDR0_SRST, mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET); - } else if (of_find_compatible_node(NULL, NULL, - "amcc,ppc405ex-crypto")) { - mtdcri(SDR0, PPC405EX_SDR0_SRST, - mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET); - mtdcri(SDR0, PPC405EX_SDR0_SRST, - mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET); - is_revb = false; - } else if (of_find_compatible_node(NULL, NULL, - "amcc,ppc460sx-crypto")) { - mtdcri(SDR0, PPC460SX_SDR0_SRST, - mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET); - mtdcri(SDR0, PPC460SX_SDR0_SRST, - mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET); } else { - printk(KERN_ERR "Crypto Function Not supported!\n"); - return -EINVAL; + np = of_find_compatible_node(NULL, NULL, "amcc,ppc405ex-crypto"); + if (np) { + mtdcri(SDR0, PPC405EX_SDR0_SRST, + mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET); + mtdcri(SDR0, PPC405EX_SDR0_SRST, + mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET); + is_revb = false; + } else { + np =
of_find_compatible_node(NULL, NULL, "amcc,ppc460sx-crypto"); + if (np) { + mtdcri(SDR0, PPC460SX_SDR0_SRST, + mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET); + mtdcri(SDR0, PPC460SX_SDR0_SRST, + mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET); + } else { + printk(KERN_ERR "Crypto Function Not supported!\n"); + return -EINVAL; + } + } } + of_node_put(np); + core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL); if (!core_dev) return -ENOMEM; diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index f72c6b3e4ad8..886bf258544c 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -2669,8 +2669,7 @@ static int atmel_aes_remove(struct platform_device *pdev) struct atmel_aes_dev *aes_dd; aes_dd = platform_get_drvdata(pdev); - if (!aes_dd) - return -ENODEV; + spin_lock(&atmel_aes.lock); list_del(&aes_dd->list); spin_unlock(&atmel_aes.lock); diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c index 59a57279e77b..a4b13d326cfc 100644 --- a/drivers/crypto/atmel-ecc.c +++ b/drivers/crypto/atmel-ecc.c @@ -349,8 +349,16 @@ static int atmel_ecc_remove(struct i2c_client *client) /* Return EBUSY if i2c client already allocated. */ if (atomic_read(&i2c_priv->tfm_count)) { - dev_err(&client->dev, "Device is busy\n"); - return -EBUSY; + /* + * After we return here, the memory backing the device is freed. + * That happens no matter what the return value of this function + * is because in the Linux device model there is no error + * handling for unbinding a driver. + * If there is still some action pending, it probably involves + * accessing the freed memory. + */ + dev_emerg(&client->dev, "Device is busy, expect memory corruption.\n"); + return 0; } crypto_unregister_kpp(&atmel_ecdh_nist_p256); diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index d1628112dacc..ca4b01926d1b 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2666,11 +2666,8 @@ err_tasklet_kill: static int atmel_sha_remove(struct platform_device *pdev) { - struct atmel_sha_dev *sha_dd; + struct atmel_sha_dev *sha_dd = platform_get_drvdata(pdev); - sha_dd = platform_get_drvdata(pdev); - if (!sha_dd) - return -ENODEV; spin_lock(&atmel_sha.lock); list_del(&sha_dd->list); spin_unlock(&atmel_sha.lock); diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 9fd7b8e439d2..8b7bc1076e0d 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -1263,11 +1263,8 @@ err_tasklet_kill: static int atmel_tdes_remove(struct platform_device *pdev) { - struct atmel_tdes_dev *tdes_dd; + struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev); - tdes_dd = platform_get_drvdata(pdev); - if (!tdes_dd) - return -ENODEV; spin_lock(&atmel_tdes.lock); list_del(&tdes_dd->list); spin_unlock(&atmel_tdes.lock); diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 6753f0e6e55d..4482cb145d05 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -29,7 +29,7 @@ SHA512_DIGEST_SIZE * 2) /* - * This is a a cache of buffers, from which the users of CAAM QI driver + * This is a cache of buffers, from which the users of CAAM QI driver * can allocate short buffers. It's speedier than doing kmalloc on the hotpath. * NOTE: A more elegant solution would be to have some headroom in the frames * being processed. This can be added by the dpaa2-eth driver. 
This would @@ -5083,8 +5083,9 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) ppriv->net_dev.dev = *dev; INIT_LIST_HEAD(&ppriv->net_dev.napi_list); - netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll, - DPAA2_CAAM_NAPI_WEIGHT); + netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi, + dpaa2_dpseci_poll, + DPAA2_CAAM_NAPI_WEIGHT); } return 0; diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c index 78383d77da99..619564509936 100644 --- a/drivers/crypto/caam/caamhash_desc.c +++ b/drivers/crypto/caam/caamhash_desc.c @@ -22,7 +22,7 @@ * @ctx_len: size of Context Register * @import_ctx: true if previous Context Register needs to be restored * must be true for ahash update and final - * must be false for for ahash first and digest + * must be false for ahash first and digest * @era: SEC Era */ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 8163f5df8ebf..c36f27376d7e 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -75,7 +75,7 @@ bool caam_congested __read_mostly; EXPORT_SYMBOL(caam_congested); /* - * This is a a cache of buffers, from which the users of CAAM QI driver + * This is a cache of buffers, from which the users of CAAM QI driver * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than * doing malloc on the hotpath. * NOTE: A more elegant solution would be to have some headroom in the frames @@ -749,8 +749,8 @@ int caam_qi_init(struct platform_device *caam_pdev) net_dev->dev = *qidev; INIT_LIST_HEAD(&net_dev->napi_list); - netif_napi_add(net_dev, irqtask, caam_qi_poll, - CAAM_NAPI_WEIGHT); + netif_napi_add_tx_weight(net_dev, irqtask, caam_qi_poll, + CAAM_NAPI_WEIGHT); napi_enable(irqtask); } diff --git a/drivers/crypto/cavium/cpt/cpt_hw_types.h b/drivers/crypto/cavium/cpt/cpt_hw_types.h index 96bc963bb804..8ec6edc69f3f 100644 --- a/drivers/crypto/cavium/cpt/cpt_hw_types.h +++ b/drivers/crypto/cavium/cpt/cpt_hw_types.h @@ -265,7 +265,7 @@ union cptx_pf_exe_bist_status { * big-endian format in memory. * iqb_ldwb:1 [7:7](R/W) Instruction load don't write back. * 0 = The hardware issues NCB transient load (LDT) towards the cache, - * which if the line hits and is is dirty will cause the line to be + * which if the line hits and it is dirty will cause the line to be * written back before being replaced. * 1 = The hardware issues NCB LDWB read-and-invalidate command towards * the cache when fetching the last word of instructions; as a result the diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index a5d9123a22ea..83350e2d9821 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -366,7 +366,7 @@ struct ccp_device { /* Master lists that all cmds are queued on. Because there can be * more than one CCP command queue that can process a cmd a separate - * backlog list is neeeded so that the backlog completion call + * backlog list is needed so that the backlog completion call * completes before the cmd is available for execution. 
*/ spinlock_t cmd_lock ____cacheline_aligned; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 799b476fc3e8..9f588c9728f8 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -503,7 +503,7 @@ static int __sev_platform_shutdown_locked(int *error) struct sev_device *sev = psp_master->sev_data; int ret; - if (sev->state == SEV_STATE_UNINIT) + if (!sev || sev->state == SEV_STATE_UNINIT) return 0; ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); @@ -577,6 +577,8 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) struct sev_user_data_status data; int ret; + memset(&data, 0, sizeof(data)); + ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error); if (ret) return ret; @@ -630,7 +632,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) if (input.length > SEV_FW_BLOB_MAX_SIZE) return -EFAULT; - blob = kmalloc(input.length, GFP_KERNEL); + blob = kzalloc(input.length, GFP_KERNEL); if (!blob) return -ENOMEM; @@ -854,7 +856,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) input_address = (void __user *)input.address; if (input.address && input.length) { - id_blob = kmalloc(input.length, GFP_KERNEL); + id_blob = kzalloc(input.length, GFP_KERNEL); if (!id_blob) return -ENOMEM; @@ -973,14 +975,14 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) return -EFAULT; - pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL); + pdh_blob = kzalloc(input.pdh_cert_len, GFP_KERNEL); if (!pdh_blob) return -ENOMEM; data.pdh_cert_address = __psp_pa(pdh_blob); data.pdh_cert_len = input.pdh_cert_len; - cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL); + cert_blob = kzalloc(input.cert_chain_len, GFP_KERNEL); if (!cert_blob) { ret = -ENOMEM; goto e_free_pdh; diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index b5970ae54d0e..792d6da7f0c0 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -429,6 +429,12 @@ static const struct sp_dev_vdata dev_vdata[] = { .psp_vdata = &pspv2, #endif }, + { /* 6 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &pspv3, +#endif + }, }; static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] }, @@ -438,6 +444,7 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] }, { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] }, { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] }, + { PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] }, /* Last entry must be zero */ { 0, } }; diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 7d1bee86d581..cadead18b59e 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -372,17 +372,10 @@ static int init_cc_resources(struct platform_device *plat_dev) dev->dma_mask = &dev->coherent_dma_mask; dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN); - while (dma_mask > 0x7fffffffUL) { - if (dma_supported(dev, dma_mask)) { - rc = dma_set_coherent_mask(dev, dma_mask); - if (!rc) - break; - } - dma_mask >>= 1; - } - + rc = dma_set_coherent_mask(dev, dma_mask); if (rc) { - dev_err(dev, "Failed in dma_set_mask, mask=%llx\n", dma_mask); + dev_err(dev, "Failed in dma_set_coherent_mask, mask=%llx\n", + dma_mask); return rc; } diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index 
d5421b0c6831..6124fbbbed94 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c @@ -41,6 +41,7 @@ static int cc_pm_resume(struct device *dev) /* wait for Cryptocell reset completion */ if (!cc_wait_for_reset_completion(drvdata)) { dev_err(dev, "Cryptocell reset not completed"); + clk_disable_unprepare(drvdata->clk); return -EBUSY; } @@ -48,6 +49,7 @@ static int cc_pm_resume(struct device *dev) rc = init_cc_regs(drvdata); if (rc) { dev_err(dev, "init_cc_regs (%x)\n", rc); + clk_disable_unprepare(drvdata->clk); return rc; } /* check if tee fips error occurred during power down */ diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 97d54c1465c2..3ba6f15deafc 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -252,7 +252,7 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req, if (unlikely(shift < 0)) return -EINVAL; - ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL); + ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC); if (unlikely(!ptr)) return -ENOMEM; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index b4ca2eb034d7..ad83c194d664 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -877,13 +877,6 @@ static void qm_pm_put_sync(struct hisi_qm *qm) pm_runtime_put_autosuspend(dev); } -static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe) -{ - u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; - - return &qm->qp_array[cqn]; -} - static void qm_cq_head_update(struct hisi_qp *qp) { if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) { @@ -894,47 +887,37 @@ static void qm_cq_head_update(struct hisi_qp *qp) } } -static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm) +static void qm_poll_req_cb(struct hisi_qp *qp) { - if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) - return; - - if (qp->event_cb) { - qp->event_cb(qp); - return; - } - - if (qp->req_cb) { - struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; - - while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { - dma_rmb(); - qp->req_cb(qp, qp->sqe + qm->sqe_size * - le16_to_cpu(cqe->sq_head)); - qm_cq_head_update(qp); - cqe = qp->cqe + qp->qp_status.cq_head; - qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, - qp->qp_status.cq_head, 0); - atomic_dec(&qp->qp_status.used); - } + struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; + struct hisi_qm *qm = qp->qm; - /* set c_flag */ + while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { + dma_rmb(); + qp->req_cb(qp, qp->sqe + qm->sqe_size * + le16_to_cpu(cqe->sq_head)); + qm_cq_head_update(qp); + cqe = qp->cqe + qp->qp_status.cq_head; qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, - qp->qp_status.cq_head, 1); + qp->qp_status.cq_head, 0); + atomic_dec(&qp->qp_status.used); } + + /* set c_flag */ + qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1); } -static void qm_work_process(struct work_struct *work) +static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data) { - struct hisi_qm *qm = container_of(work, struct hisi_qm, work); + struct hisi_qm *qm = poll_data->qm; struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; - struct hisi_qp *qp; int eqe_num = 0; + u16 cqn; while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { + cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; + poll_data->qp_finish_id[eqe_num] = cqn; eqe_num++; - qp = qm_to_hisi_qp(qm, eqe); - qm_poll_qp(qp, qm); if (qm->status.eq_head == QM_EQ_DEPTH - 1) { 
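/* Event queue wrap-around: valid entries are tracked by a phase bit that flips on every pass, so toggle the expected phase before continuing from slot 0. */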
qm->status.eqc_phase = !qm->status.eqc_phase; @@ -945,37 +928,70 @@ static void qm_work_process(struct work_struct *work) qm->status.eq_head++; } - if (eqe_num == QM_EQ_DEPTH / 2 - 1) { - eqe_num = 0; - qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); - } + if (eqe_num == (QM_EQ_DEPTH >> 1) - 1) + break; } qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); + + return eqe_num; } -static irqreturn_t do_qm_irq(int irq, void *data) +static void qm_work_process(struct work_struct *work) { - struct hisi_qm *qm = (struct hisi_qm *)data; + struct hisi_qm_poll_data *poll_data = + container_of(work, struct hisi_qm_poll_data, work); + struct hisi_qm *qm = poll_data->qm; + struct hisi_qp *qp; + int eqe_num, i; - /* the workqueue created by device driver of QM */ - if (qm->wq) - queue_work(qm->wq, &qm->work); - else - schedule_work(&qm->work); + /* Get qp id of completed tasks and re-enable the interrupt. */ + eqe_num = qm_get_complete_eqe_num(poll_data); + for (i = eqe_num - 1; i >= 0; i--) { + qp = &qm->qp_array[poll_data->qp_finish_id[i]]; + if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) + continue; - return IRQ_HANDLED; + if (qp->event_cb) { + qp->event_cb(qp); + continue; + } + + if (likely(qp->req_cb)) + qm_poll_req_cb(qp); + } +} + +static bool do_qm_irq(struct hisi_qm *qm) +{ + struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; + struct hisi_qm_poll_data *poll_data; + u16 cqn; + + if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) + return false; + + if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { + cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; + poll_data = &qm->poll_data[cqn]; + queue_work(qm->wq, &poll_data->work); + + return true; + } + + return false; } static irqreturn_t qm_irq(int irq, void *data) { struct hisi_qm *qm = data; + bool ret; - if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) - return do_qm_irq(irq, data); + ret = do_qm_irq(qm); + if (ret) + return IRQ_HANDLED; atomic64_inc(&qm->debug.dfx.err_irq_cnt); - dev_err(&qm->pdev->dev, "invalid int source\n"); qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); return IRQ_NONE; @@ -3134,11 +3150,8 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp) if (ret) dev_err(dev, "Failed to drain out data for stopping!\n"); - if (qp->qm->wq) - flush_workqueue(qp->qm->wq); - else - flush_work(&qp->qm->work); + flush_workqueue(qp->qm->wq); if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) qp_stop_fail_cb(qp); @@ -3557,8 +3570,10 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) for (i = num - 1; i >= 0; i--) { qdma = &qm->qp_array[i].qdma; dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); + kfree(qm->poll_data[i].qp_finish_id); } + kfree(qm->poll_data); kfree(qm->qp_array); } @@ -3567,12 +3582,18 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id) struct device *dev = &qm->pdev->dev; size_t off = qm->sqe_size * QM_Q_DEPTH; struct hisi_qp *qp; + int ret = -ENOMEM; + + qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), + GFP_KERNEL); + if (!qm->poll_data[id].qp_finish_id) + return -ENOMEM; qp = &qm->qp_array[id]; qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, GFP_KERNEL); if (!qp->qdma.va) - return -ENOMEM; + goto err_free_qp_finish_id; qp->sqe = qp->qdma.va; qp->sqe_dma = qp->qdma.dma; @@ -3583,6 +3604,10 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id) qp->qp_id = id; return 0; + +err_free_qp_finish_id: + kfree(qm->poll_data[id].qp_finish_id); + return ret; } static void 
hisi_qm_pre_init(struct hisi_qm *qm) @@ -3672,6 +3697,26 @@ static void qm_last_regs_uninit(struct hisi_qm *qm) debug->qm_last_words = NULL; } +static void hisi_qm_unint_work(struct hisi_qm *qm) +{ + destroy_workqueue(qm->wq); +} + +static void hisi_qm_memory_uninit(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + + hisi_qp_memory_uninit(qm, qm->qp_num); + if (qm->qdma.va) { + hisi_qm_cache_wb(qm); + dma_free_coherent(dev, qm->qdma.size, + qm->qdma.va, qm->qdma.dma); + } + + idr_destroy(&qm->qp_idr); + kfree(qm->factor); +} + /** * hisi_qm_uninit() - Uninitialize qm. * @qm: The qm needed uninit. @@ -3680,13 +3725,10 @@ static void qm_last_regs_uninit(struct hisi_qm *qm) */ void hisi_qm_uninit(struct hisi_qm *qm) { - struct pci_dev *pdev = qm->pdev; - struct device *dev = &pdev->dev; - qm_last_regs_uninit(qm); qm_cmd_uninit(qm); - kfree(qm->factor); + hisi_qm_unint_work(qm); down_write(&qm->qps_lock); if (!qm_avail_state(qm, QM_CLOSE)) { @@ -3694,14 +3736,7 @@ void hisi_qm_uninit(struct hisi_qm *qm) return; } - hisi_qp_memory_uninit(qm, qm->qp_num); - idr_destroy(&qm->qp_idr); - - if (qm->qdma.va) { - hisi_qm_cache_wb(qm); - dma_free_coherent(dev, qm->qdma.size, - qm->qdma.va, qm->qdma.dma); - } + hisi_qm_memory_uninit(qm); hisi_qm_set_state(qm, QM_NOT_READY); up_write(&qm->qps_lock); @@ -6018,14 +6053,28 @@ err_disable_pcidev: return ret; } -static void hisi_qm_init_work(struct hisi_qm *qm) +static int hisi_qm_init_work(struct hisi_qm *qm) { - INIT_WORK(&qm->work, qm_work_process); + int i; + + for (i = 0; i < qm->qp_num; i++) + INIT_WORK(&qm->poll_data[i].work, qm_work_process); + if (qm->fun_type == QM_HW_PF) INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); if (qm->ver > QM_HW_V2) INIT_WORK(&qm->cmd_process, qm_cmd_process); + + qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | + WQ_UNBOUND, num_online_cpus(), + pci_name(qm->pdev)); + if (!qm->wq) { + pci_err(qm->pdev, "failed to alloc workqueue!\n"); + return -ENOMEM; + } + + return 0; } static int hisi_qp_alloc_memory(struct hisi_qm *qm) @@ -6038,11 +6087,18 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm) if (!qm->qp_array) return -ENOMEM; + qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); + if (!qm->poll_data) { + kfree(qm->qp_array); + return -ENOMEM; + } + /* one more page for device or qp statuses */ qp_dma_size = qm->sqe_size * QM_Q_DEPTH + sizeof(struct qm_cqe) * QM_Q_DEPTH; qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; for (i = 0; i < qm->qp_num; i++) { + qm->poll_data[i].qm = qm; ret = hisi_qp_memory_init(qm, qp_dma_size, i); if (ret) goto err_init_qp_mem; @@ -6176,7 +6232,10 @@ int hisi_qm_init(struct hisi_qm *qm) if (ret) goto err_alloc_uacce; - hisi_qm_init_work(qm); + ret = hisi_qm_init_work(qm); + if (ret) + goto err_free_qm_memory; + qm_cmd_init(qm); atomic_set(&qm->status.flags, QM_INIT); @@ -6184,6 +6243,8 @@ int hisi_qm_init(struct hisi_qm *qm) return 0; +err_free_qm_memory: + hisi_qm_memory_uninit(qm); err_alloc_uacce: if (qm->use_sva) { uacce_remove(qm->uacce); diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index 0a3c8f019b02..490e1542305e 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -449,7 +449,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp, */ } - mutex_lock(&ctx->queue->queuelock); + spin_lock_bh(&ctx->queue->queuelock); /* Put the IV in place for chained cases */ switch (ctx->cipher_alg) { case 
SEC_C_AES_CBC_128: @@ -509,7 +509,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp, list_del(&backlog_req->backlog_head); } } - mutex_unlock(&ctx->queue->queuelock); + spin_unlock_bh(&ctx->queue->queuelock); mutex_lock(&sec_req->lock); list_del(&sec_req_el->head); @@ -798,7 +798,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, */ /* Grab a big lock for a long time to avoid concurrency issues */ - mutex_lock(&queue->queuelock); + spin_lock_bh(&queue->queuelock); /* * Can go on to queue if we have space in either: @@ -814,15 +814,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, ret = -EBUSY; if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { list_add_tail(&sec_req->backlog_head, &ctx->backlog); - mutex_unlock(&queue->queuelock); + spin_unlock_bh(&queue->queuelock); goto out; } - mutex_unlock(&queue->queuelock); + spin_unlock_bh(&queue->queuelock); goto err_free_elements; } ret = sec_send_request(sec_req, queue); - mutex_unlock(&queue->queuelock); + spin_unlock_bh(&queue->queuelock); if (ret) goto err_free_elements; @@ -881,7 +881,7 @@ static int sec_alg_skcipher_init(struct crypto_skcipher *tfm) if (IS_ERR(ctx->queue)) return PTR_ERR(ctx->queue); - mutex_init(&ctx->queue->queuelock); + spin_lock_init(&ctx->queue->queuelock); ctx->queue->havesoftqueue = false; return 0; diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c index c8de1b51c843..e75851326c1e 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.c +++ b/drivers/crypto/hisilicon/sec/sec_drv.c @@ -892,7 +892,7 @@ bool sec_queue_can_enqueue(struct sec_queue *queue, int num) static void sec_queue_hw_init(struct sec_queue *queue) { sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC); - sec_queue_aw_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC); + sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FROCE_NOALLOC); sec_queue_ar_pkgattr(queue, 1); sec_queue_aw_pkgattr(queue, 1); diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h index 179a8250d691..e2a50bf2234b 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.h +++ b/drivers/crypto/hisilicon/sec/sec_drv.h @@ -347,7 +347,7 @@ struct sec_queue { DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN); DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *)); bool havesoftqueue; - struct mutex queuelock; + spinlock_t queuelock; void *shadow[SEC_QUEUE_LEN]; }; diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index c2e9b01187a7..d2a0bc93e752 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -119,7 +119,7 @@ struct sec_qp_ctx { struct idr req_idr; struct sec_alg_res res[QM_Q_DEPTH]; struct sec_ctx *ctx; - struct mutex req_lock; + spinlock_t req_lock; struct list_head backlog; struct hisi_acc_sgl_pool *c_in_pool; struct hisi_acc_sgl_pool *c_out_pool; @@ -143,10 +143,10 @@ struct sec_ctx { /* Threshold for fake busy, trigger to return -EBUSY to user */ u32 fake_req_limit; - /* Currrent cyclic index to select a queue for encipher */ + /* Current cyclic index to select a queue for encipher */ atomic_t enc_qcyclic; - /* Currrent cyclic index to select a queue for decipher */ + /* Current cyclic index to select a queue for decipher */ atomic_t dec_qcyclic; enum sec_alg_type alg_type; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 6eebe739893c..77c9f13cf69a 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ 
b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -127,11 +127,11 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) { int req_id; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC); - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); if (unlikely(req_id < 0)) { dev_err(req->ctx->dev, "alloc req id fail!\n"); return req_id; @@ -156,9 +156,9 @@ static void sec_free_req_id(struct sec_req *req) qp_ctx->req_list[req_id] = NULL; req->qp_ctx = NULL; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); idr_remove(&qp_ctx->req_idr, req_id); - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); } static u8 pre_parse_finished_bd(struct bd_status *status, void *resp) @@ -273,7 +273,7 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) return -EBUSY; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); if (ctx->fake_req_limit <= @@ -281,10 +281,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) list_add_tail(&req->backlog_head, &qp_ctx->backlog); atomic64_inc(&ctx->sec->debug.dfx.send_cnt); atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); return -EBUSY; } - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); if (unlikely(ret == -EBUSY)) return -ENOBUFS; @@ -487,7 +487,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, qp->req_cb = sec_req_cb; - mutex_init(&qp_ctx->req_lock); + spin_lock_init(&qp_ctx->req_lock); idr_init(&qp_ctx->req_idr); INIT_LIST_HEAD(&qp_ctx->backlog); @@ -620,7 +620,7 @@ static int sec_auth_init(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, + a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, &a_ctx->a_key_dma, GFP_KERNEL); if (!a_ctx->a_key) return -ENOMEM; @@ -632,8 +632,8 @@ static void sec_auth_uninit(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE); - dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, + memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE); + dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, a_ctx->a_key, a_ctx->a_key_dma); } @@ -1382,7 +1382,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, { struct sec_req *backlog_req = NULL; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); if (ctx->fake_req_limit >= atomic_read(&qp_ctx->qp->qp_status.used) && !list_empty(&qp_ctx->backlog)) { @@ -1390,7 +1390,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, typeof(*backlog_req), backlog_head); list_del(&backlog_req->backlog_head); } - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); return backlog_req; } diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h index 5e039b50e9d4..d033f63b583f 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.h +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -7,6 +7,7 @@ #define SEC_AIV_SIZE 12 #define SEC_IV_SIZE 24 #define SEC_MAX_KEY_SIZE 64 +#define SEC_MAX_AKEY_SIZE 128 #define SEC_COMM_SCENE 0 #define SEC_MIN_BLOCK_SZ 1 diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 
4d85d2cbf376..2c0be91c0b09 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -508,16 +508,17 @@ static int sec_engine_init(struct hisi_qm *qm) writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); - /* HW V2 enable sm4 extra mode, as ctr/ecb */ - if (qm->ver < QM_HW_V3) + if (qm->ver < QM_HW_V3) { + /* HW V2 enable sm4 extra mode, as ctr/ecb */ writel_relaxed(SEC_BD_ERR_CHK_EN0, qm->io_base + SEC_BD_ERR_CHK_EN_REG0); - /* Enable sm4 xts mode multiple iv */ - writel_relaxed(SEC_BD_ERR_CHK_EN1, - qm->io_base + SEC_BD_ERR_CHK_EN_REG1); - writel_relaxed(SEC_BD_ERR_CHK_EN3, - qm->io_base + SEC_BD_ERR_CHK_EN_REG3); + /* HW V2 enable sm4 xts mode multiple iv */ + writel_relaxed(SEC_BD_ERR_CHK_EN1, + qm->io_base + SEC_BD_ERR_CHK_EN_REG1); + writel_relaxed(SEC_BD_ERR_CHK_EN3, + qm->io_base + SEC_BD_ERR_CHK_EN_REG3); + } /* config endian */ sec_set_endian(qm); @@ -1002,8 +1003,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { - int ret; - qm->pdev = pdev; qm->ver = pdev->revision; qm->algs = "cipher\ndigest\naead"; @@ -1029,25 +1028,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; } - /* - * WQ_HIGHPRI: SEC request must be low delayed, - * so need a high priority workqueue. - * WQ_UNBOUND: SEC task is likely with long - * running CPU intensive workloads. - */ - qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | - WQ_UNBOUND, num_online_cpus(), - pci_name(qm->pdev)); - if (!qm->wq) { - pci_err(qm->pdev, "fail to alloc workqueue\n"); - return -ENOMEM; - } - - ret = hisi_qm_init(qm); - if (ret) - destroy_workqueue(qm->wq); - - return ret; + return hisi_qm_init(qm); } static void sec_qm_uninit(struct hisi_qm *qm) @@ -1078,8 +1059,6 @@ static int sec_probe_init(struct sec_dev *sec) static void sec_probe_uninit(struct hisi_qm *qm) { hisi_qm_dev_err_uninit(qm); - - destroy_workqueue(qm->wq); } static void sec_iommu_used_check(struct sec_dev *sec) diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c index 829f2caf0f67..97e500db0a82 100644 --- a/drivers/crypto/hisilicon/trng/trng.c +++ b/drivers/crypto/hisilicon/trng/trng.c @@ -185,7 +185,7 @@ static int hisi_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) struct hisi_trng *trng; int currsize = 0; u32 val = 0; - u32 ret; + int ret; trng = container_of(rng, struct hisi_trng, rng); diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 67869513e48c..ad35434a3fdb 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019 HiSilicon Limited. 
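A note on the trng.c type change just above: `ret` was declared u32, but the value stored into it later in hisi_trng_read() (not shown in this hunk) can be a negative errno. A minimal sketch of why the signedness matters — a hypothetical function for illustration, assuming <linux/types.h> and <linux/errno.h>, not the driver code itself:

	static int poll_status_sketch(void)
	{
		u32 ret = -ETIMEDOUT;	/* negative errno wraps to a huge positive value */

		if (ret < 0)		/* unsigned comparison: can never be true */
			return ret;	/* so the timeout would be silently swallowed */

		return 0;
	}

With `int ret`, the error path behaves as intended.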
*/ #include <crypto/internal/acompress.h> #include <linux/bitfield.h> +#include <linux/bitmap.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include "zip.h" @@ -606,8 +607,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) req_q = &ctx->qp_ctx[i].req_q; req_q->size = QM_Q_DEPTH; - req_q->req_bitmap = kcalloc(BITS_TO_LONGS(req_q->size), - sizeof(long), GFP_KERNEL); + req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL); if (!req_q->req_bitmap) { ret = -ENOMEM; if (i == 0) @@ -631,11 +631,11 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) return 0; err_free_loop1: - kfree(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap); + bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap); err_free_loop0: kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q); err_free_bitmap: - kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap); + bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap); return ret; } @@ -645,7 +645,7 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx) for (i = 0; i < HZIP_CTX_Q_NUM; i++) { kfree(ctx->qp_ctx[i].req_q.q); - kfree(ctx->qp_ctx[i].req_q.req_bitmap); + bitmap_free(ctx->qp_ctx[i].req_q.req_bitmap); } } diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 9c925e9c0a2d..c3303d99acac 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -990,8 +990,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { - int ret; - qm->pdev = pdev; qm->ver = pdev->revision; if (pdev->revision >= QM_HW_V3) @@ -1021,25 +1019,12 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; } - qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | - WQ_UNBOUND, num_online_cpus(), - pci_name(qm->pdev)); - if (!qm->wq) { - pci_err(qm->pdev, "fail to alloc workqueue\n"); - return -ENOMEM; - } - - ret = hisi_qm_init(qm); - if (ret) - destroy_workqueue(qm->wq); - - return ret; + return hisi_qm_init(qm); } static void hisi_zip_qm_uninit(struct hisi_qm *qm) { hisi_qm_uninit(qm); - destroy_workqueue(qm->wq); } static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 9b1a158aec29..ad0d8c4a71ac 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1831,6 +1831,8 @@ static const struct of_device_id safexcel_of_match_table[] = { {}, }; +MODULE_DEVICE_TABLE(of, safexcel_of_match_table); + static struct platform_driver crypto_safexcel = { .probe = safexcel_probe, .remove = safexcel_remove, diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index ce1e611a163e..797ff91512e0 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -497,15 +497,15 @@ struct result_data_desc { u32 packet_length:17; u32 error_code:15; - u8 bypass_length:4; - u8 e15:1; - u16 rsvd0; - u8 hash_bytes:1; - u8 hash_length:6; - u8 generic_bytes:1; - u8 checksum:1; - u8 next_header:1; - u8 length:1; + u32 bypass_length:4; + u32 e15:1; + u32 rsvd0:16; + u32 hash_bytes:1; + u32 hash_length:6; + u32 generic_bytes:1; + u32 checksum:1; + u32 next_header:1; + u32 length:1; u16 application_id; u16 rsvd1; diff --git a/drivers/crypto/keembay/keembay-ocs-ecc.c b/drivers/crypto/keembay/keembay-ocs-ecc.c index 
5d0785d3f1b5..2269df17514c 100644 --- a/drivers/crypto/keembay/keembay-ocs-ecc.c +++ b/drivers/crypto/keembay/keembay-ocs-ecc.c @@ -976,8 +976,6 @@ static int kmb_ocs_ecc_remove(struct platform_device *pdev) struct ocs_ecc_dev *ecc_dev; ecc_dev = platform_get_drvdata(pdev); - if (!ecc_dev) - return -ENODEV; crypto_unregister_kpp(&ocs_ecdh_p384); crypto_unregister_kpp(&ocs_ecdh_p256); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c index bb02e0db3615..7503f6b18ac5 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c @@ -51,11 +51,47 @@ static const struct devlink_param otx2_cpt_dl_params[] = { NULL), }; -static int otx2_cpt_devlink_info_get(struct devlink *devlink, +static int otx2_cpt_dl_info_firmware_version_put(struct devlink_info_req *req, + struct otx2_cpt_eng_grp_info grp[], + const char *ver_name, int eng_type) +{ + struct otx2_cpt_engs_rsvd *eng; + int i; + + for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) { + eng = find_engines_by_type(&grp[i], eng_type); + if (eng) + return devlink_info_version_running_put(req, ver_name, + eng->ucode->ver_str); + } + + return 0; +} + +static int otx2_cpt_devlink_info_get(struct devlink *dl, struct devlink_info_req *req, struct netlink_ext_ack *extack) { - return devlink_info_driver_name_put(req, "rvu_cptpf"); + struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl); + struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf; + int err; + + err = devlink_info_driver_name_put(req, "rvu_cptpf"); + if (err) + return err; + + err = otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp, + "fw.ae", OTX2_CPT_AE_TYPES); + if (err) + return err; + + err = otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp, + "fw.se", OTX2_CPT_SE_TYPES); + if (err) + return err; + + return otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp, + "fw.ie", OTX2_CPT_IE_TYPES); } static const struct devlink_ops otx2_cpt_devlink_ops = { diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 9cba2f714c7e..f10050fead16 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -476,7 +476,7 @@ release_fw: return ret; } -static struct otx2_cpt_engs_rsvd *find_engines_by_type( +struct otx2_cpt_engs_rsvd *find_engines_by_type( struct otx2_cpt_eng_grp_info *eng_grp, int eng_type) { @@ -1605,7 +1605,10 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, if (!strncasecmp(val, "se", 2) && strchr(val, ':')) { if (has_se || ucode_idx) goto err_print; - tmp = strim(strsep(&val, ":")); + tmp = strsep(&val, ":"); + if (!tmp) + goto err_print; + tmp = strim(tmp); if (!val) goto err_print; if (strlen(tmp) != 2) @@ -1617,7 +1620,10 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, } else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) { if (has_ae || ucode_idx) goto err_print; - tmp = strim(strsep(&val, ":")); + tmp = strsep(&val, ":"); + if (!tmp) + goto err_print; + tmp = strim(tmp); if (!val) goto err_print; if (strlen(tmp) != 2) @@ -1629,7 +1635,10 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, } else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) { if (has_ie || ucode_idx) goto err_print; - tmp = strim(strsep(&val, ":")); + tmp = strsep(&val, ":"); + if (!tmp) + goto err_print; + tmp = strim(tmp); if (!val) goto err_print; if (strlen(tmp) != 
2) diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h index 8f4d4e5f531a..e69320a54b5d 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h @@ -166,4 +166,7 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf, struct devlink_param_gset_ctx *ctx); void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf); +struct otx2_cpt_engs_rsvd *find_engines_by_type( + struct otx2_cpt_eng_grp_info *eng_grp, + int eng_type); #endif /* __OTX2_CPTPF_UCODE_H */ diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c index f418817c0f43..f34c75a862f2 100644 --- a/drivers/crypto/nx/nx-common-powernv.c +++ b/drivers/crypto/nx/nx-common-powernv.c @@ -75,7 +75,7 @@ static int (*nx842_powernv_exec)(const unsigned char *in, /** * setup_indirect_dde - Setup an indirect DDE * - * The DDE is setup with the the DDE count, byte count, and address of + * The DDE is setup with the DDE count, byte count, and address of * first direct DDE in the list. */ static void setup_indirect_dde(struct data_descriptor_entry *dde, diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c index 7584a34ba88c..3ea334b7f820 100644 --- a/drivers/crypto/nx/nx-common-pseries.c +++ b/drivers/crypto/nx/nx-common-pseries.c @@ -1208,10 +1208,13 @@ static struct vio_driver nx842_vio_driver = { static int __init nx842_pseries_init(void) { struct nx842_devdata *new_devdata; + struct device_node *np; int ret; - if (!of_find_compatible_node(NULL, NULL, "ibm,compression")) + np = of_find_compatible_node(NULL, NULL, "ibm,compression"); + if (!np) return -ENODEV; + of_node_put(np); RCU_INIT_POINTER(devdata, NULL); new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 581211a92628..67a99c760bc4 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1261,9 +1261,6 @@ static int omap_aes_remove(struct platform_device *pdev) struct aead_alg *aalg; int i, j; - if (!dd) - return -ENODEV; - spin_lock_bh(&list_lock); list_del(&dd->list); spin_unlock_bh(&list_lock); @@ -1279,7 +1276,6 @@ static int omap_aes_remove(struct platform_device *pdev) aalg = &dd->pdata->aead_algs_info->algs_list[i]; crypto_unregister_aead(aalg); dd->pdata->aead_algs_info->registered--; - } crypto_engine_exit(dd->engine); diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 538aff80869f..f783769ea110 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -1091,9 +1091,6 @@ static int omap_des_remove(struct platform_device *pdev) struct omap_des_dev *dd = platform_get_drvdata(pdev); int i, j; - if (!dd) - return -ENODEV; - spin_lock_bh(&list_lock); list_del(&dd->list); spin_unlock_bh(&list_lock); @@ -1106,7 +1103,6 @@ static int omap_des_remove(struct platform_device *pdev) tasklet_kill(&dd->done_task); omap_des_dma_cleanup(dd); pm_runtime_disable(dd->dev); - dd = NULL; return 0; } diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 4b37dc69a50c..655a7f5a406a 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -2197,8 +2197,7 @@ static int omap_sham_remove(struct platform_device *pdev) int i, j; dd = platform_get_drvdata(pdev); - if (!dd) - return -ENODEV; + spin_lock_bh(&sham.lock); list_del(&dd->list); 
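The keembay-ocs-ecc, omap-aes, omap-des and omap-sham hunks above (and the s5p-sss one further down) all delete the same dead code: a NULL check on platform_get_drvdata() in .remove(), plus a pointless `dd = NULL` store into a local about to go out of scope. The check is dead because the driver core only calls .remove() after .probe() has returned success, and each of these probes sets drvdata before doing so. A minimal sketch of the invariant (hypothetical driver, assuming <linux/platform_device.h>):

	struct foo_dev { int dummy; };

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_dev *dd = devm_kzalloc(&pdev->dev, sizeof(*dd),
						  GFP_KERNEL);
		if (!dd)
			return -ENOMEM;	/* probe fails: remove() never runs */

		platform_set_drvdata(pdev, dd);
		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		/* Never NULL: probe() stored it before reporting success. */
		struct foo_dev *dd = platform_get_drvdata(pdev);

		(void)dd;	/* real teardown would use dd here */
		return 0;
	}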
spin_unlock_bh(&sham.lock); diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index 4b90c0f22b03..1220cc86f910 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -17,7 +17,7 @@ config CRYPTO_DEV_QAT config CRYPTO_DEV_QAT_DH895xCC tristate "Support for Intel(R) DH895xCC" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select CRYPTO_DEV_QAT help Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology @@ -28,7 +28,7 @@ config CRYPTO_DEV_QAT_DH895xCC config CRYPTO_DEV_QAT_C3XXX tristate "Support for Intel(R) C3XXX" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select CRYPTO_DEV_QAT help Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology @@ -39,7 +39,7 @@ config CRYPTO_DEV_QAT_C3XXX config CRYPTO_DEV_QAT_C62X tristate "Support for Intel(R) C62X" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select CRYPTO_DEV_QAT help Support for Intel(R) C62x with Intel(R) QuickAssist Technology @@ -50,7 +50,7 @@ config CRYPTO_DEV_QAT_C62X config CRYPTO_DEV_QAT_4XXX tristate "Support for Intel(R) QAT_4XXX" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select CRYPTO_DEV_QAT help Support for Intel(R) QuickAssist Technology QAT_4xxx @@ -61,7 +61,7 @@ config CRYPTO_DEV_QAT_4XXX config CRYPTO_DEV_QAT_DH895xCCVF tristate "Support for Intel(R) DH895xCC Virtual Function" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select PCI_IOV select CRYPTO_DEV_QAT @@ -74,7 +74,7 @@ config CRYPTO_DEV_QAT_DH895xCCVF config CRYPTO_DEV_QAT_C3XXXVF tristate "Support for Intel(R) C3XXX Virtual Function" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select PCI_IOV select CRYPTO_DEV_QAT help @@ -86,7 +86,7 @@ config CRYPTO_DEV_QAT_C3XXXVF config CRYPTO_DEV_QAT_C62XVF tristate "Support for Intel(R) C62X Virtual Function" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select PCI_IOV select CRYPTO_DEV_QAT help diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c index fb5970a68484..fda5f699ff57 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -357,6 +357,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data) hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; hw_data->enable_pm = adf_gen4_enable_pm; hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; + hw_data->dev_config = adf_crypto_dev_config; adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h index 1034752845ca..9d49248931f6 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -70,5 +70,6 @@ enum icp_qat_4xxx_slice_mask { void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data); void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data); +int adf_crypto_dev_config(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c index 181fa1c8b3c7..2f212561acc4 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c @@ -53,7 +53,7 @@ static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev) return 0; } -static int 
adf_crypto_dev_config(struct adf_accel_dev *accel_dev) +int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; int banks = GET_MAX_BANKS(accel_dev); @@ -289,6 +289,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err_disable_aer; } + ret = adf_sysfs_init(accel_dev); + if (ret) + goto out_err_disable_aer; + ret = adf_crypto_dev_config(accel_dev); if (ret) goto out_err_disable_aer; diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index 04f058acc4d3..80919cfcc29d 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile @@ -10,6 +10,7 @@ intel_qat-objs := adf_cfg.o \ adf_transport.o \ adf_admin.o \ adf_hw_arbiter.o \ + adf_sysfs.o \ adf_gen2_hw_data.o \ adf_gen4_hw_data.o \ adf_gen4_pm.o \ diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index ede6458c9dbf..0a55a4f34dcf 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -199,6 +199,7 @@ struct adf_hw_device_data { char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); u32 (*uof_get_num_objs)(void); u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); + int (*dev_config)(struct adf_accel_dev *accel_dev); struct adf_pfvf_ops pfvf_ops; struct adf_hw_csr_ops csr_ops; const char *fw_name; diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c index b5b208cbe5a1..e61b3e13db3b 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/qat/qat_common/adf_cfg.c @@ -128,6 +128,24 @@ static void adf_cfg_keyval_add(struct adf_cfg_key_val *new, list_add_tail(&new->list, &sec->param_head); } +static void adf_cfg_keyval_remove(const char *key, struct adf_cfg_section *sec) +{ + struct list_head *head = &sec->param_head; + struct list_head *list_ptr, *tmp; + + list_for_each_prev_safe(list_ptr, tmp, head) { + struct adf_cfg_key_val *ptr = + list_entry(list_ptr, struct adf_cfg_key_val, list); + + if (strncmp(ptr->key, key, sizeof(ptr->key))) + continue; + + list_del(list_ptr); + kfree(ptr); + break; + } +} + static void adf_cfg_keyval_del_all(struct list_head *head) { struct list_head *list_ptr, *tmp; @@ -208,7 +226,8 @@ static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev, * @type: Type - string, int or address * * Function adds configuration key - value entry in the appropriate section - * in the given acceleration device + * in the given acceleration device. If the key exists already, the value + * is updated. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. @@ -222,6 +241,8 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, struct adf_cfg_key_val *key_val; struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev, section_name); + char temp_val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + if (!section) return -EFAULT; @@ -246,6 +267,24 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, return -EINVAL; } key_val->type = type; + + /* Add the key-value pair as below policy: + * 1. if the key doesn't exist, add it; + * 2. if the key already exists with a different value then update it + * to the new value (the key is deleted and the newly created + * key_val containing the new value is added to the database); + * 3. 
if the key exists with the same value, then return without doing + * anything (the newly created key_val is freed). + */ + if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) { + if (strncmp(temp_val, key_val->val, sizeof(temp_val))) { + adf_cfg_keyval_remove(key, section); + } else { + kfree(key_val); + return 0; + } + } + down_write(&cfg->lock); adf_cfg_keyval_add(key_val, section); up_write(&cfg->lock); diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 0464fa257929..7bb477c3ce25 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -49,11 +49,6 @@ struct service_hndl { struct list_head list; }; -static inline int get_current_node(void) -{ - return topology_physical_package_id(raw_smp_processor_id()); -} - int adf_service_register(struct service_hndl *service); int adf_service_unregister(struct service_hndl *service); @@ -61,6 +56,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev); int adf_dev_start(struct adf_accel_dev *accel_dev); void adf_dev_stop(struct adf_accel_dev *accel_dev); void adf_dev_shutdown(struct adf_accel_dev *accel_dev); +int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev); void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); void adf_clean_vf_map(bool); @@ -132,6 +128,8 @@ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev); int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev); +int adf_sysfs_init(struct adf_accel_dev *accel_dev); + int qat_hal_init(struct adf_accel_dev *accel_dev); void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); int qat_hal_start(struct icp_qat_fw_loader_handle *handle); diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index c2c718f1b489..33a9a46d6949 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -363,3 +363,29 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) } return 0; } + +int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + int ret; + + ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, services); + + adf_dev_stop(accel_dev); + adf_dev_shutdown(accel_dev); + + if (!ret) { + ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); + if (ret) + return ret; + + ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, + services, ADF_STR); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c index f38b2ffde146..b2db1d70d71f 100644 --- a/drivers/crypto/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/qat/qat_common/adf_sriov.c @@ -120,32 +120,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_disable_sriov); -static int adf_sriov_prepare_restart(struct adf_accel_dev *accel_dev) -{ - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; - int ret; - - ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, services); - - adf_dev_stop(accel_dev); - adf_dev_shutdown(accel_dev); - - if (!ret) { - ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); - if (ret) - return ret; - - ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, - services, ADF_STR); - if (ret) - return ret; - } - - return 0; -} - /** * adf_sriov_configure() - 
Enable SRIOV for the device * @pdev: Pointer to PCI device. @@ -185,7 +159,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs) return -EBUSY; } - ret = adf_sriov_prepare_restart(accel_dev); + ret = adf_dev_shutdown_cache_cfg(accel_dev); if (ret) return ret; } diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/qat/qat_common/adf_sysfs.c new file mode 100644 index 000000000000..e8b078e719c2 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_sysfs.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2022 Intel Corporation */ +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include "adf_accel_devices.h" +#include "adf_cfg.h" +#include "adf_common_drv.h" + +static const char * const state_operations[] = { + [DEV_DOWN] = "down", + [DEV_UP] = "up", +}; + +static ssize_t state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + char *state; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + state = adf_dev_started(accel_dev) ? "up" : "down"; + return sysfs_emit(buf, "%s\n", state); +} + +static ssize_t state_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + u32 accel_id; + int ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + accel_id = accel_dev->accel_id; + + if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) { + dev_info(dev, "Device qat_dev%d is busy\n", accel_id); + return -EBUSY; + } + + ret = sysfs_match_string(state_operations, buf); + if (ret < 0) + return ret; + + switch (ret) { + case DEV_DOWN: + if (!adf_dev_started(accel_dev)) { + dev_info(dev, "Device qat_dev%d already down\n", + accel_id); + return -EINVAL; + } + + dev_info(dev, "Stopping device qat_dev%d\n", accel_id); + + ret = adf_dev_shutdown_cache_cfg(accel_dev); + if (ret < 0) + return -EINVAL; + + break; + case DEV_UP: + if (adf_dev_started(accel_dev)) { + dev_info(dev, "Device qat_dev%d already up\n", + accel_id); + return -EINVAL; + } + + dev_info(dev, "Starting device qat_dev%d\n", accel_id); + + ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev); + if (!ret) + ret = adf_dev_init(accel_dev); + if (!ret) + ret = adf_dev_start(accel_dev); + + if (ret < 0) { + dev_err(dev, "Failed to start device qat_dev%d\n", + accel_id); + adf_dev_shutdown_cache_cfg(accel_dev); + return ret; + } + break; + default: + return -EINVAL; + } + + return count; +} + +static const char * const services_operations[] = { + ADF_CFG_CY, + ADF_CFG_DC, +}; + +static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + struct adf_accel_dev *accel_dev; + int ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, services); + if (ret) + return ret; + + return sysfs_emit(buf, "%s\n", services); +} + +static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev, + const char *services) +{ + return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, services, + ADF_STR); +} + +static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_hw_device_data *hw_data; + 
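	/* Usage note on the new sysfs interface (the path is inferred from the
	 * attribute group name "qat" on the PCI device, not quoted from the
	 * patch): a device can now be reconfigured from user space without a
	 * module reload — "echo down > /sys/bus/pci/devices/<BDF>/qat/state",
	 * write one of the strings behind ADF_CFG_CY or ADF_CFG_DC to
	 * cfg_services, then "echo up > .../qat/state". cfg_services_store()
	 * below rejects writes while the device is started, and the down path
	 * goes through adf_dev_shutdown_cache_cfg() precisely so that the
	 * ServicesEnabled key survives the restart.
	 */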
struct adf_accel_dev *accel_dev; + int ret; + + ret = sysfs_match_string(services_operations, buf); + if (ret < 0) + return ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + if (adf_dev_started(accel_dev)) { + dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n", + accel_dev->accel_id); + return -EINVAL; + } + + ret = adf_sysfs_update_dev_config(accel_dev, services_operations[ret]); + if (ret < 0) + return ret; + + hw_data = GET_HW_DATA(accel_dev); + + /* Update capabilities mask after change in configuration. + * A call to this function is required as capabilities are, at the + * moment, tied to configuration + */ + hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); + if (!hw_data->accel_capabilities_mask) + return -EINVAL; + + return count; +} + +static DEVICE_ATTR_RW(state); +static DEVICE_ATTR_RW(cfg_services); + +static struct attribute *qat_attrs[] = { + &dev_attr_state.attr, + &dev_attr_cfg_services.attr, + NULL, +}; + +static struct attribute_group qat_group = { + .attrs = qat_attrs, + .name = "qat", +}; + +int adf_sysfs_init(struct adf_accel_dev *accel_dev) +{ + int ret; + + ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to create qat attribute group: %d\n", ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(adf_sysfs_init); diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 148edbe379e3..fb45fa83841c 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -605,7 +605,7 @@ static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key, { struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); struct qat_crypto_instance *inst = NULL; - int node = get_current_node(); + int node = numa_node_id(); struct device *dev; int ret; @@ -1065,7 +1065,7 @@ static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx, { struct qat_crypto_instance *inst = NULL; struct device *dev; - int node = get_current_node(); + int node = numa_node_id(); int ret; inst = qat_crypto_get_instance_node(node); diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 16d97db9ea15..095ed2a404d2 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -489,7 +489,7 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm) { struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); struct qat_crypto_instance *inst = - qat_crypto_get_instance_node(get_current_node()); + qat_crypto_get_instance_node(numa_node_id()); if (!inst) return -EINVAL; @@ -1225,7 +1225,7 @@ static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) { struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct qat_crypto_instance *inst = - qat_crypto_get_instance_node(get_current_node()); + qat_crypto_get_instance_node(numa_node_id()); if (!inst) return -EINVAL; diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 7717e9e5977b..b79e49aa724f 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -2321,9 +2321,6 @@ static int s5p_aes_remove(struct platform_device *pdev) struct s5p_aes_dev *pdata = platform_get_drvdata(pdev); int i; - if (!pdata) - return -ENODEV; - for (i = 0; i < ARRAY_SIZE(algs); i++) crypto_unregister_skcipher(&algs[i]); diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index 6957a125b447..f4bc06c24ad8 100644 --- 
a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -86,7 +86,6 @@ struct sa_match_data { u8 priv; u8 priv_id; u32 supported_algos; - bool skip_engine_control; }; static struct device *sa_k3_dev; @@ -2361,7 +2360,15 @@ static int sa_link_child(struct device *dev, void *data) static struct sa_match_data am654_match_data = { .priv = 1, .priv_id = 1, - .supported_algos = GENMASK(SA_ALG_AUTHENC_SHA256_AES, 0), + .supported_algos = BIT(SA_ALG_CBC_AES) | + BIT(SA_ALG_EBC_AES) | + BIT(SA_ALG_CBC_DES3) | + BIT(SA_ALG_ECB_DES3) | + BIT(SA_ALG_SHA1) | + BIT(SA_ALG_SHA256) | + BIT(SA_ALG_SHA512) | + BIT(SA_ALG_AUTHENC_SHA1_AES) | + BIT(SA_ALG_AUTHENC_SHA256_AES), }; static struct sa_match_data am64_match_data = { @@ -2372,7 +2379,6 @@ static struct sa_match_data am64_match_data = { BIT(SA_ALG_SHA256) | BIT(SA_ALG_SHA512) | BIT(SA_ALG_AUTHENC_SHA256_AES), - .skip_engine_control = true, }; static const struct of_device_id of_match[] = { @@ -2390,6 +2396,7 @@ static int sa_ul_probe(struct platform_device *pdev) struct device_node *node = dev->of_node; static void __iomem *saul_base; struct sa_crypto_data *dev_data; + u32 status, val; int ret; dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL); @@ -2426,13 +2433,13 @@ static int sa_ul_probe(struct platform_device *pdev) spin_lock_init(&dev_data->scid_lock); - if (!dev_data->match_data->skip_engine_control) { - u32 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN | - SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN | - SA_EEC_TRNG_EN; - + val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN | + SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN | + SA_EEC_TRNG_EN; + status = readl_relaxed(saul_base + SA_ENGINE_STATUS); + /* Only enable engines if all are not already enabled */ + if (val & ~status) writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL); - } sa_register_algos(dev_data); diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h index ed66d1f111db..92bf97232a29 100644 --- a/drivers/crypto/sa2ul.h +++ b/drivers/crypto/sa2ul.h @@ -16,6 +16,7 @@ #include <crypto/sha1.h> #include <crypto/sha2.h> +#define SA_ENGINE_STATUS 0x0008 #define SA_ENGINE_ENABLE_CONTROL 0x1000 struct sa_tfm_ctx; diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 265ef3e96fdd..f104e8a43036 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -421,7 +421,7 @@ static int hash_get_device_data(struct hash_ctx *ctx, * @keylen: The lengt of the key. * * Note! This function DOES NOT write to the NBLW registry, even though - * specified in the the hw design spec. Either due to incorrect info in the + * specified in the hw design spec. Either due to incorrect info in the * spec or due to a bug in the hw. 
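The sa2ul probe hunk above replaces the am64-only skip_engine_control flag with a runtime test of the new SA_ENGINE_STATUS register: the enable mask is written only when at least one wanted engine is still off. A worked example of the `val & ~status` test, with illustrative bit values rather than the real register layout:

	u32 val    = 0x07;	/* engines we want enabled: bits 0..2 */
	u32 status = 0x05;	/* engines already on, per SA_ENGINE_STATUS */

	if (val & ~status)	/* 0x07 & ~0x05 = 0x02: bit 1 still disabled */
		writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);

On systems where firmware has already enabled everything (the case the removed flag special-cased), status covers val, the expression is 0, and the write is skipped.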
*/ static void hash_hw_write_key(struct hash_device_data *device_data, diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index 5bc5710a6de0..77eca20bc7ac 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -23,6 +23,7 @@ #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <crypto/b128ops.h> +#include "aesp8-ppc.h" void gcm_init_p8(u128 htable[16], const u64 Xi[2]); void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl index 09bba1852eec..041e633c214f 100644 --- a/drivers/crypto/vmx/ghashp8-ppc.pl +++ b/drivers/crypto/vmx/ghashp8-ppc.pl @@ -16,7 +16,7 @@ # details see https://www.openssl.org/~appro/cryptogams/. # ==================================================================== # -# GHASH for for PowerISA v2.07. +# GHASH for PowerISA v2.07. # # July 2014 # diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 87eb2b837e68..9754d8b31621 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -120,6 +120,16 @@ config ARM_TEGRA_DEVFREQ It reads ACTMON counters of memory controllers and adjusts the operating frequencies and voltages with OPP support. +config ARM_MEDIATEK_CCI_DEVFREQ + tristate "MEDIATEK CCI DEVFREQ Driver" + depends on ARM_MEDIATEK_CPUFREQ || COMPILE_TEST + select DEVFREQ_GOV_PASSIVE + help + This adds a devfreq driver for the MediaTek Cache Coherent Interconnect + (CCI), which shares its regulators with the CPU cluster. The driver + tracks the buck voltages and picks a matching CCI frequency, using the + regulator notifications to follow the regulator status. + config ARM_RK3399_DMC_DEVFREQ tristate "ARM RK3399 DMC DEVFREQ Driver" depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \ diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile index 0b6be92a25d9..bf40d04928d0 100644 --- a/drivers/devfreq/Makefile +++ b/drivers/devfreq/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o obj-$(CONFIG_ARM_IMX_BUS_DEVFREQ) += imx-bus.o obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o +obj-$(CONFIG_ARM_MEDIATEK_CCI_DEVFREQ) += mtk-cci-devfreq.o obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o obj-$(CONFIG_ARM_SUN8I_A33_MBUS_DEVFREQ) += sun8i-a33-mbus.o obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 9602141bb8ec..63347a5ae599 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -696,6 +696,8 @@ static int qos_notifier_call(struct devfreq *devfreq) /** * qos_min_notifier_call() - Callback for QoS min_freq changes. * @nb: Should be devfreq->nb_min + * @val: not used + * @ptr: not used */ static int qos_min_notifier_call(struct notifier_block *nb, unsigned long val, void *ptr) @@ -706,6 +708,8 @@ static int qos_min_notifier_call(struct notifier_block *nb, unsigned long val, void *ptr) { /** * qos_max_notifier_call() - Callback for QoS max_freq changes. 
* @nb: Should be devfreq->nb_max + * @val: not used + * @ptr: not used */ static int qos_max_notifier_call(struct notifier_block *nb, unsigned long val, void *ptr) diff --git a/drivers/devfreq/imx-bus.c b/drivers/devfreq/imx-bus.c index f3f6e25053ed..f87067fc574d 100644 --- a/drivers/devfreq/imx-bus.c +++ b/drivers/devfreq/imx-bus.c @@ -59,7 +59,7 @@ static int imx_bus_init_icc(struct device *dev) struct imx_bus *priv = dev_get_drvdata(dev); const char *icc_driver_name; - if (!of_get_property(dev->of_node, "#interconnect-cells", 0)) + if (!of_get_property(dev->of_node, "#interconnect-cells", NULL)) return 0; if (!IS_ENABLED(CONFIG_INTERCONNECT_IMX)) { dev_warn(dev, "imx interconnect drivers disabled\n"); diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c new file mode 100644 index 000000000000..71abb3fbd042 --- /dev/null +++ b/drivers/devfreq/mtk-cci-devfreq.c @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 MediaTek Inc. + */ + +#include <linux/clk.h> +#include <linux/devfreq.h> +#include <linux/minmax.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/regulator/consumer.h> + +struct mtk_ccifreq_platform_data { + int min_volt_shift; + int max_volt_shift; + int proc_max_volt; + int sram_min_volt; + int sram_max_volt; +}; + +struct mtk_ccifreq_drv { + struct device *dev; + struct devfreq *devfreq; + struct regulator *proc_reg; + struct regulator *sram_reg; + struct clk *cci_clk; + struct clk *inter_clk; + int inter_voltage; + unsigned long pre_freq; + /* Avoid race condition for regulators between notify and policy */ + struct mutex reg_lock; + struct notifier_block opp_nb; + const struct mtk_ccifreq_platform_data *soc_data; + int vtrack_max; +}; + +static int mtk_ccifreq_set_voltage(struct mtk_ccifreq_drv *drv, int new_voltage) +{ + const struct mtk_ccifreq_platform_data *soc_data = drv->soc_data; + struct device *dev = drv->dev; + int pre_voltage, pre_vsram, new_vsram, vsram, voltage, ret; + int retry_max = drv->vtrack_max; + + if (!drv->sram_reg) { + ret = regulator_set_voltage(drv->proc_reg, new_voltage, + drv->soc_data->proc_max_volt); + return ret; + } + + pre_voltage = regulator_get_voltage(drv->proc_reg); + if (pre_voltage < 0) { + dev_err(dev, "invalid vproc value: %d\n", pre_voltage); + return pre_voltage; + } + + pre_vsram = regulator_get_voltage(drv->sram_reg); + if (pre_vsram < 0) { + dev_err(dev, "invalid vsram value: %d\n", pre_vsram); + return pre_vsram; + } + + new_vsram = clamp(new_voltage + soc_data->min_volt_shift, + soc_data->sram_min_volt, soc_data->sram_max_volt); + + do { + if (pre_voltage <= new_voltage) { + vsram = clamp(pre_voltage + soc_data->max_volt_shift, + soc_data->sram_min_volt, new_vsram); + ret = regulator_set_voltage(drv->sram_reg, vsram, + soc_data->sram_max_volt); + if (ret) + return ret; + + if (vsram == soc_data->sram_max_volt || + new_vsram == soc_data->sram_min_volt) + voltage = new_voltage; + else + voltage = vsram - soc_data->min_volt_shift; + + ret = regulator_set_voltage(drv->proc_reg, voltage, + soc_data->proc_max_volt); + if (ret) { + regulator_set_voltage(drv->sram_reg, pre_vsram, + soc_data->sram_max_volt); + return ret; + } + } else if (pre_voltage > new_voltage) { + voltage = max(new_voltage, + pre_vsram - soc_data->max_volt_shift); + ret = regulator_set_voltage(drv->proc_reg, voltage, + soc_data->proc_max_volt); + if (ret) + return ret; + + if (voltage == 
new_voltage) + vsram = new_vsram; + else + vsram = max(new_vsram, + voltage + soc_data->min_volt_shift); + + ret = regulator_set_voltage(drv->sram_reg, vsram, + soc_data->sram_max_volt); + if (ret) { + regulator_set_voltage(drv->proc_reg, pre_voltage, + soc_data->proc_max_volt); + return ret; + } + } + + pre_voltage = voltage; + pre_vsram = vsram; + + if (--retry_max < 0) { + dev_err(dev, + "over loop count, failed to set voltage\n"); + return -EINVAL; + } + } while (voltage != new_voltage || vsram != new_vsram); + + return 0; +} + +static int mtk_ccifreq_target(struct device *dev, unsigned long *freq, + u32 flags) +{ + struct mtk_ccifreq_drv *drv = dev_get_drvdata(dev); + struct clk *cci_pll = clk_get_parent(drv->cci_clk); + struct dev_pm_opp *opp; + unsigned long opp_rate; + int voltage, pre_voltage, inter_voltage, target_voltage, ret; + + if (!drv) + return -EINVAL; + + if (drv->pre_freq == *freq) + return 0; + + inter_voltage = drv->inter_voltage; + + opp_rate = *freq; + opp = devfreq_recommended_opp(dev, &opp_rate, 1); + if (IS_ERR(opp)) { + dev_err(dev, "failed to find opp for freq: %ld\n", opp_rate); + return PTR_ERR(opp); + } + + mutex_lock(&drv->reg_lock); + + voltage = dev_pm_opp_get_voltage(opp); + dev_pm_opp_put(opp); + + pre_voltage = regulator_get_voltage(drv->proc_reg); + if (pre_voltage < 0) { + dev_err(dev, "invalid vproc value: %d\n", pre_voltage); + ret = pre_voltage; + goto out_unlock; + } + + /* scale up: set voltage first then freq. */ + target_voltage = max(inter_voltage, voltage); + if (pre_voltage <= target_voltage) { + ret = mtk_ccifreq_set_voltage(drv, target_voltage); + if (ret) { + dev_err(dev, "failed to scale up voltage\n"); + goto out_restore_voltage; + } + } + + /* switch the cci clock to intermediate clock source. */ + ret = clk_set_parent(drv->cci_clk, drv->inter_clk); + if (ret) { + dev_err(dev, "failed to re-parent cci clock\n"); + goto out_restore_voltage; + } + + /* set the original clock to target rate. */ + ret = clk_set_rate(cci_pll, *freq); + if (ret) { + dev_err(dev, "failed to set cci pll rate: %d\n", ret); + clk_set_parent(drv->cci_clk, cci_pll); + goto out_restore_voltage; + } + + /* switch the cci clock back to the original clock source. */ + ret = clk_set_parent(drv->cci_clk, cci_pll); + if (ret) { + dev_err(dev, "failed to re-parent cci clock\n"); + mtk_ccifreq_set_voltage(drv, inter_voltage); + goto out_unlock; + } + + /* + * If the new voltage is lower than the intermediate voltage or the + * original voltage, scale down to the new voltage. 
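 * Stripped of error handling, mtk_ccifreq_target() above is the classic safe PLL re-rate sequence: consumers are parked on a stable intermediate parent while the PLL relocks. A skeleton of the pattern (hypothetical variable names):

	clk_set_parent(cci_mux, inter_clk);	/* 1. park CCI on a stable parent */
	clk_set_rate(cci_pll, new_rate);	/* 2. the unloaded PLL relocks safely */
	clk_set_parent(cci_mux, cci_pll);	/* 3. switch back at the new rate */

The voltage work brackets this window so the rail always satisfies the higher of the two operating points: raised before the switch when scaling up, lowered only afterwards when scaling down.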
+ */ + if (voltage < inter_voltage || voltage < pre_voltage) { + ret = mtk_ccifreq_set_voltage(drv, voltage); + if (ret) { + dev_err(dev, "failed to scale down voltage\n"); + goto out_unlock; + } + } + + drv->pre_freq = *freq; + mutex_unlock(&drv->reg_lock); + + return 0; + +out_restore_voltage: + mtk_ccifreq_set_voltage(drv, pre_voltage); + +out_unlock: + mutex_unlock(&drv->reg_lock); + return ret; +} + +static int mtk_ccifreq_opp_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct dev_pm_opp *opp = data; + struct mtk_ccifreq_drv *drv; + unsigned long freq, volt; + + drv = container_of(nb, struct mtk_ccifreq_drv, opp_nb); + + if (event == OPP_EVENT_ADJUST_VOLTAGE) { + freq = dev_pm_opp_get_freq(opp); + + mutex_lock(&drv->reg_lock); + /* the opp item currently in use has changed */ + if (freq == drv->pre_freq) { + volt = dev_pm_opp_get_voltage(opp); + mtk_ccifreq_set_voltage(drv, volt); + } + mutex_unlock(&drv->reg_lock); + } + + return 0; +} + +static struct devfreq_dev_profile mtk_ccifreq_profile = { + .target = mtk_ccifreq_target, +}; + +static int mtk_ccifreq_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_ccifreq_drv *drv; + struct devfreq_passive_data *passive_data; + struct dev_pm_opp *opp; + unsigned long rate, opp_volt; + int ret; + + drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); + if (!drv) + return -ENOMEM; + + drv->dev = dev; + drv->soc_data = (const struct mtk_ccifreq_platform_data *) + of_device_get_match_data(&pdev->dev); + mutex_init(&drv->reg_lock); + platform_set_drvdata(pdev, drv); + + drv->cci_clk = devm_clk_get(dev, "cci"); + if (IS_ERR(drv->cci_clk)) { + ret = PTR_ERR(drv->cci_clk); + return dev_err_probe(dev, ret, "failed to get cci clk\n"); + } + + drv->inter_clk = devm_clk_get(dev, "intermediate"); + if (IS_ERR(drv->inter_clk)) { + ret = PTR_ERR(drv->inter_clk); + return dev_err_probe(dev, ret, + "failed to get intermediate clk\n"); + } + + drv->proc_reg = devm_regulator_get_optional(dev, "proc"); + if (IS_ERR(drv->proc_reg)) { + ret = PTR_ERR(drv->proc_reg); + return dev_err_probe(dev, ret, + "failed to get proc regulator\n"); + } + + ret = regulator_enable(drv->proc_reg); + if (ret) { + dev_err(dev, "failed to enable proc regulator\n"); + return ret; + } + + drv->sram_reg = devm_regulator_get_optional(dev, "sram"); + if (IS_ERR(drv->sram_reg)) + drv->sram_reg = NULL; + else { + ret = regulator_enable(drv->sram_reg); + if (ret) { + dev_err(dev, "failed to enable sram regulator\n"); + goto out_free_resources; + } + } + + /* + * We assume the minimum voltage is 0 and track the target voltage in + * steps of min_volt_shift per iteration. + * retry_max is three times the expected iteration count. 
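 * Worked example with the mt8186 numbers from the platform data at the end of this file: max(sram_max_volt, proc_max_volt) = 1118750 uV and min_volt_shift = 100000 uV, so DIV_ROUND_UP(1118750, 100000) = 12 and vtrack_max = 3 * 12 = 36 — if the voltage-tracking loop in mtk_ccifreq_set_voltage() has not converged after 36 iterations it gives up and returns -EINVAL.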
+ */ + drv->vtrack_max = 3 * DIV_ROUND_UP(max(drv->soc_data->sram_max_volt, + drv->soc_data->proc_max_volt), + drv->soc_data->min_volt_shift); + + ret = clk_prepare_enable(drv->cci_clk); + if (ret) + goto out_free_resources; + + ret = dev_pm_opp_of_add_table(dev); + if (ret) { + dev_err(dev, "failed to add opp table: %d\n", ret); + goto out_disable_cci_clk; + } + + rate = clk_get_rate(drv->inter_clk); + opp = dev_pm_opp_find_freq_ceil(dev, &rate); + if (IS_ERR(opp)) { + ret = PTR_ERR(opp); + dev_err(dev, "failed to get intermediate opp: %d\n", ret); + goto out_remove_opp_table; + } + drv->inter_voltage = dev_pm_opp_get_voltage(opp); + dev_pm_opp_put(opp); + + rate = U32_MAX; + opp = dev_pm_opp_find_freq_floor(drv->dev, &rate); + if (IS_ERR(opp)) { + dev_err(dev, "failed to get opp\n"); + ret = PTR_ERR(opp); + goto out_remove_opp_table; + } + + opp_volt = dev_pm_opp_get_voltage(opp); + dev_pm_opp_put(opp); + ret = mtk_ccifreq_set_voltage(drv, opp_volt); + if (ret) { + dev_err(dev, "failed to scale to highest voltage %lu in proc_reg\n", + opp_volt); + goto out_remove_opp_table; + } + + passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL); + if (!passive_data) { + ret = -ENOMEM; + goto out_remove_opp_table; + } + + passive_data->parent_type = CPUFREQ_PARENT_DEV; + drv->devfreq = devm_devfreq_add_device(dev, &mtk_ccifreq_profile, + DEVFREQ_GOV_PASSIVE, + passive_data); + if (IS_ERR(drv->devfreq)) { + ret = -EPROBE_DEFER; + dev_err(dev, "failed to add devfreq device: %ld\n", + PTR_ERR(drv->devfreq)); + goto out_remove_opp_table; + } + + drv->opp_nb.notifier_call = mtk_ccifreq_opp_notifier; + ret = dev_pm_opp_register_notifier(dev, &drv->opp_nb); + if (ret) { + dev_err(dev, "failed to register opp notifier: %d\n", ret); + goto out_remove_opp_table; + } + return 0; + +out_remove_opp_table: + dev_pm_opp_of_remove_table(dev); + +out_disable_cci_clk: + clk_disable_unprepare(drv->cci_clk); + +out_free_resources: + if (regulator_is_enabled(drv->proc_reg)) + regulator_disable(drv->proc_reg); + if (drv->sram_reg && regulator_is_enabled(drv->sram_reg)) + regulator_disable(drv->sram_reg); + + return ret; +} + +static int mtk_ccifreq_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_ccifreq_drv *drv; + + drv = platform_get_drvdata(pdev); + + dev_pm_opp_unregister_notifier(dev, &drv->opp_nb); + dev_pm_opp_of_remove_table(dev); + clk_disable_unprepare(drv->cci_clk); + regulator_disable(drv->proc_reg); + if (drv->sram_reg) + regulator_disable(drv->sram_reg); + + return 0; +} + +static const struct mtk_ccifreq_platform_data mt8183_platform_data = { + .min_volt_shift = 100000, + .max_volt_shift = 200000, + .proc_max_volt = 1150000, +}; + +static const struct mtk_ccifreq_platform_data mt8186_platform_data = { + .min_volt_shift = 100000, + .max_volt_shift = 250000, + .proc_max_volt = 1118750, + .sram_min_volt = 850000, + .sram_max_volt = 1118750, +}; + +static const struct of_device_id mtk_ccifreq_machines[] = { + { .compatible = "mediatek,mt8183-cci", .data = &mt8183_platform_data }, + { .compatible = "mediatek,mt8186-cci", .data = &mt8186_platform_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, mtk_ccifreq_machines); + +static struct platform_driver mtk_ccifreq_platdrv = { + .probe = mtk_ccifreq_probe, + .remove = mtk_ccifreq_remove, + .driver = { + .name = "mtk-ccifreq", + .of_match_table = mtk_ccifreq_machines, + }, +}; +module_platform_driver(mtk_ccifreq_platdrv); + +MODULE_DESCRIPTION("MediaTek CCI devfreq driver"); +MODULE_AUTHOR("Jia-Wei Chang 
<jia-wei.chang@mediatek.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c index 65ecf17a36f4..585a95fe2bd6 100644 --- a/drivers/devfreq/tegra30-devfreq.c +++ b/drivers/devfreq/tegra30-devfreq.c @@ -922,8 +922,10 @@ static int tegra_devfreq_probe(struct platform_device *pdev) devfreq = devm_devfreq_add_device(&pdev->dev, &tegra_devfreq_profile, "tegra_actmon", NULL); - if (IS_ERR(devfreq)) + if (IS_ERR(devfreq)) { + dev_err(&pdev->dev, "Failed to add device: %pe\n", devfreq); return PTR_ERR(devfreq); + } return 0; } diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 32f55640890c..3f08e0b960ec 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -549,7 +549,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) goto err_dmabuf; } - file->f_mode |= FMODE_LSEEK; dmabuf->file = file; mutex_init(&dmabuf->lock); diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c index 59b0bedc9c24..c8fa7dcfdbd0 100644 --- a/drivers/edac/ghes_edac.c +++ b/drivers/edac/ghes_edac.c @@ -103,9 +103,14 @@ static void dimm_setup_label(struct dimm_info *dimm, u16 handle) dmi_memdev_name(handle, &bank, &device); - /* both strings must be non-zero */ - if (bank && *bank && device && *device) - snprintf(dimm->label, sizeof(dimm->label), "%s %s", bank, device); + /* + * Set to a NULL string when both bank and device are zero. In this case, + * the label assigned by default will be preserved. + */ + snprintf(dimm->label, sizeof(dimm->label), "%s%s%s", + (bank && *bank) ? bank : "", + (bank && *bank && device && *device) ? " " : "", + (device && *device) ? device : ""); } static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry) diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index 1cee64b80a7e..f7d37c282819 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -514,6 +514,28 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p) memset(p, 0, sizeof(*p)); } +static void enable_intr(struct synps_edac_priv *priv) +{ + /* Enable UE/CE Interrupts */ + if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR) + writel(DDR_UE_MASK | DDR_CE_MASK, + priv->baseaddr + ECC_CLR_OFST); + else + writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, + priv->baseaddr + DDR_QOS_IRQ_EN_OFST); + +} + +static void disable_intr(struct synps_edac_priv *priv) +{ + /* Disable UE/CE Interrupts */ + if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR) + writel(0x0, priv->baseaddr + ECC_CLR_OFST); + else + writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, + priv->baseaddr + DDR_QOS_IRQ_DB_OFST); +} + /** * intr_handler - Interrupt Handler for ECC interrupts. * @irq: IRQ number. 
@@ -555,6 +577,9 @@ static irqreturn_t intr_handler(int irq, void *dev_id) /* v3.0 of the controller does not have this register */ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); + else + enable_intr(priv); + return IRQ_HANDLED; } @@ -837,25 +862,6 @@ static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev) init_csrows(mci); } -static void enable_intr(struct synps_edac_priv *priv) -{ - /* Enable UE/CE Interrupts */ - if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR) - writel(DDR_UE_MASK | DDR_CE_MASK, - priv->baseaddr + ECC_CLR_OFST); - else - writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, - priv->baseaddr + DDR_QOS_IRQ_EN_OFST); - -} - -static void disable_intr(struct synps_edac_priv *priv) -{ - /* Disable UE/CE Interrupts */ - writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, - priv->baseaddr + DDR_QOS_IRQ_DB_OFST); -} - static int setup_irq(struct mem_ctl_info *mci, struct platform_device *pdev) { diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index 1e7b7fec97d9..a14f65444b35 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -149,4 +149,16 @@ config ARM_SCMI_POWER_DOMAIN will be called scmi_pm_domain. Note this may needed early in boot before rootfs may be available. +config ARM_SCMI_POWER_CONTROL + tristate "SCMI system power control driver" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + help + This enables System Power control logic which binds system shutdown or + reboot actions to SCMI System Power notifications generated by SCP + firmware. + + This driver can also be built as a module. If so, the module will be + called scmi_power_control. Note this may be needed early in boot to catch + early shutdown/reboot SCMI requests. 
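On ARM_SCMI_POWER_CONTROL above: the driver itself is not part of this diff, so the following is only a rough sketch of the idea — reacting to a firmware-initiated System Power notification by requesting an orderly shutdown. The callback shape and the use of orderly_poweroff() are assumptions for illustration, not the actual scmi_power_control implementation:

	#include <linux/notifier.h>
	#include <linux/reboot.h>

	/* Hypothetical notifier: handle a shutdown request from SCP firmware. */
	static int syspower_notify(struct notifier_block *nb,
				   unsigned long event, void *data)
	{
		/* Ask user space to shut down cleanly; force if that fails. */
		orderly_poweroff(true);
		return NOTIFY_OK;
	}

Building it in rather than as a module matters for the reason the help text gives: a shutdown or reboot request can arrive before module loading is possible.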
+ endmenu diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 8d4afadda38c..9ea86f8cc8f7 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -7,11 +7,12 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o -scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o +scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \ $(scmi-transport-y) obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o +obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) # The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 8b7ac6663d57..609ebedee9cb 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -19,6 +19,7 @@ #include <linux/export.h> #include <linux/idr.h> #include <linux/io.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/hashtable.h> @@ -60,6 +61,11 @@ static atomic_t transfer_last_id; static DEFINE_IDR(scmi_requested_devices); static DEFINE_MUTEX(scmi_requested_devices_mtx); +/* Track globally the creation of SCMI SystemPower related devices */ +static bool scmi_syspower_registered; +/* Protect access to scmi_syspower_registered */ +static DEFINE_MUTEX(scmi_syspower_mtx); + struct scmi_requested_dev { const struct scmi_device_id *id_table; struct list_head node; @@ -660,6 +666,11 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, xfer); + + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); + scmi_notify(cinfo->handle, xfer->hdr.protocol_id, xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); @@ -694,6 +705,12 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_response(cinfo, xfer); + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, + xfer->hdr.type == MSG_TYPE_DELAYED_RESP ? + "DLYD" : "RESP", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); + trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, xfer->hdr.type); @@ -827,6 +844,12 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, xfer->state = SCMI_XFER_RESP_OK; } spin_unlock_irqrestore(&xfer->lock, flags); + + /* Trace polled replies. */ + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, + "RESP", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); } } else { /* And we wait for the response. 
*/ @@ -903,6 +926,10 @@ static int do_xfer(const struct scmi_protocol_handle *ph, return ret; } + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND", + xfer->hdr.seq, xfer->hdr.status, + xfer->tx.buf, xfer->tx.len); + ret = scmi_wait_for_message_response(cinfo, xfer); if (!ret && xfer->hdr.status) ret = scmi_to_linux_errno(xfer->hdr.status); @@ -1259,10 +1286,174 @@ out: return ret; } +struct scmi_msg_get_fc_info { + __le32 domain; + __le32 message_id; +}; + +struct scmi_msg_resp_desc_fc { + __le32 attr; +#define SUPPORTS_DOORBELL(x) ((x) & BIT(0)) +#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x)) + __le32 rate_limit; + __le32 chan_addr_low; + __le32 chan_addr_high; + __le32 chan_size; + __le32 db_addr_low; + __le32 db_addr_high; + __le32 db_set_lmask; + __le32 db_set_hmask; + __le32 db_preserve_lmask; + __le32 db_preserve_hmask; +}; + +static void +scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, + u8 describe_id, u32 message_id, u32 valid_size, + u32 domain, void __iomem **p_addr, + struct scmi_fc_db_info **p_db) +{ + int ret; + u32 flags; + u64 phys_addr; + u8 size; + void __iomem *addr; + struct scmi_xfer *t; + struct scmi_fc_db_info *db = NULL; + struct scmi_msg_get_fc_info *info; + struct scmi_msg_resp_desc_fc *resp; + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + + if (!p_addr) { + ret = -EINVAL; + goto err_out; + } + + ret = ph->xops->xfer_get_init(ph, describe_id, + sizeof(*info), sizeof(*resp), &t); + if (ret) + goto err_out; + + info = t->tx.buf; + info->domain = cpu_to_le32(domain); + info->message_id = cpu_to_le32(message_id); + + /* + * Bail out on error leaving fc_info addresses zeroed; this includes + * the case in which the requested domain/message_id does NOT support + * fastchannels at all. + */ + ret = ph->xops->do_xfer(ph, t); + if (ret) + goto err_xfer; + + resp = t->rx.buf; + flags = le32_to_cpu(resp->attr); + size = le32_to_cpu(resp->chan_size); + if (size != valid_size) { + ret = -EINVAL; + goto err_xfer; + } + + phys_addr = le32_to_cpu(resp->chan_addr_low); + phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; + addr = devm_ioremap(ph->dev, phys_addr, size); + if (!addr) { + ret = -EADDRNOTAVAIL; + goto err_xfer; + } + + *p_addr = addr; + + if (p_db && SUPPORTS_DOORBELL(flags)) { + db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); + if (!db) { + ret = -ENOMEM; + goto err_db; + } + + size = 1 << DOORBELL_REG_WIDTH(flags); + phys_addr = le32_to_cpu(resp->db_addr_low); + phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; + addr = devm_ioremap(ph->dev, phys_addr, size); + if (!addr) { + ret = -EADDRNOTAVAIL; + goto err_db_mem; + } + + db->addr = addr; + db->width = size; + db->set = le32_to_cpu(resp->db_set_lmask); + db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; + db->mask = le32_to_cpu(resp->db_preserve_lmask); + db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; + + *p_db = db; + } + + ph->xops->xfer_put(ph, t); + + dev_dbg(ph->dev, + "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n", + pi->proto->id, message_id, domain); + + return; + +err_db_mem: + devm_kfree(ph->dev, db); + +err_db: + *p_addr = NULL; + +err_xfer: + ph->xops->xfer_put(ph, t); + +err_out: + dev_warn(ph->dev, + "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. 
Using regular messaging.\n", + pi->proto->id, message_id, domain, ret); +} + +#define SCMI_PROTO_FC_RING_DB(w) \ +do { \ + u##w val = 0; \ + \ + if (db->mask) \ + val = ioread##w(db->addr) & db->mask; \ + iowrite##w((u##w)db->set | val, db->addr); \ +} while (0) + +static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db) +{ + if (!db || !db->addr) + return; + + if (db->width == 1) + SCMI_PROTO_FC_RING_DB(8); + else if (db->width == 2) + SCMI_PROTO_FC_RING_DB(16); + else if (db->width == 4) + SCMI_PROTO_FC_RING_DB(32); + else /* db->width == 8 */ +#ifdef CONFIG_64BIT + SCMI_PROTO_FC_RING_DB(64); +#else + { + u64 val = 0; + + if (db->mask) + val = ioread64_hi_lo(db->addr) & db->mask; + iowrite64_hi_lo(db->set | val, db->addr); + } +#endif +} + static const struct scmi_proto_helpers_ops helpers_ops = { .extended_name_get = scmi_common_extended_name_get, .iter_response_init = scmi_iterator_init, .iter_response_run = scmi_iterator_run, + .fastchannel_init = scmi_common_fastchannel_init, + .fastchannel_db_ring = scmi_common_fastchannel_db_ring, }; /** @@ -1497,6 +1688,30 @@ static void scmi_devm_release_protocol(struct device *dev, void *res) scmi_protocol_release(dres->handle, dres->protocol_id); } +static struct scmi_protocol_instance __must_check * +scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id) +{ + struct scmi_protocol_instance *pi; + struct scmi_protocol_devres *dres; + + dres = devres_alloc(scmi_devm_release_protocol, + sizeof(*dres), GFP_KERNEL); + if (!dres) + return ERR_PTR(-ENOMEM); + + pi = scmi_get_protocol_instance(sdev->handle, protocol_id); + if (IS_ERR(pi)) { + devres_free(dres); + return pi; + } + + dres->handle = sdev->handle; + dres->protocol_id = protocol_id; + devres_add(&sdev->dev, dres); + + return pi; +} + /** * scmi_devm_protocol_get - Devres managed get protocol operations and handle * @sdev: A reference to an scmi_device whose embedded struct device is to @@ -1520,32 +1735,47 @@ scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id, struct scmi_protocol_handle **ph) { struct scmi_protocol_instance *pi; - struct scmi_protocol_devres *dres; - struct scmi_handle *handle = sdev->handle; if (!ph) return ERR_PTR(-EINVAL); - dres = devres_alloc(scmi_devm_release_protocol, - sizeof(*dres), GFP_KERNEL); - if (!dres) - return ERR_PTR(-ENOMEM); - - pi = scmi_get_protocol_instance(handle, protocol_id); - if (IS_ERR(pi)) { - devres_free(dres); + pi = scmi_devres_protocol_instance_get(sdev, protocol_id); + if (IS_ERR(pi)) return pi; - } - - dres->handle = handle; - dres->protocol_id = protocol_id; - devres_add(&sdev->dev, dres); *ph = &pi->ph; return pi->proto->ops; } +/** + * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol + * @sdev: A reference to an scmi_device whose embedded struct device is to + * be used for devres accounting. + * @protocol_id: The protocol being requested. + * + * Get hold of a protocol accounting for its usage, possibly triggering its + * initialization but without getting access to its protocol specific operations + * and handle. + * + * Being a devres based managed method, protocol hold will be automatically + * released, and possibly de-initialized on last user, once the SCMI driver + * owning the scmi_device is unbound from it. 
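For reference, SCMI_PROTO_FC_RING_DB() above is a width-parameterized masked read-modify-write: bits covered by db->mask are preserved from the current doorbell value and everything else is overwritten by db->set. A minimal sketch of the 32-bit case, assuming only the scmi_fc_db_info layout introduced by this patch:

static void ring_fc_db_32(struct scmi_fc_db_info *db)
{
	u32 val = 0;

	/* Preserve the bits the platform asked us not to clobber. */
	if (db->mask)
		val = ioread32(db->addr) & db->mask;

	/* Merge in the set-mask and ring the doorbell with one write. */
	iowrite32((u32)db->set | val, db->addr);
}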
+ * + * Return: 0 on SUCCESS + */ +static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev, + u8 protocol_id) +{ + struct scmi_protocol_instance *pi; + + pi = scmi_devres_protocol_instance_get(sdev, protocol_id); + if (IS_ERR(pi)) + return PTR_ERR(pi); + + return 0; +} + static int scmi_devm_protocol_match(struct device *dev, void *res, void *data) { struct scmi_protocol_devres *dres = res; @@ -1849,21 +2079,39 @@ scmi_get_protocol_device(struct device_node *np, struct scmi_info *info, if (sdev) return sdev; + mutex_lock(&scmi_syspower_mtx); + if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) { + dev_warn(info->dev, + "SCMI SystemPower protocol device must be unique !\n"); + mutex_unlock(&scmi_syspower_mtx); + + return NULL; + } + pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id); sdev = scmi_device_create(np, info->dev, prot_id, name); if (!sdev) { dev_err(info->dev, "failed to create %d protocol device\n", prot_id); + mutex_unlock(&scmi_syspower_mtx); + return NULL; } if (scmi_txrx_setup(info, &sdev->dev, prot_id)) { dev_err(&sdev->dev, "failed to setup transport\n"); scmi_device_destroy(sdev); + mutex_unlock(&scmi_syspower_mtx); + return NULL; } + if (prot_id == SCMI_PROTOCOL_SYSTEM) + scmi_syspower_registered = true; + + mutex_unlock(&scmi_syspower_mtx); + return sdev; } @@ -2132,6 +2380,7 @@ static int scmi_probe(struct platform_device *pdev) handle = &info->handle; handle->dev = info->dev; handle->version = &info->version; + handle->devm_protocol_acquire = scmi_devm_protocol_acquire; handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_put = scmi_devm_protocol_put; @@ -2401,6 +2650,7 @@ static int __init scmi_driver_init(void) scmi_sensors_register(); scmi_voltage_register(); scmi_system_register(); + scmi_powercap_register(); return platform_driver_register(&scmi_driver); } @@ -2417,6 +2667,7 @@ static void __exit scmi_driver_exit(void) scmi_sensors_unregister(); scmi_voltage_unregister(); scmi_system_unregister(); + scmi_powercap_unregister(); scmi_bus_exit(); diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index bbb0331801ff..ecf5c4de851b 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -10,13 +10,14 @@ #include <linux/bits.h> #include <linux/of.h> #include <linux/io.h> -#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/scmi_protocol.h> #include <linux/sort.h> +#include <trace/events/scmi.h> + #include "protocols.h" #include "notify.h" @@ -35,6 +36,12 @@ enum scmi_performance_protocol_cmd { PERF_DOMAIN_NAME_GET = 0xc, }; +enum { + PERF_FC_LEVEL, + PERF_FC_LIMIT, + PERF_FC_MAX, +}; + struct scmi_opp { u32 perf; u32 power; @@ -115,43 +122,6 @@ struct scmi_msg_resp_perf_describe_levels { } opp[]; }; -struct scmi_perf_get_fc_info { - __le32 domain; - __le32 message_id; -}; - -struct scmi_msg_resp_perf_desc_fc { - __le32 attr; -#define SUPPORTS_DOORBELL(x) ((x) & BIT(0)) -#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x)) - __le32 rate_limit; - __le32 chan_addr_low; - __le32 chan_addr_high; - __le32 chan_size; - __le32 db_addr_low; - __le32 db_addr_high; - __le32 db_set_lmask; - __le32 db_set_hmask; - __le32 db_preserve_lmask; - __le32 db_preserve_hmask; -}; - -struct scmi_fc_db_info { - int width; - u64 set; - u64 mask; - void __iomem *addr; -}; - -struct scmi_fc_info { - void __iomem *level_set_addr; - void __iomem *limit_set_addr; - 
void __iomem *level_get_addr; - void __iomem *limit_get_addr; - struct scmi_fc_db_info *level_set_db; - struct scmi_fc_db_info *limit_set_db; -}; - struct perf_dom_info { bool set_limits; bool set_perf; @@ -170,8 +140,7 @@ struct perf_dom_info { struct scmi_perf_info { u32 version; int num_domains; - bool power_scale_mw; - bool power_scale_uw; + enum scmi_power_scale power_scale; u64 stats_addr; u32 stats_size; struct perf_dom_info *dom_info; @@ -201,9 +170,13 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph, u16 flags = le16_to_cpu(attr->flags); pi->num_domains = le16_to_cpu(attr->num_domains); - pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags); + + if (POWER_SCALE_IN_MILLIWATT(flags)) + pi->power_scale = SCMI_POWER_MILLIWATTS; if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3) - pi->power_scale_uw = POWER_SCALE_IN_MICROWATT(flags); + if (POWER_SCALE_IN_MICROWATT(flags)) + pi->power_scale = SCMI_POWER_MICROWATTS; + pi->stats_addr = le32_to_cpu(attr->stats_addr_low) | (u64)le32_to_cpu(attr->stats_addr_high) << 32; pi->stats_size = le32_to_cpu(attr->stats_size); @@ -360,40 +333,6 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, return ret; } -#define SCMI_PERF_FC_RING_DB(w) \ -do { \ - u##w val = 0; \ - \ - if (db->mask) \ - val = ioread##w(db->addr) & db->mask; \ - iowrite##w((u##w)db->set | val, db->addr); \ -} while (0) - -static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db) -{ - if (!db || !db->addr) - return; - - if (db->width == 1) - SCMI_PERF_FC_RING_DB(8); - else if (db->width == 2) - SCMI_PERF_FC_RING_DB(16); - else if (db->width == 4) - SCMI_PERF_FC_RING_DB(32); - else /* db->width == 8 */ -#ifdef CONFIG_64BIT - SCMI_PERF_FC_RING_DB(64); -#else - { - u64 val = 0; - - if (db->mask) - val = ioread64_hi_lo(db->addr) & db->mask; - iowrite64_hi_lo(db->set | val, db->addr); - } -#endif -} - static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph, u32 domain, u32 max_perf, u32 min_perf) { @@ -426,10 +365,14 @@ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph, if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf) return -EINVAL; - if (dom->fc_info && dom->fc_info->limit_set_addr) { - iowrite32(max_perf, dom->fc_info->limit_set_addr); - iowrite32(min_perf, dom->fc_info->limit_set_addr + 4); - scmi_perf_fc_ring_db(dom->fc_info->limit_set_db); + if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; + + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET, + domain, min_perf, max_perf); + iowrite32(max_perf, fci->set_addr); + iowrite32(min_perf, fci->set_addr + 4); + ph->hops->fastchannel_db_ring(fci->set_db); return 0; } @@ -468,9 +411,13 @@ static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->limit_get_addr) { - *max_perf = ioread32(dom->fc_info->limit_get_addr); - *min_perf = ioread32(dom->fc_info->limit_get_addr + 4); + if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; + + *max_perf = ioread32(fci->get_addr); + *min_perf = ioread32(fci->get_addr + 4); + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET, + domain, *min_perf, *max_perf); return 0; } @@ -505,9 +452,13 @@ static int scmi_perf_level_set(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); 
struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->level_set_addr) { - iowrite32(level, dom->fc_info->level_set_addr); - scmi_perf_fc_ring_db(dom->fc_info->level_set_db); + if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL]; + + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET, + domain, level, 0); + iowrite32(level, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); return 0; } @@ -542,8 +493,10 @@ static int scmi_perf_level_get(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->level_get_addr) { - *level = ioread32(dom->fc_info->level_get_addr); + if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) { + *level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET, + domain, *level, 0); return 0; } @@ -572,100 +525,33 @@ static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph, return ret; } -static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size) -{ - if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4) - return true; - if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8) - return true; - return false; -} - -static void -scmi_perf_domain_desc_fc(const struct scmi_protocol_handle *ph, u32 domain, - u32 message_id, void __iomem **p_addr, - struct scmi_fc_db_info **p_db) -{ - int ret; - u32 flags; - u64 phys_addr; - u8 size; - void __iomem *addr; - struct scmi_xfer *t; - struct scmi_fc_db_info *db; - struct scmi_perf_get_fc_info *info; - struct scmi_msg_resp_perf_desc_fc *resp; - - if (!p_addr) - return; - - ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_FASTCHANNEL, - sizeof(*info), sizeof(*resp), &t); - if (ret) - return; - - info = t->tx.buf; - info->domain = cpu_to_le32(domain); - info->message_id = cpu_to_le32(message_id); - - ret = ph->xops->do_xfer(ph, t); - if (ret) - goto err_xfer; - - resp = t->rx.buf; - flags = le32_to_cpu(resp->attr); - size = le32_to_cpu(resp->chan_size); - if (!scmi_perf_fc_size_is_valid(message_id, size)) - goto err_xfer; - - phys_addr = le32_to_cpu(resp->chan_addr_low); - phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; - addr = devm_ioremap(ph->dev, phys_addr, size); - if (!addr) - goto err_xfer; - *p_addr = addr; - - if (p_db && SUPPORTS_DOORBELL(flags)) { - db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); - if (!db) - goto err_xfer; - - size = 1 << DOORBELL_REG_WIDTH(flags); - phys_addr = le32_to_cpu(resp->db_addr_low); - phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; - addr = devm_ioremap(ph->dev, phys_addr, size); - if (!addr) - goto err_xfer; - - db->addr = addr; - db->width = size; - db->set = le32_to_cpu(resp->db_set_lmask); - db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; - db->mask = le32_to_cpu(resp->db_preserve_lmask); - db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; - *p_db = db; - } -err_xfer: - ph->xops->xfer_put(ph, t); -} - static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph, u32 domain, struct scmi_fc_info **p_fc) { struct scmi_fc_info *fc; - fc = devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL); + fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL); if (!fc) return; - scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_SET, - &fc->level_set_addr, &fc->level_set_db); - scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_GET, 
- &fc->level_get_addr, NULL); - scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_SET, - &fc->limit_set_addr, &fc->limit_set_db); - scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_GET, - &fc->limit_get_addr, NULL); + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LEVEL_SET, 4, domain, + &fc[PERF_FC_LEVEL].set_addr, + &fc[PERF_FC_LEVEL].set_db); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LEVEL_GET, 4, domain, + &fc[PERF_FC_LEVEL].get_addr, NULL); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LIMITS_SET, 8, domain, + &fc[PERF_FC_LIMIT].set_addr, + &fc[PERF_FC_LIMIT].set_db); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LIMITS_GET, 8, domain, + &fc[PERF_FC_LIMIT].get_addr, NULL); + *p_fc = fc; } @@ -789,14 +675,15 @@ static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph, dom = pi->dom_info + scmi_dev_domain_id(dev); - return dom->fc_info && dom->fc_info->level_set_addr; + return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr; } -static bool scmi_power_scale_mw_get(const struct scmi_protocol_handle *ph) +static enum scmi_power_scale +scmi_power_scale_get(const struct scmi_protocol_handle *ph) { struct scmi_perf_info *pi = ph->get_priv(ph); - return pi->power_scale_mw; + return pi->power_scale; } static const struct scmi_perf_proto_ops perf_proto_ops = { @@ -811,7 +698,7 @@ static const struct scmi_perf_proto_ops perf_proto_ops = { .freq_get = scmi_dvfs_freq_get, .est_power_get = scmi_dvfs_est_power_get, .fast_switch_possible = scmi_fast_switch_possible, - .power_scale_mw_get = scmi_power_scale_mw_get, + .power_scale_get = scmi_power_scale_get, }; static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph, diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c new file mode 100644 index 000000000000..83b90bde755c --- /dev/null +++ b/drivers/firmware/arm_scmi/powercap.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Powercap Protocol + * + * Copyright (C) 2022 ARM Ltd. 
+ */ + +#define pr_fmt(fmt) "SCMI Notifications POWERCAP - " fmt + +#include <linux/bitfield.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/scmi_protocol.h> + +#include <trace/events/scmi.h> + +#include "protocols.h" +#include "notify.h" + +enum scmi_powercap_protocol_cmd { + POWERCAP_DOMAIN_ATTRIBUTES = 0x3, + POWERCAP_CAP_GET = 0x4, + POWERCAP_CAP_SET = 0x5, + POWERCAP_PAI_GET = 0x6, + POWERCAP_PAI_SET = 0x7, + POWERCAP_DOMAIN_NAME_GET = 0x8, + POWERCAP_MEASUREMENTS_GET = 0x9, + POWERCAP_CAP_NOTIFY = 0xa, + POWERCAP_MEASUREMENTS_NOTIFY = 0xb, + POWERCAP_DESCRIBE_FASTCHANNEL = 0xc, +}; + +enum { + POWERCAP_FC_CAP, + POWERCAP_FC_PAI, + POWERCAP_FC_MAX, +}; + +struct scmi_msg_resp_powercap_domain_attributes { + __le32 attributes; +#define SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(x) ((x) & BIT(31)) +#define SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(x) ((x) & BIT(30)) +#define SUPPORTS_ASYNC_POWERCAP_CAP_SET(x) ((x) & BIT(29)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(28)) +#define SUPPORTS_POWERCAP_CAP_CONFIGURATION(x) ((x) & BIT(27)) +#define SUPPORTS_POWERCAP_MONITORING(x) ((x) & BIT(26)) +#define SUPPORTS_POWERCAP_PAI_CONFIGURATION(x) ((x) & BIT(25)) +#define SUPPORTS_POWERCAP_FASTCHANNELS(x) ((x) & BIT(22)) +#define POWERCAP_POWER_UNIT(x) \ + (FIELD_GET(GENMASK(24, 23), (x))) +#define SUPPORTS_POWER_UNITS_MW(x) \ + (POWERCAP_POWER_UNIT(x) == 0x2) +#define SUPPORTS_POWER_UNITS_UW(x) \ + (POWERCAP_POWER_UNIT(x) == 0x1) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; + __le32 min_pai; + __le32 max_pai; + __le32 pai_step; + __le32 min_power_cap; + __le32 max_power_cap; + __le32 power_cap_step; + __le32 sustainable_power; + __le32 accuracy; + __le32 parent_id; +}; + +struct scmi_msg_powercap_set_cap_or_pai { + __le32 domain; + __le32 flags; +#define CAP_SET_ASYNC BIT(1) +#define CAP_SET_IGNORE_DRESP BIT(0) + __le32 value; +}; + +struct scmi_msg_resp_powercap_cap_set_complete { + __le32 domain; + __le32 power_cap; +}; + +struct scmi_msg_resp_powercap_meas_get { + __le32 power; + __le32 pai; +}; + +struct scmi_msg_powercap_notify_cap { + __le32 domain; + __le32 notify_enable; +}; + +struct scmi_msg_powercap_notify_thresh { + __le32 domain; + __le32 notify_enable; + __le32 power_thresh_low; + __le32 power_thresh_high; +}; + +struct scmi_powercap_cap_changed_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 power_cap; + __le32 pai; +}; + +struct scmi_powercap_meas_changed_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 power; +}; + +struct scmi_powercap_state { + bool meas_notif_enabled; + u64 thresholds; +#define THRESH_LOW(p, id) \ + (lower_32_bits((p)->states[(id)].thresholds)) +#define THRESH_HIGH(p, id) \ + (upper_32_bits((p)->states[(id)].thresholds)) +}; + +struct powercap_info { + u32 version; + int num_domains; + struct scmi_powercap_state *states; + struct scmi_powercap_info *powercaps; +}; + +static enum scmi_powercap_protocol_cmd evt_2_cmd[] = { + POWERCAP_CAP_NOTIFY, + POWERCAP_MEASUREMENTS_NOTIFY, +}; + +static int scmi_powercap_notify(const struct scmi_protocol_handle *ph, + u32 domain, int message_id, bool enable); + +static int +scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph, + struct powercap_info *pi) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(u32), &t); + if (ret) + return ret; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + u32 attributes; + + attributes = get_unaligned_le32(t->rx.buf); + pi->num_domains = FIELD_GET(GENMASK(15, 0), attributes); + 
} + + ph->xops->xfer_put(ph, t); + return ret; +} + +static inline int +scmi_powercap_validate(unsigned int min_val, unsigned int max_val, + unsigned int step_val, bool configurable) +{ + if (!min_val || !max_val) + return -EPROTO; + + if ((configurable && min_val == max_val) || + (!configurable && min_val != max_val)) + return -EPROTO; + + if (min_val != max_val && !step_val) + return -EPROTO; + + return 0; +} + +static int +scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph, + struct powercap_info *pinfo, u32 domain) +{ + int ret; + u32 flags; + struct scmi_xfer *t; + struct scmi_powercap_info *dom_info = pinfo->powercaps + domain; + struct scmi_msg_resp_powercap_domain_attributes *resp; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_DOMAIN_ATTRIBUTES, + sizeof(domain), sizeof(*resp), &t); + if (ret) + return ret; + + put_unaligned_le32(domain, t->tx.buf); + resp = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + flags = le32_to_cpu(resp->attributes); + + dom_info->id = domain; + dom_info->notify_powercap_cap_change = + SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags); + dom_info->notify_powercap_measurement_change = + SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags); + dom_info->async_powercap_cap_set = + SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags); + dom_info->powercap_cap_config = + SUPPORTS_POWERCAP_CAP_CONFIGURATION(flags); + dom_info->powercap_monitoring = + SUPPORTS_POWERCAP_MONITORING(flags); + dom_info->powercap_pai_config = + SUPPORTS_POWERCAP_PAI_CONFIGURATION(flags); + dom_info->powercap_scale_mw = + SUPPORTS_POWER_UNITS_MW(flags); + dom_info->powercap_scale_uw = + SUPPORTS_POWER_UNITS_UW(flags); + dom_info->fastchannels = + SUPPORTS_POWERCAP_FASTCHANNELS(flags); + + strscpy(dom_info->name, resp->name, SCMI_SHORT_NAME_MAX_SIZE); + + dom_info->min_pai = le32_to_cpu(resp->min_pai); + dom_info->max_pai = le32_to_cpu(resp->max_pai); + dom_info->pai_step = le32_to_cpu(resp->pai_step); + ret = scmi_powercap_validate(dom_info->min_pai, + dom_info->max_pai, + dom_info->pai_step, + dom_info->powercap_pai_config); + if (ret) { + dev_err(ph->dev, + "Platform reported inconsistent PAI config for domain %d - %s\n", + dom_info->id, dom_info->name); + goto clean; + } + + dom_info->min_power_cap = le32_to_cpu(resp->min_power_cap); + dom_info->max_power_cap = le32_to_cpu(resp->max_power_cap); + dom_info->power_cap_step = le32_to_cpu(resp->power_cap_step); + ret = scmi_powercap_validate(dom_info->min_power_cap, + dom_info->max_power_cap, + dom_info->power_cap_step, + dom_info->powercap_cap_config); + if (ret) { + dev_err(ph->dev, + "Platform reported inconsistent CAP config for domain %d - %s\n", + dom_info->id, dom_info->name); + goto clean; + } + + dom_info->sustainable_power = + le32_to_cpu(resp->sustainable_power); + dom_info->accuracy = le32_to_cpu(resp->accuracy); + + dom_info->parent_id = le32_to_cpu(resp->parent_id); + if (dom_info->parent_id != SCMI_POWERCAP_ROOT_ZONE_ID && + (dom_info->parent_id >= pinfo->num_domains || + dom_info->parent_id == dom_info->id)) { + dev_err(ph->dev, + "Platform reported inconsistent parent ID for domain %d - %s\n", + dom_info->id, dom_info->name); + ret = -ENODEV; + } + } + +clean: + ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
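The scmi_powercap_validate() helper above encodes three invariants: limits must be non-zero, a configurable value must come with a real range while a fixed one must be degenerate, and any real range needs a non-zero step. A few illustrative cases (the values are made up for the example):

/*
 *  configurable   min    max    step   result
 *  ------------   ----   ----   ----   -------
 *  true            100    500     50   0        valid, steppable range
 *  true            100    100      0   -EPROTO  configurable but degenerate
 *  false           200    200      0   0        fixed value, no range needed
 *  false           100    500     50   -EPROTO  range despite being fixed
 *  any               0    500     50   -EPROTO  zero limit
 */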
+ */ + if (!ret && SUPPORTS_EXTENDED_NAMES(flags)) + ph->hops->extended_name_get(ph, POWERCAP_DOMAIN_NAME_GET, + domain, dom_info->name, + SCMI_MAX_STR_SIZE); + + return ret; +} + +static int scmi_powercap_num_domains_get(const struct scmi_protocol_handle *ph) +{ + struct powercap_info *pi = ph->get_priv(ph); + + return pi->num_domains; +} + +static const struct scmi_powercap_info * +scmi_powercap_dom_info_get(const struct scmi_protocol_handle *ph, u32 domain_id) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (domain_id >= pi->num_domains) + return NULL; + + return pi->powercaps + domain_id; +} + +static int scmi_powercap_xfer_cap_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_cap) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_GET, sizeof(u32), + sizeof(u32), &t); + if (ret) + return ret; + + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *power_cap = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_powercap_cap_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_cap) +{ + struct scmi_powercap_info *dom; + struct powercap_info *pi = ph->get_priv(ph); + + if (!power_cap || domain_id >= pi->num_domains) + return -EINVAL; + + dom = pi->powercaps + domain_id; + if (dom->fc_info && dom->fc_info[POWERCAP_FC_CAP].get_addr) { + *power_cap = ioread32(dom->fc_info[POWERCAP_FC_CAP].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_GET, + domain_id, *power_cap, 0); + return 0; + } + + return scmi_powercap_xfer_cap_get(ph, domain_id, power_cap); +} + +static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph, + const struct scmi_powercap_info *pc, + u32 power_cap, bool ignore_dresp) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_powercap_set_cap_or_pai *msg; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_SET, + sizeof(*msg), 0, &t); + if (ret) + return ret; + + msg = t->tx.buf; + msg->domain = cpu_to_le32(pc->id); + msg->flags = + cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, !!pc->async_powercap_cap_set) | + FIELD_PREP(CAP_SET_IGNORE_DRESP, !!ignore_dresp)); + msg->value = cpu_to_le32(power_cap); + + if (!pc->async_powercap_cap_set || ignore_dresp) { + ret = ph->xops->do_xfer(ph, t); + } else { + ret = ph->xops->do_xfer_with_response(ph, t); + if (!ret) { + struct scmi_msg_resp_powercap_cap_set_complete *resp; + + resp = t->rx.buf; + if (le32_to_cpu(resp->domain) == pc->id) + dev_dbg(ph->dev, + "Powercap ID %d CAP set async to %u\n", + pc->id, + get_unaligned_le32(&resp->power_cap)); + else + ret = -EPROTO; + } + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 power_cap, + bool ignore_dresp) +{ + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_cap_config || !power_cap || + power_cap < pc->min_power_cap || + power_cap > pc->max_power_cap) + return -EINVAL; + + if (pc->fc_info && pc->fc_info[POWERCAP_FC_CAP].set_addr) { + struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_CAP]; + + iowrite32(power_cap, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_SET, + domain_id, power_cap, 0); + return 0; + } + + return scmi_powercap_xfer_cap_set(ph, pc, power_cap, ignore_dresp); +} + +static int scmi_powercap_xfer_pai_get(const struct 
scmi_protocol_handle *ph, + u32 domain_id, u32 *pai) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_GET, sizeof(u32), + sizeof(u32), &t); + if (ret) + return ret; + + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *pai = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_powercap_pai_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *pai) +{ + struct scmi_powercap_info *dom; + struct powercap_info *pi = ph->get_priv(ph); + + if (!pai || domain_id >= pi->num_domains) + return -EINVAL; + + dom = pi->powercaps + domain_id; + if (dom->fc_info && dom->fc_info[POWERCAP_FC_PAI].get_addr) { + *pai = ioread32(dom->fc_info[POWERCAP_FC_PAI].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_GET, + domain_id, *pai, 0); + return 0; + } + + return scmi_powercap_xfer_pai_get(ph, domain_id, pai); +} + +static int scmi_powercap_xfer_pai_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 pai) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_powercap_set_cap_or_pai *msg; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_SET, + sizeof(*msg), 0, &t); + if (ret) + return ret; + + msg = t->tx.buf; + msg->domain = cpu_to_le32(domain_id); + msg->flags = cpu_to_le32(0); + msg->value = cpu_to_le32(pai); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_powercap_pai_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 pai) +{ + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_pai_config || !pai || + pai < pc->min_pai || pai > pc->max_pai) + return -EINVAL; + + if (pc->fc_info && pc->fc_info[POWERCAP_FC_PAI].set_addr) { + struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_PAI]; + + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_SET, + domain_id, pai, 0); + iowrite32(pai, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); + return 0; + } + + return scmi_powercap_xfer_pai_set(ph, domain_id, pai); +} + +static int scmi_powercap_measurements_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *average_power, + u32 *pai) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_powercap_meas_get *resp; + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_monitoring || !pai || !average_power) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_MEASUREMENTS_GET, + sizeof(u32), sizeof(*resp), &t); + if (ret) + return ret; + + resp = t->rx.buf; + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + *average_power = le32_to_cpu(resp->power); + *pai = le32_to_cpu(resp->pai); + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_powercap_measurements_threshold_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_thresh_low, + u32 *power_thresh_high) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (!power_thresh_low || !power_thresh_high || + domain_id >= pi->num_domains) + return -EINVAL; + + *power_thresh_low = THRESH_LOW(pi, domain_id); + *power_thresh_high = THRESH_HIGH(pi, domain_id); + + return 0; +} + +static int +scmi_powercap_measurements_threshold_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 power_thresh_low, + u32 power_thresh_high) +{ + int ret = 0; + struct powercap_info *pi = ph->get_priv(ph); + 
+ if (domain_id >= pi->num_domains || + power_thresh_low > power_thresh_high) + return -EINVAL; + + /* Anything to do ? */ + if (THRESH_LOW(pi, domain_id) == power_thresh_low && + THRESH_HIGH(pi, domain_id) == power_thresh_high) + return ret; + + pi->states[domain_id].thresholds = + (FIELD_PREP(GENMASK_ULL(31, 0), power_thresh_low) | + FIELD_PREP(GENMASK_ULL(63, 32), power_thresh_high)); + + /* Update thresholds if notification already enabled */ + if (pi->states[domain_id].meas_notif_enabled) + ret = scmi_powercap_notify(ph, domain_id, + POWERCAP_MEASUREMENTS_NOTIFY, + true); + + return ret; +} + +static const struct scmi_powercap_proto_ops powercap_proto_ops = { + .num_domains_get = scmi_powercap_num_domains_get, + .info_get = scmi_powercap_dom_info_get, + .cap_get = scmi_powercap_cap_get, + .cap_set = scmi_powercap_cap_set, + .pai_get = scmi_powercap_pai_get, + .pai_set = scmi_powercap_pai_set, + .measurements_get = scmi_powercap_measurements_get, + .measurements_threshold_set = scmi_powercap_measurements_threshold_set, + .measurements_threshold_get = scmi_powercap_measurements_threshold_get, +}; + +static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph, + u32 domain, struct scmi_fc_info **p_fc) +{ + struct scmi_fc_info *fc; + + fc = devm_kcalloc(ph->dev, POWERCAP_FC_MAX, sizeof(*fc), GFP_KERNEL); + if (!fc) + return; + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_CAP_SET, 4, domain, + &fc[POWERCAP_FC_CAP].set_addr, + &fc[POWERCAP_FC_CAP].set_db); + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_CAP_GET, 4, domain, + &fc[POWERCAP_FC_CAP].get_addr, NULL); + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_PAI_SET, 4, domain, + &fc[POWERCAP_FC_PAI].set_addr, + &fc[POWERCAP_FC_PAI].set_db); + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_PAI_GET, 4, domain, + &fc[POWERCAP_FC_PAI].get_addr, NULL); + + *p_fc = fc; +} + +static int scmi_powercap_notify(const struct scmi_protocol_handle *ph, + u32 domain, int message_id, bool enable) +{ + int ret; + struct scmi_xfer *t; + + switch (message_id) { + case POWERCAP_CAP_NOTIFY: + { + struct scmi_msg_powercap_notify_cap *notify; + + ret = ph->xops->xfer_get_init(ph, message_id, + sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->domain = cpu_to_le32(domain); + notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0); + break; + } + case POWERCAP_MEASUREMENTS_NOTIFY: + { + u32 low, high; + struct scmi_msg_powercap_notify_thresh *notify; + + /* + * Note that we have to pick the most recently configured + * thresholds to build a proper POWERCAP_MEASUREMENTS_NOTIFY + * enable request and we fail, complaining, if no thresholds + * were ever set, since this is an indication the API has been + * used wrongly. + */ + ret = scmi_powercap_measurements_threshold_get(ph, domain, + &low, &high); + if (ret) + return ret; + + if (enable && !low && !high) { + dev_err(ph->dev, + "Invalid Measurements Notify thresholds: %u/%u\n", + low, high); + return -EINVAL; + } + + ret = ph->xops->xfer_get_init(ph, message_id, + sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->domain = cpu_to_le32(domain); + notify->notify_enable = cpu_to_le32(enable ? 
BIT(0) : 0); + notify->power_thresh_low = cpu_to_le32(low); + notify->power_thresh_high = cpu_to_le32(high); + break; + } + default: + return -EINVAL; + } + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret, cmd_id; + struct powercap_info *pi = ph->get_priv(ph); + + if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains) + return -EINVAL; + + cmd_id = evt_2_cmd[evt_id]; + ret = scmi_powercap_notify(ph, src_id, cmd_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + else if (cmd_id == POWERCAP_MEASUREMENTS_NOTIFY) + /* + * On success save the current notification enabled state, so + * as to be able to properly update the notification thresholds + * when they are modified on a domain for which measurement + * notifications were currently enabled. + * + * This is needed because the SCMI Notification core machinery + * and API does not support passing per-notification custom + * arguments at callback registration time. + * + * Note that this can be done here with a simple flag since the + * SCMI core Notifications code takes care of keeping proper + * per-domain enables refcounting, so that this helper function + * will be called only once (for enables) when the first user + * registers a callback on this domain and once more (disable) + * when the last user de-registers its callback. + */ + pi->states[src_id].meas_notif_enabled = enable; + + return ret; +} + +static void * +scmi_powercap_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + void *rep = NULL; + + switch (evt_id) { + case SCMI_EVENT_POWERCAP_CAP_CHANGED: + { + const struct scmi_powercap_cap_changed_notify_payld *p = payld; + struct scmi_powercap_cap_changed_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->power_cap = le32_to_cpu(p->power_cap); + r->pai = le32_to_cpu(p->pai); + *src_id = r->domain_id; + rep = r; + break; + } + case SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED: + { + const struct scmi_powercap_meas_changed_notify_payld *p = payld; + struct scmi_powercap_meas_changed_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->power = le32_to_cpu(p->power); + *src_id = r->domain_id; + rep = r; + break; + } + default: + break; + } + + return rep; +} + +static int +scmi_powercap_get_num_sources(const struct scmi_protocol_handle *ph) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (!pi) + return -EINVAL; + + return pi->num_domains; +} + +static const struct scmi_event powercap_events[] = { + { + .id = SCMI_EVENT_POWERCAP_CAP_CHANGED, + .max_payld_sz = + sizeof(struct scmi_powercap_cap_changed_notify_payld), + .max_report_sz = + sizeof(struct scmi_powercap_cap_changed_report), + }, + { + .id = SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED, + .max_payld_sz = + sizeof(struct scmi_powercap_meas_changed_notify_payld), + .max_report_sz = + sizeof(struct scmi_powercap_meas_changed_report), + }, +}; + +static const struct scmi_event_ops powercap_event_ops = { + .get_num_sources = scmi_powercap_get_num_sources, + .set_notify_enabled = 
scmi_powercap_set_notify_enabled, + .fill_custom_report = scmi_powercap_fill_custom_report, +}; + +static const struct scmi_protocol_events powercap_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &powercap_event_ops, + .evts = powercap_events, + .num_events = ARRAY_SIZE(powercap_events), +}; + +static int +scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph) +{ + int domain, ret; + u32 version; + struct powercap_info *pinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Powercap Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + ret = scmi_powercap_attributes_get(ph, pinfo); + if (ret) + return ret; + + pinfo->powercaps = devm_kcalloc(ph->dev, pinfo->num_domains, + sizeof(*pinfo->powercaps), + GFP_KERNEL); + if (!pinfo->powercaps) + return -ENOMEM; + + /* + * Note that any failure in retrieving any domain attribute leads to + * the whole Powercap protocol initialization failure: this way the + * reported Powercap domains are all assured, when accessed, to be well + * formed and correlated by sane parent-child relationship (if any). + */ + for (domain = 0; domain < pinfo->num_domains; domain++) { + ret = scmi_powercap_domain_attributes_get(ph, pinfo, domain); + if (ret) + return ret; + + if (pinfo->powercaps[domain].fastchannels) + scmi_powercap_domain_init_fc(ph, domain, + &pinfo->powercaps[domain].fc_info); + } + + pinfo->states = devm_kcalloc(ph->dev, pinfo->num_domains, + sizeof(*pinfo->states), GFP_KERNEL); + if (!pinfo->states) + return -ENOMEM; + + pinfo->version = version; + + return ph->set_priv(ph, pinfo); +} + +static const struct scmi_protocol scmi_powercap = { + .id = SCMI_PROTOCOL_POWERCAP, + .owner = THIS_MODULE, + .instance_init = &scmi_powercap_protocol_init, + .ops = &powercap_proto_ops, + .events = &powercap_protocol_events, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap) diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h index 51c31379f9b3..2f3bf691db7c 100644 --- a/drivers/firmware/arm_scmi/protocols.h +++ b/drivers/firmware/arm_scmi/protocols.h @@ -215,6 +215,19 @@ struct scmi_iterator_ops { struct scmi_iterator_state *st, void *priv); }; +struct scmi_fc_db_info { + int width; + u64 set; + u64 mask; + void __iomem *addr; +}; + +struct scmi_fc_info { + void __iomem *set_addr; + void __iomem *get_addr; + struct scmi_fc_db_info *set_db; +}; + /** * struct scmi_proto_helpers_ops - References to common protocol helpers * @extended_name_get: A common helper function to retrieve extended naming @@ -230,6 +243,9 @@ struct scmi_iterator_ops { * provided in @ops. * @iter_response_run: A common helper to trigger the run of a previously * initialized iterator. + * @fastchannel_init: A common helper used to initialize FC descriptors by + * gathering FC descriptions from the SCMI platform server. + * @fastchannel_db_ring: A common helper to ring a FC doorbell. 
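To make the intended usage of these two ops concrete, here is a hedged sketch of a protocol-side caller modeled on the perf and powercap code in this patch; the FOO_* identifiers are hypothetical:

enum { FOO_FC_LEVEL, FOO_FC_MAX };

static void foo_domain_init_fc(const struct scmi_protocol_handle *ph,
			       u32 domain, struct scmi_fc_info **p_fc)
{
	struct scmi_fc_info *fc;

	fc = devm_kcalloc(ph->dev, FOO_FC_MAX, sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return;

	/* 4-byte wide set channel, with an optional doorbell. */
	ph->hops->fastchannel_init(ph, FOO_DESCRIBE_FASTCHANNEL,
				   FOO_LEVEL_SET, 4, domain,
				   &fc[FOO_FC_LEVEL].set_addr,
				   &fc[FOO_FC_LEVEL].set_db);
	*p_fc = fc;
}

static int foo_level_set(const struct scmi_protocol_handle *ph,
			 struct scmi_fc_info *fc, u32 level)
{
	/* Fall back to regular messaging when no FC was advertised. */
	if (!fc || !fc[FOO_FC_LEVEL].set_addr)
		return -EOPNOTSUPP;

	iowrite32(level, fc[FOO_FC_LEVEL].set_addr);
	ph->hops->fastchannel_db_ring(fc[FOO_FC_LEVEL].set_db);
	return 0;
}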
*/ struct scmi_proto_helpers_ops { int (*extended_name_get)(const struct scmi_protocol_handle *ph, @@ -239,6 +255,12 @@ struct scmi_proto_helpers_ops { unsigned int max_resources, u8 msg_id, size_t tx_size, void *priv); int (*iter_response_run)(void *iter); + void (*fastchannel_init)(const struct scmi_protocol_handle *ph, + u8 describe_id, u32 message_id, + u32 valid_size, u32 domain, + void __iomem **p_addr, + struct scmi_fc_db_info **p_db); + void (*fastchannel_db_ring)(struct scmi_fc_db_info *db); }; /** @@ -315,5 +337,6 @@ DECLARE_SCMI_REGISTER_UNREGISTER(reset); DECLARE_SCMI_REGISTER_UNREGISTER(sensors); DECLARE_SCMI_REGISTER_UNREGISTER(voltage); DECLARE_SCMI_REGISTER_UNREGISTER(system); +DECLARE_SCMI_REGISTER_UNREGISTER(powercap); #endif /* _SCMI_PROTOCOLS_H */ diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c new file mode 100644 index 000000000000..6eb7d2a4b6b1 --- /dev/null +++ b/drivers/firmware/arm_scmi/scmi_power_control.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SCMI Generic SystemPower Control driver. + * + * Copyright (C) 2020-2022 ARM Ltd. + */ +/* + * In order to handle platform originated SCMI SystemPower requests (like + * shutdowns or cold/warm resets), we register an SCMI Notification notifier + * block to react when such SCMI SystemPower events are emitted by the platform. + * + * Once such a notification is received, we act accordingly to perform the + * required system transition depending on the kind of request. + * + * Graceful requests are routed to userspace through the same API methods + * (orderly_poweroff/reboot()) used by ACPI when handling ACPI Shutdown bus + * events. + * + * Direct forceful requests are not supported since they are not meant to be + * sent by the SCMI platform to an OSPM like Linux. + * + * Additionally, graceful request notifications can carry an optional timeout + * field stating the maximum amount of time allowed by the platform for + * completion after which they are converted to forceful ones: the assumption + * here is that even graceful requests can be upper-bound by a maximum final + * timeout strictly enforced by the platform itself which can ultimately cut + * the power off at will anytime; in order to avoid such an extreme scenario, we + * track progress of graceful requests through the means of a reboot notifier + * converting timed-out graceful requests to forceful ones, so at least we + * try to perform a clean sync and shutdown/restart before the power is cut. + * + * Given the peculiar nature of SCMI SystemPower protocol, that is being in + * charge of triggering system wide shutdown/reboot events, there should be + * only one SCMI platform actively emitting SystemPower events. + * For this reason the SCMI core takes care to enforce the creation of one + * single unique device associated to the SCMI System Power protocol; no matter + * how many SCMI platforms are defined on the system, only one can be designated + * to support System Power: as a consequence this driver will never be probed + * more than once. + * + * For similar reasons, as soon as the first valid SystemPower notification is + * received by this driver and the shutdown/reboot is started, any further + * notification possibly emitted by the platform will be ignored. 
+ */ + +#include <linux/math.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/printk.h> +#include <linux/reboot.h> +#include <linux/scmi_protocol.h> +#include <linux/slab.h> +#include <linux/time64.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#ifndef MODULE +#include <linux/fs.h> +#endif + +enum scmi_syspower_state { + SCMI_SYSPOWER_IDLE, + SCMI_SYSPOWER_IN_PROGRESS, + SCMI_SYSPOWER_REBOOTING +}; + +/** + * struct scmi_syspower_conf - Common configuration + * + * @dev: A reference device + * @state: Current SystemPower state + * @state_mtx: @state related mutex + * @required_transition: The requested transition as described in the received + * SCMI SystemPower notification + * @userspace_nb: The notifier_block registered against the SCMI SystemPower + * notification to start the needed userspace interactions. + * @reboot_nb: A notifier_block optionally used to track reboot progress + * @forceful_work: A worker used to trigger a forceful transition once a + * graceful request has timed out. + */ +struct scmi_syspower_conf { + struct device *dev; + enum scmi_syspower_state state; + /* Protect access to state */ + struct mutex state_mtx; + enum scmi_system_events required_transition; + + struct notifier_block userspace_nb; + struct notifier_block reboot_nb; + + struct delayed_work forceful_work; +}; + +#define userspace_nb_to_sconf(x) \ + container_of(x, struct scmi_syspower_conf, userspace_nb) + +#define reboot_nb_to_sconf(x) \ + container_of(x, struct scmi_syspower_conf, reboot_nb) + +#define dwork_to_sconf(x) \ + container_of(x, struct scmi_syspower_conf, forceful_work) + +/** + * scmi_reboot_notifier - A reboot notifier to catch an ongoing successful + * system transition + * @nb: Reference to the related notifier block + * @reason: The reason for the ongoing reboot + * @__unused: The cmd being executed on a restart request (unused) + * + * When an ongoing system transition is detected, compatible with the one + * requested by SCMI, cancel the delayed work. + * + * Return: NOTIFY_OK in any case + */ +static int scmi_reboot_notifier(struct notifier_block *nb, + unsigned long reason, void *__unused) +{ + struct scmi_syspower_conf *sc = reboot_nb_to_sconf(nb); + + mutex_lock(&sc->state_mtx); + switch (reason) { + case SYS_HALT: + case SYS_POWER_OFF: + if (sc->required_transition == SCMI_SYSTEM_SHUTDOWN) + sc->state = SCMI_SYSPOWER_REBOOTING; + break; + case SYS_RESTART: + if (sc->required_transition == SCMI_SYSTEM_COLDRESET || + sc->required_transition == SCMI_SYSTEM_WARMRESET) + sc->state = SCMI_SYSPOWER_REBOOTING; + break; + default: + break; + } + + if (sc->state == SCMI_SYSPOWER_REBOOTING) { + dev_dbg(sc->dev, "Reboot in progress...cancel delayed work.\n"); + cancel_delayed_work_sync(&sc->forceful_work); + } + mutex_unlock(&sc->state_mtx); + + return NOTIFY_OK; +} + +/** + * scmi_request_forceful_transition - Request forceful SystemPower transition + * @sc: A reference to the configuration data + * + * Initiates the required SystemPower transition without involving userspace: + * just trigger the action at the kernel level after issuing an emergency + * sync 
(if possible at all) + */ +static inline void +scmi_request_forceful_transition(struct scmi_syspower_conf *sc) +{ + dev_dbg(sc->dev, "Serving forceful request:%d\n", + sc->required_transition); + +#ifndef MODULE + emergency_sync(); +#endif + switch (sc->required_transition) { + case SCMI_SYSTEM_SHUTDOWN: + kernel_power_off(); + break; + case SCMI_SYSTEM_COLDRESET: + case SCMI_SYSTEM_WARMRESET: + kernel_restart(NULL); + break; + default: + break; + } +} + +static void scmi_forceful_work_func(struct work_struct *work) +{ + struct scmi_syspower_conf *sc; + struct delayed_work *dwork; + + if (system_state > SYSTEM_RUNNING) + return; + + dwork = to_delayed_work(work); + sc = dwork_to_sconf(dwork); + + dev_dbg(sc->dev, "Graceful request timed out...forcing !\n"); + mutex_lock(&sc->state_mtx); + /* avoid deadlock by unregistering reboot notifier first */ + unregister_reboot_notifier(&sc->reboot_nb); + if (sc->state == SCMI_SYSPOWER_IN_PROGRESS) + scmi_request_forceful_transition(sc); + mutex_unlock(&sc->state_mtx); +} + +/** + * scmi_request_graceful_transition - Request graceful SystemPower transition + * @sc: A reference to the configuration data + * @timeout_ms: The desired timeout to wait for the shutdown to complete before + * system is forcibly shutdown. + * + * Initiates the required SystemPower transition, requesting userspace + * co-operation: it uses the same orderly_ methods used by ACPI Shutdown event + * processing. + * + * Takes care also to register a reboot notifier and to schedule a delayed work + * in order to detect if userspace actions are taking too long and in such a + * case to trigger a forceful transition. + */ +static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc, + unsigned int timeout_ms) +{ + unsigned int adj_timeout_ms = 0; + + if (timeout_ms) { + int ret; + + sc->reboot_nb.notifier_call = &scmi_reboot_notifier; + ret = register_reboot_notifier(&sc->reboot_nb); + if (!ret) { + /* Wait only up to 75% of the advertised timeout */ + adj_timeout_ms = mult_frac(timeout_ms, 3, 4); + INIT_DELAYED_WORK(&sc->forceful_work, + scmi_forceful_work_func); + schedule_delayed_work(&sc->forceful_work, + msecs_to_jiffies(adj_timeout_ms)); + } else { + /* Carry on best effort even without a reboot notifier */ + dev_warn(sc->dev, + "Cannot register reboot notifier !\n"); + } + } + + dev_dbg(sc->dev, + "Serving graceful req:%d (timeout_ms:%u adj_timeout_ms:%u)\n", + sc->required_transition, timeout_ms, adj_timeout_ms); + + switch (sc->required_transition) { + case SCMI_SYSTEM_SHUTDOWN: + /* + * When triggered early at boot-time the 'orderly' call will + * partially fail due to the lack of userspace itself, but + * the force=true argument will start anyway a successful + * forced shutdown. + */ + orderly_poweroff(true); + break; + case SCMI_SYSTEM_COLDRESET: + case SCMI_SYSTEM_WARMRESET: + orderly_reboot(); + break; + default: + break; + } +} + +/** + * scmi_userspace_notifier - Notifier callback to act on SystemPower + * Notifications + * @nb: Reference to the related notifier block + * @event: The SystemPower notification event id + * @data: The SystemPower event report + * + * This callback is in charge of decoding the received SystemPower report + * and act accordingly triggering a graceful or forceful system transition. + * + * Note that once a valid SCMI SystemPower event starts being served, any + * other following SystemPower notification received from the same SCMI + * instance (handle) will be ignored. 
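One detail of the graceful path above deserves a callout: only 75% of the platform-advertised timeout is granted to the orderly shutdown, so the forceful fallback still fires inside the window the platform itself enforces. The arithmetic and scheduling reduce to this (a sketch using the fields and worker from this patch):

static void arm_forceful_fallback(struct scmi_syspower_conf *sc,
				  unsigned int timeout_ms)
{
	/* mult_frac() avoids the overflow of a naive timeout_ms * 3 / 4. */
	unsigned int adj_timeout_ms = mult_frac(timeout_ms, 3, 4);

	INIT_DELAYED_WORK(&sc->forceful_work, scmi_forceful_work_func);
	schedule_delayed_work(&sc->forceful_work,
			      msecs_to_jiffies(adj_timeout_ms));
}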
+ * + * Return: NOTIFY_OK once a valid SystemPower event has been successfully + * processed. + */ +static int scmi_userspace_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct scmi_system_power_state_notifier_report *er = data; + struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb); + + if (er->system_state >= SCMI_SYSTEM_POWERUP) { + dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n", + er->system_state); + return NOTIFY_DONE; + } + + if (!SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(er->flags)) { + dev_err(sc->dev, "Ignoring forceful notification.\n"); + return NOTIFY_DONE; + } + + /* + * Bail out if system is already shutting down or an SCMI SystemPower + * request is already being served. + */ + if (system_state > SYSTEM_RUNNING) + return NOTIFY_DONE; + mutex_lock(&sc->state_mtx); + if (sc->state != SCMI_SYSPOWER_IDLE) { + dev_dbg(sc->dev, + "Transition already in progress...ignore.\n"); + mutex_unlock(&sc->state_mtx); + return NOTIFY_DONE; + } + sc->state = SCMI_SYSPOWER_IN_PROGRESS; + mutex_unlock(&sc->state_mtx); + + sc->required_transition = er->system_state; + + /* Leaving a trace in logs of who triggered the shutdown/reboot. */ + dev_info(sc->dev, "Serving shutdown/reboot request: %d\n", + sc->required_transition); + + scmi_request_graceful_transition(sc, er->timeout); + + return NOTIFY_OK; +} + +static int scmi_syspower_probe(struct scmi_device *sdev) +{ + int ret; + struct scmi_syspower_conf *sc; + struct scmi_handle *handle = sdev->handle; + + if (!handle) + return -ENODEV; + + ret = handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_SYSTEM); + if (ret) + return ret; + + sc = devm_kzalloc(&sdev->dev, sizeof(*sc), GFP_KERNEL); + if (!sc) + return -ENOMEM; + + sc->state = SCMI_SYSPOWER_IDLE; + mutex_init(&sc->state_mtx); + sc->required_transition = SCMI_SYSTEM_MAX; + sc->userspace_nb.notifier_call = &scmi_userspace_notifier; + sc->dev = &sdev->dev; + + return handle->notify_ops->devm_event_notifier_register(sdev, + SCMI_PROTOCOL_SYSTEM, + SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER, + NULL, &sc->userspace_nb); +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_SYSTEM, "syspower" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_system_power_driver = { + .name = "scmi-system-power", + .probe = scmi_syspower_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_system_power_driver); + +MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>"); +MODULE_DESCRIPTION("ARM SCMI SystemPower Control driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c index 220e399118ad..9383d7584539 100644 --- a/drivers/firmware/arm_scmi/system.c +++ b/drivers/firmware/arm_scmi/system.c @@ -27,10 +27,12 @@ struct scmi_system_power_state_notifier_payld { __le32 agent_id; __le32 flags; __le32 system_state; + __le32 timeout; }; struct scmi_system_info { u32 version; + bool graceful_timeout_supported; }; static int scmi_system_request_notify(const struct scmi_protocol_handle *ph, @@ -72,17 +74,27 @@ scmi_system_fill_custom_report(const struct scmi_protocol_handle *ph, const void *payld, size_t payld_sz, void *report, u32 *src_id) { + size_t expected_sz; const struct scmi_system_power_state_notifier_payld *p = payld; struct scmi_system_power_state_notifier_report *r = report; + struct scmi_system_info *pinfo = ph->get_priv(ph); + expected_sz = pinfo->graceful_timeout_supported ? 
+ sizeof(*p) : sizeof(*p) - sizeof(__le32); if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER || - sizeof(*p) != payld_sz) + payld_sz != expected_sz) return NULL; r->timestamp = timestamp; r->agent_id = le32_to_cpu(p->agent_id); r->flags = le32_to_cpu(p->flags); r->system_state = le32_to_cpu(p->system_state); + if (pinfo->graceful_timeout_supported && + r->system_state == SCMI_SYSTEM_SHUTDOWN && + SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(r->flags)) + r->timeout = le32_to_cpu(p->timeout); + else + r->timeout = 0x00; *src_id = 0; return r; @@ -129,6 +141,9 @@ static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph) return -ENOMEM; pinfo->version = version; + if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2) + pinfo->graceful_timeout_supported = true; + return ph->set_priv(ph, pinfo); } diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index ddf0b9ff9e15..435d0e2658a4 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -815,7 +815,7 @@ static int scpi_init_versions(struct scpi_drvinfo *info) info->firmware_version = le32_to_cpu(caps.platform_version); } /* Ignore error if not implemented */ - if (scpi_info->is_legacy && ret == -EOPNOTSUPP) + if (info->is_legacy && ret == -EOPNOTSUPP) return 0; return ret; @@ -913,13 +913,14 @@ static int scpi_probe(struct platform_device *pdev) struct resource res; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; + struct scpi_drvinfo *scpi_drvinfo; - scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL); - if (!scpi_info) + scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL); + if (!scpi_drvinfo) return -ENOMEM; if (of_match_device(legacy_scpi_of_match, &pdev->dev)) - scpi_info->is_legacy = true; + scpi_drvinfo->is_legacy = true; count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); if (count < 0) { @@ -927,19 +928,19 @@ static int scpi_probe(struct platform_device *pdev) return -ENODEV; } - scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan), - GFP_KERNEL); - if (!scpi_info->channels) + scpi_drvinfo->channels = + devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL); + if (!scpi_drvinfo->channels) return -ENOMEM; - ret = devm_add_action(dev, scpi_free_channels, scpi_info); + ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo); if (ret) return ret; - for (; scpi_info->num_chans < count; scpi_info->num_chans++) { + for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) { resource_size_t size; - int idx = scpi_info->num_chans; - struct scpi_chan *pchan = scpi_info->channels + idx; + int idx = scpi_drvinfo->num_chans; + struct scpi_chan *pchan = scpi_drvinfo->channels + idx; struct mbox_client *cl = &pchan->cl; struct device_node *shmem = of_parse_phandle(np, "shmem", idx); @@ -986,45 +987,53 @@ static int scpi_probe(struct platform_device *pdev) return ret; } - scpi_info->commands = scpi_std_commands; + scpi_drvinfo->commands = scpi_std_commands; - platform_set_drvdata(pdev, scpi_info); + platform_set_drvdata(pdev, scpi_drvinfo); - if (scpi_info->is_legacy) { + if (scpi_drvinfo->is_legacy) { /* Replace with legacy variants */ scpi_ops.clk_set_val = legacy_scpi_clk_set_val; - scpi_info->commands = scpi_legacy_commands; + scpi_drvinfo->commands = scpi_legacy_commands; /* Fill priority bitmap */ for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++) set_bit(legacy_hpriority_cmds[idx], - scpi_info->cmd_priority); + scpi_drvinfo->cmd_priority); } - ret = scpi_init_versions(scpi_info); + 
scpi_info = scpi_drvinfo;
+
+	ret = scpi_init_versions(scpi_drvinfo);
 	if (ret) {
 		dev_err(dev, "incorrect or no SCP firmware found\n");
+		scpi_info = NULL;
 		return ret;
 	}
 
-	if (scpi_info->is_legacy && !scpi_info->protocol_version &&
-	    !scpi_info->firmware_version)
+	if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version &&
+	    !scpi_drvinfo->firmware_version)
 		dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
 	else
 		dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
 			 FIELD_GET(PROTO_REV_MAJOR_MASK,
-				   scpi_info->protocol_version),
+				   scpi_drvinfo->protocol_version),
 			 FIELD_GET(PROTO_REV_MINOR_MASK,
-				   scpi_info->protocol_version),
+				   scpi_drvinfo->protocol_version),
 			 FIELD_GET(FW_REV_MAJOR_MASK,
-				   scpi_info->firmware_version),
+				   scpi_drvinfo->firmware_version),
 			 FIELD_GET(FW_REV_MINOR_MASK,
-				   scpi_info->firmware_version),
+				   scpi_drvinfo->firmware_version),
 			 FIELD_GET(FW_REV_PATCH_MASK,
-				   scpi_info->firmware_version));
-	scpi_info->scpi_ops = &scpi_ops;
+				   scpi_drvinfo->firmware_version));
+
+	scpi_drvinfo->scpi_ops = &scpi_ops;
 
-	return devm_of_platform_populate(dev);
+	ret = devm_of_platform_populate(dev);
+	if (ret)
+		scpi_info = NULL;
+
+	return ret;
 }
 
 static const struct of_device_id scpi_of_match[] = {
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 7aa4717cdcac..6cb7384ad2ac 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -2,18 +2,6 @@
 menu "EFI (Extensible Firmware Interface) Support"
 	depends on EFI
 
-config EFI_VARS
-	tristate "EFI Variable Support via sysfs"
-	depends on EFI && (X86 || IA64)
-	default n
-	help
-	  If you say Y here, you are able to get EFI (Extensible Firmware
-	  Interface) variable information via sysfs.  You may read,
-	  write, create, and destroy EFI variables through this interface.
-	  Note that this driver is only retained for compatibility with
-	  legacy users: new users should use the efivarfs filesystem
-	  instead.
-
 config EFI_ESRT
 	bool
 	depends on EFI && !IA64
@@ -22,6 +10,7 @@ config EFI_ESRT
 config EFI_VARS_PSTORE
 	tristate "Register efivars backend for pstore"
 	depends on PSTORE
+	select UCS2_STRING
 	default y
 	help
 	  Say Y here to enable the use of efivars as a backend to pstore. This
@@ -145,6 +134,7 @@ config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
 
 config EFI_BOOTLOADER_CONTROL
 	tristate "EFI Bootloader Control"
+	select UCS2_STRING
 	default n
 	help
 	  This module installs a reboot hook, such that if reboot() is
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index c02ff25dd477..8d151e332584 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -17,7 +17,6 @@ ifneq ($(CONFIG_EFI_CAPSULE_LOADER),)
 obj-$(CONFIG_EFI) += capsule.o
 endif
 obj-$(CONFIG_EFI_PARAMS_FROM_FDT)	+= fdtparams.o
-obj-$(CONFIG_EFI_VARS)			+= efivars.o
 obj-$(CONFIG_EFI_ESRT)			+= esrt.o
 obj-$(CONFIG_EFI_VARS_PSTORE)		+= efi-pstore.o
 obj-$(CONFIG_UEFI_CPER)			+= cper.o
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index b2c829e95bd1..3928dbff76d0 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -240,6 +240,7 @@ void __init efi_init(void)
 	 * And now, memblock is fully populated, it is time to do capping.
*/ early_init_dt_check_for_usable_mem_range(); + efi_find_mirror(); efi_esrt_init(); efi_mokvar_table_init(); diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 7e771c56c13c..3bddc152fcd4 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -6,6 +6,8 @@ #include <linux/slab.h> #include <linux/ucs2_string.h> +MODULE_IMPORT_NS(EFIVAR); + #define DUMP_NAME_LEN 66 #define EFIVARS_DATA_SIZE_MAX 1024 @@ -20,18 +22,25 @@ module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644); EFI_VARIABLE_BOOTSERVICE_ACCESS | \ EFI_VARIABLE_RUNTIME_ACCESS) -static LIST_HEAD(efi_pstore_list); -static DECLARE_WORK(efivar_work, NULL); - static int efi_pstore_open(struct pstore_info *psi) { - psi->data = NULL; + int err; + + err = efivar_lock(); + if (err) + return err; + + psi->data = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); + if (!psi->data) + return -ENOMEM; + return 0; } static int efi_pstore_close(struct pstore_info *psi) { - psi->data = NULL; + efivar_unlock(); + kfree(psi->data); return 0; } @@ -40,22 +49,17 @@ static inline u64 generic_id(u64 timestamp, unsigned int part, int count) return (timestamp * 100 + part) * 1000 + count; } -static int efi_pstore_read_func(struct efivar_entry *entry, - struct pstore_record *record) +static int efi_pstore_read_func(struct pstore_record *record, + efi_char16_t *varname) { - efi_guid_t vendor = LINUX_EFI_CRASH_GUID; + unsigned long wlen, size = EFIVARS_DATA_SIZE_MAX; char name[DUMP_NAME_LEN], data_type; - int i; + efi_status_t status; int cnt; unsigned int part; - unsigned long size; u64 time; - if (efi_guidcmp(entry->var.VendorGuid, vendor)) - return 0; - - for (i = 0; i < DUMP_NAME_LEN; i++) - name[i] = entry->var.VariableName[i]; + ucs2_as_utf8(name, varname, DUMP_NAME_LEN); if (sscanf(name, "dump-type%u-%u-%d-%llu-%c", &record->type, &part, &cnt, &time, &data_type) == 5) { @@ -95,161 +99,75 @@ static int efi_pstore_read_func(struct efivar_entry *entry, } else return 0; - entry->var.DataSize = 1024; - __efivar_entry_get(entry, &entry->var.Attributes, - &entry->var.DataSize, entry->var.Data); - size = entry->var.DataSize; - memcpy(record->buf, entry->var.Data, - (size_t)min_t(unsigned long, EFIVARS_DATA_SIZE_MAX, size)); - - return size; -} - -/** - * efi_pstore_scan_sysfs_enter - * @pos: scanning entry - * @next: next entry - * @head: list head - */ -static void efi_pstore_scan_sysfs_enter(struct efivar_entry *pos, - struct efivar_entry *next, - struct list_head *head) -{ - pos->scanning = true; - if (&next->list != head) - next->scanning = true; -} - -/** - * __efi_pstore_scan_sysfs_exit - * @entry: deleting entry - * @turn_off_scanning: Check if a scanning flag should be turned off - */ -static inline int __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry, - bool turn_off_scanning) -{ - if (entry->deleting) { - list_del(&entry->list); - efivar_entry_iter_end(); - kfree(entry); - if (efivar_entry_iter_begin()) - return -EINTR; - } else if (turn_off_scanning) - entry->scanning = false; - - return 0; -} - -/** - * efi_pstore_scan_sysfs_exit - * @pos: scanning entry - * @next: next entry - * @head: list head - * @stop: a flag checking if scanning will stop - */ -static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos, - struct efivar_entry *next, - struct list_head *head, bool stop) -{ - int ret = __efi_pstore_scan_sysfs_exit(pos, true); - - if (ret) - return ret; - - if (stop) - ret = __efi_pstore_scan_sysfs_exit(next, &next->list != head); - return ret; -} + 
record->buf = kmalloc(size, GFP_KERNEL); + if (!record->buf) + return -ENOMEM; -/** - * efi_pstore_sysfs_entry_iter - * - * @record: pstore record to pass to callback - * - * You MUST call efivar_entry_iter_begin() before this function, and - * efivar_entry_iter_end() afterwards. - * - */ -static int efi_pstore_sysfs_entry_iter(struct pstore_record *record) -{ - struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data; - struct efivar_entry *entry, *n; - struct list_head *head = &efi_pstore_list; - int size = 0; - int ret; - - if (!*pos) { - list_for_each_entry_safe(entry, n, head, list) { - efi_pstore_scan_sysfs_enter(entry, n, head); - - size = efi_pstore_read_func(entry, record); - ret = efi_pstore_scan_sysfs_exit(entry, n, head, - size < 0); - if (ret) - return ret; - if (size) - break; - } - *pos = n; - return size; + status = efivar_get_variable(varname, &LINUX_EFI_CRASH_GUID, NULL, + &size, record->buf); + if (status != EFI_SUCCESS) { + kfree(record->buf); + return -EIO; } - list_for_each_entry_safe_from((*pos), n, head, list) { - efi_pstore_scan_sysfs_enter((*pos), n, head); - - size = efi_pstore_read_func((*pos), record); - ret = efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0); - if (ret) - return ret; - if (size) - break; + /* + * Store the name of the variable in the pstore_record priv field, so + * we can reuse it later if we need to delete the EFI variable from the + * variable store. + */ + wlen = (ucs2_strnlen(varname, DUMP_NAME_LEN) + 1) * sizeof(efi_char16_t); + record->priv = kmemdup(varname, wlen, GFP_KERNEL); + if (!record->priv) { + kfree(record->buf); + return -ENOMEM; } - *pos = n; + return size; } -/** - * efi_pstore_read - * - * This function returns a size of NVRAM entry logged via efi_pstore_write(). - * The meaning and behavior of efi_pstore/pstore are as below. - * - * size > 0: Got data of an entry logged via efi_pstore_write() successfully, - * and pstore filesystem will continue reading subsequent entries. - * size == 0: Entry was not logged via efi_pstore_write(), - * and efi_pstore driver will continue reading subsequent entries. - * size < 0: Failed to get data of entry logging via efi_pstore_write(), - * and pstore will stop reading entry. - */ static ssize_t efi_pstore_read(struct pstore_record *record) { - ssize_t size; + efi_char16_t *varname = record->psi->data; + efi_guid_t guid = LINUX_EFI_CRASH_GUID; + unsigned long varname_size; + efi_status_t status; - record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); - if (!record->buf) - return -ENOMEM; + for (;;) { + varname_size = EFIVARS_DATA_SIZE_MAX; - if (efivar_entry_iter_begin()) { - size = -EINTR; - goto out; - } - size = efi_pstore_sysfs_entry_iter(record); - efivar_entry_iter_end(); + /* + * If this is the first read() call in the pstore enumeration, + * varname will be the empty string, and the GetNextVariable() + * runtime service call will return the first EFI variable in + * its own enumeration order, ignoring the guid argument. + * + * Subsequent calls to GetNextVariable() must pass the name and + * guid values returned by the previous call, which is why we + * store varname in record->psi->data. Given that we only + * enumerate variables with the efi-pstore GUID, there is no + * need to record the guid return value. 
+ */ + status = efivar_get_next_variable(&varname_size, varname, &guid); + if (status == EFI_NOT_FOUND) + return 0; -out: - if (size <= 0) { - kfree(record->buf); - record->buf = NULL; + if (status != EFI_SUCCESS) + return -EIO; + + /* skip variables that don't concern us */ + if (efi_guidcmp(guid, LINUX_EFI_CRASH_GUID)) + continue; + + return efi_pstore_read_func(record, varname); } - return size; } static int efi_pstore_write(struct pstore_record *record) { char name[DUMP_NAME_LEN]; efi_char16_t efi_name[DUMP_NAME_LEN]; - efi_guid_t vendor = LINUX_EFI_CRASH_GUID; - int i, ret = 0; + efi_status_t status; + int i; record->id = generic_id(record->time.tv_sec, record->part, record->count); @@ -265,88 +183,26 @@ static int efi_pstore_write(struct pstore_record *record) for (i = 0; i < DUMP_NAME_LEN; i++) efi_name[i] = name[i]; - ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES, - false, record->size, record->psi->buf); - - if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE)) - if (!schedule_work(&efivar_work)) - module_put(THIS_MODULE); - - return ret; + if (efivar_trylock()) + return -EBUSY; + status = efivar_set_variable_locked(efi_name, &LINUX_EFI_CRASH_GUID, + PSTORE_EFI_ATTRIBUTES, + record->size, record->psi->buf, + true); + efivar_unlock(); + return status == EFI_SUCCESS ? 0 : -EIO; }; -/* - * Clean up an entry with the same name - */ -static int efi_pstore_erase_func(struct efivar_entry *entry, void *data) -{ - efi_char16_t *efi_name = data; - efi_guid_t vendor = LINUX_EFI_CRASH_GUID; - unsigned long ucs2_len = ucs2_strlen(efi_name); - - if (efi_guidcmp(entry->var.VendorGuid, vendor)) - return 0; - - if (ucs2_strncmp(entry->var.VariableName, efi_name, (size_t)ucs2_len)) - return 0; - - if (entry->scanning) { - /* - * Skip deletion because this entry will be deleted - * after scanning is completed. - */ - entry->deleting = true; - } else - list_del(&entry->list); - - /* found */ - __efivar_entry_delete(entry); - - return 1; -} - -static int efi_pstore_erase_name(const char *name) -{ - struct efivar_entry *entry = NULL; - efi_char16_t efi_name[DUMP_NAME_LEN]; - int found, i; - - for (i = 0; i < DUMP_NAME_LEN; i++) { - efi_name[i] = name[i]; - if (name[i] == '\0') - break; - } - - if (efivar_entry_iter_begin()) - return -EINTR; - - found = __efivar_entry_iter(efi_pstore_erase_func, &efi_pstore_list, - efi_name, &entry); - efivar_entry_iter_end(); - - if (found && !entry->scanning) - kfree(entry); - - return found ? 
0 : -ENOENT; -} - static int efi_pstore_erase(struct pstore_record *record) { - char name[DUMP_NAME_LEN]; - int ret; - - snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lld", - record->type, record->part, record->count, - (long long)record->time.tv_sec); - ret = efi_pstore_erase_name(name); - if (ret != -ENOENT) - return ret; + efi_status_t status; - snprintf(name, sizeof(name), "dump-type%u-%u-%lld", - record->type, record->part, (long long)record->time.tv_sec); - ret = efi_pstore_erase_name(name); + status = efivar_set_variable(record->priv, &LINUX_EFI_CRASH_GUID, + PSTORE_EFI_ATTRIBUTES, 0, NULL); - return ret; + if (status != EFI_SUCCESS && status != EFI_NOT_FOUND) + return -EIO; + return 0; } static struct pstore_info efi_pstore_info = { @@ -360,77 +216,14 @@ static struct pstore_info efi_pstore_info = { .erase = efi_pstore_erase, }; -static int efi_pstore_callback(efi_char16_t *name, efi_guid_t vendor, - unsigned long name_size, void *data) -{ - struct efivar_entry *entry; - int ret; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - memcpy(entry->var.VariableName, name, name_size); - entry->var.VendorGuid = vendor; - - ret = efivar_entry_add(entry, &efi_pstore_list); - if (ret) - kfree(entry); - - return ret; -} - -static int efi_pstore_update_entry(efi_char16_t *name, efi_guid_t vendor, - unsigned long name_size, void *data) -{ - struct efivar_entry *entry = data; - - if (efivar_entry_find(name, vendor, &efi_pstore_list, false)) - return 0; - - memcpy(entry->var.VariableName, name, name_size); - memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); - - return 1; -} - -static void efi_pstore_update_entries(struct work_struct *work) -{ - struct efivar_entry *entry; - int err; - - /* Add new sysfs entries */ - while (1) { - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return; - - err = efivar_init(efi_pstore_update_entry, entry, - false, &efi_pstore_list); - if (!err) - break; - - efivar_entry_add(entry, &efi_pstore_list); - } - - kfree(entry); - module_put(THIS_MODULE); -} - static __init int efivars_pstore_init(void) { - int ret; - - if (!efivars_kobject() || !efivar_supports_writes()) + if (!efivar_supports_writes()) return 0; if (efivars_pstore_disable) return 0; - ret = efivar_init(efi_pstore_callback, NULL, true, &efi_pstore_list); - if (ret) - return ret; - efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL); if (!efi_pstore_info.buf) return -ENOMEM; @@ -443,8 +236,6 @@ static __init int efivars_pstore_init(void) efi_pstore_info.bufsize = 0; } - INIT_WORK(&efivar_work, efi_pstore_update_entries); - return 0; } diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 860534bcfdac..e4080ad96089 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -202,7 +202,7 @@ static void generic_ops_unregister(void) } #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS -#define EFIVAR_SSDT_NAME_MAX 16 +#define EFIVAR_SSDT_NAME_MAX 16UL static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata; static int __init efivar_ssdt_setup(char *str) { @@ -219,83 +219,62 @@ static int __init efivar_ssdt_setup(char *str) } __setup("efivar_ssdt=", efivar_ssdt_setup); -static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor, - unsigned long name_size, void *data) -{ - struct efivar_entry *entry; - struct list_head *list = data; - char utf8_name[EFIVAR_SSDT_NAME_MAX]; - int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size); - - ucs2_as_utf8(utf8_name, name, limit - 1); - if (strncmp(utf8_name, 
efivar_ssdt, limit) != 0) - return 0; - - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return 0; - - memcpy(entry->var.VariableName, name, name_size); - memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t)); - - efivar_entry_add(entry, list); - - return 0; -} - static __init int efivar_ssdt_load(void) { - LIST_HEAD(entries); - struct efivar_entry *entry, *aux; - unsigned long size; - void *data; - int ret; + unsigned long name_size = 256; + efi_char16_t *name = NULL; + efi_status_t status; + efi_guid_t guid; if (!efivar_ssdt[0]) return 0; - ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries); - - list_for_each_entry_safe(entry, aux, &entries, list) { - pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, - &entry->var.VendorGuid); + name = kzalloc(name_size, GFP_KERNEL); + if (!name) + return -ENOMEM; - list_del(&entry->list); + for (;;) { + char utf8_name[EFIVAR_SSDT_NAME_MAX]; + unsigned long data_size = 0; + void *data; + int limit; - ret = efivar_entry_size(entry, &size); - if (ret) { - pr_err("failed to get var size\n"); - goto free_entry; + status = efi.get_next_variable(&name_size, name, &guid); + if (status == EFI_NOT_FOUND) { + break; + } else if (status == EFI_BUFFER_TOO_SMALL) { + name = krealloc(name, name_size, GFP_KERNEL); + if (!name) + return -ENOMEM; + continue; } - data = kmalloc(size, GFP_KERNEL); - if (!data) { - ret = -ENOMEM; - goto free_entry; - } + limit = min(EFIVAR_SSDT_NAME_MAX, name_size); + ucs2_as_utf8(utf8_name, name, limit - 1); + if (strncmp(utf8_name, efivar_ssdt, limit) != 0) + continue; - ret = efivar_entry_get(entry, NULL, &size, data); - if (ret) { - pr_err("failed to get var data\n"); - goto free_data; - } + pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid); - ret = acpi_load_table(data, NULL); - if (ret) { - pr_err("failed to load table: %d\n", ret); - goto free_data; - } + status = efi.get_variable(name, &guid, NULL, &data_size, NULL); + if (status != EFI_BUFFER_TOO_SMALL || !data_size) + return -EIO; - goto free_entry; + data = kmalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; -free_data: + status = efi.get_variable(name, &guid, NULL, &data_size, data); + if (status == EFI_SUCCESS) { + acpi_status ret = acpi_load_table(data, NULL); + if (ret) + pr_err("failed to load table: %u\n", ret); + } else { + pr_err("failed to get var data: 0x%lx\n", status); + } kfree(data); - -free_entry: - kfree(entry); } - - return ret; + return 0; } #else static inline int efivar_ssdt_load(void) { return 0; } @@ -446,6 +425,29 @@ err_put: subsys_initcall(efisubsys_init); +void __init efi_find_mirror(void) +{ + efi_memory_desc_t *md; + u64 mirror_size = 0, total_size = 0; + + if (!efi_enabled(EFI_MEMMAP)) + return; + + for_each_efi_memory_desc(md) { + unsigned long long start = md->phys_addr; + unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; + + total_size += size; + if (md->attribute & EFI_MEMORY_MORE_RELIABLE) { + memblock_mark_mirror(start, size); + mirror_size += size; + } + } + if (mirror_size) + pr_info("Memory: %lldM/%lldM mirrored memory\n", + mirror_size>>20, total_size>>20); +} + /* * Find the efi memory descriptor for a given physical address. 
Given a * physical address, determine if it exists within an EFI Memory Map entry, @@ -897,6 +899,7 @@ int efi_status_to_err(efi_status_t status) return err; } +EXPORT_SYMBOL_GPL(efi_status_to_err); static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock); static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init; diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c index 15a47539dc56..8ced7af8e56d 100644 --- a/drivers/firmware/efi/efibc.c +++ b/drivers/firmware/efi/efibc.c @@ -10,69 +10,51 @@ #include <linux/module.h> #include <linux/reboot.h> #include <linux/slab.h> +#include <linux/ucs2_string.h> -static void efibc_str_to_str16(const char *str, efi_char16_t *str16) -{ - size_t i; - - for (i = 0; i < strlen(str); i++) - str16[i] = str[i]; - - str16[i] = '\0'; -} +#define MAX_DATA_LEN 512 -static int efibc_set_variable(const char *name, const char *value) +static int efibc_set_variable(efi_char16_t *name, efi_char16_t *value, + unsigned long len) { - int ret; - efi_guid_t guid = LINUX_EFI_LOADER_ENTRY_GUID; - struct efivar_entry *entry; - size_t size = (strlen(value) + 1) * sizeof(efi_char16_t); + efi_status_t status; - if (size > sizeof(entry->var.Data)) { - pr_err("value is too large (%zu bytes) for '%s' EFI variable\n", size, name); - return -EINVAL; - } + status = efi.set_variable(name, &LINUX_EFI_LOADER_ENTRY_GUID, + EFI_VARIABLE_NON_VOLATILE + | EFI_VARIABLE_BOOTSERVICE_ACCESS + | EFI_VARIABLE_RUNTIME_ACCESS, + len * sizeof(efi_char16_t), value); - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) { - pr_err("failed to allocate efivar entry for '%s' EFI variable\n", name); - return -ENOMEM; + if (status != EFI_SUCCESS) { + pr_err("failed to set EFI variable: 0x%lx\n", status); + return -EIO; } - - efibc_str_to_str16(name, entry->var.VariableName); - efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data); - memcpy(&entry->var.VendorGuid, &guid, sizeof(guid)); - - ret = efivar_entry_set_safe(entry->var.VariableName, - entry->var.VendorGuid, - EFI_VARIABLE_NON_VOLATILE - | EFI_VARIABLE_BOOTSERVICE_ACCESS - | EFI_VARIABLE_RUNTIME_ACCESS, - false, size, entry->var.Data); - - if (ret) - pr_err("failed to set %s EFI variable: 0x%x\n", - name, ret); - - kfree(entry); - return ret; + return 0; } static int efibc_reboot_notifier_call(struct notifier_block *notifier, unsigned long event, void *data) { - const char *reason = "shutdown"; + efi_char16_t *reason = event == SYS_RESTART ? 
L"reboot" + : L"shutdown"; + const u8 *str = data; + efi_char16_t *wdata; + unsigned long l; int ret; - if (event == SYS_RESTART) - reason = "reboot"; - - ret = efibc_set_variable("LoaderEntryRebootReason", reason); + ret = efibc_set_variable(L"LoaderEntryRebootReason", reason, + ucs2_strlen(reason)); if (ret || !data) return NOTIFY_DONE; - efibc_set_variable("LoaderEntryOneShot", (char *)data); + wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL); + for (l = 0; l < MAX_DATA_LEN - 1 && str[l] != '\0'; l++) + wdata[l] = str[l]; + wdata[l] = L'\0'; + + efibc_set_variable(L"LoaderEntryOneShot", wdata, l); + kfree(wdata); return NOTIFY_DONE; } @@ -84,7 +66,7 @@ static int __init efibc_init(void) { int ret; - if (!efivars_kobject() || !efivar_supports_writes()) + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) return -ENODEV; ret = register_reboot_notifier(&efibc_reboot_notifier); diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c deleted file mode 100644 index ea0bc39dc965..000000000000 --- a/drivers/firmware/efi/efivars.c +++ /dev/null @@ -1,671 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Originally from efivars.c, - * - * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com> - * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com> - * - * This code takes all variables accessible from EFI runtime and - * exports them via sysfs - */ - -#include <linux/efi.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/ucs2_string.h> -#include <linux/compat.h> - -#define EFIVARS_VERSION "0.08" -#define EFIVARS_DATE "2004-May-17" - -MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>"); -MODULE_DESCRIPTION("sysfs interface to EFI Variables"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(EFIVARS_VERSION); - -static LIST_HEAD(efivar_sysfs_list); - -static struct kset *efivars_kset; - -static struct bin_attribute *efivars_new_var; -static struct bin_attribute *efivars_del_var; - -struct compat_efi_variable { - efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)]; - efi_guid_t VendorGuid; - __u32 DataSize; - __u8 Data[1024]; - __u32 Status; - __u32 Attributes; -} __packed; - -struct efivar_attribute { - struct attribute attr; - ssize_t (*show) (struct efivar_entry *entry, char *buf); - ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count); -}; - -#define EFIVAR_ATTR(_name, _mode, _show, _store) \ -struct efivar_attribute efivar_attr_##_name = { \ - .attr = {.name = __stringify(_name), .mode = _mode}, \ - .show = _show, \ - .store = _store, \ -}; - -#define to_efivar_attr(_attr) container_of(_attr, struct efivar_attribute, attr) -#define to_efivar_entry(obj) container_of(obj, struct efivar_entry, kobj) - -/* - * Prototype for sysfs creation function - */ -static int -efivar_create_sysfs_entry(struct efivar_entry *new_var); - -static ssize_t -efivar_guid_read(struct efivar_entry *entry, char *buf) -{ - struct efi_variable *var = &entry->var; - char *str = buf; - - if (!entry || !buf) - return 0; - - efi_guid_to_str(&var->VendorGuid, str); - str += strlen(str); - str += sprintf(str, "\n"); - - return str - buf; -} - -static ssize_t -efivar_attr_read(struct efivar_entry *entry, char *buf) -{ - struct efi_variable *var = &entry->var; - unsigned long size = sizeof(var->Data); - char *str = buf; - int ret; - - if (!entry || !buf) - return -EINVAL; - - ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); - var->DataSize = size; - if (ret) - return -EIO; - - if (var->Attributes 
& EFI_VARIABLE_NON_VOLATILE) - str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n"); - if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS) - str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n"); - if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS) - str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n"); - if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD) - str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n"); - if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS) - str += sprintf(str, - "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n"); - if (var->Attributes & - EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS) - str += sprintf(str, - "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n"); - if (var->Attributes & EFI_VARIABLE_APPEND_WRITE) - str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n"); - return str - buf; -} - -static ssize_t -efivar_size_read(struct efivar_entry *entry, char *buf) -{ - struct efi_variable *var = &entry->var; - unsigned long size = sizeof(var->Data); - char *str = buf; - int ret; - - if (!entry || !buf) - return -EINVAL; - - ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); - var->DataSize = size; - if (ret) - return -EIO; - - str += sprintf(str, "0x%lx\n", var->DataSize); - return str - buf; -} - -static ssize_t -efivar_data_read(struct efivar_entry *entry, char *buf) -{ - struct efi_variable *var = &entry->var; - unsigned long size = sizeof(var->Data); - int ret; - - if (!entry || !buf) - return -EINVAL; - - ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); - var->DataSize = size; - if (ret) - return -EIO; - - memcpy(buf, var->Data, var->DataSize); - return var->DataSize; -} - -static inline int -sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor, - unsigned long size, u32 attributes, u8 *data) -{ - /* - * If only updating the variable data, then the name - * and guid should remain the same - */ - if (memcmp(name, var->VariableName, sizeof(var->VariableName)) || - efi_guidcmp(vendor, var->VendorGuid)) { - printk(KERN_ERR "efivars: Cannot edit the wrong variable!\n"); - return -EINVAL; - } - - if ((size <= 0) || (attributes == 0)){ - printk(KERN_ERR "efivars: DataSize & Attributes must be valid!\n"); - return -EINVAL; - } - - if ((attributes & ~EFI_VARIABLE_MASK) != 0 || - efivar_validate(vendor, name, data, size) == false) { - printk(KERN_ERR "efivars: Malformed variable content\n"); - return -EINVAL; - } - - return 0; -} - -static void -copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src) -{ - memcpy(dst->VariableName, src->VariableName, EFI_VAR_NAME_LEN); - memcpy(dst->Data, src->Data, sizeof(src->Data)); - - dst->VendorGuid = src->VendorGuid; - dst->DataSize = src->DataSize; - dst->Attributes = src->Attributes; -} - -/* - * We allow each variable to be edited via rewriting the - * entire efi variable structure. 
- */ -static ssize_t -efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) -{ - struct efi_variable *new_var, *var = &entry->var; - efi_char16_t *name; - unsigned long size; - efi_guid_t vendor; - u32 attributes; - u8 *data; - int err; - - if (!entry || !buf) - return -EINVAL; - - if (in_compat_syscall()) { - struct compat_efi_variable *compat; - - if (count != sizeof(*compat)) - return -EINVAL; - - compat = (struct compat_efi_variable *)buf; - attributes = compat->Attributes; - vendor = compat->VendorGuid; - name = compat->VariableName; - size = compat->DataSize; - data = compat->Data; - - err = sanity_check(var, name, vendor, size, attributes, data); - if (err) - return err; - - copy_out_compat(&entry->var, compat); - } else { - if (count != sizeof(struct efi_variable)) - return -EINVAL; - - new_var = (struct efi_variable *)buf; - - attributes = new_var->Attributes; - vendor = new_var->VendorGuid; - name = new_var->VariableName; - size = new_var->DataSize; - data = new_var->Data; - - err = sanity_check(var, name, vendor, size, attributes, data); - if (err) - return err; - - memcpy(&entry->var, new_var, count); - } - - err = efivar_entry_set(entry, attributes, size, data, NULL); - if (err) { - printk(KERN_WARNING "efivars: set_variable() failed: status=%d\n", err); - return -EIO; - } - - return count; -} - -static ssize_t -efivar_show_raw(struct efivar_entry *entry, char *buf) -{ - struct efi_variable *var = &entry->var; - struct compat_efi_variable *compat; - unsigned long datasize = sizeof(var->Data); - size_t size; - int ret; - - if (!entry || !buf) - return 0; - - ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data); - var->DataSize = datasize; - if (ret) - return -EIO; - - if (in_compat_syscall()) { - compat = (struct compat_efi_variable *)buf; - - size = sizeof(*compat); - memcpy(compat->VariableName, var->VariableName, - EFI_VAR_NAME_LEN); - memcpy(compat->Data, var->Data, sizeof(compat->Data)); - - compat->VendorGuid = var->VendorGuid; - compat->DataSize = var->DataSize; - compat->Attributes = var->Attributes; - } else { - size = sizeof(*var); - memcpy(buf, var, size); - } - - return size; -} - -/* - * Generic read/write functions that call the specific functions of - * the attributes... 
- */ -static ssize_t efivar_attr_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct efivar_entry *var = to_efivar_entry(kobj); - struct efivar_attribute *efivar_attr = to_efivar_attr(attr); - ssize_t ret = -EIO; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (efivar_attr->show) { - ret = efivar_attr->show(var, buf); - } - return ret; -} - -static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t count) -{ - struct efivar_entry *var = to_efivar_entry(kobj); - struct efivar_attribute *efivar_attr = to_efivar_attr(attr); - ssize_t ret = -EIO; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (efivar_attr->store) - ret = efivar_attr->store(var, buf, count); - - return ret; -} - -static const struct sysfs_ops efivar_attr_ops = { - .show = efivar_attr_show, - .store = efivar_attr_store, -}; - -static void efivar_release(struct kobject *kobj) -{ - struct efivar_entry *var = to_efivar_entry(kobj); - kfree(var); -} - -static EFIVAR_ATTR(guid, 0400, efivar_guid_read, NULL); -static EFIVAR_ATTR(attributes, 0400, efivar_attr_read, NULL); -static EFIVAR_ATTR(size, 0400, efivar_size_read, NULL); -static EFIVAR_ATTR(data, 0400, efivar_data_read, NULL); -static EFIVAR_ATTR(raw_var, 0600, efivar_show_raw, efivar_store_raw); - -static struct attribute *def_attrs[] = { - &efivar_attr_guid.attr, - &efivar_attr_size.attr, - &efivar_attr_attributes.attr, - &efivar_attr_data.attr, - &efivar_attr_raw_var.attr, - NULL, -}; -ATTRIBUTE_GROUPS(def); - -static struct kobj_type efivar_ktype = { - .release = efivar_release, - .sysfs_ops = &efivar_attr_ops, - .default_groups = def_groups, -}; - -static ssize_t efivar_create(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t pos, size_t count) -{ - struct compat_efi_variable *compat = (struct compat_efi_variable *)buf; - struct efi_variable *new_var = (struct efi_variable *)buf; - struct efivar_entry *new_entry; - bool need_compat = in_compat_syscall(); - efi_char16_t *name; - unsigned long size; - u32 attributes; - u8 *data; - int err; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (need_compat) { - if (count != sizeof(*compat)) - return -EINVAL; - - attributes = compat->Attributes; - name = compat->VariableName; - size = compat->DataSize; - data = compat->Data; - } else { - if (count != sizeof(*new_var)) - return -EINVAL; - - attributes = new_var->Attributes; - name = new_var->VariableName; - size = new_var->DataSize; - data = new_var->Data; - } - - if ((attributes & ~EFI_VARIABLE_MASK) != 0 || - efivar_validate(new_var->VendorGuid, name, data, - size) == false) { - printk(KERN_ERR "efivars: Malformed variable content\n"); - return -EINVAL; - } - - new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL); - if (!new_entry) - return -ENOMEM; - - if (need_compat) - copy_out_compat(&new_entry->var, compat); - else - memcpy(&new_entry->var, new_var, sizeof(*new_var)); - - err = efivar_entry_set(new_entry, attributes, size, - data, &efivar_sysfs_list); - if (err) { - if (err == -EEXIST) - err = -EINVAL; - goto out; - } - - if (efivar_create_sysfs_entry(new_entry)) { - printk(KERN_WARNING "efivars: failed to create sysfs entry.\n"); - kfree(new_entry); - } - return count; - -out: - kfree(new_entry); - return err; -} - -static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t pos, size_t count) -{ - struct efi_variable *del_var = (struct efi_variable *)buf; - struct 
compat_efi_variable *compat; - struct efivar_entry *entry; - efi_char16_t *name; - efi_guid_t vendor; - int err = 0; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (in_compat_syscall()) { - if (count != sizeof(*compat)) - return -EINVAL; - - compat = (struct compat_efi_variable *)buf; - name = compat->VariableName; - vendor = compat->VendorGuid; - } else { - if (count != sizeof(*del_var)) - return -EINVAL; - - name = del_var->VariableName; - vendor = del_var->VendorGuid; - } - - if (efivar_entry_iter_begin()) - return -EINTR; - entry = efivar_entry_find(name, vendor, &efivar_sysfs_list, true); - if (!entry) - err = -EINVAL; - else if (__efivar_entry_delete(entry)) - err = -EIO; - - if (err) { - efivar_entry_iter_end(); - return err; - } - - if (!entry->scanning) { - efivar_entry_iter_end(); - efivar_unregister(entry); - } else - efivar_entry_iter_end(); - - /* It's dead Jim.... */ - return count; -} - -/** - * efivar_create_sysfs_entry - create a new entry in sysfs - * @new_var: efivar entry to create - * - * Returns 0 on success, negative error code on failure - */ -static int -efivar_create_sysfs_entry(struct efivar_entry *new_var) -{ - int short_name_size; - char *short_name; - unsigned long utf8_name_size; - efi_char16_t *variable_name = new_var->var.VariableName; - int ret; - - /* - * Length of the variable bytes in UTF8, plus the '-' separator, - * plus the GUID, plus trailing NUL - */ - utf8_name_size = ucs2_utf8size(variable_name); - short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1; - - short_name = kmalloc(short_name_size, GFP_KERNEL); - if (!short_name) - return -ENOMEM; - - ucs2_as_utf8(short_name, variable_name, short_name_size); - - /* This is ugly, but necessary to separate one vendor's - private variables from another's. 
*/ - short_name[utf8_name_size] = '-'; - efi_guid_to_str(&new_var->var.VendorGuid, - short_name + utf8_name_size + 1); - - new_var->kobj.kset = efivars_kset; - - ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, - NULL, "%s", short_name); - kfree(short_name); - if (ret) { - kobject_put(&new_var->kobj); - return ret; - } - - kobject_uevent(&new_var->kobj, KOBJ_ADD); - if (efivar_entry_add(new_var, &efivar_sysfs_list)) { - efivar_unregister(new_var); - return -EINTR; - } - - return 0; -} - -static int -create_efivars_bin_attributes(void) -{ - struct bin_attribute *attr; - int error; - - /* new_var */ - attr = kzalloc(sizeof(*attr), GFP_KERNEL); - if (!attr) - return -ENOMEM; - - attr->attr.name = "new_var"; - attr->attr.mode = 0200; - attr->write = efivar_create; - efivars_new_var = attr; - - /* del_var */ - attr = kzalloc(sizeof(*attr), GFP_KERNEL); - if (!attr) { - error = -ENOMEM; - goto out_free; - } - attr->attr.name = "del_var"; - attr->attr.mode = 0200; - attr->write = efivar_delete; - efivars_del_var = attr; - - sysfs_bin_attr_init(efivars_new_var); - sysfs_bin_attr_init(efivars_del_var); - - /* Register */ - error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_new_var); - if (error) { - printk(KERN_ERR "efivars: unable to create new_var sysfs file" - " due to error %d\n", error); - goto out_free; - } - - error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_del_var); - if (error) { - printk(KERN_ERR "efivars: unable to create del_var sysfs file" - " due to error %d\n", error); - sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var); - goto out_free; - } - - return 0; -out_free: - kfree(efivars_del_var); - efivars_del_var = NULL; - kfree(efivars_new_var); - efivars_new_var = NULL; - return error; -} - -static int efivars_sysfs_callback(efi_char16_t *name, efi_guid_t vendor, - unsigned long name_size, void *data) -{ - struct efivar_entry *entry; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - memcpy(entry->var.VariableName, name, name_size); - memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); - - efivar_create_sysfs_entry(entry); - - return 0; -} - -static int efivar_sysfs_destroy(struct efivar_entry *entry, void *data) -{ - int err = efivar_entry_remove(entry); - - if (err) - return err; - efivar_unregister(entry); - return 0; -} - -static void efivars_sysfs_exit(void) -{ - /* Remove all entries and destroy */ - int err; - - err = __efivar_entry_iter(efivar_sysfs_destroy, &efivar_sysfs_list, - NULL, NULL); - if (err) { - pr_err("efivars: Failed to destroy sysfs entries\n"); - return; - } - - if (efivars_new_var) - sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var); - if (efivars_del_var) - sysfs_remove_bin_file(&efivars_kset->kobj, efivars_del_var); - kfree(efivars_new_var); - kfree(efivars_del_var); - kset_unregister(efivars_kset); -} - -static int efivars_sysfs_init(void) -{ - struct kobject *parent_kobj = efivars_kobject(); - int error = 0; - - /* No efivars has been registered yet */ - if (!parent_kobj || !efivar_supports_writes()) - return 0; - - printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION, - EFIVARS_DATE); - - efivars_kset = kset_create_and_add("vars", NULL, parent_kobj); - if (!efivars_kset) { - printk(KERN_ERR "efivars: Subsystem registration failed.\n"); - return -ENOMEM; - } - - efivar_init(efivars_sysfs_callback, NULL, true, &efivar_sysfs_list); - - error = create_efivars_bin_attributes(); - if (error) { - efivars_sysfs_exit(); - return error; - } - - return 0; -} - 
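With the sysfs interface above on its way out, kernel-internal consumers are expected to go through the locked accessor helpers that the efi-pstore rework earlier in this series already uses: efivar_lock(), efivar_get_variable() and efivar_unlock(). A minimal, hypothetical read helper built only on those calls might look as follows; the helper name is invented, the 1024-byte cap mirrors the per-variable limit the EFI spec allows (as cited in the code removed from vars.c below), and modular callers additionally need MODULE_IMPORT_NS(EFIVAR) as efi-pstore.c now does:

#include <linux/efi.h>
#include <linux/slab.h>

/*
 * Hypothetical helper: read one EFI variable through the locked
 * accessors introduced by this series.
 */
static int read_one_var(efi_char16_t *name, efi_guid_t *guid,
			void **buf, unsigned long *size)
{
	efi_status_t status;
	int err;

	err = efivar_lock();
	if (err)
		return err;

	*size = 1024;	/* per-variable cap the EFI spec lets us assume */
	*buf = kmalloc(*size, GFP_KERNEL);
	if (!*buf) {
		efivar_unlock();
		return -ENOMEM;
	}

	status = efivar_get_variable(name, guid, NULL, size, *buf);
	efivar_unlock();
	if (status != EFI_SUCCESS) {
		kfree(*buf);
		return efi_status_to_err(status);
	}

	return 0;
}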
-module_init(efivars_sysfs_init); -module_exit(efivars_sysfs_exit); diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index 4df55a55da84..6ec7970dbd40 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -59,8 +59,7 @@ static void __init efi_memmap_free(void) * Depending on whether mm_init() has already been invoked or not, * either memblock or "normal" page allocation is used. * - * Returns the physical address of the allocated memory map on - * success, zero on failure. + * Returns zero on success, a negative error code on failure. */ int __init efi_memmap_alloc(unsigned int num_entries, struct efi_memory_map_data *data) @@ -245,7 +244,7 @@ int __init efi_memmap_install(struct efi_memory_map_data *data) * @range: Address range (start, end) to split around * * Returns the number of additional EFI memmap entries required to - * accomodate @range. + * accommodate @range. */ int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range) { diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index cae590bd08f2..dd74d2ad3184 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -6,306 +6,24 @@ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com> */ -#include <linux/capability.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> -#include <linux/mm.h> #include <linux/module.h> #include <linux/string.h> #include <linux/smp.h> #include <linux/efi.h> -#include <linux/sysfs.h> -#include <linux/device.h> -#include <linux/slab.h> -#include <linux/ctype.h> #include <linux/ucs2_string.h> /* Private pointer to registered efivars */ static struct efivars *__efivars; -/* - * efivars_lock protects three things: - * 1) efivarfs_list and efivars_sysfs_list - * 2) ->ops calls - * 3) (un)registration of __efivars - */ static DEFINE_SEMAPHORE(efivars_lock); -static bool -validate_device_path(efi_char16_t *var_name, int match, u8 *buffer, - unsigned long len) -{ - struct efi_generic_dev_path *node; - int offset = 0; - - node = (struct efi_generic_dev_path *)buffer; - - if (len < sizeof(*node)) - return false; - - while (offset <= len - sizeof(*node) && - node->length >= sizeof(*node) && - node->length <= len - offset) { - offset += node->length; - - if ((node->type == EFI_DEV_END_PATH || - node->type == EFI_DEV_END_PATH2) && - node->sub_type == EFI_DEV_END_ENTIRE) - return true; - - node = (struct efi_generic_dev_path *)(buffer + offset); - } - - /* - * If we're here then either node->length pointed past the end - * of the buffer or we reached the end of the buffer without - * finding a device path end node. 
- */ - return false; -} - -static bool -validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer, - unsigned long len) -{ - /* An array of 16-bit integers */ - if ((len % 2) != 0) - return false; - - return true; -} - -static bool -validate_load_option(efi_char16_t *var_name, int match, u8 *buffer, - unsigned long len) -{ - u16 filepathlength; - int i, desclength = 0, namelen; - - namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN); - - /* Either "Boot" or "Driver" followed by four digits of hex */ - for (i = match; i < match+4; i++) { - if (var_name[i] > 127 || - hex_to_bin(var_name[i] & 0xff) < 0) - return true; - } - - /* Reject it if there's 4 digits of hex and then further content */ - if (namelen > match + 4) - return false; - - /* A valid entry must be at least 8 bytes */ - if (len < 8) - return false; - - filepathlength = buffer[4] | buffer[5] << 8; - - /* - * There's no stored length for the description, so it has to be - * found by hand - */ - desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; - - /* Each boot entry must have a descriptor */ - if (!desclength) - return false; - - /* - * If the sum of the length of the description, the claimed filepath - * length and the original header are greater than the length of the - * variable, it's malformed - */ - if ((desclength + filepathlength + 6) > len) - return false; - - /* - * And, finally, check the filepath - */ - return validate_device_path(var_name, match, buffer + desclength + 6, - filepathlength); -} - -static bool -validate_uint16(efi_char16_t *var_name, int match, u8 *buffer, - unsigned long len) -{ - /* A single 16-bit integer */ - if (len != 2) - return false; - - return true; -} - -static bool -validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer, - unsigned long len) -{ - int i; - - for (i = 0; i < len; i++) { - if (buffer[i] > 127) - return false; - - if (buffer[i] == 0) - return true; - } - - return false; -} - -struct variable_validate { - efi_guid_t vendor; - char *name; - bool (*validate)(efi_char16_t *var_name, int match, u8 *data, - unsigned long len); -}; - -/* - * This is the list of variables we need to validate, as well as the - * whitelist for what we think is safe not to default to immutable. - * - * If it has a validate() method that's not NULL, it'll go into the - * validation routine. If not, it is assumed valid, but still used for - * whitelisting. - * - * Note that it's sorted by {vendor,name}, but globbed names must come after - * any other name with the same prefix. 
- */ -static const struct variable_validate variable_validate[] = { - { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 }, - { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order }, - { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option }, - { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order }, - { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option }, - { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path }, - { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path }, - { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path }, - { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path }, - { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path }, - { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path }, - { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string }, - { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL }, - { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string }, - { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 }, - { LINUX_EFI_CRASH_GUID, "*", NULL }, - { NULL_GUID, "", NULL }, -}; - -/* - * Check if @var_name matches the pattern given in @match_name. - * - * @var_name: an array of @len non-NUL characters. - * @match_name: a NUL-terminated pattern string, optionally ending in "*". A - * final "*" character matches any trailing characters @var_name, - * including the case when there are none left in @var_name. - * @match: on output, the number of non-wildcard characters in @match_name - * that @var_name matches, regardless of the return value. - * @return: whether @var_name fully matches @match_name. - */ -static bool -variable_matches(const char *var_name, size_t len, const char *match_name, - int *match) -{ - for (*match = 0; ; (*match)++) { - char c = match_name[*match]; - - switch (c) { - case '*': - /* Wildcard in @match_name means we've matched. */ - return true; - - case '\0': - /* @match_name has ended. Has @var_name too? */ - return (*match == len); - - default: - /* - * We've reached a non-wildcard char in @match_name. - * Continue only if there's an identical character in - * @var_name. 
- */ - if (*match < len && c == var_name[*match]) - continue; - return false; - } - } -} - -bool -efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, - unsigned long data_size) -{ - int i; - unsigned long utf8_size; - u8 *utf8_name; - - utf8_size = ucs2_utf8size(var_name); - utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL); - if (!utf8_name) - return false; - - ucs2_as_utf8(utf8_name, var_name, utf8_size); - utf8_name[utf8_size] = '\0'; - - for (i = 0; variable_validate[i].name[0] != '\0'; i++) { - const char *name = variable_validate[i].name; - int match = 0; - - if (efi_guidcmp(vendor, variable_validate[i].vendor)) - continue; - - if (variable_matches(utf8_name, utf8_size+1, name, &match)) { - if (variable_validate[i].validate == NULL) - break; - kfree(utf8_name); - return variable_validate[i].validate(var_name, match, - data, data_size); - } - } - kfree(utf8_name); - return true; -} -EXPORT_SYMBOL_GPL(efivar_validate); - -bool -efivar_variable_is_removable(efi_guid_t vendor, const char *var_name, - size_t len) -{ - int i; - bool found = false; - int match = 0; - - /* - * Check if our variable is in the validated variables list - */ - for (i = 0; variable_validate[i].name[0] != '\0'; i++) { - if (efi_guidcmp(variable_validate[i].vendor, vendor)) - continue; - - if (variable_matches(var_name, len, - variable_validate[i].name, &match)) { - found = true; - break; - } - } - - /* - * If it's in our list, it is removable. - */ - return found; -} -EXPORT_SYMBOL_GPL(efivar_variable_is_removable); - -static efi_status_t -check_var_size(u32 attributes, unsigned long size) +efi_status_t check_var_size(u32 attributes, unsigned long size) { const struct efivar_operations *fops; - if (!__efivars) - return EFI_UNSUPPORTED; - fops = __efivars->ops; if (!fops->query_variable_store) @@ -313,15 +31,12 @@ check_var_size(u32 attributes, unsigned long size) return fops->query_variable_store(attributes, size, false); } +EXPORT_SYMBOL_NS_GPL(check_var_size, EFIVAR); -static efi_status_t -check_var_size_nonblocking(u32 attributes, unsigned long size) +efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size) { const struct efivar_operations *fops; - if (!__efivars) - return EFI_UNSUPPORTED; - fops = __efivars->ops; if (!fops->query_variable_store) @@ -329,894 +44,228 @@ check_var_size_nonblocking(u32 attributes, unsigned long size) return fops->query_variable_store(attributes, size, true); } - -static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor, - struct list_head *head) -{ - struct efivar_entry *entry, *n; - unsigned long strsize1, strsize2; - bool found = false; - - strsize1 = ucs2_strsize(variable_name, 1024); - list_for_each_entry_safe(entry, n, head, list) { - strsize2 = ucs2_strsize(entry->var.VariableName, 1024); - if (strsize1 == strsize2 && - !memcmp(variable_name, &(entry->var.VariableName), - strsize2) && - !efi_guidcmp(entry->var.VendorGuid, - *vendor)) { - found = true; - break; - } - } - return found; -} - -/* - * Returns the size of variable_name, in bytes, including the - * terminating NULL character, or variable_name_size if no NULL - * character is found among the first variable_name_size bytes. - */ -static unsigned long var_name_strnsize(efi_char16_t *variable_name, - unsigned long variable_name_size) -{ - unsigned long len; - efi_char16_t c; - - /* - * The variable name is, by definition, a NULL-terminated - * string, so make absolutely sure that variable_name_size is - * the value we expect it to be. If not, return the real size. 
- */ - for (len = 2; len <= variable_name_size; len += sizeof(c)) { - c = variable_name[(len / sizeof(c)) - 1]; - if (!c) - break; - } - - return min(len, variable_name_size); -} - -/* - * Print a warning when duplicate EFI variables are encountered and - * disable the sysfs workqueue since the firmware is buggy. - */ -static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid, - unsigned long len16) -{ - size_t i, len8 = len16 / sizeof(efi_char16_t); - char *str8; - - str8 = kzalloc(len8, GFP_KERNEL); - if (!str8) - return; - - for (i = 0; i < len8; i++) - str8[i] = str16[i]; - - printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n", - str8, vendor_guid); - kfree(str8); -} +EXPORT_SYMBOL_NS_GPL(check_var_size_nonblocking, EFIVAR); /** - * efivar_init - build the initial list of EFI variables - * @func: callback function to invoke for every variable - * @data: function-specific data to pass to @func - * @duplicates: error if we encounter duplicates on @head? - * @head: initialised head of variable list - * - * Get every EFI variable from the firmware and invoke @func. @func - * should call efivar_entry_add() to build the list of variables. + * efivars_kobject - get the kobject for the registered efivars * - * Returns 0 on success, or a kernel error code on failure. + * If efivars_register() has not been called we return NULL, + * otherwise return the kobject used at registration time. */ -int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), - void *data, bool duplicates, struct list_head *head) +struct kobject *efivars_kobject(void) { - const struct efivar_operations *ops; - unsigned long variable_name_size = 1024; - efi_char16_t *variable_name; - efi_status_t status; - efi_guid_t vendor_guid; - int err = 0; - if (!__efivars) - return -EFAULT; - - ops = __efivars->ops; - - variable_name = kzalloc(variable_name_size, GFP_KERNEL); - if (!variable_name) { - printk(KERN_ERR "efivars: Memory allocation failed.\n"); - return -ENOMEM; - } - - if (down_interruptible(&efivars_lock)) { - err = -EINTR; - goto free; - } - - /* - * Per EFI spec, the maximum storage allocated for both - * the variable name and variable data is 1024 bytes. - */ - - do { - variable_name_size = 1024; - - status = ops->get_next_variable(&variable_name_size, - variable_name, - &vendor_guid); - switch (status) { - case EFI_SUCCESS: - if (duplicates) - up(&efivars_lock); - - variable_name_size = var_name_strnsize(variable_name, - variable_name_size); - - /* - * Some firmware implementations return the - * same variable name on multiple calls to - * get_next_variable(). Terminate the loop - * immediately as there is no guarantee that - * we'll ever see a different variable name, - * and may end up looping here forever. 
- */ - if (duplicates && - variable_is_present(variable_name, &vendor_guid, - head)) { - dup_variable_bug(variable_name, &vendor_guid, - variable_name_size); - status = EFI_NOT_FOUND; - } else { - err = func(variable_name, vendor_guid, - variable_name_size, data); - if (err) - status = EFI_NOT_FOUND; - } - - if (duplicates) { - if (down_interruptible(&efivars_lock)) { - err = -EINTR; - goto free; - } - } - - break; - case EFI_UNSUPPORTED: - err = -EOPNOTSUPP; - status = EFI_NOT_FOUND; - break; - case EFI_NOT_FOUND: - break; - default: - printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n", - status); - status = EFI_NOT_FOUND; - break; - } - - } while (status != EFI_NOT_FOUND); - - up(&efivars_lock); -free: - kfree(variable_name); + return NULL; - return err; + return __efivars->kobject; } -EXPORT_SYMBOL_GPL(efivar_init); +EXPORT_SYMBOL_GPL(efivars_kobject); /** - * efivar_entry_add - add entry to variable list - * @entry: entry to add to list - * @head: list head + * efivars_register - register an efivars + * @efivars: efivars to register + * @ops: efivars operations + * @kobject: @efivars-specific kobject * - * Returns 0 on success, or a kernel error code on failure. + * Only a single efivars can be registered at any time. */ -int efivar_entry_add(struct efivar_entry *entry, struct list_head *head) +int efivars_register(struct efivars *efivars, + const struct efivar_operations *ops, + struct kobject *kobject) { if (down_interruptible(&efivars_lock)) return -EINTR; - list_add(&entry->list, head); - up(&efivars_lock); - return 0; -} -EXPORT_SYMBOL_GPL(efivar_entry_add); + efivars->ops = ops; + efivars->kobject = kobject; -/** - * efivar_entry_remove - remove entry from variable list - * @entry: entry to remove from list - * - * Returns 0 on success, or a kernel error code on failure. - */ -int efivar_entry_remove(struct efivar_entry *entry) -{ - if (down_interruptible(&efivars_lock)) - return -EINTR; - list_del(&entry->list); - up(&efivars_lock); + __efivars = efivars; - return 0; -} -EXPORT_SYMBOL_GPL(efivar_entry_remove); + pr_info("Registered efivars operations\n"); -/* - * efivar_entry_list_del_unlock - remove entry from variable list - * @entry: entry to remove - * - * Remove @entry from the variable list and release the list lock. - * - * NOTE: slightly weird locking semantics here - we expect to be - * called with the efivars lock already held, and we release it before - * returning. This is because this function is usually called after - * set_variable() while the lock is still held. - */ -static void efivar_entry_list_del_unlock(struct efivar_entry *entry) -{ - list_del(&entry->list); up(&efivars_lock); -} -/** - * __efivar_entry_delete - delete an EFI variable - * @entry: entry containing EFI variable to delete - * - * Delete the variable from the firmware but leave @entry on the - * variable list. - * - * This function differs from efivar_entry_delete() because it does - * not remove @entry from the variable list. Also, it is safe to be - * called from within a efivar_entry_iter_begin() and - * efivar_entry_iter_end() region, unlike efivar_entry_delete(). - * - * Returns 0 on success, or a converted EFI status code if - * set_variable() fails. 
- */ -int __efivar_entry_delete(struct efivar_entry *entry) -{ - efi_status_t status; - - if (!__efivars) - return -EINVAL; - - status = __efivars->ops->set_variable(entry->var.VariableName, - &entry->var.VendorGuid, - 0, 0, NULL); - - return efi_status_to_err(status); -} -EXPORT_SYMBOL_GPL(__efivar_entry_delete); - -/** - * efivar_entry_delete - delete variable and remove entry from list - * @entry: entry containing variable to delete - * - * Delete the variable from the firmware and remove @entry from the - * variable list. It is the caller's responsibility to free @entry - * once we return. - * - * Returns 0 on success, -EINTR if we can't grab the semaphore, - * converted EFI status code if set_variable() fails. - */ -int efivar_entry_delete(struct efivar_entry *entry) -{ - const struct efivar_operations *ops; - efi_status_t status; - - if (down_interruptible(&efivars_lock)) - return -EINTR; - - if (!__efivars) { - up(&efivars_lock); - return -EINVAL; - } - ops = __efivars->ops; - status = ops->set_variable(entry->var.VariableName, - &entry->var.VendorGuid, - 0, 0, NULL); - if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) { - up(&efivars_lock); - return efi_status_to_err(status); - } - - efivar_entry_list_del_unlock(entry); return 0; } -EXPORT_SYMBOL_GPL(efivar_entry_delete); +EXPORT_SYMBOL_GPL(efivars_register); /** - * efivar_entry_set - call set_variable() - * @entry: entry containing the EFI variable to write - * @attributes: variable attributes - * @size: size of @data buffer - * @data: buffer containing variable data - * @head: head of variable list - * - * Calls set_variable() for an EFI variable. If creating a new EFI - * variable, this function is usually followed by efivar_entry_add(). - * - * Before writing the variable, the remaining EFI variable storage - * space is checked to ensure there is enough room available. - * - * If @head is not NULL a lookup is performed to determine whether - * the entry is already on the list. + * efivars_unregister - unregister an efivars + * @efivars: efivars to unregister * - * Returns 0 on success, -EINTR if we can't grab the semaphore, - * -EEXIST if a lookup is performed and the entry already exists on - * the list, or a converted EFI status code if set_variable() fails. + * The caller must have already removed every entry from the list; + * failure to do so is an error. */ -int efivar_entry_set(struct efivar_entry *entry, u32 attributes, - unsigned long size, void *data, struct list_head *head) +int efivars_unregister(struct efivars *efivars) { - const struct efivar_operations *ops; - efi_status_t status; - efi_char16_t *name = entry->var.VariableName; - efi_guid_t vendor = entry->var.VendorGuid; + int rv; if (down_interruptible(&efivars_lock)) return -EINTR; if (!__efivars) { - up(&efivars_lock); - return -EINVAL; - } - ops = __efivars->ops; - if (head && efivar_entry_find(name, vendor, head, false)) { - up(&efivars_lock); - return -EEXIST; - } - - status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); - if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED) - status = ops->set_variable(name, &vendor, - attributes, size, data); - - up(&efivars_lock); - - return efi_status_to_err(status); - -} -EXPORT_SYMBOL_GPL(efivar_entry_set); - -/* - * efivar_entry_set_nonblocking - call set_variable_nonblocking() - * - * This function is guaranteed to not block and is suitable for calling - * from crash/panic handlers. - * - * Crucially, this function will not block if it cannot acquire - * efivars_lock.
Instead, it returns -EBUSY. - */ -static int -efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor, - u32 attributes, unsigned long size, void *data) -{ - const struct efivar_operations *ops; - efi_status_t status; - - if (down_trylock(&efivars_lock)) - return -EBUSY; - - if (!__efivars) { - up(&efivars_lock); - return -EINVAL; - } - - status = check_var_size_nonblocking(attributes, - size + ucs2_strsize(name, 1024)); - if (status != EFI_SUCCESS) { - up(&efivars_lock); - return -ENOSPC; - } - - ops = __efivars->ops; - status = ops->set_variable_nonblocking(name, &vendor, attributes, - size, data); - - up(&efivars_lock); - return efi_status_to_err(status); -} - -/** - * efivar_entry_set_safe - call set_variable() if enough space in firmware - * @name: buffer containing the variable name - * @vendor: variable vendor guid - * @attributes: variable attributes - * @block: can we block in this context? - * @size: size of @data buffer - * @data: buffer containing variable data - * - * Ensures there is enough free storage in the firmware for this variable, and - * if so, calls set_variable(). If creating a new EFI variable, this function - * is usually followed by efivar_entry_add(). - * - * Returns 0 on success, -ENOSPC if the firmware does not have enough - * space for set_variable() to succeed, or a converted EFI status code - * if set_variable() fails. - */ -int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, - bool block, unsigned long size, void *data) -{ - const struct efivar_operations *ops; - efi_status_t status; - unsigned long varsize; - - if (!__efivars) - return -EINVAL; - - ops = __efivars->ops; - if (!ops->query_variable_store) - return -ENOSYS; - - /* - * If the EFI variable backend provides a non-blocking - * ->set_variable() operation and we're in a context where we - * cannot block, then we need to use it to avoid live-locks, - * since the implication is that the regular ->set_variable() - * will block. - * - * If no ->set_variable_nonblocking() is provided then - * ->set_variable() is assumed to be non-blocking. - */ - if (!block && ops->set_variable_nonblocking) - return efivar_entry_set_nonblocking(name, vendor, attributes, - size, data); - - varsize = size + ucs2_strsize(name, 1024); - if (!block) { - if (down_trylock(&efivars_lock)) - return -EBUSY; - status = check_var_size_nonblocking(attributes, varsize); - } else { - if (down_interruptible(&efivars_lock)) - return -EINTR; - status = check_var_size(attributes, varsize); + printk(KERN_ERR "efivars not registered\n"); + rv = -EINVAL; + goto out; } - if (status != EFI_SUCCESS) { - up(&efivars_lock); - return -ENOSPC; + if (__efivars != efivars) { + rv = -EINVAL; + goto out; } - status = ops->set_variable(name, &vendor, attributes, size, data); + pr_info("Unregistered efivars operations\n"); + __efivars = NULL; + rv = 0; +out: up(&efivars_lock); - - return efi_status_to_err(status); + return rv; } -EXPORT_SYMBOL_GPL(efivar_entry_set_safe); +EXPORT_SYMBOL_GPL(efivars_unregister); -/** - * efivar_entry_find - search for an entry - * @name: the EFI variable name - * @guid: the EFI variable vendor's guid - * @head: head of the variable list - * @remove: should we remove the entry from the list? - * - * Search for an entry on the variable list that has the EFI variable - * name @name and vendor guid @guid. If an entry is found on the list - * and @remove is true, the entry is removed from the list. 
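For context, a brief sketch of how a firmware backend might adopt the single-provider registration model above; the backend names are invented for illustration, and nothing beyond efivars_register() and efivars_unregister() is taken from the patch:

/* Hypothetical backend glue, assuming <linux/efi.h> is available. */
static struct efivars example_efivars;

static int example_backend_attach(struct kobject *kobj,
				  const struct efivar_operations *ops)
{
	/* Returns -EINTR if interrupted while waiting for efivars_lock. */
	return efivars_register(&example_efivars, ops, kobj);
}

static void example_backend_detach(void)
{
	/* Returns -EINVAL if a different provider is currently registered. */
	efivars_unregister(&example_efivars);
}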
- * - * The caller MUST call efivar_entry_iter_begin() and - * efivar_entry_iter_end() before and after the invocation of this - * function, respectively. - * - * Returns the entry if found on the list, %NULL otherwise. - */ -struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, - struct list_head *head, bool remove) +int efivar_supports_writes(void) { - struct efivar_entry *entry, *n; - int strsize1, strsize2; - bool found = false; - - list_for_each_entry_safe(entry, n, head, list) { - strsize1 = ucs2_strsize(name, 1024); - strsize2 = ucs2_strsize(entry->var.VariableName, 1024); - if (strsize1 == strsize2 && - !memcmp(name, &(entry->var.VariableName), strsize1) && - !efi_guidcmp(guid, entry->var.VendorGuid)) { - found = true; - break; - } - } - - if (!found) - return NULL; - - if (remove) { - if (entry->scanning) { - /* - * The entry will be deleted - * after scanning is completed. - */ - entry->deleting = true; - } else - list_del(&entry->list); - } - - return entry; + return __efivars && __efivars->ops->set_variable; } -EXPORT_SYMBOL_GPL(efivar_entry_find); +EXPORT_SYMBOL_GPL(efivar_supports_writes); -/** - * efivar_entry_size - obtain the size of a variable - * @entry: entry for this variable - * @size: location to store the variable's size +/* + * efivar_lock() - obtain the efivar lock, wait for it if needed + * @return 0 on success, error code on failure */ -int efivar_entry_size(struct efivar_entry *entry, unsigned long *size) +int efivar_lock(void) { - const struct efivar_operations *ops; - efi_status_t status; - - *size = 0; - if (down_interruptible(&efivars_lock)) return -EINTR; - if (!__efivars) { + if (!__efivars->ops) { up(&efivars_lock); - return -EINVAL; + return -ENODEV; } - ops = __efivars->ops; - status = ops->get_variable(entry->var.VariableName, - &entry->var.VendorGuid, NULL, size, NULL); - up(&efivars_lock); - - if (status != EFI_BUFFER_TOO_SMALL) - return efi_status_to_err(status); - return 0; } -EXPORT_SYMBOL_GPL(efivar_entry_size); +EXPORT_SYMBOL_NS_GPL(efivar_lock, EFIVAR); -/** - * __efivar_entry_get - call get_variable() - * @entry: read data for this variable - * @attributes: variable attributes - * @size: size of @data buffer - * @data: buffer to store variable data - * - * The caller MUST call efivar_entry_iter_begin() and - * efivar_entry_iter_end() before and after the invocation of this - * function, respectively. 
- */ -int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes, - unsigned long *size, void *data) -{ - efi_status_t status; - - if (!__efivars) - return -EINVAL; - - status = __efivars->ops->get_variable(entry->var.VariableName, - &entry->var.VendorGuid, - attributes, size, data); - - return efi_status_to_err(status); -} -EXPORT_SYMBOL_GPL(__efivar_entry_get); - -/** - * efivar_entry_get - call get_variable() - * @entry: read data for this variable - * @attributes: variable attributes - * @size: size of @data buffer - * @data: buffer to store variable data +/* + * efivar_trylock() - obtain the efivar lock if it is free + * @return 0 on success, error code on failure */ -int efivar_entry_get(struct efivar_entry *entry, u32 *attributes, - unsigned long *size, void *data) +int efivar_trylock(void) { - efi_status_t status; - - if (down_interruptible(&efivars_lock)) - return -EINTR; - - if (!__efivars) { + if (down_trylock(&efivars_lock)) + return -EBUSY; + if (!__efivars->ops) { up(&efivars_lock); - return -EINVAL; - } - - status = __efivars->ops->get_variable(entry->var.VariableName, - &entry->var.VendorGuid, - attributes, size, data); - up(&efivars_lock); - - return efi_status_to_err(status); -} -EXPORT_SYMBOL_GPL(efivar_entry_get); - -/** - * efivar_entry_set_get_size - call set_variable() and get new size (atomic) - * @entry: entry containing variable to set and get - * @attributes: attributes of variable to be written - * @size: size of data buffer - * @data: buffer containing data to write - * @set: did the set_variable() call succeed? - * - * This is a pretty special (complex) function. See efivarfs_file_write(). - * - * Atomically call set_variable() for @entry and if the call is - * successful, return the new size of the variable from get_variable() - * in @size. The success of set_variable() is indicated by @set. - * - * Returns 0 on success, -EINVAL if the variable data is invalid, - * -ENOSPC if the firmware does not have enough available space, or a - * converted EFI status code if either of set_variable() or - * get_variable() fail. - * - * If the EFI variable does not exist when calling set_variable() - * (EFI_NOT_FOUND), @entry is removed from the variable list. - */ -int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, - unsigned long *size, void *data, bool *set) -{ - const struct efivar_operations *ops; - efi_char16_t *name = entry->var.VariableName; - efi_guid_t *vendor = &entry->var.VendorGuid; - efi_status_t status; - int err; - - *set = false; - - if (efivar_validate(*vendor, name, data, *size) == false) - return -EINVAL; - - /* - * The lock here protects the get_variable call, the conditional - * set_variable call, and removal of the variable from the efivars - * list (in the case of an authenticated delete).
- */ - if (down_interruptible(&efivars_lock)) - return -EINTR; - - if (!__efivars) { - err = -EINVAL; - goto out; - } - - /* - * Ensure that the available space hasn't shrunk below the safe level - */ - status = check_var_size(attributes, *size + ucs2_strsize(name, 1024)); - if (status != EFI_SUCCESS) { - if (status != EFI_UNSUPPORTED) { - err = efi_status_to_err(status); - goto out; - } - - if (*size > 65536) { - err = -ENOSPC; - goto out; - } - } - - ops = __efivars->ops; - - status = ops->set_variable(name, vendor, attributes, *size, data); - if (status != EFI_SUCCESS) { - err = efi_status_to_err(status); - goto out; + return -ENODEV; } - - *set = true; - - /* - * Writing to the variable may have caused a change in size (which - * could either be an append or an overwrite), or the variable to be - * deleted. Perform a GetVariable() so we can tell what actually - * happened. - */ - *size = 0; - status = ops->get_variable(entry->var.VariableName, - &entry->var.VendorGuid, - NULL, size, NULL); - - if (status == EFI_NOT_FOUND) - efivar_entry_list_del_unlock(entry); - else - up(&efivars_lock); - - if (status && status != EFI_BUFFER_TOO_SMALL) - return efi_status_to_err(status); - return 0; - -out: - up(&efivars_lock); - return err; - } -EXPORT_SYMBOL_GPL(efivar_entry_set_get_size); +EXPORT_SYMBOL_NS_GPL(efivar_trylock, EFIVAR); -/** - * efivar_entry_iter_begin - begin iterating the variable list - * - * Lock the variable list to prevent entry insertion and removal until - * efivar_entry_iter_end() is called. This function is usually used in - * conjunction with __efivar_entry_iter() or efivar_entry_iter(). - */ -int efivar_entry_iter_begin(void) -{ - return down_interruptible(&efivars_lock); -} -EXPORT_SYMBOL_GPL(efivar_entry_iter_begin); - -/** - * efivar_entry_iter_end - finish iterating the variable list - * - * Unlock the variable list and allow modifications to the list again. +/* + * efivar_unlock() - release the efivar lock */ -void efivar_entry_iter_end(void) +void efivar_unlock(void) { up(&efivars_lock); } -EXPORT_SYMBOL_GPL(efivar_entry_iter_end); +EXPORT_SYMBOL_NS_GPL(efivar_unlock, EFIVAR); -/** - * __efivar_entry_iter - iterate over variable list - * @func: callback function - * @head: head of the variable list - * @data: function-specific data to pass to callback - * @prev: entry to begin iterating from - * - * Iterate over the list of EFI variables and call @func with every - * entry on the list. It is safe for @func to remove entries in the - * list via efivar_entry_delete(). - * - * You MUST call efivar_entry_iter_begin() before this function, and - * efivar_entry_iter_end() afterwards. - * - * It is possible to begin iteration from an arbitrary entry within - * the list by passing @prev. @prev is updated on return to point to - * the last entry passed to @func. To begin iterating from the - * beginning of the list @prev must be %NULL. +/* + * efivar_get_variable() - retrieve a variable identified by name/vendor * - * The restrictions for @func are the same as documented for - * efivar_entry_iter(). + * Must be called with efivars_lock held. 
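A hedged sketch of the blocking lock in use, probing a variable's size the way the removed efivar_entry_size() used to; the helper name is invented, and efivar_get_variable() is the accessor whose kernel-doc appears just above:

static efi_status_t example_probe_size(efi_char16_t *name, efi_guid_t *vendor,
				       unsigned long *size)
{
	efi_status_t status;

	if (efivar_lock())
		return EFI_ABORTED;	/* interrupted, or no provider registered */

	*size = 0;
	/* A NULL buffer makes firmware return EFI_BUFFER_TOO_SMALL plus the size. */
	status = efivar_get_variable(name, vendor, NULL, size, NULL);

	efivar_unlock();
	return status;
}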
*/ -int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *), - struct list_head *head, void *data, - struct efivar_entry **prev) +efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor, + u32 *attr, unsigned long *size, void *data) { - struct efivar_entry *entry, *n; - int err = 0; - - if (!prev || !*prev) { - list_for_each_entry_safe(entry, n, head, list) { - err = func(entry, data); - if (err) - break; - } - - if (prev) - *prev = entry; - - return err; - } - - - list_for_each_entry_safe_continue((*prev), n, head, list) { - err = func(*prev, data); - if (err) - break; - } - - return err; + return __efivars->ops->get_variable(name, vendor, attr, size, data); } -EXPORT_SYMBOL_GPL(__efivar_entry_iter); +EXPORT_SYMBOL_NS_GPL(efivar_get_variable, EFIVAR); -/** - * efivar_entry_iter - iterate over variable list - * @func: callback function - * @head: head of variable list - * @data: function-specific data to pass to callback - * - * Iterate over the list of EFI variables and call @func with every - * entry on the list. It is safe for @func to remove entries in the - * list via efivar_entry_delete() while iterating. +/* + * efivar_get_next_variable() - enumerate the next name/vendor pair * - * Some notes for the callback function: - * - a non-zero return value indicates an error and terminates the loop - * - @func is called from atomic context + * Must be called with efivars_lock held. */ -int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), - struct list_head *head, void *data) +efi_status_t efivar_get_next_variable(unsigned long *name_size, + efi_char16_t *name, efi_guid_t *vendor) { - int err = 0; - - err = efivar_entry_iter_begin(); - if (err) - return err; - err = __efivar_entry_iter(func, head, data, NULL); - efivar_entry_iter_end(); - - return err; + return __efivars->ops->get_next_variable(name_size, name, vendor); } -EXPORT_SYMBOL_GPL(efivar_entry_iter); +EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, EFIVAR); -/** - * efivars_kobject - get the kobject for the registered efivars +/* + * efivar_set_variable_blocking() - local helper function for set_variable * - * If efivars_register() has not been called we return NULL, - * otherwise return the kobject used at registration time. + * Must be called with efivars_lock held. */ -struct kobject *efivars_kobject(void) +static efi_status_t +efivar_set_variable_blocking(efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, void *data) { - if (!__efivars) - return NULL; + efi_status_t status; - return __efivars->kobject; + if (data_size > 0) { + status = check_var_size(attr, data_size + + ucs2_strsize(name, 1024)); + if (status != EFI_SUCCESS) + return status; + } + return __efivars->ops->set_variable(name, vendor, attr, data_size, data); } -EXPORT_SYMBOL_GPL(efivars_kobject); -/** - * efivars_register - register an efivars - * @efivars: efivars to register - * @ops: efivars operations - * @kobject: @efivars-specific kobject +/* + * efivar_set_variable_locked() - set a variable identified by name/vendor * - * Only a single efivars can be registered at any time. + * Must be called with efivars_lock held. If @nonblocking is set, it will use + * non-blocking primitives so it is guaranteed not to sleep. 
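Combining the pieces, a crash-context writer would pair the trylock with the nonblocking path of the locked setter; a hedged sketch of that pattern (the helper is invented, roughly the shape a pstore-style backend needs):

static efi_status_t example_panic_write(efi_char16_t *name, efi_guid_t *vendor,
					u32 attr, unsigned long size, void *data)
{
	efi_status_t status;

	if (efivar_trylock())
		return EFI_NOT_READY;	/* contended or no provider; never sleeps */

	/* nonblocking == true prefers ->set_variable_nonblocking when present. */
	status = efivar_set_variable_locked(name, vendor, attr, size, data, true);

	efivar_unlock();
	return status;
}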
*/ -int efivars_register(struct efivars *efivars, - const struct efivar_operations *ops, - struct kobject *kobject) +efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, + void *data, bool nonblocking) { - if (down_interruptible(&efivars_lock)) - return -EINTR; - - efivars->ops = ops; - efivars->kobject = kobject; - - __efivars = efivars; + efi_set_variable_t *setvar; + efi_status_t status; - pr_info("Registered efivars operations\n"); + if (!nonblocking) + return efivar_set_variable_blocking(name, vendor, attr, + data_size, data); - up(&efivars_lock); - - return 0; + /* + * If no _nonblocking variant exists, the ordinary one + * is assumed to be non-blocking. + */ + setvar = __efivars->ops->set_variable_nonblocking ?: + __efivars->ops->set_variable; + + if (data_size > 0) { + status = check_var_size_nonblocking(attr, data_size + + ucs2_strsize(name, 1024)); + if (status != EFI_SUCCESS) + return status; + } + return setvar(name, vendor, attr, data_size, data); } -EXPORT_SYMBOL_GPL(efivars_register); +EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, EFIVAR); -/** - * efivars_unregister - unregister an efivars - * @efivars: efivars to unregister +/* + * efivar_set_variable() - set a variable identified by name/vendor * - * The caller must have already removed every entry from the list, - * failure to do so is an error. + * Can be called without holding the efivars_lock. Will sleep on obtaining the + * lock, or on obtaining other locks that are needed in order to complete the + * call. */ -int efivars_unregister(struct efivars *efivars) +efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, void *data) { - int rv; - - if (down_interruptible(&efivars_lock)) - return -EINTR; - - if (!__efivars) { - printk(KERN_ERR "efivars not registered\n"); - rv = -EINVAL; - goto out; - } - - if (__efivars != efivars) { - rv = -EINVAL; - goto out; - } - - pr_info("Unregistered efivars operations\n"); - __efivars = NULL; + efi_status_t status; - rv = 0; -out: - up(&efivars_lock); - return rv; -} -EXPORT_SYMBOL_GPL(efivars_unregister); + if (efivar_lock()) + return EFI_ABORTED; -int efivar_supports_writes(void) -{ - return __efivars && __efivars->ops->set_variable; + status = efivar_set_variable_blocking(name, vendor, attr, data_size, data); + efivar_unlock(); + return status; } -EXPORT_SYMBOL_GPL(efivar_supports_writes); +EXPORT_SYMBOL_NS_GPL(efivar_set_variable, EFIVAR); diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c index 1829ba220576..9f918b9e6f8f 100644 --- a/drivers/firmware/qcom_scm-legacy.c +++ b/drivers/firmware/qcom_scm-legacy.c @@ -120,6 +120,9 @@ static void __scm_legacy_do(const struct arm_smccc_args *smc, /** * scm_legacy_call() - Sends a command to the SCM and waits for the command to * finish processing. 
+ * @dev: device + * @desc: descriptor structure containing arguments and return values + * @res: results from SMC call * * A note on cache maintenance: * Note that any buffers that are expected to be accessed by the secure world @@ -211,6 +214,7 @@ out: /** * scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments * and 3 return values + * @unused: device, legacy argument, not used, can be NULL * @desc: SCM call descriptor containing arguments * @res: SCM call return values * diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 3163660fa8e2..cdbfe54c8146 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -7,6 +7,7 @@ #include <linux/cpumask.h> #include <linux/export.h> #include <linux/dma-mapping.h> +#include <linux/interconnect.h> #include <linux/module.h> #include <linux/types.h> #include <linux/qcom_scm.h> @@ -31,8 +32,13 @@ struct qcom_scm { struct clk *core_clk; struct clk *iface_clk; struct clk *bus_clk; + struct icc_path *path; struct reset_controller_dev reset; + /* control access to the interconnect path */ + struct mutex scm_bw_lock; + int scm_vote_count; + u64 dload_mode_addr; }; @@ -99,6 +105,42 @@ static void qcom_scm_clk_disable(void) clk_disable_unprepare(__scm->bus_clk); } +static int qcom_scm_bw_enable(void) +{ + int ret = 0; + + if (!__scm->path) + return 0; + + if (IS_ERR(__scm->path)) + return -EINVAL; + + mutex_lock(&__scm->scm_bw_lock); + if (!__scm->scm_vote_count) { + ret = icc_set_bw(__scm->path, 0, UINT_MAX); + if (ret < 0) { + dev_err(__scm->dev, "failed to set bandwidth request\n"); + goto err_bw; + } + } + __scm->scm_vote_count++; +err_bw: + mutex_unlock(&__scm->scm_bw_lock); + + return ret; +} + +static void qcom_scm_bw_disable(void) +{ + if (IS_ERR_OR_NULL(__scm->path)) + return; + + mutex_lock(&__scm->scm_bw_lock); + if (__scm->scm_vote_count-- == 1) + icc_set_bw(__scm->path, 0, 0); + mutex_unlock(&__scm->scm_bw_lock); +} + enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN; static DEFINE_SPINLOCK(scm_query_lock); @@ -444,10 +486,15 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, if (ret) goto out; + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + desc.args[1] = mdata_phys; ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); qcom_scm_clk_disable(); out: @@ -507,7 +554,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) if (ret) return ret; + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); qcom_scm_clk_disable(); return ret ? : res.result[0]; @@ -537,7 +589,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral) if (ret) return ret; + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); qcom_scm_clk_disable(); return ret ? : res.result[0]; @@ -566,8 +623,13 @@ int qcom_scm_pas_shutdown(u32 peripheral) if (ret) return ret; + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); qcom_scm_clk_disable(); return ret ? 
: res.result[0]; @@ -1277,8 +1339,15 @@ static int qcom_scm_probe(struct platform_device *pdev) if (ret < 0) return ret; + mutex_init(&scm->scm_bw_lock); + clks = (unsigned long)of_device_get_match_data(&pdev->dev); + scm->path = devm_of_icc_get(&pdev->dev, NULL); + if (IS_ERR(scm->path)) + return dev_err_probe(&pdev->dev, PTR_ERR(scm->path), + "failed to acquire interconnect path\n"); + scm->core_clk = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(scm->core_clk)) { if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER) @@ -1337,7 +1406,7 @@ static int qcom_scm_probe(struct platform_device *pdev) /* * If requested enable "download mode", from this point on warmboot - * will cause the the boot stages to enter download mode, unless + * will cause the boot stages to enter download mode, unless * disabled below by a clean shutdown/reboot. */ if (download_mode) diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c index fd89899aeeed..0c440afd5224 100644 --- a/drivers/firmware/tegra/bpmp-debugfs.c +++ b/drivers/firmware/tegra/bpmp-debugfs.c @@ -474,7 +474,7 @@ static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp, mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0; dentry = debugfs_create_file(name, mode, parent, bpmp, &bpmp_debug_fops); - if (!dentry) { + if (IS_ERR(dentry)) { err = -ENOMEM; goto out; } @@ -725,7 +725,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf, if (t & DEBUGFS_S_ISDIR) { dentry = debugfs_create_dir(name, parent); - if (!dentry) + if (IS_ERR(dentry)) return -ENOMEM; err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1); if (err < 0) @@ -738,7 +738,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf, dentry = debugfs_create_file(name, mode, parent, bpmp, &debugfs_fops); - if (!dentry) + if (IS_ERR(dentry)) return -ENOMEM; } } @@ -788,11 +788,11 @@ int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp) return 0; root = debugfs_create_dir("bpmp", NULL); - if (!root) + if (IS_ERR(root)) return -ENOMEM; bpmp->debugfs_mirror = debugfs_create_dir("debug", root); - if (!bpmp->debugfs_mirror) { + if (IS_ERR(bpmp->debugfs_mirror)) { err = -ENOMEM; goto out; } diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index 5654c5e9862b..037db21de510 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -201,7 +201,7 @@ static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel, int err; if (data && size > 0) - memcpy(data, channel->ib->data, size); + memcpy_fromio(data, channel->ib->data, size); err = tegra_bpmp_ack_response(channel); if (err < 0) @@ -245,7 +245,7 @@ static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel, channel->ob->flags = flags; if (data && size > 0) - memcpy(channel->ob->data, data, size); + memcpy_toio(channel->ob->data, data, size); return tegra_bpmp_post_request(channel); } @@ -420,7 +420,7 @@ void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code, channel->ob->code = code; if (data && size > 0) - memcpy(channel->ob->data, data, size); + memcpy_toio(channel->ob->data, data, size); err = tegra_bpmp_post_response(channel); if (WARN_ON(err < 0)) diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 7977a494a651..d1f652802181 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -2,7 +2,7 @@ /* * Xilinx Zynq MPSoC Firmware layer * - * Copyright (C) 2014-2021 Xilinx, Inc. + * Copyright (C) 2014-2022 Xilinx, Inc. 
* * Michal Simek <michal.simek@xilinx.com> * Davorin Mista <davorin.mista@aggios.com> @@ -340,6 +340,20 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1, static u32 pm_api_version; static u32 pm_tz_version; +int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset) +{ + int ret; + + ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, sgi_num, reset, 0, 0, + NULL); + if (!ret) + return ret; + + /* try old implementation as fallback strategy if above fails */ + return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num, + reset, NULL); +} + /** * zynqmp_pm_get_api_version() - Get version number of PMU PM firmware * @version: Returned version value diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c index b2c90bdd39d0..52d7b8d99170 100644 --- a/drivers/gpio/gpio-msc313.c +++ b/drivers/gpio/gpio-msc313.c @@ -550,15 +550,12 @@ static struct irq_chip msc313_gpio_irqchip = { * so we need to provide the fwspec. Essentially gpiochip_populate_parent_fwspec_twocell * that puts GIC_SPI into the first cell. */ -static void *msc313_gpio_populate_parent_fwspec(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type) +static int msc313_gpio_populate_parent_fwspec(struct gpio_chip *gc, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { - struct irq_fwspec *fwspec; - - fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 3; @@ -566,7 +563,7 @@ static void *msc313_gpio_populate_parent_fwspec(struct gpio_chip *gc, fwspec->param[1] = parent_hwirq; fwspec->param[2] = parent_type; - return fwspec; + return 0; } static int msc313e_gpio_child_to_parent_hwirq(struct gpio_chip *chip, diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index ff2d2a1f9c73..e4fb4cb38a0f 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -443,15 +443,12 @@ static int tegra_gpio_child_to_parent_hwirq(struct gpio_chip *chip, return 0; } -static void *tegra_gpio_populate_parent_fwspec(struct gpio_chip *chip, - unsigned int parent_hwirq, - unsigned int parent_type) +static int tegra_gpio_populate_parent_fwspec(struct gpio_chip *chip, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { - struct irq_fwspec *fwspec; - - fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = chip->irq.parent_domain->fwnode; fwspec->param_count = 3; @@ -459,7 +456,7 @@ static void *tegra_gpio_populate_parent_fwspec(struct gpio_chip *chip, fwspec->param[1] = parent_hwirq; fwspec->param[2] = parent_type; - return fwspec; + return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index de28a68daea0..54d9fa7da9c1 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -621,16 +621,13 @@ static int tegra186_gpio_irq_domain_translate(struct irq_domain *domain, return 0; } -static void *tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip, - unsigned int parent_hwirq, - unsigned int parent_type) +static int tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { struct tegra_gpio *gpio = gpiochip_get_data(chip); - struct irq_fwspec *fwspec; - - fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL); - if 
(!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = chip->irq.parent_domain->fwnode; fwspec->param_count = 3; @@ -638,7 +635,7 @@ static void *tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip, fwspec->param[1] = parent_hwirq; fwspec->param[2] = parent_type; - return fwspec; + return 0; } static int tegra186_gpio_child_to_parent_hwirq(struct gpio_chip *chip, diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index 9f66deab46ea..cc62c6e64103 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c @@ -15,8 +15,6 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/spinlock.h> -#include <asm-generic/msi.h> - #define GPIO_RX_DAT 0x0 #define GPIO_TX_SET 0x8 @@ -408,18 +406,15 @@ static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc, return 0; } -static void *thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip, - unsigned int parent_hwirq, - unsigned int parent_type) +static int thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { - msi_alloc_info_t *info; - - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return NULL; + msi_alloc_info_t *info = &gfwspec->msiinfo; info->hwirq = parent_hwirq; - return info; + return 0; } static int thunderx_gpio_probe(struct pci_dev *pdev, diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c index e6534ea1eaa7..5e108ba9956a 100644 --- a/drivers/gpio/gpio-visconti.c +++ b/drivers/gpio/gpio-visconti.c @@ -103,15 +103,12 @@ static int visconti_gpio_child_to_parent_hwirq(struct gpio_chip *gc, return -EINVAL; } -static void *visconti_gpio_populate_parent_fwspec(struct gpio_chip *chip, - unsigned int parent_hwirq, - unsigned int parent_type) +static int visconti_gpio_populate_parent_fwspec(struct gpio_chip *chip, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { - struct irq_fwspec *fwspec; - - fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = chip->irq.parent_domain->fwnode; fwspec->param_count = 3; @@ -119,7 +116,7 @@ static void *visconti_gpio_populate_parent_fwspec(struct gpio_chip *chip, fwspec->param[1] = parent_hwirq; fwspec->param[2] = parent_type; - return fwspec; + return 0; } static int visconti_gpio_probe(struct platform_device *pdev) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 9535f48e18d1..68d9f95d7799 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1107,7 +1107,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d, irq_hw_number_t hwirq; unsigned int type = IRQ_TYPE_NONE; struct irq_fwspec *fwspec = data; - void *parent_arg; + union gpio_irq_fwspec gpio_parent_fwspec = {}; unsigned int parent_hwirq; unsigned int parent_type; struct gpio_irq_chip *girq = &gc->irq; @@ -1147,14 +1147,15 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d, irq_set_probe(irq); /* This parent only handles asserted level IRQs */ - parent_arg = girq->populate_parent_alloc_arg(gc, parent_hwirq, parent_type); - if (!parent_arg) - return -ENOMEM; + ret = girq->populate_parent_alloc_arg(gc, &gpio_parent_fwspec, + parent_hwirq, parent_type); + if (ret) + return ret; chip_dbg(gc, "alloc_irqs_parent for %d parent hwirq %d\n", irq, parent_hwirq); irq_set_lockdep_class(irq, gc->irq.lock_key, 
gc->irq.request_key); - ret = irq_domain_alloc_irqs_parent(d, irq, 1, parent_arg); + ret = irq_domain_alloc_irqs_parent(d, irq, 1, &gpio_parent_fwspec); /* * If the parent irqdomain is msi, the interrupts have already * been allocated, so the EEXIST is good. @@ -1166,7 +1167,6 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d, "failed to allocate parent hwirq %d for hwirq %lu\n", parent_hwirq, hwirq); - kfree(parent_arg); return ret; } @@ -1181,15 +1181,18 @@ static void gpiochip_hierarchy_setup_domain_ops(struct irq_domain_ops *ops) ops->activate = gpiochip_irq_domain_activate; ops->deactivate = gpiochip_irq_domain_deactivate; ops->alloc = gpiochip_hierarchy_irq_domain_alloc; - ops->free = irq_domain_free_irqs_common; /* - * We only allow overriding the translate() function for + * We only allow overriding the translate() and free() functions for * hierarchical chips, and this should only be done if the user - * really need something other than 1:1 translation. + * really needs something other than 1:1 translation for the translate() + * callback, or wants free() to release any resources which were + * allocated during other callbacks, for example populate_parent_alloc_arg. */ if (!ops->translate) ops->translate = gpiochip_hierarchy_irq_domain_translate; + if (!ops->free) + ops->free = irq_domain_free_irqs_common; } static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc) @@ -1230,34 +1233,28 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc) return !!gc->irq.parent_domain; } -void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type) +int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { - struct irq_fwspec *fwspec; - - fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 2; fwspec->param[0] = parent_hwirq; fwspec->param[1] = parent_type; - return fwspec; + return 0; } EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell); -void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type) +int gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { - struct irq_fwspec *fwspec; - - fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 4; @@ -1266,7 +1263,7 @@ void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, fwspec->param[2] = 0; fwspec->param[3] = parent_type; - return fwspec; + return 0; } EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_fourcell); diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 0ba0598eba20..ec6771e87e73 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -6,7 +6,7 @@ config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y select SND_HDA_COMPONENT if SND_HDA_CORE - select DRM_AMD_DC_DCN if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) + select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) help Choose this option if you want to use the
new display engine support for AMDGPU. This adds required support for Vega and diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index ed25168619fc..dc7d2e5b16c8 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -552,8 +552,7 @@ EXPORT_SYMBOL(drm_release_noglobal); * Since events are used by the KMS API for vblank and page flip completion this * means all modern display drivers must use it. * - * @offset is ignored, DRM events are read like a pipe. Therefore drivers also - * must set the &file_operation.llseek to no_llseek(). Polling support is + * @offset is ignored, DRM events are read like a pipe. Polling support is * provided by drm_poll(). * * This function will only ever read a full event. Therefore userspace must diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 094f06b4ce33..8423df021b71 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -216,8 +216,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, * However...! * * The mmu-notifier can be invalidated for a - * migrate_page, that is alreadying holding the lock - * on the page. Such a try_to_unmap() will result + * migrate_folio, that is already holding the lock + * on the folio. Such a try_to_unmap() will result * in us calling put_pages() and so recursively try * to lock the page. We avoid that deadlock with * a trylock_page() and in exchange we risk missing diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 1431f1e9dbee..04e435bce79b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -201,6 +201,8 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine); int intel_engine_stop_cs(struct intel_engine_cs *engine); void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine); +void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine); + void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask); u64 intel_engine_get_active_head(const struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 14c6ddbbfde8..5b6ce10cb158 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1282,10 +1282,10 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine, intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING)); /* - * Wa_22011802037 : gen12, Prior to doing a reset, ensure CS is + * Wa_22011802037 : gen11, gen12, Prior to doing a reset, ensure CS is * stopped, set ring stop bit and prefetch disable bit to halt CS */ - if (GRAPHICS_VER(engine->i915) == 12) + if (IS_GRAPHICS_VER(engine->i915, 11, 12)) intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base), _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE)); @@ -1308,6 +1308,18 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine) return -ENODEV; ENGINE_TRACE(engine, "\n"); + /* + * TODO: Find out why occasionally stopping the CS times out. Seen + * especially with gem_eio tests. + * + * Occasionally trying to stop the cs times out, but does not adversely + * affect functionality. The timeout is set as a config parameter that + * defaults to 100ms. In most cases the follow-up operation is to wait + * for pending MI_FORCE_WAKES.
The assumption is that this timeout is + * sufficient for any pending MI_FORCEWAKEs to complete. Once root + * caused, the caller must check and handle the return from this + * function. + */ if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) { ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n", @@ -1334,6 +1346,78 @@ void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine) ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); } +static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine) +{ + static const i915_reg_t _reg[I915_NUM_ENGINES] = { + [RCS0] = MSG_IDLE_CS, + [BCS0] = MSG_IDLE_BCS, + [VCS0] = MSG_IDLE_VCS0, + [VCS1] = MSG_IDLE_VCS1, + [VCS2] = MSG_IDLE_VCS2, + [VCS3] = MSG_IDLE_VCS3, + [VCS4] = MSG_IDLE_VCS4, + [VCS5] = MSG_IDLE_VCS5, + [VCS6] = MSG_IDLE_VCS6, + [VCS7] = MSG_IDLE_VCS7, + [VECS0] = MSG_IDLE_VECS0, + [VECS1] = MSG_IDLE_VECS1, + [VECS2] = MSG_IDLE_VECS2, + [VECS3] = MSG_IDLE_VECS3, + [CCS0] = MSG_IDLE_CS, + [CCS1] = MSG_IDLE_CS, + [CCS2] = MSG_IDLE_CS, + [CCS3] = MSG_IDLE_CS, + }; + u32 val; + + if (!_reg[engine->id].reg) { + drm_err(&engine->i915->drm, + "MSG IDLE undefined for engine id %u\n", engine->id); + return 0; + } + + val = intel_uncore_read(engine->uncore, _reg[engine->id]); + + /* bits[29:25] & bits[13:9] >> shift */ + return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT; +} + +static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask) +{ + int ret; + + /* Ensure GPM receives fw up/down after CS is stopped */ + udelay(1); + + /* Wait for forcewake request to complete in GPM */ + ret = __intel_wait_for_register_fw(gt->uncore, + GEN9_PWRGT_DOMAIN_STATUS, + fw_mask, fw_mask, 5000, 0, NULL); + + /* Ensure CS receives fw ack from GPM */ + udelay(1); + + if (ret) + GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret); +} + +/* + * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any + * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The + * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the + * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we + * are concerned only with the gt reset here, we use a logical OR of pending + * forcewakeups from all reset domains and then wait for them to complete by + * querying PWRGT_DOMAIN_STATUS. 
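A worked example of the decode in __cs_pending_mi_force_wakes() above; the GENMASK(13, 9) and shift-by-9 values are inferred from the bits[29:25]/bits[13:9] comment rather than quoted from the i915 headers:

/* Domain 0: mask-enable bit 25 and pending-status bit 9 both set. */
u32 val = BIT(25) | BIT(9);	/* 0x02000200 */

/* val >> 16 aligns enable bits[29:25] over status bits[13:9], so the
 * AND keeps only those status bits whose enable bit is set.
 */
u32 pending = (val & (val >> 16) & GENMASK(13, 9)) >> 9;

/* pending == 0x1: domain 0 has an outstanding MI_FORCE_WAKE. */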
+ */ +void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine) +{ + u32 fw_pending = __cs_pending_mi_force_wakes(engine); + + if (fw_pending) + __gpm_wait_for_fw_complete(engine->gt, fw_pending); +} + static u32 read_subslice_reg(const struct intel_engine_cs *engine, int slice, int subslice, i915_reg_t reg) diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index 2b0266cab66b..0627fa10d2dc 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -2968,6 +2968,13 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine) ring_set_paused(engine, 1); intel_engine_stop_cs(engine); + /* + * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need + * to wait for any pending mi force wakeups + */ + if (IS_GRAPHICS_VER(engine->i915, 11, 12)) + intel_engine_wait_for_pending_mi_fw(engine); + engine->execlists.reset_ccid = active_ccid(engine); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 2c4ad4a65089..8c6885f43d1a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -310,8 +310,8 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc) if (IS_DG2(gt->i915)) flags |= GUC_WA_DUAL_QUEUE; - /* Wa_22011802037: graphics version 12 */ - if (GRAPHICS_VER(gt->i915) == 12) + /* Wa_22011802037: graphics version 11/12 */ + if (IS_GRAPHICS_VER(gt->i915, 11, 12)) flags |= GUC_WA_PRE_PARSER; /* Wa_16011777198:dg2 */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 9ffb343d0f79..2d9f5f1c79d3 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1578,87 +1578,18 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub) lrc_update_regs(ce, engine, head); } -static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine) -{ - static const i915_reg_t _reg[I915_NUM_ENGINES] = { - [RCS0] = MSG_IDLE_CS, - [BCS0] = MSG_IDLE_BCS, - [VCS0] = MSG_IDLE_VCS0, - [VCS1] = MSG_IDLE_VCS1, - [VCS2] = MSG_IDLE_VCS2, - [VCS3] = MSG_IDLE_VCS3, - [VCS4] = MSG_IDLE_VCS4, - [VCS5] = MSG_IDLE_VCS5, - [VCS6] = MSG_IDLE_VCS6, - [VCS7] = MSG_IDLE_VCS7, - [VECS0] = MSG_IDLE_VECS0, - [VECS1] = MSG_IDLE_VECS1, - [VECS2] = MSG_IDLE_VECS2, - [VECS3] = MSG_IDLE_VECS3, - [CCS0] = MSG_IDLE_CS, - [CCS1] = MSG_IDLE_CS, - [CCS2] = MSG_IDLE_CS, - [CCS3] = MSG_IDLE_CS, - }; - u32 val; - - if (!_reg[engine->id].reg) - return 0; - - val = intel_uncore_read(engine->uncore, _reg[engine->id]); - - /* bits[29:25] & bits[13:9] >> shift */ - return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT; -} - -static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask) -{ - int ret; - - /* Ensure GPM receives fw up/down after CS is stopped */ - udelay(1); - - /* Wait for forcewake request to complete in GPM */ - ret = __intel_wait_for_register_fw(gt->uncore, - GEN9_PWRGT_DOMAIN_STATUS, - fw_mask, fw_mask, 5000, 0, NULL); - - /* Ensure CS receives fw ack from GPM */ - udelay(1); - - if (ret) - GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret); -} - -/* - * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any - * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. 
The - * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the - * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we - * are concerned only with the gt reset here, we use a logical OR of pending - * forcewakeups from all reset domains and then wait for them to complete by - * querying PWRGT_DOMAIN_STATUS. - */ static void guc_engine_reset_prepare(struct intel_engine_cs *engine) { - u32 fw_pending; - - if (GRAPHICS_VER(engine->i915) != 12) + if (!IS_GRAPHICS_VER(engine->i915, 11, 12)) return; - /* - * Wa_22011802037 - * TODO: Occasionally trying to stop the cs times out, but does not - * adversely affect functionality. The timeout is set as a config - * parameter that defaults to 100ms. Assuming that this timeout is - * sufficient for any pending MI_FORCEWAKEs to complete, ignore the - * timeout returned here until it is root caused. - */ intel_engine_stop_cs(engine); - fw_pending = __cs_pending_mi_force_wakes(engine); - if (fw_pending) - __gpm_wait_for_fw_complete(engine->gt, fw_pending); + /* + * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need + * to wait for any pending mi force wakeups + */ + intel_engine_wait_for_pending_mi_fw(engine); } static void guc_reset_nop(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 7ba66ad68a8a..16356611b5b9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -680,7 +680,11 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm, goto out_free_dma; for (i = 0; i < npages; i += max) { - args.end = start + (max << PAGE_SHIFT); + if (args.start + (max << PAGE_SHIFT) > end) + args.end = end; + else + args.end = args.start + (max << PAGE_SHIFT); + ret = migrate_vma_setup(&args); if (ret) goto out_free_pfns; diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 768242a78e2b..5422363690e7 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -627,7 +627,7 @@ static const struct drm_connector_funcs simpledrm_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int +static enum drm_mode_status simpledrm_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe, const struct drm_display_mode *mode) { diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 590d3d550acb..e70d9614bec2 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -100,6 +100,7 @@ config SENSORS_AD7418 config SENSORS_ADM1021 tristate "Analog Devices ADM1021 and compatibles" depends on I2C + depends on SENSORS_LM90=n help If you say yes here you get support for Analog Devices ADM1021 and ADM1023 sensor chips and clones: Maxim MAX1617 and MAX1617A, @@ -256,13 +257,13 @@ config SENSORS_AHT10 will be called aht10. config SENSORS_AQUACOMPUTER_D5NEXT - tristate "Aquacomputer D5 Next, Octo, Farbwerk, and Farbwerk 360" + tristate "Aquacomputer D5 Next, Octo, Quadro, Farbwerk, and Farbwerk 360" depends on USB_HID select CRC16 help If you say yes here you get support for sensors and fans of - the Aquacomputer D5 Next watercooling pump, Octo fan - controller, Farbwerk and Farbwerk 360 RGB controllers, where + the Aquacomputer D5 Next watercooling pump, Octo and Quadro fan + controllers, Farbwerk and Farbwerk 360 RGB controllers, where available. This driver can also be built as a module.
If so, the module @@ -381,7 +382,7 @@ config SENSORS_ARM_SCPI config SENSORS_ASB100 tristate "Asus ASB100 Bach" - depends on X86 && I2C + depends on (X86 || COMPILE_TEST) && I2C select HWMON_VID help If you say yes here you get support for the ASB100 Bach sensor @@ -626,7 +627,7 @@ config SENSORS_MC13783_ADC config SENSORS_FSCHMD tristate "Fujitsu Siemens Computers sensor chips" - depends on X86 && I2C + depends on (X86 || COMPILE_TEST) && I2C help If you say yes here you get support for the following Fujitsu Siemens Computers (FSC) sensor chips: Poseidon, Scylla, Hermes, @@ -1102,6 +1103,7 @@ config SENSORS_MAX6639 config SENSORS_MAX6642 tristate "Maxim MAX6642 sensor chip" depends on I2C + depends on SENSORS_LM90=n help If you say yes here you get support for MAX6642 sensor chip. MAX6642 is a SMBus-Compatible Remote/Local Temperature Sensor @@ -1357,12 +1359,15 @@ config SENSORS_LM90 tristate "National Semiconductor LM90 and compatibles" depends on I2C help - If you say yes here you get support for National Semiconductor LM90, - LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A, - Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6654, MAX6657, MAX6658, - MAX6659, MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, - ON Semiconductor NCT1008, Winbond/Nuvoton W83L771W/G/AWG/ASG, - Philips SA56004, GMT G781, Texas Instruments TMP451 and TMP461 + If you say yes here you get support for National Semiconductor LM84, + LM90, LM86, LM89 and LM99, Analog Devices ADM1020, ADM1021, ADM1021A, + ADM1023, ADM1032, ADT7461, ADT7461A, ADT7481, ADT7482, and ADT7483A, + Maxim MAX1617, MAX6642, MAX6646, MAX6647, MAX6648, MAX6649, MAX6654, + MAX6657, MAX6658, MAX6659, MAX6680, MAX6681, MAX6692, MAX6695, + MAX6696, + ON Semiconductor NCT1008, NCT210, NCT72, NCT214, NCT218, + Winbond/Nuvoton W83L771W/G/AWG/ASG, + Philips NE1618, SA56004, GMT G781, Texas Instruments TMP451 and TMP461 sensor chips. This driver can also be built as a module. If so, the module diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c index a0e69f7ece36..66430553cc45 100644 --- a/drivers/hwmon/aquacomputer_d5next.c +++ b/drivers/hwmon/aquacomputer_d5next.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * hwmon driver for Aquacomputer devices (D5 Next, Farbwerk, Farbwerk 360, Octo) + * hwmon driver for Aquacomputer devices (D5 Next, Farbwerk, Farbwerk 360, Octo, + * Quadro) * * Aquacomputer devices send HID reports (with ID 0x01) every second to report * sensor values.
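Before the per-device tables below, a hedged sketch of the unit handling behind the shared temperature decode this refactor introduces; the sentinel and scaling come from the AQC_* constants defined in the next hunk, and raw readings are big-endian centidegrees:

int raw = get_unaligned_be16(data + priv->temp_sensor_start_offset +
			     i * AQC_TEMP_SENSOR_SIZE);

if (raw == AQC_TEMP_SENSOR_DISCONNECTED)	/* 0x7FFF: probe unplugged */
	priv->temp_input[i] = -ENODATA;
else
	priv->temp_input[i] = raw * 10;		/* e.g. 2748 -> 27480 m°C = 27.48 °C */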
@@ -21,17 +22,19 @@ #define USB_VENDOR_ID_AQUACOMPUTER 0x0c70 #define USB_PRODUCT_ID_FARBWERK 0xf00a +#define USB_PRODUCT_ID_QUADRO 0xf00d #define USB_PRODUCT_ID_D5NEXT 0xf00e #define USB_PRODUCT_ID_FARBWERK360 0xf010 #define USB_PRODUCT_ID_OCTO 0xf011 -enum kinds { d5next, farbwerk, farbwerk360, octo }; +enum kinds { d5next, farbwerk, farbwerk360, octo, quadro }; static const char *const aqc_device_names[] = { [d5next] = "d5next", [farbwerk] = "farbwerk", [farbwerk360] = "farbwerk360", - [octo] = "octo" + [octo] = "octo", + [quadro] = "quadro" }; #define DRIVER_NAME "aquacomputer_d5next" @@ -54,60 +57,61 @@ static u8 secondary_ctrl_report[] = { 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x34, 0xC6 }; -/* Register offsets for the D5 Next pump */ -#define D5NEXT_POWER_CYCLES 24 - -#define D5NEXT_COOLANT_TEMP 87 - -#define D5NEXT_PUMP_SPEED 116 -#define D5NEXT_FAN_SPEED 103 - -#define D5NEXT_PUMP_POWER 114 -#define D5NEXT_FAN_POWER 101 - -#define D5NEXT_PUMP_VOLTAGE 110 -#define D5NEXT_FAN_VOLTAGE 97 -#define D5NEXT_5V_VOLTAGE 57 +/* Register offsets for all Aquacomputer devices */ +#define AQC_TEMP_SENSOR_SIZE 0x02 +#define AQC_TEMP_SENSOR_DISCONNECTED 0x7FFF +#define AQC_FAN_PERCENT_OFFSET 0x00 +#define AQC_FAN_VOLTAGE_OFFSET 0x02 +#define AQC_FAN_CURRENT_OFFSET 0x04 +#define AQC_FAN_POWER_OFFSET 0x06 +#define AQC_FAN_SPEED_OFFSET 0x08 -#define D5NEXT_PUMP_CURRENT 112 -#define D5NEXT_FAN_CURRENT 99 +/* Register offsets for the D5 Next pump */ +#define D5NEXT_POWER_CYCLES 0x18 +#define D5NEXT_COOLANT_TEMP 0x57 +#define D5NEXT_NUM_FANS 2 +#define D5NEXT_NUM_SENSORS 1 +#define D5NEXT_PUMP_OFFSET 0x6c +#define D5NEXT_FAN_OFFSET 0x5f +#define D5NEXT_5V_VOLTAGE 0x39 +#define D5NEXT_12V_VOLTAGE 0x37 +#define D5NEXT_CTRL_REPORT_SIZE 0x329 +static u8 d5next_sensor_fan_offsets[] = { D5NEXT_PUMP_OFFSET, D5NEXT_FAN_OFFSET }; + +/* Pump and fan speed registers in D5 Next control report (from 0-100%) */ +static u16 d5next_ctrl_fan_offsets[] = { 0x97, 0x42 }; /* Register offsets for the Farbwerk RGB controller */ #define FARBWERK_NUM_SENSORS 4 #define FARBWERK_SENSOR_START 0x2f -#define FARBWERK_SENSOR_SIZE 0x02 -#define FARBWERK_SENSOR_DISCONNECTED 0x7FFF /* Register offsets for the Farbwerk 360 RGB controller */ #define FARBWERK360_NUM_SENSORS 4 #define FARBWERK360_SENSOR_START 0x32 -#define FARBWERK360_SENSOR_SIZE 0x02 -#define FARBWERK360_SENSOR_DISCONNECTED 0x7FFF /* Register offsets for the Octo fan controller */ #define OCTO_POWER_CYCLES 0x18 #define OCTO_NUM_FANS 8 -#define OCTO_FAN_PERCENT_OFFSET 0x00 -#define OCTO_FAN_VOLTAGE_OFFSET 0x02 -#define OCTO_FAN_CURRENT_OFFSET 0x04 -#define OCTO_FAN_POWER_OFFSET 0x06 -#define OCTO_FAN_SPEED_OFFSET 0x08 - -static u8 octo_sensor_fan_offsets[] = { 0x7D, 0x8A, 0x97, 0xA4, 0xB1, 0xBE, 0xCB, 0xD8 }; - #define OCTO_NUM_SENSORS 4 #define OCTO_SENSOR_START 0x3D -#define OCTO_SENSOR_SIZE 0x02 -#define OCTO_SENSOR_DISCONNECTED 0x7FFF - -#define OCTO_CTRL_REPORT_SIZE 0x65F -#define OCTO_CTRL_REPORT_CHECKSUM_OFFSET 0x65D -#define OCTO_CTRL_REPORT_CHECKSUM_START 0x01 -#define OCTO_CTRL_REPORT_CHECKSUM_LENGTH 0x65C +#define OCTO_CTRL_REPORT_SIZE 0x65F +static u8 octo_sensor_fan_offsets[] = { 0x7D, 0x8A, 0x97, 0xA4, 0xB1, 0xBE, 0xCB, 0xD8 }; /* Fan speed registers in Octo control report (from 0-100%) */ static u16 octo_ctrl_fan_offsets[] = { 0x5B, 0xB0, 0x105, 0x15A, 0x1AF, 0x204, 0x259, 0x2AE }; +/* Register offsets for the Quadro fan controller */ +#define QUADRO_POWER_CYCLES 0x18 +#define QUADRO_NUM_FANS 4 +#define QUADRO_NUM_SENSORS 4 +#define 
QUADRO_SENSOR_START 0x34 +#define QUADRO_CTRL_REPORT_SIZE 0x3c1 +#define QUADRO_FLOW_SENSOR_OFFSET 0x6e +static u8 quadro_sensor_fan_offsets[] = { 0x70, 0x7D, 0x8A, 0x97 }; + +/* Fan speed registers in Quadro control report (from 0-100%) */ +static u16 quadro_ctrl_fan_offsets[] = { 0x36, 0x8b, 0xe0, 0x135 }; + /* Labels for D5 Next */ static const char *const label_d5next_temp[] = { "Coolant temp" @@ -126,7 +130,8 @@ static const char *const label_d5next_power[] = { static const char *const label_d5next_voltages[] = { "Pump voltage", "Fan voltage", - "+5V voltage" + "+5V voltage", + "+12V voltage" }; static const char *const label_d5next_current[] = { @@ -134,7 +139,7 @@ static const char *const label_d5next_current[] = { "Fan current" }; -/* Labels for Farbwerk, Farbwerk 360 and Octo temperature sensors */ +/* Labels for Farbwerk, Farbwerk 360, Octo and Quadro temperature sensors */ static const char *const label_temp_sensors[] = { "Sensor 1", "Sensor 2", @@ -142,7 +147,7 @@ static const char *const label_temp_sensors[] = { "Sensor 4" }; -/* Labels for Octo */ +/* Labels for Octo and Quadro (except speed) */ static const char *const label_fan_speed[] = { "Fan 1 speed", "Fan 2 speed", @@ -187,6 +192,15 @@ static const char *const label_fan_current[] = { "Fan 8 current" }; +/* Labels for Quadro fan speeds */ +static const char *const label_quadro_speeds[] = { + "Fan 1 speed", + "Fan 2 speed", + "Fan 3 speed", + "Fan 4 speed", + "Flow speed [dL/h]" +}; + struct aqc_data { struct hid_device *hdev; struct device *hwmon_dev; @@ -201,11 +215,19 @@ struct aqc_data { int checksum_length; int checksum_offset; + int num_fans; + u8 *fan_sensor_offsets; + u16 *fan_ctrl_offsets; + int num_temp_sensors; + int temp_sensor_start_offset; + u16 power_cycle_count_offset; + u8 flow_sensor_offset; + /* General info, same across all devices */ u32 serial_number[2]; u16 firmware_version; - /* How many times the device was powered on */ + /* How many times the device was powered on, if available */ u32 power_cycles; /* Sensor values */ @@ -323,56 +345,47 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3 switch (type) { case hwmon_temp: - switch (priv->kind) { - case d5next: - if (channel == 0) - return 0444; - break; - case farbwerk: - case farbwerk360: - case octo: + if (channel < priv->num_temp_sensors) return 0444; - default: - break; - } break; case hwmon_pwm: - switch (priv->kind) { - case octo: + if (priv->fan_ctrl_offsets && channel < priv->num_fans) { switch (attr) { case hwmon_pwm_input: return 0644; default: break; } - break; - default: - break; } break; case hwmon_fan: - case hwmon_power: - case hwmon_curr: switch (priv->kind) { - case d5next: - if (channel < 2) + case quadro: + /* Special case to support flow sensor */ + if (channel < priv->num_fans + 1) return 0444; break; - case octo: - return 0444; default: + if (channel < priv->num_fans) + return 0444; break; } break; + case hwmon_power: + case hwmon_curr: + if (channel < priv->num_fans) + return 0444; + break; case hwmon_in: switch (priv->kind) { case d5next: - if (channel < 3) + /* Special case to support +5V and +12V voltage sensors */ + if (channel < priv->num_fans + 2) return 0444; break; - case octo: - return 0444; default: + if (channel < priv->num_fans) + return 0444; break; } break; @@ -406,16 +419,12 @@ static int aqc_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, *val = priv->power_input[channel]; break; case hwmon_pwm: - switch (priv->kind) { - case octo: - ret = 
aqc_get_ctrl_val(priv, octo_ctrl_fan_offsets[channel]); + if (priv->fan_ctrl_offsets) { + ret = aqc_get_ctrl_val(priv, priv->fan_ctrl_offsets[channel]); if (ret < 0) return ret; *val = aqc_percent_to_pwm(ret); - break; - default: - break; } break; case hwmon_in: @@ -469,19 +478,15 @@ static int aqc_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, case hwmon_pwm: switch (attr) { case hwmon_pwm_input: - switch (priv->kind) { - case octo: + if (priv->fan_ctrl_offsets) { pwm_value = aqc_pwm_to_percent(val); if (pwm_value < 0) return pwm_value; - ret = aqc_set_ctrl_val(priv, octo_ctrl_fan_offsets[channel], + ret = aqc_set_ctrl_val(priv, priv->fan_ctrl_offsets[channel], pwm_value); if (ret < 0) return ret; - break; - default: - break; } break; default: @@ -576,76 +581,42 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8 priv->serial_number[1] = get_unaligned_be16(data + SERIAL_SECOND_PART); priv->firmware_version = get_unaligned_be16(data + FIRMWARE_VERSION); - /* Sensor readings */ - switch (priv->kind) { - case d5next: - priv->power_cycles = get_unaligned_be32(data + D5NEXT_POWER_CYCLES); - - priv->temp_input[0] = get_unaligned_be16(data + D5NEXT_COOLANT_TEMP) * 10; + /* Temperature sensor readings */ + for (i = 0; i < priv->num_temp_sensors; i++) { + sensor_value = get_unaligned_be16(data + + priv->temp_sensor_start_offset + + i * AQC_TEMP_SENSOR_SIZE); + if (sensor_value == AQC_TEMP_SENSOR_DISCONNECTED) + priv->temp_input[i] = -ENODATA; + else + priv->temp_input[i] = sensor_value * 10; + } - priv->speed_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_SPEED); - priv->speed_input[1] = get_unaligned_be16(data + D5NEXT_FAN_SPEED); + /* Fan speed and related readings */ + for (i = 0; i < priv->num_fans; i++) { + priv->speed_input[i] = + get_unaligned_be16(data + priv->fan_sensor_offsets[i] + AQC_FAN_SPEED_OFFSET); + priv->power_input[i] = + get_unaligned_be16(data + priv->fan_sensor_offsets[i] + + AQC_FAN_POWER_OFFSET) * 10000; + priv->voltage_input[i] = + get_unaligned_be16(data + priv->fan_sensor_offsets[i] + + AQC_FAN_VOLTAGE_OFFSET) * 10; + priv->current_input[i] = + get_unaligned_be16(data + priv->fan_sensor_offsets[i] + AQC_FAN_CURRENT_OFFSET); + } - priv->power_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_POWER) * 10000; - priv->power_input[1] = get_unaligned_be16(data + D5NEXT_FAN_POWER) * 10000; + if (priv->power_cycle_count_offset != 0) + priv->power_cycles = get_unaligned_be32(data + priv->power_cycle_count_offset); - priv->voltage_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_VOLTAGE) * 10; - priv->voltage_input[1] = get_unaligned_be16(data + D5NEXT_FAN_VOLTAGE) * 10; + /* Special-case sensor readings */ + switch (priv->kind) { + case d5next: priv->voltage_input[2] = get_unaligned_be16(data + D5NEXT_5V_VOLTAGE) * 10; - - priv->current_input[0] = get_unaligned_be16(data + D5NEXT_PUMP_CURRENT); - priv->current_input[1] = get_unaligned_be16(data + D5NEXT_FAN_CURRENT); - break; - case farbwerk: - /* Temperature sensor readings */ - for (i = 0; i < FARBWERK_NUM_SENSORS; i++) { - sensor_value = get_unaligned_be16(data + FARBWERK_SENSOR_START + - i * FARBWERK_SENSOR_SIZE); - if (sensor_value == FARBWERK_SENSOR_DISCONNECTED) - priv->temp_input[i] = -ENODATA; - else - priv->temp_input[i] = sensor_value * 10; - } - break; - case farbwerk360: - /* Temperature sensor readings */ - for (i = 0; i < FARBWERK360_NUM_SENSORS; i++) { - sensor_value = get_unaligned_be16(data + FARBWERK360_SENSOR_START + - i * FARBWERK360_SENSOR_SIZE); - if 
(sensor_value == FARBWERK360_SENSOR_DISCONNECTED) - priv->temp_input[i] = -ENODATA; - else - priv->temp_input[i] = sensor_value * 10; - } + priv->voltage_input[3] = get_unaligned_be16(data + D5NEXT_12V_VOLTAGE) * 10; break; - case octo: - priv->power_cycles = get_unaligned_be32(data + OCTO_POWER_CYCLES); - - /* Fan speed and related readings */ - for (i = 0; i < OCTO_NUM_FANS; i++) { - priv->speed_input[i] = - get_unaligned_be16(data + octo_sensor_fan_offsets[i] + - OCTO_FAN_SPEED_OFFSET); - priv->power_input[i] = - get_unaligned_be16(data + octo_sensor_fan_offsets[i] + - OCTO_FAN_POWER_OFFSET) * 10000; - priv->voltage_input[i] = - get_unaligned_be16(data + octo_sensor_fan_offsets[i] + - OCTO_FAN_VOLTAGE_OFFSET) * 10; - priv->current_input[i] = - get_unaligned_be16(data + octo_sensor_fan_offsets[i] + - OCTO_FAN_CURRENT_OFFSET); - } - - /* Temperature sensor readings */ - for (i = 0; i < OCTO_NUM_SENSORS; i++) { - sensor_value = get_unaligned_be16(data + OCTO_SENSOR_START + - i * OCTO_SENSOR_SIZE); - if (sensor_value == OCTO_SENSOR_DISCONNECTED) - priv->temp_input[i] = -ENODATA; - else - priv->temp_input[i] = sensor_value * 10; - } + case quadro: + priv->speed_input[4] = get_unaligned_be16(data + priv->flow_sensor_offset); break; default: break; @@ -699,14 +670,8 @@ static void aqc_debugfs_init(struct aqc_data *priv) debugfs_create_file("serial_number", 0444, priv->debugfs, priv, &serial_number_fops); debugfs_create_file("firmware_version", 0444, priv->debugfs, priv, &firmware_version_fops); - switch (priv->kind) { - case d5next: - case octo: + if (priv->power_cycle_count_offset != 0) debugfs_create_file("power_cycles", 0444, priv->debugfs, priv, &power_cycles_fops); - break; - default: - break; - } } #else @@ -747,6 +712,14 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id) case USB_PRODUCT_ID_D5NEXT: priv->kind = d5next; + priv->num_fans = D5NEXT_NUM_FANS; + priv->fan_sensor_offsets = d5next_sensor_fan_offsets; + priv->fan_ctrl_offsets = d5next_ctrl_fan_offsets; + priv->num_temp_sensors = D5NEXT_NUM_SENSORS; + priv->temp_sensor_start_offset = D5NEXT_COOLANT_TEMP; + priv->power_cycle_count_offset = D5NEXT_POWER_CYCLES; + priv->buffer_size = D5NEXT_CTRL_REPORT_SIZE; + priv->temp_label = label_d5next_temp; priv->speed_label = label_d5next_speeds; priv->power_label = label_d5next_power; @@ -756,19 +729,29 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id) case USB_PRODUCT_ID_FARBWERK: priv->kind = farbwerk; + priv->num_fans = 0; + priv->num_temp_sensors = FARBWERK_NUM_SENSORS; + priv->temp_sensor_start_offset = FARBWERK_SENSOR_START; priv->temp_label = label_temp_sensors; break; case USB_PRODUCT_ID_FARBWERK360: priv->kind = farbwerk360; + priv->num_fans = 0; + priv->num_temp_sensors = FARBWERK360_NUM_SENSORS; + priv->temp_sensor_start_offset = FARBWERK360_SENSOR_START; priv->temp_label = label_temp_sensors; break; case USB_PRODUCT_ID_OCTO: priv->kind = octo; + + priv->num_fans = OCTO_NUM_FANS; + priv->fan_sensor_offsets = octo_sensor_fan_offsets; + priv->fan_ctrl_offsets = octo_ctrl_fan_offsets; + priv->num_temp_sensors = OCTO_NUM_SENSORS; + priv->temp_sensor_start_offset = OCTO_SENSOR_START; + priv->power_cycle_count_offset = OCTO_POWER_CYCLES; priv->buffer_size = OCTO_CTRL_REPORT_SIZE; - priv->checksum_start = OCTO_CTRL_REPORT_CHECKSUM_START; - priv->checksum_length = OCTO_CTRL_REPORT_CHECKSUM_LENGTH; - priv->checksum_offset = OCTO_CTRL_REPORT_CHECKSUM_OFFSET; priv->temp_label = label_temp_sensors; priv->speed_label = 
label_fan_speed; @@ -776,10 +759,34 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id) priv->voltage_label = label_fan_voltage; priv->current_label = label_fan_current; break; + case USB_PRODUCT_ID_QUADRO: + priv->kind = quadro; + + priv->num_fans = QUADRO_NUM_FANS; + priv->fan_sensor_offsets = quadro_sensor_fan_offsets; + priv->fan_ctrl_offsets = quadro_ctrl_fan_offsets; + priv->num_temp_sensors = QUADRO_NUM_SENSORS; + priv->temp_sensor_start_offset = QUADRO_SENSOR_START; + priv->power_cycle_count_offset = QUADRO_POWER_CYCLES; + priv->buffer_size = QUADRO_CTRL_REPORT_SIZE; + priv->flow_sensor_offset = QUADRO_FLOW_SENSOR_OFFSET; + + priv->temp_label = label_temp_sensors; + priv->speed_label = label_quadro_speeds; + priv->power_label = label_fan_power; + priv->voltage_label = label_fan_voltage; + priv->current_label = label_fan_current; + break; default: break; } + if (priv->buffer_size != 0) { + priv->checksum_start = 0x01; + priv->checksum_length = priv->buffer_size - 3; + priv->checksum_offset = priv->buffer_size - 2; + } + priv->name = aqc_device_names[priv->kind]; priv->buffer = devm_kzalloc(&hdev->dev, priv->buffer_size, GFP_KERNEL); @@ -825,6 +832,7 @@ static const struct hid_device_id aqc_table[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK360) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_OCTO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_QUADRO) }, { } }; diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c index 3cb88d6fbec0..d11f674e3dc3 100644 --- a/drivers/hwmon/aspeed-pwm-tacho.c +++ b/drivers/hwmon/aspeed-pwm-tacho.c @@ -159,7 +159,7 @@ * 11: reserved. */ #define M_TACH_MODE 0x02 /* 10b */ -#define M_TACH_UNIT 0x0210 +#define M_TACH_UNIT 0x0420 #define INIT_FAN_CTRL 0xFF /* How long we sleep in us while waiting for an RPM result. 
*/ diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c index 3633ab691662..61a4684fc020 100644 --- a/drivers/hwmon/asus-ec-sensors.c +++ b/drivers/hwmon/asus-ec-sensors.c @@ -54,6 +54,10 @@ static char *mutex_path_override; /* ACPI mutex for locking access to the EC for the firmware */ #define ASUS_HW_ACCESS_MUTEX_ASMX "\\AMW0.ASMX" +#define ASUS_HW_ACCESS_MUTEX_RMTW_ASMX "\\RMTW.ASMX" + +#define ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0 "\\_SB_.PCI0.SBRG.SIO1.MUT0" + #define MAX_IDENTICAL_BOARD_VARIATIONS 3 /* Moniker for the ACPI global lock (':' is not allowed in ASL identifiers) */ @@ -119,6 +123,18 @@ enum ec_sensors { ec_sensor_temp_water_in, /* "Water_Out" temperature sensor reading [℃] */ ec_sensor_temp_water_out, + /* "Water_Block_In" temperature sensor reading [℃] */ + ec_sensor_temp_water_block_in, + /* "Water_Block_Out" temperature sensor reading [℃] */ + ec_sensor_temp_water_block_out, + /* "T_sensor_2" temperature sensor reading [℃] */ + ec_sensor_temp_t_sensor_2, + /* "Extra_1" temperature sensor reading [℃] */ + ec_sensor_temp_sensor_extra_1, + /* "Extra_2" temperature sensor reading [℃] */ + ec_sensor_temp_sensor_extra_2, + /* "Extra_3" temperature sensor reading [℃] */ + ec_sensor_temp_sensor_extra_3, }; #define SENSOR_TEMP_CHIPSET BIT(ec_sensor_temp_chipset) @@ -134,11 +150,19 @@ enum ec_sensors { #define SENSOR_CURR_CPU BIT(ec_sensor_curr_cpu) #define SENSOR_TEMP_WATER_IN BIT(ec_sensor_temp_water_in) #define SENSOR_TEMP_WATER_OUT BIT(ec_sensor_temp_water_out) +#define SENSOR_TEMP_WATER_BLOCK_IN BIT(ec_sensor_temp_water_block_in) +#define SENSOR_TEMP_WATER_BLOCK_OUT BIT(ec_sensor_temp_water_block_out) +#define SENSOR_TEMP_T_SENSOR_2 BIT(ec_sensor_temp_t_sensor_2) +#define SENSOR_TEMP_SENSOR_EXTRA_1 BIT(ec_sensor_temp_sensor_extra_1) +#define SENSOR_TEMP_SENSOR_EXTRA_2 BIT(ec_sensor_temp_sensor_extra_2) +#define SENSOR_TEMP_SENSOR_EXTRA_3 BIT(ec_sensor_temp_sensor_extra_3) enum board_family { family_unknown, family_amd_400_series, family_amd_500_series, + family_intel_300_series, + family_intel_600_series }; /* All the known sensors for ASUS EC controllers */ @@ -195,12 +219,53 @@ static const struct ec_sensor_info sensors_family_amd_500[] = { EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00), [ec_sensor_temp_water_out] = EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01), + [ec_sensor_temp_water_block_in] = + EC_SENSOR("Water_Block_In", hwmon_temp, 1, 0x01, 0x02), + [ec_sensor_temp_water_block_out] = + EC_SENSOR("Water_Block_Out", hwmon_temp, 1, 0x01, 0x03), + [ec_sensor_temp_sensor_extra_1] = + EC_SENSOR("Extra_1", hwmon_temp, 1, 0x01, 0x09), + [ec_sensor_temp_t_sensor_2] = + EC_SENSOR("T_sensor_2", hwmon_temp, 1, 0x01, 0x0a), + [ec_sensor_temp_sensor_extra_2] = + EC_SENSOR("Extra_2", hwmon_temp, 1, 0x01, 0x0b), + [ec_sensor_temp_sensor_extra_3] = + EC_SENSOR("Extra_3", hwmon_temp, 1, 0x01, 0x0c), +}; + +static const struct ec_sensor_info sensors_family_intel_300[] = { + [ec_sensor_temp_chipset] = + EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a), + [ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x3b), + [ec_sensor_temp_mb] = + EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x3c), + [ec_sensor_temp_t_sensor] = + EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d), + [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e), + [ec_sensor_fan_cpu_opt] = + EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0), + [ec_sensor_fan_vrm_hs] = EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2), + [ec_sensor_fan_water_flow] = + 
EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbc), + [ec_sensor_temp_water_in] = + EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00), + [ec_sensor_temp_water_out] = + EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01), +}; + +static const struct ec_sensor_info sensors_family_intel_600[] = { + [ec_sensor_temp_t_sensor] = + EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d), + [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e), }; /* Shortcuts for common combinations */ #define SENSOR_SET_TEMP_CHIPSET_CPU_MB \ (SENSOR_TEMP_CHIPSET | SENSOR_TEMP_CPU | SENSOR_TEMP_MB) #define SENSOR_SET_TEMP_WATER (SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT) +#define SENSOR_SET_WATER_BLOCK \ + (SENSOR_TEMP_WATER_BLOCK_IN | SENSOR_TEMP_WATER_BLOCK_OUT) + struct ec_board_info { const char *board_names[MAX_IDENTICAL_BOARD_VARIATIONS]; @@ -273,6 +338,18 @@ static const struct ec_board_info board_info[] = { .family = family_amd_500_series, }, { + .board_names = { + "ROG MAXIMUS XI HERO", + "ROG MAXIMUS XI HERO (WI-FI)", + }, + .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | + SENSOR_TEMP_T_SENSOR | + SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER | + SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW, + .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, + .family = family_intel_300_series, + }, + { .board_names = {"ROG CROSSHAIR VIII IMPACT"}, .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM | @@ -324,12 +401,31 @@ static const struct ec_board_info board_info[] = { }, { .board_names = {"ROG STRIX X570-I GAMING"}, - .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_FAN_VRM_HS | - SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU | - SENSOR_IN_CPU_CORE, + .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM | + SENSOR_TEMP_T_SENSOR | + SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET | + SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE, .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, .family = family_amd_500_series, }, + { + .board_names = {"ROG STRIX Z690-A GAMING WIFI D4"}, + .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM, + .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX, + .family = family_intel_600_series, + }, + { + .board_names = {"ROG ZENITH II EXTREME"}, + .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR | + SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER | + SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS | + SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE | + SENSOR_SET_WATER_BLOCK | + SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 | + SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3, + .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0, + .family = family_amd_500_series, + }, {} }; @@ -799,6 +895,12 @@ static int __init asus_ec_probe(struct platform_device *pdev) case family_amd_500_series: ec_data->sensors_info = sensors_family_amd_500; break; + case family_intel_300_series: + ec_data->sensors_info = sensors_family_intel_300; + break; + case family_intel_600_series: + ec_data->sensors_info = sensors_family_intel_600; + break; default: dev_err(dev, "Unknown board family: %d", ec_data->board_info->family); diff --git a/drivers/hwmon/asus_wmi_sensors.c b/drivers/hwmon/asus_wmi_sensors.c index 9e935e34c998..6e8a908171f0 100644 --- a/drivers/hwmon/asus_wmi_sensors.c +++ b/drivers/hwmon/asus_wmi_sensors.c @@ -514,22 +514,20 @@ static int asus_wmi_configure_sensor_setup(struct device *dev, int i, idx; int err; - temp_sensor = devm_kcalloc(dev, 1, sizeof(*temp_sensor), GFP_KERNEL); - if (!temp_sensor) - return -ENOMEM; - for (i = 0; i < sensor_data->wmi.sensor_count; i++) { - err = 
asus_wmi_sensor_info(i, temp_sensor); + struct asus_wmi_sensor_info sensor; + + err = asus_wmi_sensor_info(i, &sensor); if (err) return err; - switch (temp_sensor->data_type) { + switch (sensor.data_type) { case TEMPERATURE_C: case VOLTAGE: case CURRENT: case FAN_RPM: case WATER_FLOW: - type = asus_data_types[temp_sensor->data_type]; + type = asus_data_types[sensor.data_type]; if (!nr_count[type]) nr_types++; nr_count[type]++; diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 071aa6f4e109..7f8d95dd2717 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -130,7 +130,7 @@ struct smm_regs { unsigned int edx; unsigned int esi; unsigned int edi; -} __packed; +}; static const char * const temp_labels[] = { "CPU", @@ -175,77 +175,35 @@ static int i8k_smm_func(void *par) struct smm_regs *regs = par; int eax = regs->eax; int ebx = regs->ebx; + unsigned char carry; long long duration; - int rc; /* SMM requires CPU 0 */ if (smp_processor_id() != 0) return -EBUSY; -#if defined(CONFIG_X86_64) - asm volatile("pushq %%rax\n\t" - "movl 0(%%rax),%%edx\n\t" - "pushq %%rdx\n\t" - "movl 4(%%rax),%%ebx\n\t" - "movl 8(%%rax),%%ecx\n\t" - "movl 12(%%rax),%%edx\n\t" - "movl 16(%%rax),%%esi\n\t" - "movl 20(%%rax),%%edi\n\t" - "popq %%rax\n\t" - "out %%al,$0xb2\n\t" - "out %%al,$0x84\n\t" - "xchgq %%rax,(%%rsp)\n\t" - "movl %%ebx,4(%%rax)\n\t" - "movl %%ecx,8(%%rax)\n\t" - "movl %%edx,12(%%rax)\n\t" - "movl %%esi,16(%%rax)\n\t" - "movl %%edi,20(%%rax)\n\t" - "popq %%rdx\n\t" - "movl %%edx,0(%%rax)\n\t" - "pushfq\n\t" - "popq %%rax\n\t" - "andl $1,%%eax\n" - : "=a"(rc) - : "a"(regs) - : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); -#else - asm volatile("pushl %%eax\n\t" - "movl 0(%%eax),%%edx\n\t" - "push %%edx\n\t" - "movl 4(%%eax),%%ebx\n\t" - "movl 8(%%eax),%%ecx\n\t" - "movl 12(%%eax),%%edx\n\t" - "movl 16(%%eax),%%esi\n\t" - "movl 20(%%eax),%%edi\n\t" - "popl %%eax\n\t" - "out %%al,$0xb2\n\t" - "out %%al,$0x84\n\t" - "xchgl %%eax,(%%esp)\n\t" - "movl %%ebx,4(%%eax)\n\t" - "movl %%ecx,8(%%eax)\n\t" - "movl %%edx,12(%%eax)\n\t" - "movl %%esi,16(%%eax)\n\t" - "movl %%edi,20(%%eax)\n\t" - "popl %%edx\n\t" - "movl %%edx,0(%%eax)\n\t" - "lahf\n\t" - "shrl $8,%%eax\n\t" - "andl $1,%%eax\n" - : "=a"(rc) - : "a"(regs) - : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); -#endif - if (rc != 0 || (regs->eax & 0xffff) == 0xffff || regs->eax == eax) - rc = -EINVAL; + asm volatile("out %%al,$0xb2\n\t" + "out %%al,$0x84\n\t" + "setc %0\n" + : "=mr" (carry), + "+a" (regs->eax), + "+b" (regs->ebx), + "+c" (regs->ecx), + "+d" (regs->edx), + "+S" (regs->esi), + "+D" (regs->edi)); duration = ktime_us_delta(ktime_get(), calltime); - pr_debug("smm(0x%.4x 0x%.4x) = 0x%.4x (took %7lld usecs)\n", eax, ebx, - (rc ? 
0xffff : regs->eax & 0xffff), duration); + pr_debug("smm(0x%.4x 0x%.4x) = 0x%.4x carry: %d (took %7lld usecs)\n", + eax, ebx, regs->eax & 0xffff, carry, duration); if (duration > DELL_SMM_MAX_DURATION) pr_warn_once("SMM call took %lld usecs!\n", duration); - return rc; + if (carry || (regs->eax & 0xffff) == 0xffff || regs->eax == eax) + return -EINVAL; + + return 0; } /* @@ -1132,6 +1090,13 @@ static const struct i8k_config_data i8k_config_data[] __initconst = { static const struct dmi_system_id i8k_dmi_table[] __initconst = { { + .ident = "Dell G5 5590", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G5 5590"), + }, + }, + { .ident = "Dell Inspiron", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer"), @@ -1365,6 +1330,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = { }, .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3], }, + { + .ident = "Dell XPS 13 7390", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 13 7390"), + }, + .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3], + }, { } }; diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c index 1eb37106a220..5bac2b0fc7bb 100644 --- a/drivers/hwmon/drivetemp.c +++ b/drivers/hwmon/drivetemp.c @@ -621,3 +621,4 @@ module_exit(drivetemp_exit); MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>"); MODULE_DESCRIPTION("Hard drive temperature monitor"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:drivetemp"); diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 6830e029995d..19b6c643059a 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -49,6 +49,7 @@ #define SIO_F81768D_ID 0x1210 /* Chipset ID */ #define SIO_F81865_ID 0x0704 /* Chipset ID */ #define SIO_F81866_ID 0x1010 /* Chipset ID */ +#define SIO_F71858AD_ID 0x0903 /* Chipset ID */ #define SIO_F81966_ID 0x1502 /* Chipset ID */ #define REGION_LENGTH 8 @@ -2638,6 +2639,7 @@ static int __init f71882fg_find(int sioaddr, struct f71882fg_sio_data *sio_data) sio_data->type = f71808a; break; case SIO_F71858_ID: + case SIO_F71858AD_ID: sio_data->type = f71858fg; break; case SIO_F71862_ID: diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c index 1fe37418ff46..d64be48f1ef6 100644 --- a/drivers/hwmon/gsc-hwmon.c +++ b/drivers/hwmon/gsc-hwmon.c @@ -269,10 +269,13 @@ gsc_hwmon_get_devtree_pdata(struct device *dev) /* fan controller base address */ fan = of_find_compatible_node(dev->parent->of_node, NULL, "gw,gsc-fan"); if (fan && of_property_read_u32(fan, "reg", &pdata->fan_base)) { + of_node_put(fan); dev_err(dev, "fan node without base\n"); return ERR_PTR(-EINVAL); } + of_node_put(fan); + /* allocate structures for channels and count instances of each type */ device_for_each_child_node(dev, child) { if (fwnode_property_read_string(child, "label", &ch->name)) { diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 4e239bd75b1d..5a9d47a229e4 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -428,6 +428,10 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) data->ccd_offset = 0x154; k10temp_get_ccd_support(pdev, data, 8); break; + case 0xa0 ... 
0xaf: + data->ccd_offset = 0x300; + k10temp_get_ccd_support(pdev, data, 8); + break; } } else if (boot_cpu_data.x86 == 0x19) { data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; @@ -445,6 +449,11 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) data->ccd_offset = 0x300; k10temp_get_ccd_support(pdev, data, 8); break; + case 0x60 ... 0x6f: + case 0x70 ... 0x7f: + data->ccd_offset = 0x308; + k10temp_get_ccd_support(pdev, data, 8); + break; case 0x10 ... 0x1f: case 0xa0 ... 0xaf: data->ccd_offset = 0x300; @@ -489,10 +498,13 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, {} }; diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h index a398171162a8..b803ada5e3c9 100644 --- a/drivers/hwmon/lm75.h +++ b/drivers/hwmon/lm75.h @@ -11,7 +11,8 @@ * which contains this code, we don't worry about the wasted space. */ -#include <linux/kernel.h> +#include <linux/minmax.h> +#include <linux/types.h> /* straight from the datasheet */ #define LM75_TEMP_MIN (-55000) diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 3820f0e61510..03d07da8c2dc 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -42,7 +42,8 @@ * accordingly, and is done during initialization. Extended precision is only * available at conversion rates of 1 Hz and slower. Note that extended * precision is not enabled by default, as this driver initializes all chips - * to 2 Hz by design. + * to 2 Hz by design. The driver also supports MAX6690, which is practically + * identical to MAX6654. * * This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and * MAX6692 chips made by Maxim. These are again similar to the LM86, @@ -64,6 +65,14 @@ * and extended mode. They are mostly compatible with LM90 except for a data * format difference for the temperature value registers. * + * This driver also supports ADT7481, ADT7482, and ADT7483 from Analog Devices + * / ON Semiconductor. The chips are similar to ADT7461 but support two external + * temperature sensors. + * + * This driver also supports NCT72, NCT214, and NCT218 from ON Semiconductor. + * The chips are similar to ADT7461/ADT7461A but have full PEC support + * (undocumented). + * * This driver also supports the SA56004 from Philips. This device is * pin-compatible with the LM86, the ED/EDP parts are also address-compatible. * @@ -75,23 +84,34 @@ * They are mostly compatible with ADT7461 except for local temperature * low byte register and max conversion rate. * + * This driver also supports MAX1617 and various clones such as G767 + * and NE1617. Such clones will be detected as MAX1617. + * + * This driver also supports NE1618 from Philips. It is similar to NE1617 + * but supports 11 bit external temperature values. 
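 + * As a rough worked example of that 11-bit format (an illustration of the + * usual LM90-style encoding, not text from a datasheet): the value sits + * left-justified in the 16-bit high/low register pair, with 0.125 degrees C + * per LSB of the 11-bit reading, so a raw 0x1920 decodes to + * (0x1920 / 32) * 0.125 = 25.125 degrees C.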
+ * * Since the LM90 was the first chipset supported by this driver, most * comments will refer to this chipset, but are actually general and * concern all supported chipsets, unless mentioned otherwise. */ -#include <linux/module.h> +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/i2c.h> #include <linux/init.h> -#include <linux/slab.h> +#include <linux/interrupt.h> #include <linux/jiffies.h> -#include <linux/i2c.h> #include <linux/hwmon.h> -#include <linux/err.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/of_device.h> -#include <linux/sysfs.h> -#include <linux/interrupt.h> #include <linux/regulator/consumer.h> +#include <linux/slab.h> +#include <linux/workqueue.h> + +/* The maximum number of channels currently supported */ +#define MAX_CHANNELS 3 /* * Addresses to scan @@ -112,119 +132,131 @@ static const unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; -enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680, - max6646, w83l771, max6696, sa56004, g781, tmp451, tmp461, max6654 }; +enum chips { adm1023, adm1032, adt7461, adt7461a, adt7481, + g781, lm84, lm90, lm99, + max1617, max6642, max6646, max6648, max6654, max6657, max6659, max6680, max6696, + nct210, nct72, ne1618, sa56004, tmp451, tmp461, w83l771, +}; /* * The LM90 registers */ -#define LM90_REG_R_MAN_ID 0xFE -#define LM90_REG_R_CHIP_ID 0xFF -#define LM90_REG_R_CONFIG1 0x03 -#define LM90_REG_W_CONFIG1 0x09 -#define LM90_REG_R_CONFIG2 0xBF -#define LM90_REG_W_CONFIG2 0xBF -#define LM90_REG_R_CONVRATE 0x04 -#define LM90_REG_W_CONVRATE 0x0A -#define LM90_REG_R_STATUS 0x02 -#define LM90_REG_R_LOCAL_TEMP 0x00 -#define LM90_REG_R_LOCAL_HIGH 0x05 -#define LM90_REG_W_LOCAL_HIGH 0x0B -#define LM90_REG_R_LOCAL_LOW 0x06 -#define LM90_REG_W_LOCAL_LOW 0x0C -#define LM90_REG_R_LOCAL_CRIT 0x20 -#define LM90_REG_W_LOCAL_CRIT 0x20 -#define LM90_REG_R_REMOTE_TEMPH 0x01 -#define LM90_REG_R_REMOTE_TEMPL 0x10 -#define LM90_REG_R_REMOTE_OFFSH 0x11 -#define LM90_REG_W_REMOTE_OFFSH 0x11 -#define LM90_REG_R_REMOTE_OFFSL 0x12 -#define LM90_REG_W_REMOTE_OFFSL 0x12 -#define LM90_REG_R_REMOTE_HIGHH 0x07 -#define LM90_REG_W_REMOTE_HIGHH 0x0D -#define LM90_REG_R_REMOTE_HIGHL 0x13 -#define LM90_REG_W_REMOTE_HIGHL 0x13 -#define LM90_REG_R_REMOTE_LOWH 0x08 -#define LM90_REG_W_REMOTE_LOWH 0x0E -#define LM90_REG_R_REMOTE_LOWL 0x14 -#define LM90_REG_W_REMOTE_LOWL 0x14 -#define LM90_REG_R_REMOTE_CRIT 0x19 -#define LM90_REG_W_REMOTE_CRIT 0x19 -#define LM90_REG_R_TCRIT_HYST 0x21 -#define LM90_REG_W_TCRIT_HYST 0x21 +#define LM90_REG_MAN_ID 0xFE +#define LM90_REG_CHIP_ID 0xFF +#define LM90_REG_CONFIG1 0x03 +#define LM90_REG_CONFIG2 0xBF +#define LM90_REG_CONVRATE 0x04 +#define LM90_REG_STATUS 0x02 +#define LM90_REG_LOCAL_TEMP 0x00 +#define LM90_REG_LOCAL_HIGH 0x05 +#define LM90_REG_LOCAL_LOW 0x06 +#define LM90_REG_LOCAL_CRIT 0x20 +#define LM90_REG_REMOTE_TEMPH 0x01 +#define LM90_REG_REMOTE_TEMPL 0x10 +#define LM90_REG_REMOTE_OFFSH 0x11 +#define LM90_REG_REMOTE_OFFSL 0x12 +#define LM90_REG_REMOTE_HIGHH 0x07 +#define LM90_REG_REMOTE_HIGHL 0x13 +#define LM90_REG_REMOTE_LOWH 0x08 +#define LM90_REG_REMOTE_LOWL 0x14 +#define LM90_REG_REMOTE_CRIT 0x19 +#define LM90_REG_TCRIT_HYST 0x21 /* MAX6646/6647/6649/6654/6657/6658/6659/6695/6696 registers */ -#define MAX6657_REG_R_LOCAL_TEMPL 0x11 -#define MAX6696_REG_R_STATUS2 0x12 -#define MAX6659_REG_R_REMOTE_EMERG 0x16 -#define MAX6659_REG_W_REMOTE_EMERG 0x16 -#define 
MAX6659_REG_R_LOCAL_EMERG 0x17 -#define MAX6659_REG_W_LOCAL_EMERG 0x17 +#define MAX6657_REG_LOCAL_TEMPL 0x11 +#define MAX6696_REG_STATUS2 0x12 +#define MAX6659_REG_REMOTE_EMERG 0x16 +#define MAX6659_REG_LOCAL_EMERG 0x17 /* SA56004 registers */ -#define SA56004_REG_R_LOCAL_TEMPL 0x22 +#define SA56004_REG_LOCAL_TEMPL 0x22 #define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */ /* TMP451/TMP461 registers */ -#define TMP451_REG_R_LOCAL_TEMPL 0x15 +#define TMP451_REG_LOCAL_TEMPL 0x15 #define TMP451_REG_CONALERT 0x22 #define TMP461_REG_CHEN 0x16 #define TMP461_REG_DFC 0x24 -/* - * Device flags - */ -#define LM90_FLAG_ADT7461_EXT (1 << 0) /* ADT7461 extended mode */ +/* ADT7481 registers */ +#define ADT7481_REG_STATUS2 0x23 +#define ADT7481_REG_CONFIG2 0x24 + +#define ADT7481_REG_MAN_ID 0x3e +#define ADT7481_REG_CHIP_ID 0x3d + /* Device features */ -#define LM90_HAVE_OFFSET (1 << 1) /* temperature offset register */ -#define LM90_HAVE_REM_LIMIT_EXT (1 << 3) /* extended remote limit */ -#define LM90_HAVE_EMERGENCY (1 << 4) /* 3rd upper (emergency) limit */ -#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */ -#define LM90_HAVE_TEMP3 (1 << 6) /* 3rd temperature sensor */ -#define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */ -#define LM90_HAVE_EXTENDED_TEMP (1 << 8) /* extended temperature support*/ -#define LM90_PAUSE_FOR_CONFIG (1 << 9) /* Pause conversion for config */ -#define LM90_HAVE_CRIT (1 << 10)/* Chip supports CRIT/OVERT register */ -#define LM90_HAVE_CRIT_ALRM_SWP (1 << 11)/* critical alarm bits swapped */ +#define LM90_HAVE_EXTENDED_TEMP BIT(0) /* extended temperature support */ +#define LM90_HAVE_OFFSET BIT(1) /* temperature offset register */ +#define LM90_HAVE_UNSIGNED_TEMP BIT(2) /* temperatures are unsigned */ +#define LM90_HAVE_REM_LIMIT_EXT BIT(3) /* extended remote limit */ +#define LM90_HAVE_EMERGENCY BIT(4) /* 3rd upper (emergency) limit */ +#define LM90_HAVE_EMERGENCY_ALARM BIT(5)/* emergency alarm */ +#define LM90_HAVE_TEMP3 BIT(6) /* 3rd temperature sensor */ +#define LM90_HAVE_BROKEN_ALERT BIT(7) /* Broken alert */ +#define LM90_PAUSE_FOR_CONFIG BIT(8) /* Pause conversion for config */ +#define LM90_HAVE_CRIT BIT(9) /* Chip supports CRIT/OVERT register */ +#define LM90_HAVE_CRIT_ALRM_SWP BIT(10) /* critical alarm bits swapped */ +#define LM90_HAVE_PEC BIT(11) /* Chip supports PEC */ +#define LM90_HAVE_PARTIAL_PEC BIT(12) /* Partial PEC support (adm1032)*/ +#define LM90_HAVE_ALARMS BIT(13) /* Create 'alarms' attribute */ +#define LM90_HAVE_EXT_UNSIGNED BIT(14) /* extended unsigned temperature*/ +#define LM90_HAVE_LOW BIT(15) /* low limits */ +#define LM90_HAVE_CONVRATE BIT(16) /* conversion rate */ +#define LM90_HAVE_REMOTE_EXT BIT(17) /* extended remote temperature */ +#define LM90_HAVE_FAULTQUEUE BIT(18) /* configurable samples count */ /* LM90 status */ -#define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */ -#define LM90_STATUS_RTHRM (1 << 1) /* remote THERM limit tripped */ -#define LM90_STATUS_ROPEN (1 << 2) /* remote is an open circuit */ -#define LM90_STATUS_RLOW (1 << 3) /* remote low temp limit tripped */ -#define LM90_STATUS_RHIGH (1 << 4) /* remote high temp limit tripped */ -#define LM90_STATUS_LLOW (1 << 5) /* local low temp limit tripped */ -#define LM90_STATUS_LHIGH (1 << 6) /* local high temp limit tripped */ -#define LM90_STATUS_BUSY (1 << 7) /* conversion is ongoing */ - -#define MAX6696_STATUS2_R2THRM (1 << 1) /* remote2 THERM limit tripped */ -#define MAX6696_STATUS2_R2OPEN (1 << 2) /* remote2 is an open 
circuit */ -#define MAX6696_STATUS2_R2LOW (1 << 3) /* remote2 low temp limit tripped */ -#define MAX6696_STATUS2_R2HIGH (1 << 4) /* remote2 high temp limit tripped */ -#define MAX6696_STATUS2_ROT2 (1 << 5) /* remote emergency limit tripped */ -#define MAX6696_STATUS2_R2OT2 (1 << 6) /* remote2 emergency limit tripped */ -#define MAX6696_STATUS2_LOT2 (1 << 7) /* local emergency limit tripped */ +#define LM90_STATUS_LTHRM BIT(0) /* local THERM limit tripped */ +#define LM90_STATUS_RTHRM BIT(1) /* remote THERM limit tripped */ +#define LM90_STATUS_ROPEN BIT(2) /* remote is an open circuit */ +#define LM90_STATUS_RLOW BIT(3) /* remote low temp limit tripped */ +#define LM90_STATUS_RHIGH BIT(4) /* remote high temp limit tripped */ +#define LM90_STATUS_LLOW BIT(5) /* local low temp limit tripped */ +#define LM90_STATUS_LHIGH BIT(6) /* local high temp limit tripped */ +#define LM90_STATUS_BUSY BIT(7) /* conversion is ongoing */ + +/* MAX6695/6696 and ADT7481 2nd status register */ +#define MAX6696_STATUS2_R2THRM BIT(1) /* remote2 THERM limit tripped */ +#define MAX6696_STATUS2_R2OPEN BIT(2) /* remote2 is an open circuit */ +#define MAX6696_STATUS2_R2LOW BIT(3) /* remote2 low temp limit tripped */ +#define MAX6696_STATUS2_R2HIGH BIT(4) /* remote2 high temp limit tripped */ +#define MAX6696_STATUS2_ROT2 BIT(5) /* remote emergency limit tripped */ +#define MAX6696_STATUS2_R2OT2 BIT(6) /* remote2 emergency limit tripped */ +#define MAX6696_STATUS2_LOT2 BIT(7) /* local emergency limit tripped */ /* * Driver data (common to all clients) */ static const struct i2c_device_id lm90_id[] = { + { "adm1020", max1617 }, + { "adm1021", max1617 }, + { "adm1023", adm1023 }, { "adm1032", adm1032 }, + { "adt7421", adt7461a }, { "adt7461", adt7461 }, - { "adt7461a", adt7461 }, + { "adt7461a", adt7461a }, + { "adt7481", adt7481 }, + { "adt7482", adt7481 }, + { "adt7483a", adt7481 }, { "g781", g781 }, + { "gl523sm", max1617 }, + { "lm84", lm84 }, + { "lm86", lm90 }, + { "lm89", lm90 }, { "lm90", lm90 }, - { "lm86", lm86 }, - { "lm89", lm86 }, { "lm99", lm99 }, + { "max1617", max1617 }, + { "max6642", max6642 }, { "max6646", max6646 }, { "max6647", max6646 }, + { "max6648", max6648 }, { "max6649", max6646 }, { "max6654", max6654 }, { "max6657", max6657 }, @@ -232,11 +264,20 @@ static const struct i2c_device_id lm90_id[] = { { "max6659", max6659 }, { "max6680", max6680 }, { "max6681", max6680 }, + { "max6690", max6654 }, + { "max6692", max6648 }, { "max6695", max6696 }, { "max6696", max6696 }, - { "nct1008", adt7461 }, + { "mc1066", max1617 }, + { "nct1008", adt7461a }, + { "nct210", nct210 }, + { "nct214", nct72 }, + { "nct218", nct72 }, + { "nct72", nct72 }, + { "ne1618", ne1618 }, { "w83l771", w83l771 }, { "sa56004", sa56004 }, + { "thmc10", max1617 }, { "tmp451", tmp451 }, { "tmp461", tmp461 }, { } @@ -254,7 +295,11 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = { }, { .compatible = "adi,adt7461a", - .data = (void *)adt7461 + .data = (void *)adt7461a + }, + { + .compatible = "adi,adt7481", + .data = (void *)adt7481 }, { .compatible = "gmt,g781", @@ -266,11 +311,11 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = { }, { .compatible = "national,lm86", - .data = (void *)lm86 + .data = (void *)lm90 }, { .compatible = "national,lm89", - .data = (void *)lm86 + .data = (void *)lm90 }, { .compatible = "national,lm99", @@ -322,7 +367,19 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = { }, { .compatible = "onnn,nct1008", - .data = (void *)adt7461 + .data = (void 
*)adt7461a + }, + { + .compatible = "onnn,nct214", + .data = (void *)nct72 + }, + { + .compatible = "onnn,nct218", + .data = (void *)nct72 + }, + { + .compatible = "onnn,nct72", + .data = (void *)nct72 }, { .compatible = "winbond,w83l771", @@ -352,115 +409,252 @@ struct lm90_params { u16 alert_alarms; /* Which alarm bits trigger ALERT# */ /* Upper 8 bits for max6695/96 */ u8 max_convrate; /* Maximum conversion rate register value */ + u8 resolution; /* 16-bit resolution (default 11 bit) */ + u8 reg_status2; /* 2nd status register (optional) */ u8 reg_local_ext; /* Extended local temp register (optional) */ + u8 faultqueue_mask; /* fault queue bit mask */ + u8 faultqueue_depth; /* fault queue depth if mask is used */ }; static const struct lm90_params lm90_params[] = { + [adm1023] = { + .flags = LM90_HAVE_ALARMS | LM90_HAVE_OFFSET | LM90_HAVE_BROKEN_ALERT + | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_LOW | LM90_HAVE_CONVRATE + | LM90_HAVE_REMOTE_EXT, + .alert_alarms = 0x7c, + .resolution = 8, + .max_convrate = 7, + }, [adm1032] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT - | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT, + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT + | LM90_HAVE_PARTIAL_PEC | LM90_HAVE_ALARMS + | LM90_HAVE_LOW | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT + | LM90_HAVE_FAULTQUEUE, .alert_alarms = 0x7c, .max_convrate = 10, }, [adt7461] = { + /* + * Standard temperature range is supposed to be unsigned, + * but that does not match reality. Negative temperatures + * are always reported. + */ + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP + | LM90_HAVE_CRIT | LM90_HAVE_PARTIAL_PEC + | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE + | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE, + .alert_alarms = 0x7c, + .max_convrate = 10, + .resolution = 10, + }, + [adt7461a] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP - | LM90_HAVE_CRIT, + | LM90_HAVE_CRIT | LM90_HAVE_PEC | LM90_HAVE_ALARMS + | LM90_HAVE_LOW | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT + | LM90_HAVE_FAULTQUEUE, .alert_alarms = 0x7c, .max_convrate = 10, }, + [adt7481] = { + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP + | LM90_HAVE_UNSIGNED_TEMP | LM90_HAVE_PEC + | LM90_HAVE_TEMP3 | LM90_HAVE_CRIT | LM90_HAVE_LOW + | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT + | LM90_HAVE_FAULTQUEUE, + .alert_alarms = 0x1c7c, + .max_convrate = 11, + .resolution = 10, + .reg_status2 = ADT7481_REG_STATUS2, + }, [g781] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT - | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT, + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT + | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE + | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE, .alert_alarms = 0x7c, .max_convrate = 7, }, - [lm86] = { - .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT - | LM90_HAVE_CRIT, - .alert_alarms = 0x7b, - .max_convrate = 9, + [lm84] = { + .flags = LM90_HAVE_ALARMS, + .resolution = 8, }, [lm90] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT - | LM90_HAVE_CRIT, + | LM90_HAVE_CRIT | LM90_HAVE_ALARMS | LM90_HAVE_LOW + | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT + | LM90_HAVE_FAULTQUEUE, .alert_alarms = 0x7b, .max_convrate = 9, + .faultqueue_mask = BIT(0), + .faultqueue_depth = 3, }, [lm99] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT - | LM90_HAVE_CRIT, + | LM90_HAVE_CRIT | LM90_HAVE_ALARMS | LM90_HAVE_LOW + | LM90_HAVE_CONVRATE | 
LM90_HAVE_REMOTE_EXT + | LM90_HAVE_FAULTQUEUE, .alert_alarms = 0x7b, .max_convrate = 9, + .faultqueue_mask = BIT(0), + .faultqueue_depth = 3, + }, + [max1617] = { + .flags = LM90_HAVE_CONVRATE | LM90_HAVE_BROKEN_ALERT | + LM90_HAVE_LOW | LM90_HAVE_ALARMS, + .alert_alarms = 0x78, + .resolution = 8, + .max_convrate = 7, + }, + [max6642] = { + .flags = LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXT_UNSIGNED + | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE, + .alert_alarms = 0x50, + .resolution = 10, + .reg_local_ext = MAX6657_REG_LOCAL_TEMPL, + .faultqueue_mask = BIT(4), + .faultqueue_depth = 2, }, [max6646] = { - .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT, + .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT + | LM90_HAVE_EXT_UNSIGNED | LM90_HAVE_ALARMS | LM90_HAVE_LOW + | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT, .alert_alarms = 0x7c, .max_convrate = 6, - .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, + .reg_local_ext = MAX6657_REG_LOCAL_TEMPL, + }, + [max6648] = { + .flags = LM90_HAVE_UNSIGNED_TEMP | LM90_HAVE_CRIT + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_LOW + | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT, + .alert_alarms = 0x7c, + .max_convrate = 6, + .reg_local_ext = MAX6657_REG_LOCAL_TEMPL, }, [max6654] = { - .flags = LM90_HAVE_BROKEN_ALERT, + .flags = LM90_HAVE_BROKEN_ALERT | LM90_HAVE_ALARMS | LM90_HAVE_LOW + | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT, .alert_alarms = 0x7c, .max_convrate = 7, - .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, + .reg_local_ext = MAX6657_REG_LOCAL_TEMPL, }, [max6657] = { - .flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_CRIT, + .flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_CRIT + | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE + | LM90_HAVE_REMOTE_EXT, .alert_alarms = 0x7c, .max_convrate = 8, - .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, + .reg_local_ext = MAX6657_REG_LOCAL_TEMPL, }, [max6659] = { - .flags = LM90_HAVE_EMERGENCY | LM90_HAVE_CRIT, + .flags = LM90_HAVE_EMERGENCY | LM90_HAVE_CRIT + | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE + | LM90_HAVE_REMOTE_EXT, .alert_alarms = 0x7c, .max_convrate = 8, - .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, + .reg_local_ext = MAX6657_REG_LOCAL_TEMPL, }, [max6680] = { + /* + * Apparent temperatures of 128 degrees C or higher are reported + * and treated as negative temperatures (meaning min_alarm will + * be set). 
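 + * (For example, a raw reading of 0x80, nominally +128 degrees C, is + * interpreted as the signed value -128 degrees C.)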
+ */ .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT - | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT, + | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT + | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE + | LM90_HAVE_REMOTE_EXT, .alert_alarms = 0x7c, .max_convrate = 7, }, [max6696] = { .flags = LM90_HAVE_EMERGENCY - | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3 | LM90_HAVE_CRIT, + | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3 | LM90_HAVE_CRIT + | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE + | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE, .alert_alarms = 0x1c7c, .max_convrate = 6, - .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, + .reg_status2 = MAX6696_REG_STATUS2, + .reg_local_ext = MAX6657_REG_LOCAL_TEMPL, + .faultqueue_mask = BIT(5), + .faultqueue_depth = 4, + }, + [nct72] = { + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP + | LM90_HAVE_CRIT | LM90_HAVE_PEC | LM90_HAVE_UNSIGNED_TEMP + | LM90_HAVE_LOW | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT + | LM90_HAVE_FAULTQUEUE, + .alert_alarms = 0x7c, + .max_convrate = 10, + .resolution = 10, + }, + [nct210] = { + .flags = LM90_HAVE_ALARMS | LM90_HAVE_BROKEN_ALERT + | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_LOW | LM90_HAVE_CONVRATE + | LM90_HAVE_REMOTE_EXT, + .alert_alarms = 0x7c, + .resolution = 11, + .max_convrate = 7, + }, + [ne1618] = { + .flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_BROKEN_ALERT + | LM90_HAVE_LOW | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT, + .alert_alarms = 0x7c, + .resolution = 11, + .max_convrate = 7, }, [w83l771] = { - .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT, + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT + | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE + | LM90_HAVE_REMOTE_EXT, .alert_alarms = 0x7c, .max_convrate = 8, }, [sa56004] = { - .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT, + /* + * Apparent temperatures of 128 degrees C or higher are reported + * and treated as negative temperatures (meaning min_alarm will + * be set). 
+ */ + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT + | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE + | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE, .alert_alarms = 0x7b, .max_convrate = 9, - .reg_local_ext = SA56004_REG_R_LOCAL_TEMPL, + .reg_local_ext = SA56004_REG_LOCAL_TEMPL, + .faultqueue_mask = BIT(0), + .faultqueue_depth = 3, }, [tmp451] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT - | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT, + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT + | LM90_HAVE_UNSIGNED_TEMP | LM90_HAVE_ALARMS | LM90_HAVE_LOW + | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE, .alert_alarms = 0x7c, .max_convrate = 9, - .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL, + .resolution = 12, + .reg_local_ext = TMP451_REG_LOCAL_TEMPL, }, [tmp461] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT - | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT, + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT + | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE + | LM90_HAVE_REMOTE_EXT | LM90_HAVE_FAULTQUEUE, .alert_alarms = 0x7c, .max_convrate = 9, - .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL, + .resolution = 12, + .reg_local_ext = TMP451_REG_LOCAL_TEMPL, }, }; /* - * TEMP8 register index + * temperature register index */ -enum lm90_temp8_reg_index { +enum lm90_temp_reg_index { LOCAL_LOW = 0, LOCAL_HIGH, LOCAL_CRIT, @@ -469,14 +663,8 @@ enum lm90_temp8_reg_index { REMOTE_EMERG, /* max6659 and max6695/96 */ REMOTE2_CRIT, /* max6695/96 only */ REMOTE2_EMERG, /* max6695/96 only */ - TEMP8_REG_NUM -}; -/* - * TEMP11 register index - */ -enum lm90_temp11_reg_index { - REMOTE_TEMP = 0, + REMOTE_TEMP, REMOTE_LOW, REMOTE_HIGH, REMOTE_OFFSET, /* except max6646, max6657/58/59, and max6695/96 */ @@ -484,7 +672,9 @@ enum lm90_temp11_reg_index { REMOTE2_TEMP, /* max6695/96 only */ REMOTE2_LOW, /* max6695/96 only */ REMOTE2_HIGH, /* max6695/96 only */ - TEMP11_REG_NUM + REMOTE2_OFFSET, + + TEMP_REG_NUM }; /* @@ -494,13 +684,20 @@ enum lm90_temp11_reg_index { struct lm90_data { struct i2c_client *client; struct device *hwmon_dev; - u32 channel_config[4]; + u32 chip_config[2]; + u32 channel_config[MAX_CHANNELS + 1]; + const char *channel_label[MAX_CHANNELS]; + struct hwmon_channel_info chip_info; struct hwmon_channel_info temp_info; const struct hwmon_channel_info *info[3]; struct hwmon_chip_info chip; struct mutex update_lock; + struct delayed_work alert_work; + struct work_struct report_work; bool valid; /* true if register values are valid */ + bool alarms_valid; /* true if status register values are valid */ unsigned long last_updated; /* in jiffies */ + unsigned long alarms_updated; /* in jiffies */ int kind; u32 flags; @@ -509,16 +706,23 @@ struct lm90_data { u8 config; /* Current configuration register value */ u8 config_orig; /* Original configuration register value */ u8 convrate_orig; /* Original conversion rate register value */ + u8 resolution; /* temperature resolution in bits */ u16 alert_alarms; /* Which alarm bits trigger ALERT# */ /* Upper 8 bits for max6695/96 */ u8 max_convrate; /* Maximum conversion rate */ + u8 reg_status2; /* 2nd status register (optional) */ u8 reg_local_ext; /* local extension register offset */ + u8 reg_remote_ext; /* remote temperature low byte */ + u8 faultqueue_mask; /* fault queue mask */ + u8 faultqueue_depth; /* fault queue depth */ /* registers values */ - s8 temp8[TEMP8_REG_NUM]; - s16 temp11[TEMP11_REG_NUM]; + 
u16 temp[TEMP_REG_NUM]; u8 temp_hyst; - u16 alarms; /* bitvector (upper 8 bits for max6695/96) */ + u8 conalert; + u16 reported_alarms; /* alarms reported as sysfs/udev events */ + u16 current_alarms; /* current alarms, reported by chip */ + u16 alarms; /* alarms not yet reported to user */ }; /* @@ -526,10 +730,10 @@ struct lm90_data { */ /* - * The ADM1032 supports PEC but not on write byte transactions, so we need + * If the chip supports PEC but not on write byte transactions, we need * to explicitly ask for a transaction without PEC. */ -static inline s32 adm1032_write_byte(struct i2c_client *client, u8 value) +static inline s32 lm90_write_no_pec(struct i2c_client *client, u8 value) { return i2c_smbus_xfer(client->adapter, client->addr, client->flags & ~I2C_CLIENT_PEC, @@ -538,47 +742,96 @@ static inline s32 adm1032_write_byte(struct i2c_client *client, u8 value) /* * It is assumed that client->update_lock is held (unless we are in - * detection or initialization steps). This matters when PEC is enabled, - * because we don't want the address pointer to change between the write - * byte and the read byte transactions. + * detection or initialization steps). This matters when PEC is enabled + * for chips with partial PEC support, because we don't want the address + * pointer to change between the write byte and the read byte transactions. */ static int lm90_read_reg(struct i2c_client *client, u8 reg) { + struct lm90_data *data = i2c_get_clientdata(client); + bool partial_pec = (client->flags & I2C_CLIENT_PEC) && + (data->flags & LM90_HAVE_PARTIAL_PEC); int err; - if (client->flags & I2C_CLIENT_PEC) { - err = adm1032_write_byte(client, reg); - if (err >= 0) - err = i2c_smbus_read_byte(client); - } else - err = i2c_smbus_read_byte_data(client, reg); + if (partial_pec) { + err = lm90_write_no_pec(client, reg); + if (err) + return err; + return i2c_smbus_read_byte(client); + } + return i2c_smbus_read_byte_data(client, reg); +} - return err; +/* + * Return register write address + * + * The write address for registers 0x03 .. 0x08 is the read address plus 6. + * For other registers the write address matches the read address. + */ +static u8 lm90_write_reg_addr(u8 reg) +{ + if (reg >= LM90_REG_CONFIG1 && reg <= LM90_REG_REMOTE_LOWH) + return reg + 6; + return reg; } -static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl) +/* + * Write into LM90 register. + * Convert register address to write address if needed, then execute the + * operation. + */ +static int lm90_write_reg(struct i2c_client *client, u8 reg, u8 val) +{ + return i2c_smbus_write_byte_data(client, lm90_write_reg_addr(reg), val); +} + +/* + * Write into 16-bit LM90 register. + * Convert register addresses to write address if needed, then execute the + * operation. + */ +static int lm90_write16(struct i2c_client *client, u8 regh, u8 regl, u16 val) +{ + int ret; + + ret = lm90_write_reg(client, regh, val >> 8); + if (ret < 0 || !regl) + return ret; + return lm90_write_reg(client, regl, val & 0xff); +} + +static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, + bool is_volatile) { int oldh, newh, l; - /* - * There is a trick here. We have to read two registers to have the - * sensor temperature, but we have to beware a conversion could occur - * between the readings. 
The datasheet says we should either use - * the one-shot conversion register, which we don't want to do - * (disables hardware monitoring) or monitor the busy bit, which is - * impossible (we can't read the values and monitor that bit at the - * exact same time). So the solution used here is to read the high - * byte once, then the low byte, then the high byte again. If the new - * high byte matches the old one, then we have a valid reading. Else - * we have to read the low byte again, and now we believe we have a - * correct reading. - */ oldh = lm90_read_reg(client, regh); if (oldh < 0) return oldh; + + if (!regl) + return oldh << 8; + l = lm90_read_reg(client, regl); if (l < 0) return l; + + if (!is_volatile) + return (oldh << 8) | l; + + /* + * For volatile registers we have to use a trick. + * We have to read two registers to have the sensor temperature, + * but we have to beware a conversion could occur between the + * readings. The datasheet says we should either use + * the one-shot conversion register, which we don't want to do + * (disables hardware monitoring) or monitor the busy bit, which is + * impossible (we can't read the values and monitor that bit at the + * exact same time). So the solution used here is to read the high + * the high byte again. If the new high byte matches the old one, + * then we have a valid reading. Otherwise we have to read the low + * byte again, and now we believe we have a correct reading. + */ newh = lm90_read_reg(client, regh); if (newh < 0) return newh; @@ -595,9 +848,7 @@ static int lm90_update_confreg(struct lm90_data *data, u8 config) if (data->config != config) { int err; - err = i2c_smbus_write_byte_data(data->client, - LM90_REG_W_CONFIG1, - config); + err = lm90_write_reg(data->client, LM90_REG_CONFIG1, config); if (err) return err; data->config = config; @@ -613,18 +864,14 @@ static int lm90_update_confreg(struct lm90_data *data, u8 config) * various registers have different meanings as a result of selecting a * non-default remote channel. */ -static int lm90_select_remote_channel(struct lm90_data *data, int channel) +static int lm90_select_remote_channel(struct lm90_data *data, bool second) { - int err = 0; + u8 config = data->config & ~0x08; - if (data->kind == max6696) { - u8 config = data->config & ~0x08; + if (second) + config |= 0x08; - if (channel) - config |= 0x08; - err = lm90_update_confreg(data, config); - } - return err; + return lm90_update_confreg(data, config); } static int lm90_write_convrate(struct lm90_data *data, int val) @@ -640,7 +887,7 @@ static int lm90_write_convrate(struct lm90_data *data, int val) } /* Set conv rate */ - err = i2c_smbus_write_byte_data(data->client, LM90_REG_W_CONVRATE, val); + err = lm90_write_reg(data->client, LM90_REG_CONVRATE, val); /* Revert change to config */ lm90_update_confreg(data, config); @@ -673,6 +920,26 @@ static int lm90_set_convrate(struct i2c_client *client, struct lm90_data *data, return err; } +static int lm90_set_faultqueue(struct i2c_client *client, + struct lm90_data *data, int val) +{ + int err; + + if (data->faultqueue_mask) { + err = lm90_update_confreg(data, val <= data->faultqueue_depth / 2 ? 
+ data->config & ~data->faultqueue_mask : + data->config | data->faultqueue_mask); + } else { + static const u8 values[4] = {0, 2, 6, 0x0e}; + + data->conalert = (data->conalert & 0xf1) | values[val - 1]; + err = lm90_write_reg(data->client, TMP451_REG_CONALERT, + data->conalert); + } + + return err; +} + static int lm90_update_limits(struct device *dev) { struct lm90_data *data = dev_get_drvdata(dev); @@ -680,97 +947,260 @@ static int lm90_update_limits(struct device *dev) int val; if (data->flags & LM90_HAVE_CRIT) { - val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT); + val = lm90_read_reg(client, LM90_REG_LOCAL_CRIT); if (val < 0) return val; - data->temp8[LOCAL_CRIT] = val; + data->temp[LOCAL_CRIT] = val << 8; - val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT); + val = lm90_read_reg(client, LM90_REG_REMOTE_CRIT); if (val < 0) return val; - data->temp8[REMOTE_CRIT] = val; + data->temp[REMOTE_CRIT] = val << 8; - val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST); + val = lm90_read_reg(client, LM90_REG_TCRIT_HYST); if (val < 0) return val; data->temp_hyst = val; } - - val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH); - if (val < 0) - return val; - data->temp11[REMOTE_LOW] = val << 8; - - if (data->flags & LM90_HAVE_REM_LIMIT_EXT) { - val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL); + if ((data->flags & LM90_HAVE_FAULTQUEUE) && !data->faultqueue_mask) { + val = lm90_read_reg(client, TMP451_REG_CONALERT); if (val < 0) return val; - data->temp11[REMOTE_LOW] |= val; + data->conalert = val; } - val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH); + val = lm90_read16(client, LM90_REG_REMOTE_LOWH, + (data->flags & LM90_HAVE_REM_LIMIT_EXT) ? LM90_REG_REMOTE_LOWL : 0, + false); if (val < 0) return val; - data->temp11[REMOTE_HIGH] = val << 8; + data->temp[REMOTE_LOW] = val; - if (data->flags & LM90_HAVE_REM_LIMIT_EXT) { - val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL); - if (val < 0) - return val; - data->temp11[REMOTE_HIGH] |= val; - } + val = lm90_read16(client, LM90_REG_REMOTE_HIGHH, + (data->flags & LM90_HAVE_REM_LIMIT_EXT) ? 
LM90_REG_REMOTE_HIGHL : 0, + false); + if (val < 0) + return val; + data->temp[REMOTE_HIGH] = val; if (data->flags & LM90_HAVE_OFFSET) { - val = lm90_read16(client, LM90_REG_R_REMOTE_OFFSH, - LM90_REG_R_REMOTE_OFFSL); + val = lm90_read16(client, LM90_REG_REMOTE_OFFSH, + LM90_REG_REMOTE_OFFSL, false); if (val < 0) return val; - data->temp11[REMOTE_OFFSET] = val; + data->temp[REMOTE_OFFSET] = val; } if (data->flags & LM90_HAVE_EMERGENCY) { - val = lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG); + val = lm90_read_reg(client, MAX6659_REG_LOCAL_EMERG); if (val < 0) return val; - data->temp8[LOCAL_EMERG] = val; + data->temp[LOCAL_EMERG] = val << 8; - val = lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG); + val = lm90_read_reg(client, MAX6659_REG_REMOTE_EMERG); if (val < 0) return val; - data->temp8[REMOTE_EMERG] = val; + data->temp[REMOTE_EMERG] = val << 8; } - if (data->kind == max6696) { - val = lm90_select_remote_channel(data, 1); + if (data->flags & LM90_HAVE_TEMP3) { + val = lm90_select_remote_channel(data, true); if (val < 0) return val; - val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT); + val = lm90_read_reg(client, LM90_REG_REMOTE_CRIT); if (val < 0) return val; - data->temp8[REMOTE2_CRIT] = val; + data->temp[REMOTE2_CRIT] = val << 8; + + if (data->flags & LM90_HAVE_EMERGENCY) { + val = lm90_read_reg(client, MAX6659_REG_REMOTE_EMERG); + if (val < 0) + return val; + data->temp[REMOTE2_EMERG] = val << 8; + } - val = lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG); + val = lm90_read_reg(client, LM90_REG_REMOTE_LOWH); if (val < 0) return val; - data->temp8[REMOTE2_EMERG] = val; + data->temp[REMOTE2_LOW] = val << 8; - val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH); + val = lm90_read_reg(client, LM90_REG_REMOTE_HIGHH); if (val < 0) return val; - data->temp11[REMOTE2_LOW] = val << 8; + data->temp[REMOTE2_HIGH] = val << 8; + + if (data->flags & LM90_HAVE_OFFSET) { + val = lm90_read16(client, LM90_REG_REMOTE_OFFSH, + LM90_REG_REMOTE_OFFSL, false); + if (val < 0) + return val; + data->temp[REMOTE2_OFFSET] = val; + } + + lm90_select_remote_channel(data, false); + } + + return 0; +} + +static void lm90_report_alarms(struct work_struct *work) +{ + struct lm90_data *data = container_of(work, struct lm90_data, report_work); + u16 cleared_alarms, new_alarms, current_alarms; + struct device *hwmon_dev = data->hwmon_dev; + struct device *dev = &data->client->dev; + int st, st2; + + current_alarms = data->current_alarms; + cleared_alarms = data->reported_alarms & ~current_alarms; + new_alarms = current_alarms & ~data->reported_alarms; + + if (!cleared_alarms && !new_alarms) + return; + + st = new_alarms & 0xff; + st2 = new_alarms >> 8; + + if ((st & (LM90_STATUS_LLOW | LM90_STATUS_LHIGH | LM90_STATUS_LTHRM)) || + (st2 & MAX6696_STATUS2_LOT2)) + dev_dbg(dev, "temp%d out of range, please check!\n", 1); + if ((st & (LM90_STATUS_RLOW | LM90_STATUS_RHIGH | LM90_STATUS_RTHRM)) || + (st2 & MAX6696_STATUS2_ROT2)) + dev_dbg(dev, "temp%d out of range, please check!\n", 2); + if (st & LM90_STATUS_ROPEN) + dev_dbg(dev, "temp%d diode open, please check!\n", 2); + if (st2 & (MAX6696_STATUS2_R2LOW | MAX6696_STATUS2_R2HIGH | + MAX6696_STATUS2_R2THRM | MAX6696_STATUS2_R2OT2)) + dev_dbg(dev, "temp%d out of range, please check!\n", 3); + if (st2 & MAX6696_STATUS2_R2OPEN) + dev_dbg(dev, "temp%d diode open, please check!\n", 3); + + st |= cleared_alarms & 0xff; + st2 |= cleared_alarms >> 8; + + if (st & LM90_STATUS_LLOW) + hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_min_alarm, 0); + if (st & 
LM90_STATUS_RLOW) + hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_min_alarm, 1); + if (st2 & MAX6696_STATUS2_R2LOW) + hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_min_alarm, 2); + + if (st & LM90_STATUS_LHIGH) + hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_max_alarm, 0); + if (st & LM90_STATUS_RHIGH) + hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_max_alarm, 1); + if (st2 & MAX6696_STATUS2_R2HIGH) + hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_max_alarm, 2); + + if (st & LM90_STATUS_LTHRM) + hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_crit_alarm, 0); + if (st & LM90_STATUS_RTHRM) + hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_crit_alarm, 1); + if (st2 & MAX6696_STATUS2_R2THRM) + hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_crit_alarm, 2); + + if (st2 & MAX6696_STATUS2_LOT2) + hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_emergency_alarm, 0); + if (st2 & MAX6696_STATUS2_ROT2) + hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_emergency_alarm, 1); + if (st2 & MAX6696_STATUS2_R2OT2) + hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_emergency_alarm, 2); + + data->reported_alarms = current_alarms; +} + +static int lm90_update_alarms_locked(struct lm90_data *data, bool force) +{ + if (force || !data->alarms_valid || + time_after(jiffies, data->alarms_updated + msecs_to_jiffies(data->update_interval))) { + struct i2c_client *client = data->client; + bool check_enable; + u16 alarms; + int val; - val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH); + data->alarms_valid = false; + + val = lm90_read_reg(client, LM90_REG_STATUS); if (val < 0) return val; - data->temp11[REMOTE2_HIGH] = val << 8; + alarms = val & ~LM90_STATUS_BUSY; - lm90_select_remote_channel(data, 0); - } + if (data->reg_status2) { + val = lm90_read_reg(client, data->reg_status2); + if (val < 0) + return val; + alarms |= val << 8; + } + /* + * If the update is forced (called from interrupt or alert + * handler) and alarm data is valid, the alarms may have been + * updated after the last update interval, and the status + * register may still be cleared. Only add additional alarms + * in this case. Alarms will be cleared later if appropriate. + */ + if (force && data->alarms_valid) + data->current_alarms |= alarms; + else + data->current_alarms = alarms; + data->alarms |= alarms; + + check_enable = (client->irq || !(data->config_orig & 0x80)) && + (data->config & 0x80); + + if (force || check_enable) + schedule_work(&data->report_work); + /* + * Re-enable ALERT# output if it was originally enabled, relevant + * alarms are all clear, and alerts are currently disabled. + * Otherwise (re)schedule worker if needed. + */ + if (check_enable) { + if (!(data->current_alarms & data->alert_alarms)) { + dev_dbg(&client->dev, "Re-enabling ALERT#\n"); + lm90_update_confreg(data, data->config & ~0x80); + /* + * We may have been called from the update handler. + * If so, the worker, if scheduled, is no longer + * needed. Cancel it. Don't synchronize because + * it may already be running. 
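+				 * An already-running worker is harmless
+				 * here: it checks the cached configuration
+				 * first and returns without doing anything
+				 * once it sees that alerts have been
+				 * re-enabled.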
+ */ + cancel_delayed_work(&data->alert_work); + } else { + schedule_delayed_work(&data->alert_work, + max_t(int, HZ, msecs_to_jiffies(data->update_interval))); + } + } + data->alarms_updated = jiffies; + data->alarms_valid = true; + } return 0; } +static int lm90_update_alarms(struct lm90_data *data, bool force) +{ + int err; + + mutex_lock(&data->update_lock); + err = lm90_update_alarms_locked(data, force); + mutex_unlock(&data->update_lock); + + return err; +} + +static void lm90_alert_work(struct work_struct *__work) +{ + struct delayed_work *delayed_work = container_of(__work, struct delayed_work, work); + struct lm90_data *data = container_of(delayed_work, struct lm90_data, alert_work); + + /* Nothing to do if alerts are enabled */ + if (!(data->config & 0x80)) + return; + + lm90_update_alarms(data, true); +} + static int lm90_update_device(struct device *dev) { struct lm90_data *data = dev_get_drvdata(dev); @@ -791,71 +1221,46 @@ static int lm90_update_device(struct device *dev) data->valid = false; - val = lm90_read_reg(client, LM90_REG_R_LOCAL_LOW); + val = lm90_read_reg(client, LM90_REG_LOCAL_LOW); if (val < 0) return val; - data->temp8[LOCAL_LOW] = val; + data->temp[LOCAL_LOW] = val << 8; - val = lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH); + val = lm90_read_reg(client, LM90_REG_LOCAL_HIGH); if (val < 0) return val; - data->temp8[LOCAL_HIGH] = val; + data->temp[LOCAL_HIGH] = val << 8; - if (data->reg_local_ext) { - val = lm90_read16(client, LM90_REG_R_LOCAL_TEMP, - data->reg_local_ext); - if (val < 0) - return val; - data->temp11[LOCAL_TEMP] = val; - } else { - val = lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP); - if (val < 0) - return val; - data->temp11[LOCAL_TEMP] = val << 8; - } - val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH, - LM90_REG_R_REMOTE_TEMPL); + val = lm90_read16(client, LM90_REG_LOCAL_TEMP, + data->reg_local_ext, true); if (val < 0) return val; - data->temp11[REMOTE_TEMP] = val; - - val = lm90_read_reg(client, LM90_REG_R_STATUS); + data->temp[LOCAL_TEMP] = val; + val = lm90_read16(client, LM90_REG_REMOTE_TEMPH, + data->reg_remote_ext, true); if (val < 0) return val; - data->alarms = val & ~LM90_STATUS_BUSY; + data->temp[REMOTE_TEMP] = val; - if (data->kind == max6696) { - val = lm90_select_remote_channel(data, 1); + if (data->flags & LM90_HAVE_TEMP3) { + val = lm90_select_remote_channel(data, true); if (val < 0) return val; - val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH, - LM90_REG_R_REMOTE_TEMPL); + val = lm90_read16(client, LM90_REG_REMOTE_TEMPH, + data->reg_remote_ext, true); if (val < 0) { - lm90_select_remote_channel(data, 0); + lm90_select_remote_channel(data, false); return val; } - data->temp11[REMOTE2_TEMP] = val; + data->temp[REMOTE2_TEMP] = val; - lm90_select_remote_channel(data, 0); - - val = lm90_read_reg(client, MAX6696_REG_R_STATUS2); - if (val < 0) - return val; - data->alarms |= val << 8; + lm90_select_remote_channel(data, false); } - /* - * Re-enable ALERT# output if it was originally enabled and - * relevant alarms are all clear - */ - if ((client->irq || !(data->config_orig & 0x80)) && - !(data->alarms & data->alert_alarms)) { - if (data->config & 0x80) { - dev_dbg(&client->dev, "Re-enabling ALERT#\n"); - lm90_update_confreg(data, data->config & ~0x80); - } - } + val = lm90_update_alarms_locked(data, false); + if (val < 0) + return val; data->last_updated = jiffies; data->valid = true; @@ -864,130 +1269,7 @@ static int lm90_update_device(struct device *dev) return 0; } -/* - * Conversions - * For local temperatures and limits, 
critical limits and the hysteresis - * value, the LM90 uses signed 8-bit values with LSB = 1 degree Celsius. - * For remote temperatures and limits, it uses signed 11-bit values with - * LSB = 0.125 degree Celsius, left-justified in 16-bit registers. Some - * Maxim chips use unsigned values. - */ - -static inline int temp_from_s8(s8 val) -{ - return val * 1000; -} - -static inline int temp_from_u8(u8 val) -{ - return val * 1000; -} - -static inline int temp_from_s16(s16 val) -{ - return val / 32 * 125; -} - -static inline int temp_from_u16(u16 val) -{ - return val / 32 * 125; -} - -static s8 temp_to_s8(long val) -{ - if (val <= -128000) - return -128; - if (val >= 127000) - return 127; - if (val < 0) - return (val - 500) / 1000; - return (val + 500) / 1000; -} - -static u8 temp_to_u8(long val) -{ - if (val <= 0) - return 0; - if (val >= 255000) - return 255; - return (val + 500) / 1000; -} - -static s16 temp_to_s16(long val) -{ - if (val <= -128000) - return 0x8000; - if (val >= 127875) - return 0x7FE0; - if (val < 0) - return (val - 62) / 125 * 32; - return (val + 62) / 125 * 32; -} - -static u8 hyst_to_reg(long val) -{ - if (val <= 0) - return 0; - if (val >= 30500) - return 31; - return (val + 500) / 1000; -} - -/* - * ADT7461 in compatibility mode is almost identical to LM90 except that - * attempts to write values that are outside the range 0 < temp < 127 are - * treated as the boundary value. - * - * ADT7461 in "extended mode" operation uses unsigned integers offset by - * 64 (e.g., 0 -> -64 degC). The range is restricted to -64..191 degC. - */ -static inline int temp_from_u8_adt7461(struct lm90_data *data, u8 val) -{ - if (data->flags & LM90_FLAG_ADT7461_EXT) - return (val - 64) * 1000; - return temp_from_s8(val); -} - -static inline int temp_from_u16_adt7461(struct lm90_data *data, u16 val) -{ - if (data->flags & LM90_FLAG_ADT7461_EXT) - return (val - 0x4000) / 64 * 250; - return temp_from_s16(val); -} - -static u8 temp_to_u8_adt7461(struct lm90_data *data, long val) -{ - if (data->flags & LM90_FLAG_ADT7461_EXT) { - if (val <= -64000) - return 0; - if (val >= 191000) - return 0xFF; - return (val + 500 + 64000) / 1000; - } - if (val <= 0) - return 0; - if (val >= 127000) - return 127; - return (val + 500) / 1000; -} - -static u16 temp_to_u16_adt7461(struct lm90_data *data, long val) -{ - if (data->flags & LM90_FLAG_ADT7461_EXT) { - if (val <= -64000) - return 0; - if (val >= 191750) - return 0xFFC0; - return (val + 64000 + 125) / 250 * 64; - } - if (val <= 0) - return 0; - if (val >= 127750) - return 0x7FC0; - return (val + 125) / 250 * 64; -} - -/* pec used for ADM1032 only */ +/* pec used for devices with PEC support */ static ssize_t pec_show(struct device *dev, struct device_attribute *dummy, char *buf) { @@ -1023,196 +1305,211 @@ static ssize_t pec_store(struct device *dev, struct device_attribute *dummy, static DEVICE_ATTR_RW(pec); -static int lm90_get_temp11(struct lm90_data *data, int index) +static int lm90_temp_get_resolution(struct lm90_data *data, int index) { - s16 temp11 = data->temp11[index]; - int temp; + switch (index) { + case REMOTE_TEMP: + if (data->reg_remote_ext) + return data->resolution; + return 8; + case REMOTE_OFFSET: + case REMOTE2_OFFSET: + case REMOTE2_TEMP: + return data->resolution; + case LOCAL_TEMP: + if (data->reg_local_ext) + return data->resolution; + return 8; + case REMOTE_LOW: + case REMOTE_HIGH: + case REMOTE2_LOW: + case REMOTE2_HIGH: + if (data->flags & LM90_HAVE_REM_LIMIT_EXT) + return data->resolution; + return 8; + default: + return 8; + 
} +} - if (data->flags & LM90_HAVE_EXTENDED_TEMP) - temp = temp_from_u16_adt7461(data, temp11); - else if (data->kind == max6646) - temp = temp_from_u16(temp11); +static int lm90_temp_from_reg(u32 flags, u16 regval, u8 resolution) +{ + int val; + + if (flags & LM90_HAVE_EXTENDED_TEMP) + val = regval - 0x4000; + else if (flags & (LM90_HAVE_UNSIGNED_TEMP | LM90_HAVE_EXT_UNSIGNED)) + val = regval; else - temp = temp_from_s16(temp11); + val = (s16)regval; + + return ((val >> (16 - resolution)) * 1000) >> (resolution - 8); +} + +static int lm90_get_temp(struct lm90_data *data, int index, int channel) +{ + int temp = lm90_temp_from_reg(data->flags, data->temp[index], + lm90_temp_get_resolution(data, index)); - /* +16 degrees offset for temp2 for the LM99 */ - if (data->kind == lm99 && index <= 2) + /* +16 degrees offset for remote temperature on LM99 */ + if (data->kind == lm99 && channel) temp += 16000; return temp; } -static int lm90_set_temp11(struct lm90_data *data, int index, long val) +static u16 lm90_temp_to_reg(u32 flags, long val, u8 resolution) { - static struct reg { - u8 high; - u8 low; - } reg[] = { - [REMOTE_LOW] = { LM90_REG_W_REMOTE_LOWH, LM90_REG_W_REMOTE_LOWL }, - [REMOTE_HIGH] = { LM90_REG_W_REMOTE_HIGHH, LM90_REG_W_REMOTE_HIGHL }, - [REMOTE_OFFSET] = { LM90_REG_W_REMOTE_OFFSH, LM90_REG_W_REMOTE_OFFSL }, - [REMOTE2_LOW] = { LM90_REG_W_REMOTE_LOWH, LM90_REG_W_REMOTE_LOWL }, - [REMOTE2_HIGH] = { LM90_REG_W_REMOTE_HIGHH, LM90_REG_W_REMOTE_HIGHL } + int fraction = resolution > 8 ? + 1000 - DIV_ROUND_CLOSEST(1000, BIT(resolution - 8)) : 0; + + if (flags & LM90_HAVE_EXTENDED_TEMP) { + val = clamp_val(val, -64000, 191000 + fraction); + val += 64000; + } else if (flags & LM90_HAVE_EXT_UNSIGNED) { + val = clamp_val(val, 0, 255000 + fraction); + } else if (flags & LM90_HAVE_UNSIGNED_TEMP) { + val = clamp_val(val, 0, 127000 + fraction); + } else { + val = clamp_val(val, -128000, 127000 + fraction); + } + + return DIV_ROUND_CLOSEST(val << (resolution - 8), 1000) << (16 - resolution); +} + +static int lm90_set_temp(struct lm90_data *data, int index, int channel, long val) +{ + static const u8 regs[] = { + [LOCAL_LOW] = LM90_REG_LOCAL_LOW, + [LOCAL_HIGH] = LM90_REG_LOCAL_HIGH, + [LOCAL_CRIT] = LM90_REG_LOCAL_CRIT, + [REMOTE_CRIT] = LM90_REG_REMOTE_CRIT, + [LOCAL_EMERG] = MAX6659_REG_LOCAL_EMERG, + [REMOTE_EMERG] = MAX6659_REG_REMOTE_EMERG, + [REMOTE2_CRIT] = LM90_REG_REMOTE_CRIT, + [REMOTE2_EMERG] = MAX6659_REG_REMOTE_EMERG, + [REMOTE_LOW] = LM90_REG_REMOTE_LOWH, + [REMOTE_HIGH] = LM90_REG_REMOTE_HIGHH, + [REMOTE2_LOW] = LM90_REG_REMOTE_LOWH, + [REMOTE2_HIGH] = LM90_REG_REMOTE_HIGHH, }; struct i2c_client *client = data->client; - struct reg *regp = ®[index]; + u8 regh = regs[index]; + u8 regl = 0; int err; - /* +16 degrees offset for temp2 for the LM99 */ - if (data->kind == lm99 && index <= 2) { + if (channel && (data->flags & LM90_HAVE_REM_LIMIT_EXT)) { + if (index == REMOTE_LOW || index == REMOTE2_LOW) + regl = LM90_REG_REMOTE_LOWL; + else if (index == REMOTE_HIGH || index == REMOTE2_HIGH) + regl = LM90_REG_REMOTE_HIGHL; + } + + /* +16 degrees offset for remote temperature on LM99 */ + if (data->kind == lm99 && channel) { /* prevent integer underflow */ val = max(val, -128000l); val -= 16000; } - if (data->flags & LM90_HAVE_EXTENDED_TEMP) - data->temp11[index] = temp_to_u16_adt7461(data, val); - else if (data->kind == max6646) - data->temp11[index] = temp_to_u8(val) << 8; - else if (data->flags & LM90_HAVE_REM_LIMIT_EXT) - data->temp11[index] = temp_to_s16(val); - else - 
data->temp11[index] = temp_to_s8(val) << 8; + data->temp[index] = lm90_temp_to_reg(data->flags, val, + lm90_temp_get_resolution(data, index)); - lm90_select_remote_channel(data, index >= 3); - err = i2c_smbus_write_byte_data(client, regp->high, - data->temp11[index] >> 8); - if (err < 0) - return err; - if (data->flags & LM90_HAVE_REM_LIMIT_EXT) - err = i2c_smbus_write_byte_data(client, regp->low, - data->temp11[index] & 0xff); + if (channel > 1) + lm90_select_remote_channel(data, true); + + err = lm90_write16(client, regh, regl, data->temp[index]); + + if (channel > 1) + lm90_select_remote_channel(data, false); - lm90_select_remote_channel(data, 0); return err; } -static int lm90_get_temp8(struct lm90_data *data, int index) +static int lm90_get_temphyst(struct lm90_data *data, int index, int channel) { - s8 temp8 = data->temp8[index]; - int temp; + int temp = lm90_get_temp(data, index, channel); - if (data->flags & LM90_HAVE_EXTENDED_TEMP) - temp = temp_from_u8_adt7461(data, temp8); - else if (data->kind == max6646) - temp = temp_from_u8(temp8); - else - temp = temp_from_s8(temp8); - - /* +16 degrees offset for temp2 for the LM99 */ - if (data->kind == lm99 && index == 3) - temp += 16000; - - return temp; + return temp - data->temp_hyst * 1000; } -static int lm90_set_temp8(struct lm90_data *data, int index, long val) +static int lm90_set_temphyst(struct lm90_data *data, long val) { - static const u8 reg[TEMP8_REG_NUM] = { - LM90_REG_W_LOCAL_LOW, - LM90_REG_W_LOCAL_HIGH, - LM90_REG_W_LOCAL_CRIT, - LM90_REG_W_REMOTE_CRIT, - MAX6659_REG_W_LOCAL_EMERG, - MAX6659_REG_W_REMOTE_EMERG, - LM90_REG_W_REMOTE_CRIT, - MAX6659_REG_W_REMOTE_EMERG, - }; - struct i2c_client *client = data->client; - int err; + int temp = lm90_get_temp(data, LOCAL_CRIT, 0); - /* +16 degrees offset for temp2 for the LM99 */ - if (data->kind == lm99 && index == 3) { - /* prevent integer underflow */ - val = max(val, -128000l); - val -= 16000; - } + /* prevent integer overflow/underflow */ + val = clamp_val(val, -128000l, 255000l); + data->temp_hyst = clamp_val(DIV_ROUND_CLOSEST(temp - val, 1000), 0, 31); - if (data->flags & LM90_HAVE_EXTENDED_TEMP) - data->temp8[index] = temp_to_u8_adt7461(data, val); - else if (data->kind == max6646) - data->temp8[index] = temp_to_u8(val); - else - data->temp8[index] = temp_to_s8(val); + return lm90_write_reg(data->client, LM90_REG_TCRIT_HYST, data->temp_hyst); +} - lm90_select_remote_channel(data, index >= 6); - err = i2c_smbus_write_byte_data(client, reg[index], data->temp8[index]); - lm90_select_remote_channel(data, 0); +static int lm90_get_temp_offset(struct lm90_data *data, int index) +{ + int res = lm90_temp_get_resolution(data, index); - return err; + return lm90_temp_from_reg(0, data->temp[index], res); } -static int lm90_get_temphyst(struct lm90_data *data, int index) +static int lm90_set_temp_offset(struct lm90_data *data, int index, int channel, long val) { - int temp; + int err; - if (data->flags & LM90_HAVE_EXTENDED_TEMP) - temp = temp_from_u8_adt7461(data, data->temp8[index]); - else if (data->kind == max6646) - temp = temp_from_u8(data->temp8[index]); - else - temp = temp_from_s8(data->temp8[index]); + val = lm90_temp_to_reg(0, val, lm90_temp_get_resolution(data, index)); - /* +16 degrees offset for temp2 for the LM99 */ - if (data->kind == lm99 && index == 3) - temp += 16000; + /* For ADT7481 we can use the same registers for remote channel 1 and 2 */ + if (channel > 1) + lm90_select_remote_channel(data, true); - return temp - temp_from_s8(data->temp_hyst); -} + err = 
lm90_write16(data->client, LM90_REG_REMOTE_OFFSH, LM90_REG_REMOTE_OFFSL, val); -static int lm90_set_temphyst(struct lm90_data *data, long val) -{ - struct i2c_client *client = data->client; - int temp; - int err; + if (channel > 1) + lm90_select_remote_channel(data, false); - if (data->flags & LM90_HAVE_EXTENDED_TEMP) - temp = temp_from_u8_adt7461(data, data->temp8[LOCAL_CRIT]); - else if (data->kind == max6646) - temp = temp_from_u8(data->temp8[LOCAL_CRIT]); - else - temp = temp_from_s8(data->temp8[LOCAL_CRIT]); + if (err) + return err; - /* prevent integer overflow/underflow */ - val = clamp_val(val, -128000l, 255000l); + data->temp[index] = val; - data->temp_hyst = hyst_to_reg(temp - val); - err = i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST, - data->temp_hyst); - return err; + return 0; } -static const u8 lm90_temp_index[3] = { +static const u8 lm90_temp_index[MAX_CHANNELS] = { LOCAL_TEMP, REMOTE_TEMP, REMOTE2_TEMP }; -static const u8 lm90_temp_min_index[3] = { +static const u8 lm90_temp_min_index[MAX_CHANNELS] = { LOCAL_LOW, REMOTE_LOW, REMOTE2_LOW }; -static const u8 lm90_temp_max_index[3] = { +static const u8 lm90_temp_max_index[MAX_CHANNELS] = { LOCAL_HIGH, REMOTE_HIGH, REMOTE2_HIGH }; -static const u8 lm90_temp_crit_index[3] = { +static const u8 lm90_temp_crit_index[MAX_CHANNELS] = { LOCAL_CRIT, REMOTE_CRIT, REMOTE2_CRIT }; -static const u8 lm90_temp_emerg_index[3] = { +static const u8 lm90_temp_emerg_index[MAX_CHANNELS] = { LOCAL_EMERG, REMOTE_EMERG, REMOTE2_EMERG }; -static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 }; -static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 }; -static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 }; -static const u8 lm90_crit_alarm_bits_swapped[3] = { 1, 0, 9 }; -static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 }; -static const u8 lm90_fault_bits[3] = { 0, 2, 10 }; +static const s8 lm90_temp_offset_index[MAX_CHANNELS] = { + -1, REMOTE_OFFSET, REMOTE2_OFFSET +}; + +static const u16 lm90_min_alarm_bits[MAX_CHANNELS] = { BIT(5), BIT(3), BIT(11) }; +static const u16 lm90_max_alarm_bits[MAX_CHANNELS] = { BIT(6), BIT(4), BIT(12) }; +static const u16 lm90_crit_alarm_bits[MAX_CHANNELS] = { BIT(0), BIT(1), BIT(9) }; +static const u16 lm90_crit_alarm_bits_swapped[MAX_CHANNELS] = { BIT(1), BIT(0), BIT(9) }; +static const u16 lm90_emergency_alarm_bits[MAX_CHANNELS] = { BIT(15), BIT(13), BIT(14) }; +static const u16 lm90_fault_bits[MAX_CHANNELS] = { BIT(0), BIT(2), BIT(10) }; static int lm90_temp_read(struct device *dev, u32 attr, int channel, long *val) { struct lm90_data *data = dev_get_drvdata(dev); int err; + u16 bit; mutex_lock(&data->update_lock); err = lm90_update_device(dev); @@ -1222,56 +1519,57 @@ static int lm90_temp_read(struct device *dev, u32 attr, int channel, long *val) switch (attr) { case hwmon_temp_input: - *val = lm90_get_temp11(data, lm90_temp_index[channel]); + *val = lm90_get_temp(data, lm90_temp_index[channel], channel); break; case hwmon_temp_min_alarm: - *val = (data->alarms >> lm90_min_alarm_bits[channel]) & 1; - break; case hwmon_temp_max_alarm: - *val = (data->alarms >> lm90_max_alarm_bits[channel]) & 1; - break; case hwmon_temp_crit_alarm: - if (data->flags & LM90_HAVE_CRIT_ALRM_SWP) - *val = (data->alarms >> lm90_crit_alarm_bits_swapped[channel]) & 1; - else - *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1; - break; case hwmon_temp_emergency_alarm: - *val = (data->alarms >> lm90_emergency_alarm_bits[channel]) & 1; - break; case hwmon_temp_fault: - *val = (data->alarms >> lm90_fault_bits[channel]) 
& 1; + switch (attr) { + case hwmon_temp_min_alarm: + bit = lm90_min_alarm_bits[channel]; + break; + case hwmon_temp_max_alarm: + bit = lm90_max_alarm_bits[channel]; + break; + case hwmon_temp_crit_alarm: + if (data->flags & LM90_HAVE_CRIT_ALRM_SWP) + bit = lm90_crit_alarm_bits_swapped[channel]; + else + bit = lm90_crit_alarm_bits[channel]; + break; + case hwmon_temp_emergency_alarm: + bit = lm90_emergency_alarm_bits[channel]; + break; + case hwmon_temp_fault: + bit = lm90_fault_bits[channel]; + break; + } + *val = !!(data->alarms & bit); + data->alarms &= ~bit; + data->alarms |= data->current_alarms; break; case hwmon_temp_min: - if (channel == 0) - *val = lm90_get_temp8(data, - lm90_temp_min_index[channel]); - else - *val = lm90_get_temp11(data, - lm90_temp_min_index[channel]); + *val = lm90_get_temp(data, lm90_temp_min_index[channel], channel); break; case hwmon_temp_max: - if (channel == 0) - *val = lm90_get_temp8(data, - lm90_temp_max_index[channel]); - else - *val = lm90_get_temp11(data, - lm90_temp_max_index[channel]); + *val = lm90_get_temp(data, lm90_temp_max_index[channel], channel); break; case hwmon_temp_crit: - *val = lm90_get_temp8(data, lm90_temp_crit_index[channel]); + *val = lm90_get_temp(data, lm90_temp_crit_index[channel], channel); break; case hwmon_temp_crit_hyst: - *val = lm90_get_temphyst(data, lm90_temp_crit_index[channel]); + *val = lm90_get_temphyst(data, lm90_temp_crit_index[channel], channel); break; case hwmon_temp_emergency: - *val = lm90_get_temp8(data, lm90_temp_emerg_index[channel]); + *val = lm90_get_temp(data, lm90_temp_emerg_index[channel], channel); break; case hwmon_temp_emergency_hyst: - *val = lm90_get_temphyst(data, lm90_temp_emerg_index[channel]); + *val = lm90_get_temphyst(data, lm90_temp_emerg_index[channel], channel); break; case hwmon_temp_offset: - *val = lm90_get_temp11(data, REMOTE_OFFSET); + *val = lm90_get_temp_offset(data, lm90_temp_offset_index[channel]); break; default: return -EOPNOTSUPP; @@ -1292,36 +1590,27 @@ static int lm90_temp_write(struct device *dev, u32 attr, int channel, long val) switch (attr) { case hwmon_temp_min: - if (channel == 0) - err = lm90_set_temp8(data, - lm90_temp_min_index[channel], - val); - else - err = lm90_set_temp11(data, - lm90_temp_min_index[channel], - val); + err = lm90_set_temp(data, lm90_temp_min_index[channel], + channel, val); break; case hwmon_temp_max: - if (channel == 0) - err = lm90_set_temp8(data, - lm90_temp_max_index[channel], - val); - else - err = lm90_set_temp11(data, - lm90_temp_max_index[channel], - val); + err = lm90_set_temp(data, lm90_temp_max_index[channel], + channel, val); break; case hwmon_temp_crit: - err = lm90_set_temp8(data, lm90_temp_crit_index[channel], val); + err = lm90_set_temp(data, lm90_temp_crit_index[channel], + channel, val); break; case hwmon_temp_crit_hyst: err = lm90_set_temphyst(data, val); break; case hwmon_temp_emergency: - err = lm90_set_temp8(data, lm90_temp_emerg_index[channel], val); + err = lm90_set_temp(data, lm90_temp_emerg_index[channel], + channel, val); break; case hwmon_temp_offset: - err = lm90_set_temp11(data, REMOTE_OFFSET, val); + err = lm90_set_temp_offset(data, lm90_temp_offset_index[channel], + channel, val); break; default: err = -EOPNOTSUPP; @@ -1343,6 +1632,7 @@ static umode_t lm90_temp_is_visible(const void *data, u32 attr, int channel) case hwmon_temp_emergency_alarm: case hwmon_temp_emergency_hyst: case hwmon_temp_fault: + case hwmon_temp_label: return 0444; case hwmon_temp_min: case hwmon_temp_max: @@ -1377,6 +1667,28 @@ static int 
lm90_chip_read(struct device *dev, u32 attr, int channel, long *val) case hwmon_chip_alarms: *val = data->alarms; break; + case hwmon_chip_temp_samples: + if (data->faultqueue_mask) { + *val = (data->config & data->faultqueue_mask) ? + data->faultqueue_depth : 1; + } else { + switch (data->conalert & 0x0e) { + case 0x0: + default: + *val = 1; + break; + case 0x2: + *val = 2; + break; + case 0x6: + *val = 3; + break; + case 0xe: + *val = 4; + break; + } + } + break; default: return -EOPNOTSUPP; } @@ -1401,6 +1713,9 @@ static int lm90_chip_write(struct device *dev, u32 attr, int channel, long val) err = lm90_set_convrate(client, data, clamp_val(val, 0, 100000)); break; + case hwmon_chip_temp_samples: + err = lm90_set_faultqueue(client, data, clamp_val(val, 1, 4)); + break; default: err = -EOPNOTSUPP; break; @@ -1415,6 +1730,7 @@ static umode_t lm90_chip_is_visible(const void *data, u32 attr, int channel) { switch (attr) { case hwmon_chip_update_interval: + case hwmon_chip_temp_samples: return 0644; case hwmon_chip_alarms: return 0444; @@ -1436,6 +1752,16 @@ static int lm90_read(struct device *dev, enum hwmon_sensor_types type, } } +static int lm90_read_string(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + struct lm90_data *data = dev_get_drvdata(dev); + + *str = data->channel_label[channel]; + + return 0; +} + static int lm90_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val) { @@ -1462,125 +1788,359 @@ static umode_t lm90_is_visible(const void *data, enum hwmon_sensor_types type, } } -/* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm90_detect(struct i2c_client *client, - struct i2c_board_info *info) +static const char *lm90_detect_lm84(struct i2c_client *client) { - struct i2c_adapter *adapter = client->adapter; + static const u8 regs[] = { + LM90_REG_STATUS, LM90_REG_LOCAL_TEMP, LM90_REG_LOCAL_HIGH, + LM90_REG_REMOTE_TEMPH, LM90_REG_REMOTE_HIGHH + }; + int status = i2c_smbus_read_byte_data(client, LM90_REG_STATUS); + int reg1, reg2, reg3, reg4; + bool nonzero = false; + u8 ff = 0xff; + int i; + + if (status < 0 || (status & 0xab)) + return NULL; + + /* + * For LM84, undefined registers return the most recent value. + * Repeat several times, each time checking against a different + * (presumably) existing register. + */ + for (i = 0; i < ARRAY_SIZE(regs); i++) { + reg1 = i2c_smbus_read_byte_data(client, regs[i]); + reg2 = i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_TEMPL); + reg3 = i2c_smbus_read_byte_data(client, LM90_REG_LOCAL_LOW); + reg4 = i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_LOWH); + + if (reg1 < 0) + return NULL; + + /* If any register has a different value, this is not an LM84 */ + if (reg2 != reg1 || reg3 != reg1 || reg4 != reg1) + return NULL; + + nonzero |= reg1 || reg2 || reg3 || reg4; + ff &= reg1; + } + /* + * If all registers always returned 0 or 0xff, all bets are off, + * and we can not make any predictions about the chip type. + */ + return nonzero && ff != 0xff ? "lm84" : NULL; +} + +static const char *lm90_detect_max1617(struct i2c_client *client, int config1) +{ + int status = i2c_smbus_read_byte_data(client, LM90_REG_STATUS); + int llo, rlo, lhi, rhi; + + if (status < 0 || (status & 0x03)) + return NULL; + + if (config1 & 0x3f) + return NULL; + + /* + * Fail if unsupported registers return anything but 0xff. + * The calling code already checked man_id and chip_id. 
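+	 * All registers probed below are in the extended register range
+	 * (address 0x10 and up), which this chip does not implement, so
+	 * each probe is expected to return 0xff.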
+ * A byte read operation repeats the most recent read operation + * and should also return 0xff. + */ + if (i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_TEMPL) != 0xff || + i2c_smbus_read_byte_data(client, MAX6657_REG_LOCAL_TEMPL) != 0xff || + i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_LOWL) != 0xff || + i2c_smbus_read_byte(client) != 0xff) + return NULL; + + llo = i2c_smbus_read_byte_data(client, LM90_REG_LOCAL_LOW); + rlo = i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_LOWH); + + lhi = i2c_smbus_read_byte_data(client, LM90_REG_LOCAL_HIGH); + rhi = i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_HIGHH); + + if (llo < 0 || rlo < 0) + return NULL; + + /* + * A byte read operation repeats the most recent read and should + * return the same value. + */ + if (i2c_smbus_read_byte(client) != rhi) + return NULL; + + /* + * The following two checks are marginal since the checked values + * are strictly speaking valid. + */ + + /* fail for negative high limits; this also catches read errors */ + if ((s8)lhi < 0 || (s8)rhi < 0) + return NULL; + + /* fail if low limits are larger than or equal to high limits */ + if ((s8)llo >= lhi || (s8)rlo >= rhi) + return NULL; + + if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) { + /* + * Word read operations return 0xff in second byte + */ + if (i2c_smbus_read_word_data(client, LM90_REG_REMOTE_TEMPL) != + 0xffff) + return NULL; + if (i2c_smbus_read_word_data(client, LM90_REG_CONFIG1) != + (config1 | 0xff00)) + return NULL; + if (i2c_smbus_read_word_data(client, LM90_REG_LOCAL_HIGH) != + (lhi | 0xff00)) + return NULL; + } + + return "max1617"; +} + +static const char *lm90_detect_national(struct i2c_client *client, int chip_id, + int config1, int convrate) +{ + int config2 = i2c_smbus_read_byte_data(client, LM90_REG_CONFIG2); int address = client->addr; const char *name = NULL; - int man_id, chip_id, config1, config2, convrate; - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - return -ENODEV; + if (config2 < 0) + return NULL; - /* detection and identification */ - man_id = i2c_smbus_read_byte_data(client, LM90_REG_R_MAN_ID); - chip_id = i2c_smbus_read_byte_data(client, LM90_REG_R_CHIP_ID); - config1 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG1); - convrate = i2c_smbus_read_byte_data(client, LM90_REG_R_CONVRATE); - if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0) - return -ENODEV; + if ((config1 & 0x2a) || (config2 & 0xf8) || convrate > 0x09) + return NULL; - if (man_id == 0x01 || man_id == 0x5C || man_id == 0xA1) { - config2 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG2); - if (config2 < 0) - return -ENODEV; + if (address != 0x4c && address != 0x4d) + return NULL; + + switch (chip_id & 0xf0) { + case 0x10: /* LM86 */ + if (address == 0x4c) + name = "lm86"; + break; + case 0x20: /* LM90 */ + if (address == 0x4c) + name = "lm90"; + break; + case 0x30: /* LM89/LM99 */ + name = "lm99"; /* detect LM89 as LM99 */ + break; + default: + break; } - if ((address == 0x4C || address == 0x4D) - && man_id == 0x01) { /* National Semiconductor */ - if ((config1 & 0x2A) == 0x00 - && (config2 & 0xF8) == 0x00 - && convrate <= 0x09) { - if (address == 0x4C - && (chip_id & 0xF0) == 0x20) { /* LM90 */ - name = "lm90"; - } else - if ((chip_id & 0xF0) == 0x30) { /* LM89/LM99 */ - name = "lm99"; - dev_info(&adapter->dev, - "Assuming LM99 chip at 0x%02x\n", - address); - dev_info(&adapter->dev, - "If it is an LM89, instantiate it " - "with the new_device sysfs " - "interface\n"); - } else - if 
(address == 0x4C - && (chip_id & 0xF0) == 0x10) { /* LM86 */ - name = "lm86"; - } - } - } else - if ((address == 0x4C || address == 0x4D) - && man_id == 0x41) { /* Analog Devices */ - if ((chip_id & 0xF0) == 0x40 /* ADM1032 */ - && (config1 & 0x3F) == 0x00 - && convrate <= 0x0A) { + return name; +} + +static const char *lm90_detect_on(struct i2c_client *client, int chip_id, int config1, + int convrate) +{ + int address = client->addr; + const char *name = NULL; + + switch (chip_id) { + case 0xca: /* NCT218 */ + if ((address == 0x4c || address == 0x4d) && !(config1 & 0x1b) && + convrate <= 0x0a) + name = "nct218"; + break; + default: + break; + } + return name; +} + +static const char *lm90_detect_analog(struct i2c_client *client, bool common_address, + int chip_id, int config1, int convrate) +{ + int status = i2c_smbus_read_byte_data(client, LM90_REG_STATUS); + int config2 = i2c_smbus_read_byte_data(client, ADT7481_REG_CONFIG2); + int man_id2 = i2c_smbus_read_byte_data(client, ADT7481_REG_MAN_ID); + int chip_id2 = i2c_smbus_read_byte_data(client, ADT7481_REG_CHIP_ID); + int address = client->addr; + const char *name = NULL; + + if (status < 0 || config2 < 0 || man_id2 < 0 || chip_id2 < 0) + return NULL; + + /* + * The following chips should be detected by this function. Known + * register values are listed. Registers 0x3d .. 0x3e are undocumented + * for most of the chips, yet appear to return a well defined value. + * Register 0xff is undocumented for some of the chips. Register 0x3f + * is undocumented for all chips, but also returns a well defined value. + * Values are as reported from real chips unless mentioned otherwise. + * The code below checks values for registers 0x3d, 0x3e, and 0xff, + * but not for register 0x3f. + * + * Chip Register + * 3d 3e 3f fe ff Notes + * ---------------------------------------------------------- + * adm1020 00 00 00 41 39 + * adm1021 00 00 00 41 03 + * adm1021a 00 00 00 41 3c + * adm1023 00 00 00 41 3c same as adm1021a + * adm1032 00 00 00 41 42 + * + * adt7421 21 41 04 41 04 + * adt7461 00 00 00 41 51 + * adt7461a 61 41 05 41 57 + * adt7481 81 41 02 41 62 + * adt7482 - - - 41 65 datasheet + * 82 41 05 41 75 real chip + * adt7483 83 41 04 41 94 + * + * nct72 61 41 07 41 55 + * nct210 00 00 00 41 3f + * nct214 61 41 08 41 5a + * nct1008 - - - 41 57 datasheet rev. 3 + * 61 41 06 41 54 real chip + * + * nvt210 - - - 41 - datasheet + * nvt211 - - - 41 - datasheet + */ + switch (chip_id) { + case 0x00 ... 0x03: /* ADM1021 */ + case 0x05 ... 0x0f: + if (man_id2 == 0x00 && chip_id2 == 0x00 && common_address && + !(status & 0x03) && !(config1 & 0x3f) && !(convrate & 0xf8)) + name = "adm1021"; + break; + case 0x04: /* ADT7421 (undocumented) */ + if (man_id2 == 0x41 && chip_id2 == 0x21 && + (address == 0x4c || address == 0x4d) && + (config1 & 0x0b) == 0x08 && convrate <= 0x0a) + name = "adt7421"; + break; + case 0x30 ... 0x38: /* ADM1021A, ADM1023 */ + case 0x3a ... 0x3e: + /* + * ADM1021A and compatible chips will be mis-detected as + * ADM1023. Chips labeled 'ADM1021A' and 'ADM1023' were both + * found to have a Chip ID of 0x3c. + * ADM1021A does not officially support low byte registers + * (0x12 .. 0x14), but a chip labeled ADM1021A does support it. + * Official support for the temperature offset high byte + * register (0x11) was added to revision F of the ADM1021A + * datasheet. + * It is currently unknown if there is a means to distinguish + * ADM1021A from ADM1023, and/or if revisions of ADM1021A exist + * which differ in functionality from ADM1023. 
+ */ + if (man_id2 == 0x00 && chip_id2 == 0x00 && common_address && + !(status & 0x03) && !(config1 & 0x3f) && !(convrate & 0xf8)) + name = "adm1023"; + break; + case 0x39: /* ADM1020 (undocumented) */ + if (man_id2 == 0x00 && chip_id2 == 0x00 && + (address == 0x4c || address == 0x4d || address == 0x4e) && + !(status & 0x03) && !(config1 & 0x3f) && !(convrate & 0xf8)) + name = "adm1020"; + break; + case 0x3f: /* NCT210 */ + if (man_id2 == 0x00 && chip_id2 == 0x00 && common_address && + !(status & 0x03) && !(config1 & 0x3f) && !(convrate & 0xf8)) + name = "nct210"; + break; + case 0x40 ... 0x4f: /* ADM1032 */ + if (man_id2 == 0x00 && chip_id2 == 0x00 && + (address == 0x4c || address == 0x4d) && !(config1 & 0x3f) && + convrate <= 0x0a) name = "adm1032"; - /* - * The ADM1032 supports PEC, but only if combined - * transactions are not used. - */ - if (i2c_check_functionality(adapter, - I2C_FUNC_SMBUS_BYTE)) - info->flags |= I2C_CLIENT_PEC; - } else - if (chip_id == 0x51 /* ADT7461 */ - && (config1 & 0x1B) == 0x00 - && convrate <= 0x0A) { + break; + case 0x51: /* ADT7461 */ + if (man_id2 == 0x00 && chip_id2 == 0x00 && + (address == 0x4c || address == 0x4d) && !(config1 & 0x1b) && + convrate <= 0x0a) name = "adt7461"; - } else - if (chip_id == 0x57 /* ADT7461A, NCT1008 */ - && (config1 & 0x1B) == 0x00 - && convrate <= 0x0A) { + break; + case 0x54: /* NCT1008 */ + if (man_id2 == 0x41 && chip_id2 == 0x61 && + (address == 0x4c || address == 0x4d) && !(config1 & 0x1b) && + convrate <= 0x0a) + name = "nct1008"; + break; + case 0x55: /* NCT72 */ + if (man_id2 == 0x41 && chip_id2 == 0x61 && + (address == 0x4c || address == 0x4d) && !(config1 & 0x1b) && + convrate <= 0x0a) + name = "nct72"; + break; + case 0x57: /* ADT7461A, NCT1008 (datasheet rev. 3) */ + if (man_id2 == 0x41 && chip_id2 == 0x61 && + (address == 0x4c || address == 0x4d) && !(config1 & 0x1b) && + convrate <= 0x0a) name = "adt7461a"; + break; + case 0x5a: /* NCT214 */ + if (man_id2 == 0x41 && chip_id2 == 0x61 && + common_address && !(config1 & 0x1b) && convrate <= 0x0a) + name = "nct214"; + break; + case 0x62: /* ADT7481, undocumented */ + if (man_id2 == 0x41 && chip_id2 == 0x81 && + (address == 0x4b || address == 0x4c) && !(config1 & 0x10) && + !(config2 & 0x7f) && (convrate & 0x0f) <= 0x0b) { + name = "adt7481"; } - } else - if (man_id == 0x4D) { /* Maxim */ - int emerg, emerg2, status2; + break; + case 0x65: /* ADT7482, datasheet */ + case 0x75: /* ADT7482, real chip */ + if (man_id2 == 0x41 && chip_id2 == 0x82 && + address == 0x4c && !(config1 & 0x10) && !(config2 & 0x7f) && + convrate <= 0x0a) + name = "adt7482"; + break; + case 0x94: /* ADT7483 */ + if (man_id2 == 0x41 && chip_id2 == 0x83 && + common_address && + ((address >= 0x18 && address <= 0x1a) || + (address >= 0x29 && address <= 0x2b) || + (address >= 0x4c && address <= 0x4e)) && + !(config1 & 0x10) && !(config2 & 0x7f) && convrate <= 0x0a) + name = "adt7483a"; + break; + default: + break; + } + + return name; +} + +static const char *lm90_detect_maxim(struct i2c_client *client, bool common_address, + int chip_id, int config1, int convrate) +{ + int man_id, emerg, emerg2, status2; + int address = client->addr; + const char *name = NULL; + + switch (chip_id) { + case 0x01: + if (!common_address) + break; /* - * We read MAX6659_REG_R_REMOTE_EMERG twice, and re-read - * LM90_REG_R_MAN_ID in between. If MAX6659_REG_R_REMOTE_EMERG + * We read MAX6659_REG_REMOTE_EMERG twice, and re-read + * LM90_REG_MAN_ID in between. 
If MAX6659_REG_REMOTE_EMERG
 		 * exists, both readings will reflect the same value. Otherwise,
 		 * the readings will be different.
 		 */
 		emerg = i2c_smbus_read_byte_data(client,
-						 MAX6659_REG_R_REMOTE_EMERG);
+						 MAX6659_REG_REMOTE_EMERG);
 		man_id = i2c_smbus_read_byte_data(client,
-						  LM90_REG_R_MAN_ID);
+						  LM90_REG_MAN_ID);
 		emerg2 = i2c_smbus_read_byte_data(client,
-						  MAX6659_REG_R_REMOTE_EMERG);
+						  MAX6659_REG_REMOTE_EMERG);
 		status2 = i2c_smbus_read_byte_data(client,
-						   MAX6696_REG_R_STATUS2);
+						   MAX6696_REG_STATUS2);
 
 		if (emerg < 0 || man_id < 0 || emerg2 < 0 || status2 < 0)
-			return -ENODEV;
+			return NULL;
 
 		/*
-		 * The MAX6657, MAX6658 and MAX6659 do NOT have a chip_id
-		 * register. Reading from that address will return the last
-		 * read value, which in our case is that of the man_id
-		 * register. Likewise, the config1 register seems to lack a
-		 * low nibble, so the value will be that of the previous
-		 * read, so in our case that of the man_id register.
-		 * MAX6659 has a third set of upper temperature limit registers.
-		 * Those registers also return values on MAX6657 and MAX6658,
-		 * thus the only way to detect MAX6659 is by its address.
-		 * For this reason it will be mis-detected as MAX6657 if its
-		 * address is 0x4C.
-		 */
-		if (chip_id == man_id
-		    && (address == 0x4C || address == 0x4D || address == 0x4E)
-		    && (config1 & 0x1F) == (man_id & 0x0F)
-		    && convrate <= 0x09) {
-			if (address == 0x4C)
-				name = "max6657";
-			else
-				name = "max6659";
-		} else
-		/*
 		 * Even though MAX6695 and MAX6696 do not have a chip ID
 		 * register, reading it returns 0x01. Bit 4 of the config1
 		 * register is unused and should return zero when read. Bit 0 of
@@ -1591,90 +2151,288 @@ static int lm90_detect(struct i2c_client *client,
 		 * limit registers. We can detect those chips by checking if
 		 * one of those registers exists.
 		 */
-		if (chip_id == 0x01
-		    && (config1 & 0x10) == 0x00
-		    && (status2 & 0x01) == 0x00
-		    && emerg == emerg2
-		    && convrate <= 0x07) {
+		if (!(config1 & 0x10) && !(status2 & 0x01) && emerg == emerg2 &&
+		    convrate <= 0x07)
 			name = "max6696";
-		} else
 		/*
 		 * The chip_id register of the MAX6680 and MAX6681 holds the
 		 * revision of the chip. The lowest bit of the config1 register
 		 * is unused and should return zero when read, so should the
-		 * second to last bit of config1 (software reset).
+		 * second to last bit of config1 (software reset). Register
+		 * address 0x12 (LM90_REG_REMOTE_OFFSL) exists for this chip and
+		 * should differ from emerg2, and emerg2 should match man_id
+		 * since it does not exist.
 		 */
-		if (chip_id == 0x01
-		    && (config1 & 0x03) == 0x00
-		    && convrate <= 0x07) {
+		else if (!(config1 & 0x03) && convrate <= 0x07 &&
+			 emerg2 == man_id && emerg2 != status2)
 			name = "max6680";
-		} else
 		/*
-		 * The chip_id register of the MAX6646/6647/6649 holds the
-		 * revision of the chip. The lowest 6 bits of the config1
-		 * register are unused and should return zero when read.
+		 * MAX1617A does not have any extended registers (register
+		 * address 0x10 or higher) except for manufacturer and
+		 * device ID registers. Unlike other chips of this series,
+		 * unsupported registers were observed to return a fixed value
+		 * of 0x01.
+		 * Note: Multiple chips with different markings labeled as
+		 * "MAX1617" (no "A") were observed to report manufacturer ID
+		 * 0x4d and device ID 0x01. It is unknown if other variants of
+		 * MAX1617/MAX1617A with different behavior exist. The detection
+		 * code below works for those chips.
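+		 * In other words, the code below only accepts a chip as
+		 * MAX1617 if all three probed extended registers read back
+		 * as exactly 0x01.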
+		 */
-		if (chip_id == 0x59
-		    && (config1 & 0x3f) == 0x00
-		    && convrate <= 0x07) {
-			name = "max6646";
-		} else
+		else if (!(config1 & 0x3f) && convrate <= 0x07 &&
+			 emerg == 0x01 && emerg2 == 0x01 && status2 == 0x01)
+			name = "max1617";
+		break;
+	case 0x08:
 		/*
 		 * The chip_id of the MAX6654 holds the revision of the chip.
 		 * The lowest 3 bits of the config1 register are unused and
 		 * should return zero when read.
 		 */
-		if (chip_id == 0x08
-		    && (config1 & 0x07) == 0x00
-		    && convrate <= 0x07) {
+		if (common_address && !(config1 & 0x07) && convrate <= 0x07)
 			name = "max6654";
+		break;
+	case 0x09:
+		/*
+		 * The chip_id of the MAX6690 holds the revision of the chip.
+		 * The lowest 3 bits of the config1 register are unused and
+		 * should return zero when read.
+		 * Note that MAX6654 and MAX6690 are practically the same chips.
+		 * The only difference is the rated accuracy. Rev. 1 of the
+		 * MAX6690 datasheet lists a chip ID of 0x08, and a chip labeled
+		 * MAX6654 was observed to have a chip ID of 0x09.
+		 */
+		if (common_address && !(config1 & 0x07) && convrate <= 0x07)
+			name = "max6690";
+		break;
+	case 0x4d:
+		/*
+		 * MAX6642, MAX6657, MAX6658 and MAX6659 do NOT have a chip_id
+		 * register. Reading from that address will return the last
+		 * read value, which in our case is that of the man_id
+		 * register, or 0x4d.
+		 * MAX6642 does not have a conversion rate register, nor low
+		 * limit registers. Reading from those registers returns the
+		 * last read value.
+		 *
+		 * For MAX6657, MAX6658 and MAX6659, the config1 register lacks
+		 * a low nibble, so the value will be that of the previous
+		 * read, in our case again that of the man_id register.
+		 * MAX6659 has a third set of upper temperature limit registers.
+		 * Those registers also return values on MAX6657 and MAX6658,
+		 * thus the only way to detect MAX6659 is by its address.
+		 * For this reason it will be mis-detected as MAX6657 if its
+		 * address is 0x4c.
+		 */
+		if (address >= 0x48 && address <= 0x4f && config1 == convrate &&
+		    !(config1 & 0x0f)) {
+			int regval;
+
+			/*
+			 * We know that this is not a MAX6657/58/59 because its
+			 * configuration register has the wrong value and it does
+			 * not appear to have a conversion rate register.
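+			 * Example: right after reading the manufacturer ID
+			 * register (0x4d), reads of the non-existing
+			 * conversion rate and low limit registers are
+			 * expected to return 0x4d as well; any other value
+			 * means the chip is not a MAX6642.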
+ */ + + /* re-read manufacturer ID to have a good baseline */ + if (i2c_smbus_read_byte_data(client, LM90_REG_MAN_ID) != 0x4d) + break; + + /* check various non-existing registers */ + if (i2c_smbus_read_byte_data(client, LM90_REG_CONVRATE) != 0x4d || + i2c_smbus_read_byte_data(client, LM90_REG_LOCAL_LOW) != 0x4d || + i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_LOWH) != 0x4d) + break; + + /* check for unused status register bits */ + regval = i2c_smbus_read_byte_data(client, LM90_REG_STATUS); + if (regval < 0 || (regval & 0x2b)) + break; + + /* re-check unsupported registers */ + if (i2c_smbus_read_byte_data(client, LM90_REG_CONVRATE) != regval || + i2c_smbus_read_byte_data(client, LM90_REG_LOCAL_LOW) != regval || + i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_LOWH) != regval) + break; + + name = "max6642"; + } else if ((address == 0x4c || address == 0x4d || address == 0x4e) && + (config1 & 0x1f) == 0x0d && convrate <= 0x09) { + if (address == 0x4c) + name = "max6657"; + else + name = "max6659"; } - } else - if (address == 0x4C - && man_id == 0x5C) { /* Winbond/Nuvoton */ - if ((config1 & 0x2A) == 0x00 - && (config2 & 0xF8) == 0x00) { - if (chip_id == 0x01 /* W83L771W/G */ - && convrate <= 0x09) { - name = "w83l771"; - } else - if ((chip_id & 0xFE) == 0x10 /* W83L771AWG/ASG */ - && convrate <= 0x08) { - name = "w83l771"; + break; + case 0x59: + /* + * The chip_id register of the MAX6646/6647/6649 holds the + * revision of the chip. The lowest 6 bits of the config1 + * register are unused and should return zero when read. + * The I2C address of MAX6648/6692 is fixed at 0x4c. + * MAX6646 is at address 0x4d, MAX6647 is at address 0x4e, + * and MAX6649 is at address 0x4c. A slight difference between + * the two sets of chips is that the remote temperature register + * reports different values if the DXP pin is open or shorted. + * We can use that information to help distinguish between the + * chips. MAX6648 will be mis-detected as MAX6649 if the remote + * diode is connected, but there isn't really anything we can + * do about that. + */ + if (!(config1 & 0x3f) && convrate <= 0x07) { + int temp; + + switch (address) { + case 0x4c: + /* + * MAX6649 reports an external temperature + * value of 0xff if DXP is open or shorted. + * MAX6648 reports 0x80 in that case. 
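+					 * A single read of the remote
+					 * temperature register is therefore
+					 * enough to tell the two chips
+					 * apart.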
+					 */
+					temp = i2c_smbus_read_byte_data(client,
+								LM90_REG_REMOTE_TEMPH);
+					if (temp == 0x80)
+						name = "max6648";
+					else
+						name = "max6649";
+					break;
+				case 0x4d:
+					name = "max6646";
+					break;
+				case 0x4e:
+					name = "max6647";
+					break;
+				default:
+					break;
+				}
+			}
-	} else
-	if (address == 0x4C
-	    && man_id == 0x5C) { /* Winbond/Nuvoton */
-		if ((config1 & 0x2A) == 0x00
-		    && (config2 & 0xF8) == 0x00) {
-			if (chip_id == 0x01 /* W83L771W/G */
-			    && convrate <= 0x09) {
-				name = "w83l771";
-			} else
-			if ((chip_id & 0xFE) == 0x10 /* W83L771AWG/ASG */
-			    && convrate <= 0x08) {
-				name = "w83l771";
+		break;
+	default:
+		break;
+	}
+
+	return name;
+}
+
+static const char *lm90_detect_nuvoton(struct i2c_client *client, int chip_id,
+				       int config1, int convrate)
+{
+	int config2 = i2c_smbus_read_byte_data(client, LM90_REG_CONFIG2);
+	int address = client->addr;
+	const char *name = NULL;
+
+	if (config2 < 0)
+		return NULL;
+
+	if (address == 0x4c && !(config1 & 0x2a) && !(config2 & 0xf8)) {
+		if (chip_id == 0x01 && convrate <= 0x09) {
+			/* W83L771W/G */
+			name = "w83l771";
+		} else if ((chip_id & 0xfe) == 0x10 && convrate <= 0x08) {
+			/* W83L771AWG/ASG */
+			name = "w83l771";
 		}
-	} else
-	if (address >= 0x48 && address <= 0x4F
-	    && man_id == 0xA1) { /* NXP Semiconductor/Philips */
-		if (chip_id == 0x00
-		    && (config1 & 0x2A) == 0x00
-		    && (config2 & 0xFE) == 0x00
-		    && convrate <= 0x09) {
-			name = "sa56004";
+	}
+	return name;
+}
+
+static const char *lm90_detect_nxp(struct i2c_client *client, bool common_address,
+				   int chip_id, int config1, int convrate)
+{
+	int address = client->addr;
+	const char *name = NULL;
+	int config2;
+
+	switch (chip_id) {
+	case 0x00:
+		config2 = i2c_smbus_read_byte_data(client, LM90_REG_CONFIG2);
+		if (config2 < 0)
+			return NULL;
+		if (address >= 0x48 && address <= 0x4f &&
+		    !(config1 & 0x2a) && !(config2 & 0xfe) && convrate <= 0x09)
+			name = "sa56004";
+		break;
+	case 0x80:
+		if (common_address && !(config1 & 0x3f) && convrate <= 0x07)
+			name = "ne1618";
+		break;
+	default:
+		break;
+	}
+	return name;
+}
+
-	} else
-	if ((address == 0x4C || address == 0x4D)
-	    && man_id == 0x47) { /* GMT */
-		if (chip_id == 0x01 /* G781 */
-		    && (config1 & 0x3F) == 0x00
-		    && convrate <= 0x08)
-			name = "g781";
-	} else
-	if (man_id == 0x55 && chip_id == 0x00 &&
-	    (config1 & 0x1B) == 0x00 && convrate <= 0x09) {
+static const char *lm90_detect_gmt(struct i2c_client *client, int chip_id,
+				   int config1, int convrate)
+{
+	int address = client->addr;
+
+	/*
+	 * According to the datasheet, G781 is supposed to be at I2C address
+	 * 0x4c and have a chip ID of 0x01. G781-1 is supposed to be at I2C
+	 * address 0x4d and have a chip ID of 0x03. However, when support
+	 * for G781 was added, chips at 0x4c and 0x4d were found to have a
+	 * chip ID of 0x01. A G781-1 at I2C address 0x4d has since been found
+	 * with chip ID 0x03.
+	 * To avoid detection failures, accept chip ID 0x01 and 0x03 at both
+	 * addresses.
+	 * G784 reports manufacturer ID 0x47 and chip ID 0x01. A public
+	 * datasheet is not available. Extensive testing suggests that
+	 * the chip is fully compatible with G781.
+	 * Available register dumps show that G751 also reports manufacturer
+	 * ID 0x47 and chip ID 0x01 even though that chip does not officially
+	 * support those registers. This makes chip detection somewhat
+	 * vulnerable. To improve detection quality, read the offset low byte
+	 * and alert fault queue registers and verify that only expected bits
+	 * are set.
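+	 * (Only bits 7:5 are implemented in the offset low byte, and only
+	 * bits 3:1 in the alert fault queue register; the code below checks
+	 * that all other bits read as zero.)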
+	 */
+	if ((chip_id == 0x01 || chip_id == 0x03) &&
+	    (address == 0x4c || address == 0x4d) &&
+	    !(config1 & 0x3f) && convrate <= 0x08) {
+		int reg;
+
+		reg = i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_OFFSL);
+		if (reg < 0 || reg & 0x1f)
+			return NULL;
+		reg = i2c_smbus_read_byte_data(client, TMP451_REG_CONALERT);
+		if (reg < 0 || reg & 0xf1)
+			return NULL;
+
+		return "g781";
+	}
+
+	return NULL;
+}
+
+static const char *lm90_detect_ti49(struct i2c_client *client, bool common_address,
+				    int chip_id, int config1, int convrate)
+{
+	if (common_address && chip_id == 0x00 && !(config1 & 0x3f) && !(convrate & 0xf8)) {
+		/* THMC10: Unsupported registers return 0xff */
+		if (i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_TEMPL) == 0xff &&
+		    i2c_smbus_read_byte_data(client, LM90_REG_REMOTE_CRIT) == 0xff)
+			return "thmc10";
+	}
+	return NULL;
+}
+
+static const char *lm90_detect_ti(struct i2c_client *client, int chip_id,
+				  int config1, int convrate)
+{
+	int address = client->addr;
+	const char *name = NULL;
+
+	if (chip_id == 0x00 && !(config1 & 0x1b) && convrate <= 0x09) {
 		int local_ext, conalert, chen, dfc;
 
 		local_ext = i2c_smbus_read_byte_data(client,
-						     TMP451_REG_R_LOCAL_TEMPL);
+						     TMP451_REG_LOCAL_TEMPL);
 		conalert = i2c_smbus_read_byte_data(client,
 						    TMP451_REG_CONALERT);
 		chen = i2c_smbus_read_byte_data(client, TMP461_REG_CHEN);
 		dfc = i2c_smbus_read_byte_data(client, TMP461_REG_DFC);
 
-		if ((local_ext & 0x0F) == 0x00 &&
-		    (conalert & 0xf1) == 0x01 &&
-		    (chen & 0xfc) == 0x00 &&
-		    (dfc & 0xfc) == 0x00) {
+		if (!(local_ext & 0x0f) && (conalert & 0xf1) == 0x01 &&
+		    (chen & 0xfc) == 0x00 && (dfc & 0xfc) == 0x00) {
 			if (address == 0x4c && !(chen & 0x03))
 				name = "tmp451";
 			else if (address >= 0x48 && address <= 0x4f)
@@ -1682,10 +2440,110 @@ static int lm90_detect(struct i2c_client *client,
 				name = "tmp461";
 		}
 	}
 
+	return name;
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+	struct i2c_adapter *adapter = client->adapter;
+	int man_id, chip_id, config1, convrate, lhigh;
+	const char *name = NULL;
+	int address = client->addr;
+	bool common_address =
+		(address >= 0x18 && address <= 0x1a) ||
+		(address >= 0x29 && address <= 0x2b) ||
+		(address >= 0x4c && address <= 0x4e);
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
+
+	/*
+	 * Get well defined register value for chips with neither man_id nor
+	 * chip_id registers.
+	 */
+	lhigh = i2c_smbus_read_byte_data(client, LM90_REG_LOCAL_HIGH);
+
+	/* detection and identification */
+	man_id = i2c_smbus_read_byte_data(client, LM90_REG_MAN_ID);
+	chip_id = i2c_smbus_read_byte_data(client, LM90_REG_CHIP_ID);
+	config1 = i2c_smbus_read_byte_data(client, LM90_REG_CONFIG1);
+	convrate = i2c_smbus_read_byte_data(client, LM90_REG_CONVRATE);
+	if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0 || lhigh < 0)
+		return -ENODEV;
+
+	/* Bail out immediately if all registers report the same value */
+	if (lhigh == man_id && lhigh == chip_id && lhigh == config1 && lhigh == convrate)
+		return -ENODEV;
+
+	/*
+	 * If reading man_id and chip_id both return the same value as lhigh,
+	 * the chip may not support those registers and return the most recent read
+	 * value. Check again with a different register and handle accordingly.
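+	 * If the re-read values still track the most recent read, the chip
+	 * is assumed to have no identification registers at all; man_id is
+	 * then set to -1 to flag that case for the switch below.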
+ */ + if (man_id == lhigh && chip_id == lhigh) { + convrate = i2c_smbus_read_byte_data(client, LM90_REG_CONVRATE); + man_id = i2c_smbus_read_byte_data(client, LM90_REG_MAN_ID); + chip_id = i2c_smbus_read_byte_data(client, LM90_REG_CHIP_ID); + if (convrate < 0 || man_id < 0 || chip_id < 0) + return -ENODEV; + if (man_id == convrate && chip_id == convrate) + man_id = -1; + } + switch (man_id) { + case -1: /* Chip does not support man_id / chip_id */ + if (common_address && !convrate && !(config1 & 0x7f)) + name = lm90_detect_lm84(client); + break; + case 0x01: /* National Semiconductor */ + name = lm90_detect_national(client, chip_id, config1, convrate); + break; + case 0x1a: /* ON */ + name = lm90_detect_on(client, chip_id, config1, convrate); + break; + case 0x23: /* Genesys Logic */ + if (common_address && !(config1 & 0x3f) && !(convrate & 0xf8)) + name = "gl523sm"; + break; + case 0x41: /* Analog Devices */ + name = lm90_detect_analog(client, common_address, chip_id, config1, + convrate); + break; + case 0x47: /* GMT */ + name = lm90_detect_gmt(client, chip_id, config1, convrate); + break; + case 0x49: /* TI */ + name = lm90_detect_ti49(client, common_address, chip_id, config1, convrate); + break; + case 0x4d: /* Maxim Integrated */ + name = lm90_detect_maxim(client, common_address, chip_id, + config1, convrate); + break; + case 0x54: /* ON MC1066, Microchip TC1068, TCM1617 (originally TelCom) */ + if (common_address && !(config1 & 0x3f) && !(convrate & 0xf8)) + name = "mc1066"; + break; + case 0x55: /* TI */ + name = lm90_detect_ti(client, chip_id, config1, convrate); + break; + case 0x5c: /* Winbond/Nuvoton */ + name = lm90_detect_nuvoton(client, chip_id, config1, convrate); + break; + case 0xa1: /* NXP Semiconductor/Philips */ + name = lm90_detect_nxp(client, common_address, chip_id, config1, convrate); + break; + case 0xff: /* MAX1617, G767, NE1617 */ + if (common_address && chip_id == 0xff && convrate < 8) + name = lm90_detect_max1617(client, config1); + break; + default: + break; + } + + if (!name) { /* identification failed */ dev_dbg(&adapter->dev, - "Unsupported chip at 0x%02x (man_id=0x%02X, " - "chip_id=0x%02X)\n", address, man_id, chip_id); + "Unsupported chip at 0x%02x (man_id=0x%02X, chip_id=0x%02X)\n", + client->addr, man_id, chip_id); return -ENODEV; } @@ -1699,10 +2557,13 @@ static void lm90_restore_conf(void *_data) struct lm90_data *data = _data; struct i2c_client *client = data->client; + cancel_delayed_work_sync(&data->alert_work); + cancel_work_sync(&data->report_work); + /* Restore initial configuration */ - lm90_write_convrate(data, data->convrate_orig); - i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, - data->config_orig); + if (data->flags & LM90_HAVE_CONVRATE) + lm90_write_convrate(data, data->convrate_orig); + lm90_write_reg(client, LM90_REG_CONFIG1, data->config_orig); } static int lm90_init_client(struct i2c_client *client, struct lm90_data *data) @@ -1710,35 +2571,39 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data) struct device_node *np = client->dev.of_node; int config, convrate; - convrate = lm90_read_reg(client, LM90_REG_R_CONVRATE); - if (convrate < 0) - return convrate; - data->convrate_orig = convrate; + if (data->flags & LM90_HAVE_CONVRATE) { + convrate = lm90_read_reg(client, LM90_REG_CONVRATE); + if (convrate < 0) + return convrate; + data->convrate_orig = convrate; + lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */ + } else { + data->update_interval = 500; + } /* * Start the 
conversions. */ - config = lm90_read_reg(client, LM90_REG_R_CONFIG1); + config = lm90_read_reg(client, LM90_REG_CONFIG1); if (config < 0) return config; data->config_orig = config; data->config = config; - lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */ - /* Check Temperature Range Select */ if (data->flags & LM90_HAVE_EXTENDED_TEMP) { if (of_property_read_bool(np, "ti,extended-range-enable")) config |= 0x04; - - if (config & 0x04) - data->flags |= LM90_FLAG_ADT7461_EXT; + if (!(config & 0x04)) + data->flags &= ~LM90_HAVE_EXTENDED_TEMP; } /* * Put MAX6680/MAX8881 into extended resolution (bit 0x10, * 0.125 degree resolution) and range (0x08, extend range * to -64 degree) mode for the remote temperature sensor. + * Note that experiments with an actual chip do not show a difference + * if bit 3 is set or not. */ if (data->kind == max6680) config |= 0x18; @@ -1753,9 +2618,9 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data) config |= 0x20; /* - * Select external channel 0 for max6695/96 + * Select external channel 0 for devices with three sensors */ - if (data->kind == max6696) + if (data->flags & LM90_HAVE_TEMP3) config &= ~0x08; /* @@ -1771,73 +2636,23 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data) return devm_add_action_or_reset(&client->dev, lm90_restore_conf, data); } -static bool lm90_is_tripped(struct i2c_client *client, u16 *status) +static bool lm90_is_tripped(struct i2c_client *client) { struct lm90_data *data = i2c_get_clientdata(client); - int st, st2 = 0; - - st = lm90_read_reg(client, LM90_REG_R_STATUS); - if (st < 0) - return false; - - if (data->kind == max6696) { - st2 = lm90_read_reg(client, MAX6696_REG_R_STATUS2); - if (st2 < 0) - return false; - } - - *status = st | (st2 << 8); + int ret; - if ((st & 0x7f) == 0 && (st2 & 0xfe) == 0) + ret = lm90_update_alarms(data, true); + if (ret < 0) return false; - if ((st & (LM90_STATUS_LLOW | LM90_STATUS_LHIGH | LM90_STATUS_LTHRM)) || - (st2 & MAX6696_STATUS2_LOT2)) - dev_dbg(&client->dev, - "temp%d out of range, please check!\n", 1); - if ((st & (LM90_STATUS_RLOW | LM90_STATUS_RHIGH | LM90_STATUS_RTHRM)) || - (st2 & MAX6696_STATUS2_ROT2)) - dev_dbg(&client->dev, - "temp%d out of range, please check!\n", 2); - if (st & LM90_STATUS_ROPEN) - dev_dbg(&client->dev, - "temp%d diode open, please check!\n", 2); - if (st2 & (MAX6696_STATUS2_R2LOW | MAX6696_STATUS2_R2HIGH | - MAX6696_STATUS2_R2THRM | MAX6696_STATUS2_R2OT2)) - dev_dbg(&client->dev, - "temp%d out of range, please check!\n", 3); - if (st2 & MAX6696_STATUS2_R2OPEN) - dev_dbg(&client->dev, - "temp%d diode open, please check!\n", 3); - - if (st & LM90_STATUS_LLOW) - hwmon_notify_event(data->hwmon_dev, hwmon_temp, - hwmon_temp_min_alarm, 0); - if (st & LM90_STATUS_RLOW) - hwmon_notify_event(data->hwmon_dev, hwmon_temp, - hwmon_temp_min_alarm, 1); - if (st2 & MAX6696_STATUS2_R2LOW) - hwmon_notify_event(data->hwmon_dev, hwmon_temp, - hwmon_temp_min_alarm, 2); - if (st & LM90_STATUS_LHIGH) - hwmon_notify_event(data->hwmon_dev, hwmon_temp, - hwmon_temp_max_alarm, 0); - if (st & LM90_STATUS_RHIGH) - hwmon_notify_event(data->hwmon_dev, hwmon_temp, - hwmon_temp_max_alarm, 1); - if (st2 & MAX6696_STATUS2_R2HIGH) - hwmon_notify_event(data->hwmon_dev, hwmon_temp, - hwmon_temp_max_alarm, 2); - - return true; + return !!data->current_alarms; } static irqreturn_t lm90_irq_thread(int irq, void *dev_id) { struct i2c_client *client = dev_id; - u16 status; - if (lm90_is_tripped(client, &status)) + if 
(lm90_is_tripped(client)) return IRQ_HANDLED; else return IRQ_NONE; @@ -1853,10 +2668,79 @@ static void lm90_regulator_disable(void *regulator) regulator_disable(regulator); } +static int lm90_probe_channel_from_dt(struct i2c_client *client, + struct device_node *child, + struct lm90_data *data) +{ + u32 id; + s32 val; + int err; + struct device *dev = &client->dev; + + err = of_property_read_u32(child, "reg", &id); + if (err) { + dev_err(dev, "missing reg property of %pOFn\n", child); + return err; + } + + if (id >= MAX_CHANNELS) { + dev_err(dev, "invalid reg property value %d in %pOFn\n", id, child); + return -EINVAL; + } + + err = of_property_read_string(child, "label", &data->channel_label[id]); + if (err == -ENODATA || err == -EILSEQ) { + dev_err(dev, "invalid label property in %pOFn\n", child); + return err; + } + + if (data->channel_label[id]) + data->channel_config[id] |= HWMON_T_LABEL; + + err = of_property_read_s32(child, "temperature-offset-millicelsius", &val); + if (!err) { + if (id == 0) { + dev_err(dev, "temperature-offset-millicelsius can't be set for internal channel\n"); + return -EINVAL; + } + + err = lm90_set_temp_offset(data, lm90_temp_offset_index[id], id, val); + if (err) { + dev_err(dev, "can't set temperature offset %d for channel %d (%d)\n", + val, id, err); + return err; + } + } + + return 0; +} + +static int lm90_parse_dt_channel_info(struct i2c_client *client, + struct lm90_data *data) +{ + int err; + struct device_node *child; + struct device *dev = &client->dev; + const struct device_node *np = dev->of_node; + + for_each_child_of_node(np, child) { + if (strcmp(child->name, "channel")) + continue; + + err = lm90_probe_channel_from_dt(client, child, data); + if (err) { + of_node_put(child); + return err; + } + } + + return 0; +} static const struct hwmon_ops lm90_ops = { .is_visible = lm90_is_visible, .read = lm90_read, + .read_string = lm90_read_string, .write = lm90_write, }; @@ -1891,41 +2775,63 @@ static int lm90_probe(struct i2c_client *client) data->client = client; i2c_set_clientdata(client, data); mutex_init(&data->update_lock); + INIT_DELAYED_WORK(&data->alert_work, lm90_alert_work); + INIT_WORK(&data->report_work, lm90_report_alarms); /* Set the device type */ if (client->dev.of_node) data->kind = (enum chips)of_device_get_match_data(&client->dev); else data->kind = i2c_match_id(lm90_id, client)->driver_data; - if (data->kind == adm1032) { - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) - client->flags &= ~I2C_CLIENT_PEC; - } /* * Different devices have different alarm bits triggering the * ALERT# output */ data->alert_alarms = lm90_params[data->kind].alert_alarms; + data->resolution = lm90_params[data->kind].resolution ? 
: 11; /* Set chip capabilities */ data->flags = lm90_params[data->kind].flags; + if ((data->flags & (LM90_HAVE_PEC | LM90_HAVE_PARTIAL_PEC)) && + !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_PEC)) + data->flags &= ~(LM90_HAVE_PEC | LM90_HAVE_PARTIAL_PEC); + + if ((data->flags & LM90_HAVE_PARTIAL_PEC) && + !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) + data->flags &= ~LM90_HAVE_PARTIAL_PEC; + data->chip.ops = &lm90_ops; data->chip.info = data->info; - data->info[0] = HWMON_CHANNEL_INFO(chip, - HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL | HWMON_C_ALARMS); + data->info[0] = &data->chip_info; + info = &data->chip_info; + info->type = hwmon_chip; + info->config = data->chip_config; + + data->chip_config[0] = HWMON_C_REGISTER_TZ; + if (data->flags & LM90_HAVE_ALARMS) + data->chip_config[0] |= HWMON_C_ALARMS; + if (data->flags & LM90_HAVE_CONVRATE) + data->chip_config[0] |= HWMON_C_UPDATE_INTERVAL; + if (data->flags & LM90_HAVE_FAULTQUEUE) + data->chip_config[0] |= HWMON_C_TEMP_SAMPLES; data->info[1] = &data->temp_info; info = &data->temp_info; info->type = hwmon_temp; info->config = data->channel_config; - data->channel_config[0] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | - HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM; - data->channel_config[1] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | - HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | HWMON_T_FAULT; + data->channel_config[0] = HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_MAX_ALARM; + data->channel_config[1] = HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_MAX_ALARM | HWMON_T_FAULT; + + if (data->flags & LM90_HAVE_LOW) { + data->channel_config[0] |= HWMON_T_MIN | HWMON_T_MIN_ALARM; + data->channel_config[1] |= HWMON_T_MIN | HWMON_T_MIN_ALARM; + } if (data->flags & LM90_HAVE_CRIT) { data->channel_config[0] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST; @@ -1951,17 +2857,35 @@ static int lm90_probe(struct i2c_client *client) data->channel_config[2] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | HWMON_T_CRIT | HWMON_T_CRIT_HYST | - HWMON_T_EMERGENCY | HWMON_T_EMERGENCY_HYST | HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | - HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY_ALARM | - HWMON_T_FAULT; + HWMON_T_CRIT_ALARM | HWMON_T_FAULT; + if (data->flags & LM90_HAVE_EMERGENCY) { + data->channel_config[2] |= HWMON_T_EMERGENCY | + HWMON_T_EMERGENCY_HYST; + } + if (data->flags & LM90_HAVE_EMERGENCY_ALARM) + data->channel_config[2] |= HWMON_T_EMERGENCY_ALARM; + if (data->flags & LM90_HAVE_OFFSET) + data->channel_config[2] |= HWMON_T_OFFSET; } + data->faultqueue_mask = lm90_params[data->kind].faultqueue_mask; + data->faultqueue_depth = lm90_params[data->kind].faultqueue_depth; data->reg_local_ext = lm90_params[data->kind].reg_local_ext; + if (data->flags & LM90_HAVE_REMOTE_EXT) + data->reg_remote_ext = LM90_REG_REMOTE_TEMPL; + data->reg_status2 = lm90_params[data->kind].reg_status2; /* Set maximum conversion rate */ data->max_convrate = lm90_params[data->kind].max_convrate; + /* Parse device-tree channel information */ + if (client->dev.of_node) { + err = lm90_parse_dt_channel_info(client, data); + if (err) + return err; + } + /* Initialize the LM90 chip */ err = lm90_init_client(client, data); if (err < 0) { @@ -1973,7 +2897,7 @@ static int lm90_probe(struct i2c_client *client) * The 'pec' attribute is attached to the i2c device and thus created * separately. 
*/ - if (client->flags & I2C_CLIENT_PEC) { + if (data->flags & (LM90_HAVE_PEC | LM90_HAVE_PARTIAL_PEC)) { err = device_create_file(dev, &dev_attr_pec); if (err) return err; @@ -2007,12 +2931,10 @@ static int lm90_probe(struct i2c_client *client) static void lm90_alert(struct i2c_client *client, enum i2c_alert_protocol type, unsigned int flag) { - u16 alarms; - if (type != I2C_PROTOCOL_SMBUS_ALERT) return; - if (lm90_is_tripped(client, &alarms)) { + if (lm90_is_tripped(client)) { /* * Disable ALERT# output, because these chips don't implement * SMBus alert correctly; they should only hold the alert line @@ -2021,9 +2943,13 @@ static void lm90_alert(struct i2c_client *client, enum i2c_alert_protocol type, struct lm90_data *data = i2c_get_clientdata(client); if ((data->flags & LM90_HAVE_BROKEN_ALERT) && - (alarms & data->alert_alarms)) { - dev_dbg(&client->dev, "Disabling ALERT#\n"); - lm90_update_confreg(data, data->config | 0x80); + (data->current_alarms & data->alert_alarms)) { + if (!(data->config & 0x80)) { + dev_dbg(&client->dev, "Disabling ALERT#\n"); + lm90_update_confreg(data, data->config | 0x80); + } + schedule_delayed_work(&data->alert_work, + max_t(int, HZ, msecs_to_jiffies(data->update_interval))); } } else { dev_dbg(&client->dev, "Everything OK\n"); diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c index ce2780768074..e093b1998296 100644 --- a/drivers/hwmon/mcp3021.c +++ b/drivers/hwmon/mcp3021.c @@ -7,7 +7,7 @@ * Reworked by Sven Schuchmann <schuchmann@schleissheimer.de> * DT support added by Clemens Gruber <clemens.gruber@pqgruber.com> * - * This driver export the value of analog input voltage to sysfs, the + * This driver exports the value of analog input voltage to sysfs, the * voltage unit is mV. Through the sysfs interface, lm-sensors tool * can also display the input voltage. 
*/ @@ -45,19 +45,29 @@ enum chips { * Client data (each client gets its own) */ struct mcp3021_data { - struct device *hwmon_dev; + struct i2c_client *client; u32 vdd; /* supply and reference voltage in millivolt */ u16 sar_shift; u16 sar_mask; u8 output_res; }; -static int mcp3021_read16(struct i2c_client *client) +static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val) { - struct mcp3021_data *data = i2c_get_clientdata(client); - int ret; - u16 reg; + return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res); +} + +static int mcp3021_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct mcp3021_data *data = dev_get_drvdata(dev); + struct i2c_client *client = data->client; __be16 buf; + u16 reg; + int ret; + + if (type != hwmon_in) + return -EOPNOTSUPP; ret = i2c_master_recv(client, (char *)&buf, 2); if (ret < 0) @@ -74,39 +84,46 @@ static int mcp3021_read16(struct i2c_client *client) */ reg = (reg >> data->sar_shift) & data->sar_mask; - return reg; -} + *val = volts_from_reg(data, reg); -static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val) -{ - return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res); + return 0; } -static ssize_t in0_input_show(struct device *dev, - struct device_attribute *attr, char *buf) +static umode_t mcp3021_is_visible(const void *_data, + enum hwmon_sensor_types type, + u32 attr, int channel) { - struct i2c_client *client = to_i2c_client(dev); - struct mcp3021_data *data = i2c_get_clientdata(client); - int reg, in_input; + if (type != hwmon_in) + return 0; - reg = mcp3021_read16(client); - if (reg < 0) - return reg; + if (attr != hwmon_in_input) + return 0; - in_input = volts_from_reg(data, reg); - - return sprintf(buf, "%d\n", in_input); + return 0444; } -static DEVICE_ATTR_RO(in0_input); +static const struct hwmon_channel_info *mcp3021_info[] = { + HWMON_CHANNEL_INFO(in, HWMON_I_INPUT), + NULL +}; + +static const struct hwmon_ops mcp3021_hwmon_ops = { + .is_visible = mcp3021_is_visible, + .read = mcp3021_read, +}; + +static const struct hwmon_chip_info mcp3021_chip_info = { + .ops = &mcp3021_hwmon_ops, + .info = mcp3021_info, +}; static const struct i2c_device_id mcp3021_id[]; static int mcp3021_probe(struct i2c_client *client) { - int err; struct mcp3021_data *data = NULL; struct device_node *np = client->dev.of_node; + struct device *hwmon_dev; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; @@ -147,34 +164,17 @@ static int mcp3021_probe(struct i2c_client *client) break; } + data->client = client; + if (data->vdd > MCP3021_VDD_REF_MAX || data->vdd < MCP3021_VDD_REF_MIN) return -EINVAL; - err = sysfs_create_file(&client->dev.kobj, &dev_attr_in0_input.attr); - if (err) - return err; - - data->hwmon_dev = hwmon_device_register(&client->dev); - if (IS_ERR(data->hwmon_dev)) { - err = PTR_ERR(data->hwmon_dev); - goto exit_remove; - } - - return 0; - -exit_remove: - sysfs_remove_file(&client->dev.kobj, &dev_attr_in0_input.attr); - return err; -} - -static int mcp3021_remove(struct i2c_client *client) -{ - struct mcp3021_data *data = i2c_get_clientdata(client); - - hwmon_device_unregister(data->hwmon_dev); - sysfs_remove_file(&client->dev.kobj, &dev_attr_in0_input.attr); - - return 0; + hwmon_dev = devm_hwmon_device_register_with_info(&client->dev, + client->name, + data, + &mcp3021_chip_info, + NULL); + return PTR_ERR_OR_ZERO(hwmon_dev); } static const struct i2c_device_id mcp3021_id[] = { @@ -199,7 +199,6 @@ static struct i2c_driver 
mcp3021_driver = { .of_match_table = of_match_ptr(of_mcp3021_match), }, .probe_new = mcp3021_probe, - .remove = mcp3021_remove, .id_table = mcp3021_id, }; diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c index 6d46c9401898..ab30437221ce 100644 --- a/drivers/hwmon/nct6775-platform.c +++ b/drivers/hwmon/nct6775-platform.c @@ -1083,6 +1083,7 @@ static const char * const asus_wmi_boards[] = { "TUF GAMING B550M-PLUS", "TUF GAMING B550M-PLUS (WI-FI)", "TUF GAMING B550-PLUS", + "TUF GAMING B550-PLUS WIFI II", "TUF GAMING B550-PRO", "TUF GAMING X570-PLUS", "TUF GAMING X570-PLUS (WI-FI)", @@ -1200,10 +1201,8 @@ static int __init sensors_nct6775_platform_init(void) exit_device_put: platform_device_put(pdev[i]); exit_device_unregister: - while (--i >= 0) { - if (pdev[i]) - platform_device_unregister(pdev[i]); - } + while (i--) + platform_device_unregister(pdev[i]); exit_unregister: platform_driver_unregister(&nct6775_driver); return err; @@ -1213,10 +1212,8 @@ static void __exit sensors_nct6775_platform_exit(void) { int i; - for (i = 0; i < ARRAY_SIZE(pdev); i++) { - if (pdev[i]) - platform_device_unregister(pdev[i]); - } + for (i = 0; i < ARRAY_SIZE(pdev); i++) + platform_device_unregister(pdev[i]); platform_driver_unregister(&nct6775_driver); } diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index 157b73a3da29..45407b12db4b 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -729,18 +729,14 @@ static ssize_t occ_show_extended(struct device *dev, rc = sysfs_emit(buf, "%u", get_unaligned_be32(&extn->sensor_id)); } else { - rc = sysfs_emit(buf, "%02x%02x%02x%02x\n", - extn->name[0], extn->name[1], - extn->name[2], extn->name[3]); + rc = sysfs_emit(buf, "%4phN\n", extn->name); } break; case 1: rc = sysfs_emit(buf, "%02x\n", extn->flags); break; case 2: - rc = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n", - extn->data[0], extn->data[1], extn->data[2], - extn->data[3], extn->data[4], extn->data[5]); + rc = sysfs_emit(buf, "%6phN\n", extn->data); break; default: return -EINVAL; diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c index a91937e28e12..c1e0a1d96cd4 100644 --- a/drivers/hwmon/occ/p9_sbe.c +++ b/drivers/hwmon/occ/p9_sbe.c @@ -55,8 +55,7 @@ static bool p9_sbe_occ_save_ffdc(struct p9_sbe_occ *ctx, const void *resp, mutex_lock(&ctx->sbe_error_lock); if (!ctx->sbe_error) { if (resp_len > ctx->ffdc_size) { - if (ctx->ffdc) - kvfree(ctx->ffdc); + kvfree(ctx->ffdc); ctx->ffdc = kvmalloc(resp_len, GFP_KERNEL); if (!ctx->ffdc) { ctx->ffdc_len = 0; @@ -170,8 +169,7 @@ static int p9_sbe_occ_remove(struct platform_device *pdev) ctx->sbe = NULL; occ_shutdown(occ); - if (ctx->ffdc) - kvfree(ctx->ffdc); + kvfree(ctx->ffdc); return 0; } diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index dfae76db65ae..951e4a9ff2d6 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -181,6 +181,15 @@ config SENSORS_LM25066_REGULATOR If you say yes here you get regulator support for National Semiconductor LM25066, LM5064, and LM5066. +config SENSORS_LT7182S + tristate "Analog Devices LT7182S" + help + If you say yes here you get hardware monitoring support for Analog + Devices LT7182S. + + This driver can also be built as a module. If so, the module will + be called lt7182s. 
+ config SENSORS_LTC2978 tristate "Linear Technologies LTC2978 and compatibles" help diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile index 4678fba5012c..e2fe86f98965 100644 --- a/drivers/hwmon/pmbus/Makefile +++ b/drivers/hwmon/pmbus/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_SENSORS_IR38064) += ir38064.o obj-$(CONFIG_SENSORS_IRPS5401) += irps5401.o obj-$(CONFIG_SENSORS_ISL68137) += isl68137.o obj-$(CONFIG_SENSORS_LM25066) += lm25066.o +obj-$(CONFIG_SENSORS_LT7182S) += lt7182s.o obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o obj-$(CONFIG_SENSORS_MAX15301) += max15301.o diff --git a/drivers/hwmon/pmbus/lt7182s.c b/drivers/hwmon/pmbus/lt7182s.c new file mode 100644 index 000000000000..4cfe476fc92d --- /dev/null +++ b/drivers/hwmon/pmbus/lt7182s.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Hardware monitoring driver for Analog Devices LT7182S + * + * Copyright (c) 2022 Guenter Roeck + * + */ + +#include <linux/bits.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include "pmbus.h" + +#define LT7182S_NUM_PAGES 2 + +#define MFR_READ_EXTVCC 0xcd +#define MFR_READ_ITH 0xce +#define MFR_CONFIG_ALL_LT7182S 0xd1 +#define MFR_IOUT_PEAK 0xd7 +#define MFR_ADC_CONTROL_LT7182S 0xd8 + +#define MFR_DEBUG_TELEMETRY BIT(0) + +#define MFR_VOUT_PEAK 0xdd +#define MFR_VIN_PEAK 0xde +#define MFR_TEMPERATURE_1_PEAK 0xdf +#define MFR_CLEAR_PEAKS 0xe3 + +#define MFR_CONFIG_IEEE BIT(8) + +static int lt7182s_read_word_data(struct i2c_client *client, int page, int phase, int reg) +{ + int ret; + + switch (reg) { + case PMBUS_VIRT_READ_VMON: + if (page == 0 || page == 1) + ret = pmbus_read_word_data(client, page, phase, MFR_READ_ITH); + else + ret = pmbus_read_word_data(client, 0, phase, MFR_READ_EXTVCC); + break; + case PMBUS_VIRT_READ_IOUT_MAX: + ret = pmbus_read_word_data(client, page, phase, MFR_IOUT_PEAK); + break; + case PMBUS_VIRT_READ_VOUT_MAX: + ret = pmbus_read_word_data(client, page, phase, MFR_VOUT_PEAK); + break; + case PMBUS_VIRT_READ_VIN_MAX: + ret = pmbus_read_word_data(client, page, phase, MFR_VIN_PEAK); + break; + case PMBUS_VIRT_READ_TEMP_MAX: + ret = pmbus_read_word_data(client, page, phase, MFR_TEMPERATURE_1_PEAK); + break; + case PMBUS_VIRT_RESET_VIN_HISTORY: + ret = (page == 0) ? 
0 : -ENODATA; + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static int lt7182s_write_word_data(struct i2c_client *client, int page, int reg, u16 word) +{ + int ret; + + switch (reg) { + case PMBUS_VIRT_RESET_VIN_HISTORY: + ret = pmbus_write_byte(client, 0, MFR_CLEAR_PEAKS); + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static struct pmbus_driver_info lt7182s_info = { + .pages = LT7182S_NUM_PAGES, + .format[PSC_VOLTAGE_IN] = linear, + .format[PSC_VOLTAGE_OUT] = linear, + .format[PSC_CURRENT_IN] = linear, + .format[PSC_CURRENT_OUT] = linear, + .format[PSC_TEMPERATURE] = linear, + .format[PSC_POWER] = linear, + .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | + PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP, + .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | + PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT | + PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_STATUS_INPUT, + .read_word_data = lt7182s_read_word_data, + .write_word_data = lt7182s_write_word_data, +}; + +static int lt7182s_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct pmbus_driver_info *info; + u8 buf[I2C_SMBUS_BLOCK_MAX + 1]; + int ret; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_BYTE_DATA | + I2C_FUNC_SMBUS_READ_WORD_DATA | + I2C_FUNC_SMBUS_READ_BLOCK_DATA)) + return -ENODEV; + + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf); + if (ret < 0) { + dev_err(dev, "Failed to read PMBUS_MFR_ID\n"); + return ret; + } + if (ret != 3 || strncmp(buf, "ADI", 3)) { + buf[ret] = '\0'; + dev_err(dev, "Manufacturer '%s' not supported\n", buf); + return -ENODEV; + } + + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf); + if (ret < 0) { + dev_err(dev, "Failed to read PMBUS_MFR_MODEL\n"); + return ret; + } + if (ret != 7 || strncmp(buf, "LT7182S", 7)) { + buf[ret] = '\0'; + dev_err(dev, "Model '%s' not supported\n", buf); + return -ENODEV; + } + + info = devm_kmemdup(dev, <7182s_info, + sizeof(struct pmbus_driver_info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + /* Set data format to IEEE754 if configured */ + ret = i2c_smbus_read_word_data(client, MFR_CONFIG_ALL_LT7182S); + if (ret < 0) + return ret; + if (ret & MFR_CONFIG_IEEE) { + info->format[PSC_VOLTAGE_IN] = ieee754; + info->format[PSC_VOLTAGE_OUT] = ieee754; + info->format[PSC_CURRENT_IN] = ieee754; + info->format[PSC_CURRENT_OUT] = ieee754; + info->format[PSC_TEMPERATURE] = ieee754; + info->format[PSC_POWER] = ieee754; + } + + /* Enable VMON output if configured */ + ret = i2c_smbus_read_byte_data(client, MFR_ADC_CONTROL_LT7182S); + if (ret < 0) + return ret; + if (ret & MFR_DEBUG_TELEMETRY) { + info->pages = 3; + info->func[0] |= PMBUS_HAVE_VMON; + info->func[1] |= PMBUS_HAVE_VMON; + info->func[2] = PMBUS_HAVE_VMON; + } + + return pmbus_do_probe(client, info); +} + +static const struct i2c_device_id lt7182s_id[] = { + { "lt7182s", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, lt7182s_id); + +static const struct of_device_id __maybe_unused lt7182s_of_match[] = { + { .compatible = "adi,lt7182s" }, + {} +}; + +static struct i2c_driver lt7182s_driver = { + .driver = { + .name = "lt7182s", + .of_match_table = of_match_ptr(lt7182s_of_match), + }, + .probe_new = lt7182s_probe, + .id_table = lt7182s_id, +}; + +module_i2c_driver(lt7182s_driver); + +MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); 
+MODULE_DESCRIPTION("PMBus driver for Analog Devices LT7182S"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(PMBUS); diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c index 531aa674a928..6d2592731ba3 100644 --- a/drivers/hwmon/pmbus/ltc2978.c +++ b/drivers/hwmon/pmbus/ltc2978.c @@ -562,7 +562,24 @@ static const struct i2c_device_id ltc2978_id[] = { MODULE_DEVICE_TABLE(i2c, ltc2978_id); #if IS_ENABLED(CONFIG_SENSORS_LTC2978_REGULATOR) +#define LTC2978_ADC_RES 0xFFFF +#define LTC2978_N_ADC 122 +#define LTC2978_MAX_UV (LTC2978_ADC_RES * LTC2978_N_ADC) +#define LTC2978_UV_STEP 1000 +#define LTC2978_N_VOLTAGES ((LTC2978_MAX_UV / LTC2978_UV_STEP) + 1) + static const struct regulator_desc ltc2978_reg_desc[] = { + PMBUS_REGULATOR_STEP("vout", 0, LTC2978_N_VOLTAGES, LTC2978_UV_STEP), + PMBUS_REGULATOR_STEP("vout", 1, LTC2978_N_VOLTAGES, LTC2978_UV_STEP), + PMBUS_REGULATOR_STEP("vout", 2, LTC2978_N_VOLTAGES, LTC2978_UV_STEP), + PMBUS_REGULATOR_STEP("vout", 3, LTC2978_N_VOLTAGES, LTC2978_UV_STEP), + PMBUS_REGULATOR_STEP("vout", 4, LTC2978_N_VOLTAGES, LTC2978_UV_STEP), + PMBUS_REGULATOR_STEP("vout", 5, LTC2978_N_VOLTAGES, LTC2978_UV_STEP), + PMBUS_REGULATOR_STEP("vout", 6, LTC2978_N_VOLTAGES, LTC2978_UV_STEP), + PMBUS_REGULATOR_STEP("vout", 7, LTC2978_N_VOLTAGES, LTC2978_UV_STEP), +}; + +static const struct regulator_desc ltc2978_reg_desc_default[] = { PMBUS_REGULATOR("vout", 0), PMBUS_REGULATOR("vout", 1), PMBUS_REGULATOR("vout", 2), @@ -839,10 +856,29 @@ static int ltc2978_probe(struct i2c_client *client) #if IS_ENABLED(CONFIG_SENSORS_LTC2978_REGULATOR) info->num_regulators = info->pages; - info->reg_desc = ltc2978_reg_desc; - if (info->num_regulators > ARRAY_SIZE(ltc2978_reg_desc)) { - dev_err(&client->dev, "num_regulators too large!"); - info->num_regulators = ARRAY_SIZE(ltc2978_reg_desc); + switch (data->id) { + case ltc2972: + case ltc2974: + case ltc2975: + case ltc2977: + case ltc2978: + case ltc2979: + case ltc2980: + case ltm2987: + info->reg_desc = ltc2978_reg_desc; + if (info->num_regulators > ARRAY_SIZE(ltc2978_reg_desc)) { + dev_warn(&client->dev, "num_regulators too large!"); + info->num_regulators = ARRAY_SIZE(ltc2978_reg_desc); + } + break; + default: + info->reg_desc = ltc2978_reg_desc_default; + if (info->num_regulators > ARRAY_SIZE(ltc2978_reg_desc_default)) { + dev_warn(&client->dev, "num_regulators too large!"); + info->num_regulators = + ARRAY_SIZE(ltc2978_reg_desc_default); + } + break; } #endif diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index c031a9700ace..7daaf0caf4d3 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -406,7 +406,7 @@ enum pmbus_sensor_classes { #define PMBUS_PHASE_VIRTUAL BIT(30) /* Phases on this page are virtual */ #define PMBUS_PAGE_VIRTUAL BIT(31) /* Page is virtual */ -enum pmbus_data_format { linear = 0, direct, vid }; +enum pmbus_data_format { linear = 0, ieee754, direct, vid }; enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv }; struct pmbus_driver_info { @@ -463,8 +463,8 @@ struct pmbus_driver_info { extern const struct regulator_ops pmbus_regulator_ops; -/* Macro for filling in array of struct regulator_desc */ -#define PMBUS_REGULATOR(_name, _id) \ +/* Macros for filling in array of struct regulator_desc */ +#define PMBUS_REGULATOR_STEP(_name, _id, _voltages, _step) \ [_id] = { \ .name = (_name # _id), \ .supply_name = "vin", \ @@ -474,8 +474,12 @@ extern const struct regulator_ops pmbus_regulator_ops; .ops = &pmbus_regulator_ops, \ .type = REGULATOR_VOLTAGE, \ 
.owner = THIS_MODULE, \ + .n_voltages = _voltages, \ + .uV_step = _step, \ } +#define PMBUS_REGULATOR(_name, _id) PMBUS_REGULATOR_STEP(_name, _id, 0, 0) + /* Function declarations */ void pmbus_clear_cache(struct i2c_client *client); diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 02912022853d..f10bac8860fc 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -104,6 +104,9 @@ struct pmbus_data { s16 currpage; /* current page, -1 for unknown/unset */ s16 currphase; /* current phase, 0xff for all, -1 for unknown/unset */ + + int vout_low[PMBUS_PAGES]; /* voltage low margin */ + int vout_high[PMBUS_PAGES]; /* voltage high margin */ }; struct pmbus_debugfs_entry { @@ -441,6 +444,18 @@ int pmbus_update_byte_data(struct i2c_client *client, int page, u8 reg, } EXPORT_SYMBOL_NS_GPL(pmbus_update_byte_data, PMBUS); +static int pmbus_read_block_data(struct i2c_client *client, int page, u8 reg, + char *data_buf) +{ + int rv; + + rv = pmbus_set_page(client, page, 0xff); + if (rv < 0) + return rv; + + return i2c_smbus_read_block_data(client, reg, data_buf); +} + static struct pmbus_sensor *pmbus_find_sensor(struct pmbus_data *data, int page, int reg) { @@ -578,6 +593,22 @@ bool pmbus_check_word_register(struct i2c_client *client, int page, int reg) } EXPORT_SYMBOL_NS_GPL(pmbus_check_word_register, PMBUS); +static bool __maybe_unused pmbus_check_block_register(struct i2c_client *client, + int page, int reg) +{ + int rv; + struct pmbus_data *data = i2c_get_clientdata(client); + char data_buf[I2C_SMBUS_BLOCK_MAX + 2]; + + rv = pmbus_read_block_data(client, page, reg, data_buf); + if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK)) + rv = pmbus_check_status_cml(client); + if (rv < 0 && (data->flags & PMBUS_READ_STATUS_AFTER_FAILED_CHECK)) + data->read_status(client, -1); + pmbus_clear_fault_page(client, -1); + return rv >= 0; +} + const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client) { struct pmbus_data *data = i2c_get_clientdata(client); @@ -612,6 +643,66 @@ static void pmbus_update_sensor_data(struct i2c_client *client, struct pmbus_sen } /* + * Convert ieee754 sensor values to milli- or micro-units + * depending on sensor type. + * + * ieee754 data format: + * bit 15: sign + * bit 10..14: exponent + * bit 0..9: mantissa + * exponent=0: + * v=(−1)^signbit * 2^(−14) * 0.significantbits + * exponent=1..30: + * v=(−1)^signbit * 2^(exponent - 15) * 1.significantbits + * exponent=31: + * v=NaN + * + * Add the number of mantissa bits into the calculations for simplicity. + * To do that, add '10' to the exponent. By doing that, we can just add + * 0x400 to normal values and get the expected result.
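+ * + * A worked example for illustration: 0x3c00 (half-precision 1.0) has exponent field 15 and mantissa 0. Adding the implied bit gives val = 0x400 = 1024, the exponent becomes 15 - 25 = -10, and a voltage sensor reports 1024 * 1000 >> 10 = 1000 milli-units.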
+ */ +static long pmbus_reg2data_ieee754(struct pmbus_data *data, + struct pmbus_sensor *sensor) +{ + int exponent; + bool sign; + long val; + + /* only support half precision for now */ + sign = sensor->data & 0x8000; + exponent = (sensor->data >> 10) & 0x1f; + val = sensor->data & 0x3ff; + + if (exponent == 0) { /* subnormal */ + exponent = -(14 + 10); + } else if (exponent == 0x1f) { /* NaN, convert to min/max */ + exponent = 0; + val = 65504; + } else { + exponent -= (15 + 10); /* normal */ + val |= 0x400; + } + + /* scale result to milli-units for all sensors except fans */ + if (sensor->class != PSC_FAN) + val = val * 1000L; + + /* scale result to micro-units for power sensors */ + if (sensor->class == PSC_POWER) + val = val * 1000L; + + if (exponent >= 0) + val <<= exponent; + else + val >>= -exponent; + + if (sign) + val = -val; + + return val; +} + +/* * Convert linear sensor values to milli- or micro-units * depending on sensor type. */ @@ -741,6 +832,9 @@ static s64 pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor) case vid: val = pmbus_reg2data_vid(data, sensor); break; + case ieee754: + val = pmbus_reg2data_ieee754(data, sensor); + break; case linear: default: val = pmbus_reg2data_linear(data, sensor); @@ -749,8 +843,72 @@ static s64 pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor) return val; } -#define MAX_MANTISSA (1023 * 1000) -#define MIN_MANTISSA (511 * 1000) +#define MAX_IEEE_MANTISSA (0x7ff * 1000) +#define MIN_IEEE_MANTISSA (0x400 * 1000) + +static u16 pmbus_data2reg_ieee754(struct pmbus_data *data, + struct pmbus_sensor *sensor, long val) +{ + u16 exponent = (15 + 10); + long mantissa; + u16 sign = 0; + + /* simple case */ + if (val == 0) + return 0; + + if (val < 0) { + sign = 0x8000; + val = -val; + } + + /* Power is in uW. Convert to mW before converting. */ + if (sensor->class == PSC_POWER) + val = DIV_ROUND_CLOSEST(val, 1000L); + + /* + * For simplicity, convert fan data to milli-units + * before calculating the exponent. + */ + if (sensor->class == PSC_FAN) + val = val * 1000; + + /* Reduce large mantissa until it fits into 10 bit */ + while (val > MAX_IEEE_MANTISSA && exponent < 30) { + exponent++; + val >>= 1; + } + /* + * Increase small mantissa to generate valid 'normal' + * number + */ + while (val < MIN_IEEE_MANTISSA && exponent > 1) { + exponent--; + val <<= 1; + } + + /* Convert mantissa from milli-units to units */ + mantissa = DIV_ROUND_CLOSEST(val, 1000); + + /* + * Ensure that the resulting number is within range. + * Valid range is 0x400..0x7ff, where bit 10 reflects + * the implied high bit in normalized ieee754 numbers. + * Set the range to 0x400..0x7ff to reflect this. + * The upper bit is then removed by the mask against + * 0x3ff in the final assignment. 
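+ * + * A worked example for illustration: encoding 1000 milli-units starts with exponent = 25 and val = 1000. val is doubled ten times to reach MIN_IEEE_MANTISSA (1000 << 10 == 0x400 * 1000), dropping the exponent to 15. The mantissa becomes DIV_ROUND_CLOSEST(1024000, 1000) = 0x400, so the result is (15 << 10) | (0x400 & 0x3ff) = 0x3c00, the inverse of the decoding example above.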
+ */ + if (mantissa > 0x7ff) + mantissa = 0x7ff; + else if (mantissa < 0x400) + mantissa = 0x400; + + /* Convert to sign, 5 bit exponent, 10 bit mantissa */ + return sign | (mantissa & 0x3ff) | ((exponent << 10) & 0x7c00); +} + +#define MAX_LIN_MANTISSA (1023 * 1000) +#define MIN_LIN_MANTISSA (511 * 1000) static u16 pmbus_data2reg_linear(struct pmbus_data *data, struct pmbus_sensor *sensor, s64 val) @@ -796,12 +954,12 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data, val = val * 1000LL; /* Reduce large mantissa until it fits into 10 bit */ - while (val >= MAX_MANTISSA && exponent < 15) { + while (val >= MAX_LIN_MANTISSA && exponent < 15) { exponent++; val >>= 1; } /* Increase small mantissa to improve precision */ - while (val < MIN_MANTISSA && exponent > -15) { + while (val < MIN_LIN_MANTISSA && exponent > -15) { exponent--; val <<= 1; } @@ -875,6 +1033,9 @@ static u16 pmbus_data2reg(struct pmbus_data *data, case vid: regval = pmbus_data2reg_vid(data, sensor, val); break; + case ieee754: + regval = pmbus_data2reg_ieee754(data, sensor, val); + break; case linear: default: regval = pmbus_data2reg_linear(data, sensor, val); @@ -2369,6 +2530,10 @@ static int pmbus_identify_common(struct i2c_client *client, if (data->info->format[PSC_VOLTAGE_OUT] != direct) return -ENODEV; break; + case 3: /* ieee 754 half precision */ + if (data->info->format[PSC_VOLTAGE_OUT] != ieee754) + return -ENODEV; + break; default: return -ENODEV; } @@ -2388,6 +2553,42 @@ static int pmbus_read_status_word(struct i2c_client *client, int page) return _pmbus_read_word_data(client, page, 0xff, PMBUS_STATUS_WORD); } +/* PEC attribute support */ + +static ssize_t pec_show(struct device *dev, struct device_attribute *dummy, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + + return sysfs_emit(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC)); +} + +static ssize_t pec_store(struct device *dev, struct device_attribute *dummy, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + bool enable; + int err; + + err = kstrtobool(buf, &enable); + if (err < 0) + return err; + + if (enable) + client->flags |= I2C_CLIENT_PEC; + else + client->flags &= ~I2C_CLIENT_PEC; + + return count; +} + +static DEVICE_ATTR_RW(pec); + +static void pmbus_remove_pec(void *dev) +{ + device_remove_file(dev, &dev_attr_pec); +} + static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, struct pmbus_driver_info *info) { @@ -2474,6 +2675,20 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, return ret; } + if (client->flags & I2C_CLIENT_PEC) { + /* + * If I2C_CLIENT_PEC is set here, both the I2C adapter and the + * chip support PEC. Add 'pec' attribute to client device to let + * the user control it. 
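+ * + * For example (the exact path depends on bus number and chip address), writing 0 to /sys/bus/i2c/devices/<bus>-<addr>/pec disables PEC at run time, and writing 1 enables it again.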
+ */ + ret = device_create_file(dev, &dev_attr_pec); + if (ret) + return ret; + ret = devm_add_action_or_reset(dev, pmbus_remove_pec, dev); + if (ret) + return ret; + } + return 0; } @@ -2636,6 +2851,58 @@ static int pmbus_regulator_get_error_flags(struct regulator_dev *rdev, unsigned return 0; } +static int pmbus_regulator_get_low_margin(struct i2c_client *client, int page) +{ + struct pmbus_data *data = i2c_get_clientdata(client); + struct pmbus_sensor s = { + .page = page, + .class = PSC_VOLTAGE_OUT, + .convert = true, + .data = -1, + }; + + if (!data->vout_low[page]) { + if (pmbus_check_word_register(client, page, PMBUS_MFR_VOUT_MIN)) + s.data = _pmbus_read_word_data(client, page, 0xff, + PMBUS_MFR_VOUT_MIN); + if (s.data < 0) { + s.data = _pmbus_read_word_data(client, page, 0xff, + PMBUS_VOUT_MARGIN_LOW); + if (s.data < 0) + return s.data; + } + data->vout_low[page] = pmbus_reg2data(data, &s); + } + + return data->vout_low[page]; +} + +static int pmbus_regulator_get_high_margin(struct i2c_client *client, int page) +{ + struct pmbus_data *data = i2c_get_clientdata(client); + struct pmbus_sensor s = { + .page = page, + .class = PSC_VOLTAGE_OUT, + .convert = true, + .data = -1, + }; + + if (!data->vout_high[page]) { + if (pmbus_check_word_register(client, page, PMBUS_MFR_VOUT_MAX)) + s.data = _pmbus_read_word_data(client, page, 0xff, + PMBUS_MFR_VOUT_MAX); + if (s.data < 0) { + s.data = _pmbus_read_word_data(client, page, 0xff, + PMBUS_VOUT_MARGIN_HIGH); + if (s.data < 0) + return s.data; + } + data->vout_high[page] = pmbus_reg2data(data, &s); + } + + return data->vout_high[page]; +} + static int pmbus_regulator_get_voltage(struct regulator_dev *rdev) { struct device *dev = rdev_get_dev(rdev); @@ -2671,24 +2938,13 @@ static int pmbus_regulator_set_voltage(struct regulator_dev *rdev, int min_uv, *selector = 0; - if (pmbus_check_word_register(client, s.page, PMBUS_MFR_VOUT_MIN)) - s.data = _pmbus_read_word_data(client, s.page, 0xff, PMBUS_MFR_VOUT_MIN); - if (s.data < 0) { - s.data = _pmbus_read_word_data(client, s.page, 0xff, PMBUS_VOUT_MARGIN_LOW); - if (s.data < 0) - return s.data; - } - low = pmbus_reg2data(data, &s); + low = pmbus_regulator_get_low_margin(client, s.page); + if (low < 0) + return low; - s.data = -1; - if (pmbus_check_word_register(client, s.page, PMBUS_MFR_VOUT_MAX)) - s.data = _pmbus_read_word_data(client, s.page, 0xff, PMBUS_MFR_VOUT_MAX); - if (s.data < 0) { - s.data = _pmbus_read_word_data(client, s.page, 0xff, PMBUS_VOUT_MARGIN_HIGH); - if (s.data < 0) - return s.data; - } - high = pmbus_reg2data(data, &s); + high = pmbus_regulator_get_high_margin(client, s.page); + if (high < 0) + return high; /* Make sure we are within margins */ if (low > val) @@ -2701,6 +2957,35 @@ static int pmbus_regulator_set_voltage(struct regulator_dev *rdev, int min_uv, return _pmbus_write_word_data(client, s.page, PMBUS_VOUT_COMMAND, (u16)val); } +static int pmbus_regulator_list_voltage(struct regulator_dev *rdev, + unsigned int selector) +{ + struct device *dev = rdev_get_dev(rdev); + struct i2c_client *client = to_i2c_client(dev->parent); + int val, low, high; + + if (selector >= rdev->desc->n_voltages || + selector < rdev->desc->linear_min_sel) + return -EINVAL; + + selector -= rdev->desc->linear_min_sel; + val = DIV_ROUND_CLOSEST(rdev->desc->min_uV + + (rdev->desc->uV_step * selector), 1000); /* convert to mV */ + + low = pmbus_regulator_get_low_margin(client, rdev_get_id(rdev)); + if (low < 0) + return low; + + high = pmbus_regulator_get_high_margin(client, rdev_get_id(rdev)); + if 
(high < 0) + return high; + + if (val >= low && val <= high) + return val * 1000; /* unit is uV */ + + return 0; +} + const struct regulator_ops pmbus_regulator_ops = { .enable = pmbus_regulator_enable, .disable = pmbus_regulator_disable, @@ -2708,6 +2993,7 @@ const struct regulator_ops pmbus_regulator_ops = { .get_error_flags = pmbus_regulator_get_error_flags, .get_voltage = pmbus_regulator_get_voltage, .set_voltage = pmbus_regulator_set_voltage, + .list_voltage = pmbus_regulator_list_voltage, }; EXPORT_SYMBOL_NS_GPL(pmbus_regulator_ops, PMBUS); @@ -2782,41 +3068,33 @@ static int pmbus_debugfs_get_status(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(pmbus_debugfs_ops_status, pmbus_debugfs_get_status, NULL, "0x%04llx\n"); -static int pmbus_debugfs_get_pec(void *data, u64 *val) -{ - struct i2c_client *client = data; - - *val = !!(client->flags & I2C_CLIENT_PEC); - - return 0; -} - -static int pmbus_debugfs_set_pec(void *data, u64 val) +static ssize_t pmbus_debugfs_mfr_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) { int rc; - struct i2c_client *client = data; - - if (!val) { - client->flags &= ~I2C_CLIENT_PEC; - return 0; - } - - if (val != 1) - return -EINVAL; + struct pmbus_debugfs_entry *entry = file->private_data; + char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 }; - rc = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY); + rc = pmbus_read_block_data(entry->client, entry->page, entry->reg, + data); if (rc < 0) return rc; - if (!(rc & PB_CAPABILITY_ERROR_CHECK)) - return -EOPNOTSUPP; + /* Add newline at the end of a read data */ + data[rc] = '\n'; - client->flags |= I2C_CLIENT_PEC; + /* Include newline into the length */ + rc += 1; - return 0; + return simple_read_from_buffer(buf, count, ppos, data, rc); } -DEFINE_DEBUGFS_ATTRIBUTE(pmbus_debugfs_ops_pec, pmbus_debugfs_get_pec, - pmbus_debugfs_set_pec, "%llu\n"); + +static const struct file_operations pmbus_debugfs_ops_mfr = { + .llseek = noop_llseek, + .read = pmbus_debugfs_mfr_read, + .write = NULL, + .open = simple_open, +}; static void pmbus_remove_debugfs(void *data) { @@ -2846,16 +3124,80 @@ static int pmbus_init_debugfs(struct i2c_client *client, return -ENODEV; } - /* Allocate the max possible entries we need. */ + /* + * Allocate the max possible entries we need. + * 6 entries device-specific + * 10 entries page-specific + */ entries = devm_kcalloc(data->dev, - data->info->pages * 10, sizeof(*entries), + 6 + data->info->pages * 10, sizeof(*entries), GFP_KERNEL); if (!entries) return -ENOMEM; - debugfs_create_file("pec", 0664, data->debugfs, client, - &pmbus_debugfs_ops_pec); - + /* + * Add device-specific entries. + * Please note that the PMBUS standard allows all registers to be + * page-specific. + * To reduce the number of debugfs entries for devices with many pages + * assume that values of the following registers are the same for all + * pages and report values only for page 0. 
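+ * + * As a result, each of the entries created below (mfr_id, mfr_model, mfr_revision, mfr_location, mfr_date and mfr_serial) appears at most once per device, as a read-only debugfs file.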
+ */ + if (pmbus_check_block_register(client, 0, PMBUS_MFR_ID)) { + entries[idx].client = client; + entries[idx].page = 0; + entries[idx].reg = PMBUS_MFR_ID; + debugfs_create_file("mfr_id", 0444, data->debugfs, + &entries[idx++], + &pmbus_debugfs_ops_mfr); + } + + if (pmbus_check_block_register(client, 0, PMBUS_MFR_MODEL)) { + entries[idx].client = client; + entries[idx].page = 0; + entries[idx].reg = PMBUS_MFR_MODEL; + debugfs_create_file("mfr_model", 0444, data->debugfs, + &entries[idx++], + &pmbus_debugfs_ops_mfr); + } + + if (pmbus_check_block_register(client, 0, PMBUS_MFR_REVISION)) { + entries[idx].client = client; + entries[idx].page = 0; + entries[idx].reg = PMBUS_MFR_REVISION; + debugfs_create_file("mfr_revision", 0444, data->debugfs, + &entries[idx++], + &pmbus_debugfs_ops_mfr); + } + + if (pmbus_check_block_register(client, 0, PMBUS_MFR_LOCATION)) { + entries[idx].client = client; + entries[idx].page = 0; + entries[idx].reg = PMBUS_MFR_LOCATION; + debugfs_create_file("mfr_location", 0444, data->debugfs, + &entries[idx++], + &pmbus_debugfs_ops_mfr); + } + + if (pmbus_check_block_register(client, 0, PMBUS_MFR_DATE)) { + entries[idx].client = client; + entries[idx].page = 0; + entries[idx].reg = PMBUS_MFR_DATE; + debugfs_create_file("mfr_date", 0444, data->debugfs, + &entries[idx++], + &pmbus_debugfs_ops_mfr); + } + + if (pmbus_check_block_register(client, 0, PMBUS_MFR_SERIAL)) { + entries[idx].client = client; + entries[idx].page = 0; + entries[idx].reg = PMBUS_MFR_SERIAL; + debugfs_create_file("mfr_serial", 0444, data->debugfs, + &entries[idx++], + &pmbus_debugfs_ops_mfr); + } + + /* Add page specific entries */ for (i = 0; i < data->info->pages; ++i) { /* Check accessibility of status register if it's not page 0 */ if (!i || pmbus_check_status_register(client, i)) { diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c index 3ece53adabd6..de3a0886c2f7 100644 --- a/drivers/hwmon/sch56xx-common.c +++ b/drivers/hwmon/sch56xx-common.c @@ -523,6 +523,28 @@ static int __init sch56xx_device_add(int address, const char *name) return PTR_ERR_OR_ZERO(sch56xx_pdev); } +static const struct dmi_system_id sch56xx_dmi_override_table[] __initconst = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS W380"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO P710"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO E9900"), + }, + }, + { } +}; + /* For autoloading only */ static const struct dmi_system_id sch56xx_dmi_table[] __initconst = { { @@ -543,16 +565,18 @@ static int __init sch56xx_init(void) if (!dmi_check_system(sch56xx_dmi_table)) return -ENODEV; - /* - * Some machines like the Esprimo P720 and Esprimo C700 have - * onboard devices named " Antiope"/" Theseus" instead of - * "Antiope"/"Theseus", so we need to check for both. - */ - if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) && - !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) && - !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) && - !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL)) - return -ENODEV; + if (!dmi_check_system(sch56xx_dmi_override_table)) { + /* + * Some machines like the Esprimo P720 and Esprimo C700 have + * onboard devices named " Antiope"/" Theseus" instead of + * "Antiope"/"Theseus", so we need to check for both. 
+ */ + if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) && + !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) && + !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) && + !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL)) + return -ENODEV; + } } /* diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index 7f4a63959730..ae4d14257a11 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c @@ -1020,25 +1020,20 @@ err_release_reg: static int sht15_remove(struct platform_device *pdev) { struct sht15_data *data = platform_get_drvdata(pdev); + int ret; - /* - * Make sure any reads from the device are done and - * prevent new ones beginning - */ - mutex_lock(&data->read_lock); - if (sht15_soft_reset(data)) { - mutex_unlock(&data->read_lock); - return -EFAULT; - } hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &sht15_attr_group); + + ret = sht15_soft_reset(data); + if (ret) + dev_err(&pdev->dev, "Failed to reset device (%pe)\n", ERR_PTR(ret)); + if (!IS_ERR(data->reg)) { regulator_unregister_notifier(data->reg, &data->nb); regulator_disable(data->reg); } - mutex_unlock(&data->read_lock); - return 0; } diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c index 8bd6435c13e8..42762e87b014 100644 --- a/drivers/hwmon/tps23861.c +++ b/drivers/hwmon/tps23861.c @@ -140,7 +140,8 @@ static int tps23861_read_temp(struct tps23861_data *data, long *val) static int tps23861_read_voltage(struct tps23861_data *data, int channel, long *val) { - unsigned int regval; + __le16 regval; + long raw_val; int err; if (channel < TPS23861_NUM_PORTS) { @@ -155,7 +156,8 @@ static int tps23861_read_voltage(struct tps23861_data *data, int channel, if (err < 0) return err; - *val = (FIELD_GET(VOLTAGE_CURRENT_MASK, regval) * VOLTAGE_LSB) / 1000; + raw_val = le16_to_cpu(regval); + *val = (FIELD_GET(VOLTAGE_CURRENT_MASK, raw_val) * VOLTAGE_LSB) / 1000; return 0; } @@ -163,8 +165,9 @@ static int tps23861_read_voltage(struct tps23861_data *data, int channel, static int tps23861_read_current(struct tps23861_data *data, int channel, long *val) { - unsigned int current_lsb; - unsigned int regval; + long raw_val, current_lsb; + __le16 regval; + int err; if (data->shunt_resistor == SHUNT_RESISTOR_DEFAULT) @@ -178,7 +181,8 @@ static int tps23861_read_current(struct tps23861_data *data, int channel, if (err < 0) return err; - *val = (FIELD_GET(VOLTAGE_CURRENT_MASK, regval) * current_lsb) / 1000000; + raw_val = le16_to_cpu(regval); + *val = (FIELD_GET(VOLTAGE_CURRENT_MASK, raw_val) * current_lsb) / 1000000; return 0; } diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a1bae59208e3..708a67c7faaa 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -486,7 +486,7 @@ config I2C_BCM_KONA config I2C_BRCMSTB tristate "BRCM Settop/DSL I2C controller" - depends on ARCH_BCM2835 || ARCH_BCM4908 || ARCH_BCM_63XX || \ + depends on ARCH_BCM2835 || ARCH_BCM4908 || ARCH_BCMBCA || \ ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST default y help diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index f5c6802aa6c3..3e101719689a 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -56,6 +56,7 @@ #include <asm/nospec-branch.h> #include <asm/mwait.h> #include <asm/msr.h> +#include <asm/fpu/api.h> #define INTEL_IDLE_VERSION "0.5.1" @@ -114,6 +115,11 @@ static unsigned int mwait_substates __initdata; #define CPUIDLE_FLAG_IBRS BIT(16) /* + * Initialize large xstate for the C6-state entrance. 
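+ * On CPUs with large xstate components (for instance AMX tile data), leaving such state live can prevent the core from entering deep C-states, so fpu_idle_fpregs() is used to put it back into its init state before MWAIT.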
+ */ +#define CPUIDLE_FLAG_INIT_XSTATE BIT(17) + +/* * MWAIT takes an 8-bit "hint" in EAX "suggesting" * the C-state (top nibble) and sub-state (bottom nibble) * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc. @@ -162,7 +168,13 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev, raw_local_irq_enable(); ret = __intel_idle(dev, drv, index); - raw_local_irq_disable(); + + /* + * The lockdep hardirqs state may be changed to 'on' with timer + * tick interrupt followed by __do_softirq(). Use local_irq_disable() + * to keep the hardirqs state correct. + */ + local_irq_disable(); return ret; } @@ -185,6 +197,13 @@ static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev, return ret; } +static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + fpu_idle_fpregs(); + return __intel_idle(dev, drv, index); +} + /** * intel_idle_s2idle - Ask the processor to enter the given idle state. * @dev: cpuidle device of the target CPU. @@ -200,8 +219,12 @@ static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev, static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - unsigned long eax = flg2MWAIT(drv->states[index].flags); unsigned long ecx = 1; /* break on interrupt flag */ + struct cpuidle_state *state = &drv->states[index]; + unsigned long eax = flg2MWAIT(state->flags); + + if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) + fpu_idle_fpregs(); mwait_idle_with_hints(eax, ecx); @@ -905,16 +928,6 @@ static struct cpuidle_state adl_l_cstates[] __initdata = { .enter = NULL } }; -/* - * On Sapphire Rapids Xeon C1 has to be disabled if C1E is enabled, and vice - * versa. On SPR C1E is enabled only if "C1E promotion" bit is set in - * MSR_IA32_POWER_CTL. But in this case there effectively no C1, because C1 - * requests are promoted to C1E. If the "C1E promotion" bit is cleared, then - * both C1 and C1E requests end up with C1, so there is effectively no C1E. - * - * By default we enable C1 and disable C1E by marking it with - * 'CPUIDLE_FLAG_UNUSABLE'. - */ static struct cpuidle_state spr_cstates[] __initdata = { { .name = "C1", @@ -927,8 +940,7 @@ static struct cpuidle_state spr_cstates[] __initdata = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE | - CPUIDLE_FLAG_UNUSABLE, + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 2, .target_residency = 4, .enter = &intel_idle, @@ -936,7 +948,8 @@ static struct cpuidle_state spr_cstates[] __initdata = { { .name = "C6", .desc = "MWAIT 0x20", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_INIT_XSTATE, .exit_latency = 290, .target_residency = 800, .enter = &intel_idle, @@ -1750,17 +1763,6 @@ static void __init spr_idle_state_table_update(void) { unsigned long long msr; - /* Check if user prefers C1E over C1. */ - if ((preferred_states_mask & BIT(2)) && - !(preferred_states_mask & BIT(1))) { - /* Disable C1 and enable C1E. */ - spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE; - spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE; - - /* Enable C1E using the "C1E promotion" bit. */ - c1e_promotion = C1E_PROMOTION_ENABLE; - } - /* * By default, the C6 state assumes the worst-case scenario of package * C6. 
However, if PC6 is disabled, we update the numbers to match @@ -1851,6 +1853,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) drv->states[drv->state_count].enter = intel_idle_ibrs; } + if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_INIT_XSTATE) + drv->states[drv->state_count].enter = intel_idle_xstate; + if ((disabled_states_mask & BIT(drv->state_count)) || ((icpu->use_acpi || force_use_acpi) && intel_idle_off_by_default(mwait_hint) && diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 6058abf42ba7..7720ea270ed8 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -1282,8 +1282,7 @@ struct srp_terminate_context { int scsi_result; }; -static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr, - bool reserved) +static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr) { struct srp_terminate_context *context = context_ptr; struct srp_target_port *target = context->srp_target; diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c index d1f5354d5ea2..cbc6c0d4670a 100644 --- a/drivers/input/keyboard/applespi.c +++ b/drivers/input/keyboard/applespi.c @@ -1597,52 +1597,38 @@ static u32 applespi_notify(acpi_handle gpe_device, u32 gpe, void *context) static int applespi_get_saved_bl_level(struct applespi_data *applespi) { - struct efivar_entry *efivar_entry; + efi_status_t sts = EFI_NOT_FOUND; u16 efi_data = 0; - unsigned long efi_data_len; - int sts; - - efivar_entry = kmalloc(sizeof(*efivar_entry), GFP_KERNEL); - if (!efivar_entry) - return -ENOMEM; - - memcpy(efivar_entry->var.VariableName, EFI_BL_LEVEL_NAME, - sizeof(EFI_BL_LEVEL_NAME)); - efivar_entry->var.VendorGuid = EFI_BL_LEVEL_GUID; - efi_data_len = sizeof(efi_data); + unsigned long efi_data_len = sizeof(efi_data); - sts = efivar_entry_get(efivar_entry, NULL, &efi_data_len, &efi_data); - if (sts && sts != -ENOENT) + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) + sts = efi.get_variable(EFI_BL_LEVEL_NAME, &EFI_BL_LEVEL_GUID, + NULL, &efi_data_len, &efi_data); + if (sts != EFI_SUCCESS && sts != EFI_NOT_FOUND) dev_warn(&applespi->spi->dev, - "Error getting backlight level from EFI vars: %d\n", + "Error getting backlight level from EFI vars: 0x%lx\n", sts); - kfree(efivar_entry); - - return sts ? sts : efi_data; + return sts != EFI_SUCCESS ? 
-ENODEV : efi_data; } static void applespi_save_bl_level(struct applespi_data *applespi, unsigned int level) { - efi_guid_t efi_guid; + efi_status_t sts = EFI_UNSUPPORTED; u32 efi_attr; - unsigned long efi_data_len; u16 efi_data; - int sts; - /* Save keyboard backlight level */ - efi_guid = EFI_BL_LEVEL_GUID; efi_data = (u16)level; - efi_data_len = sizeof(efi_data); efi_attr = EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS; - sts = efivar_entry_set_safe((efi_char16_t *)EFI_BL_LEVEL_NAME, efi_guid, - efi_attr, true, efi_data_len, &efi_data); - if (sts) + if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) + sts = efi.set_variable(EFI_BL_LEVEL_NAME, &EFI_BL_LEVEL_GUID, + efi_attr, sizeof(efi_data), &efi_data); + if (sts != EFI_SUCCESS) dev_warn(&applespi->spi->dev, - "Error saving backlight level to EFI vars: %d\n", sts); + "Error saving backlight level to EFI vars: 0x%lx\n", sts); } static int applespi_probe(struct spi_device *spi) diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c index e285a220c913..51bd66a45a11 100644 --- a/drivers/iommu/hyperv-iommu.c +++ b/drivers/iommu/hyperv-iommu.c @@ -194,7 +194,7 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg) u32 vector; struct irq_cfg *cfg; int ioapic_id; - struct cpumask *affinity; + const struct cpumask *affinity; int cpu; struct hv_interrupt_entry entry; struct hyperv_root_ir_data *data = irq_data->chip_data; diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index bbb11cb8b0f7..66b9fa408bf2 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -8,7 +8,7 @@ config IRQCHIP config ARM_GIC bool select IRQ_DOMAIN_HIERARCHY - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config ARM_GIC_PM bool @@ -34,7 +34,7 @@ config ARM_GIC_V3 bool select IRQ_DOMAIN_HIERARCHY select PARTITION_PERCPU - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config ARM_GIC_V3_ITS bool @@ -76,7 +76,7 @@ config ARMADA_370_XP_IRQ bool select GENERIC_IRQ_CHIP select PCI_MSI if PCI - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config ALPINE_MSI bool @@ -112,7 +112,7 @@ config BCM6345_L1_IRQ bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config BCM7038_L1_IRQ tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver" @@ -120,7 +120,7 @@ config BCM7038_L1_IRQ default ARCH_BRCMSTB || BMIPS_GENERIC select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config BCM7120_L2_IRQ tristate "Broadcom STB 7120-style L2 interrupt controller driver" @@ -177,9 +177,9 @@ config MADERA_IRQ config IRQ_MIPS_CPU bool select GENERIC_IRQ_CHIP - select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING + select GENERIC_IRQ_IPI if SMP && SYS_SUPPORTS_MULTITHREADING select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config CLPS711X_IRQCHIP bool @@ -242,6 +242,14 @@ config RENESAS_RZA1_IRQC Enable support for the Renesas RZ/A1 Interrupt Controller, to use up to 8 external interrupts with configurable sense select. 
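The "select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP" changes running through this Kconfig hunk concern drivers that report which CPU an interrupt is actually routed to. A typical .irq_set_affinity callback picks one online CPU, reprograms the route, and records the result. Minimal sketch only; the demo_* names are invented and not taken from any driver in this series:

#include <linux/cpumask.h>
#include <linux/irq.h>

/* Placeholder for hardware-specific routing (hypothetical). */
static void demo_route_to_cpu(struct irq_data *d, unsigned int cpu)
{
	/* Register writes elided in this sketch. */
}

static int demo_set_affinity(struct irq_data *d, const struct cpumask *mask,
			     bool force)
{
	unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	demo_route_to_cpu(d, cpu);
	/*
	 * Record where the interrupt will really fire; storing this is
	 * what GENERIC_IRQ_EFFECTIVE_AFF_MASK enables.
	 */
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}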
+config RENESAS_RZG2L_IRQC + bool "Renesas RZ/G2L (and alike SoC) IRQC support" if COMPILE_TEST + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN_HIERARCHY + help + Enable support for the Renesas RZ/G2L (and alike SoC) Interrupt Controller + for external devices. + config SL28CPLD_INTC bool "Kontron sl28cpld IRQ controller" depends on MFD_SL28CPLD=y || COMPILE_TEST @@ -294,7 +302,7 @@ config VERSATILE_FPGA_IRQ_NR config XTENSA_MX bool select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config XILINX_INTC bool "Xilinx Interrupt Controller IP" @@ -322,7 +330,8 @@ config KEYSTONE_IRQ config MIPS_GIC bool - select GENERIC_IRQ_IPI + select GENERIC_IRQ_IPI if SMP + select IRQ_DOMAIN_HIERARCHY select MIPS_CM config INGENIC_IRQ @@ -530,6 +539,7 @@ config SIFIVE_PLIC bool "SiFive Platform-Level Interrupt Controller" depends on RISCV select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP help This enables support for the PLIC chip found in SiFive (and potentially other) RISC-V systems. The PLIC controls devices @@ -546,6 +556,16 @@ config EXYNOS_IRQ_COMBINER Say yes here to add support for the IRQ combiner devices embedded in Samsung Exynos chips. +config IRQ_LOONGARCH_CPU + bool + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + help + Support for the LoongArch CPU Interrupt Controller. For details of + irq chip hierarchy on LoongArch platforms please read the document + Documentation/loongarch/irq-chip-model.rst. + config LOONGSON_LIOINTC bool "Loongson Local I/O Interrupt Controller" depends on MACH_LOONGSON64 @@ -555,6 +575,16 @@ config LOONGSON_LIOINTC help Support for the Loongson Local I/O Interrupt Controller. +config LOONGSON_EIOINTC + bool "Loongson Extend I/O Interrupt Controller" + depends on LOONGARCH + depends on MACH_LOONGSON64 + default MACH_LOONGSON64 + select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_CHIP + help + Support for the Loongson3 Extend I/O Interrupt Vector Controller. + config LOONGSON_HTPIC bool "Loongson3 HyperTransport PIC Controller" depends on MACH_LOONGSON64 && MIPS @@ -574,7 +604,7 @@ config LOONGSON_HTVEC config LOONGSON_PCH_PIC bool "Loongson PCH PIC Controller" - depends on MACH_LOONGSON64 || COMPILE_TEST + depends on MACH_LOONGSON64 default MACH_LOONGSON64 select IRQ_DOMAIN_HIERARCHY select IRQ_FASTEOI_HIERARCHY_HANDLERS @@ -583,7 +613,7 @@ config LOONGSON_PCH_PIC config LOONGSON_PCH_MSI bool "Loongson PCH MSI Controller" - depends on MACH_LOONGSON64 || COMPILE_TEST + depends on MACH_LOONGSON64 depends on PCI default MACH_LOONGSON64 select IRQ_DOMAIN_HIERARCHY @@ -591,6 +621,14 @@ config LOONGSON_PCH_MSI help Support for the Loongson PCH MSI Controller. +config LOONGSON_PCH_LPC + bool "Loongson PCH LPC Controller" + depends on MACH_LOONGSON64 + default (MACH_LOONGSON64 && LOONGARCH) + select IRQ_DOMAIN_HIERARCHY + help + Support for the Loongson PCH LPC Controller. + config MST_IRQ bool "MStar Interrupt Controller" depends on ARCH_MEDIATEK || ARCH_MSTARV7 || COMPILE_TEST @@ -627,4 +665,13 @@ config MCHP_EIC help Support for Microchip External Interrupt Controller. +config SUNPLUS_SP7021_INTC + bool "Sunplus SP7021 interrupt controller" if COMPILE_TEST + default SOC_SP7021 + help + Support for the Sunplus SP7021 Interrupt Controller IP core. + SP7021 SoC has 2 Chips: C-Chip & P-Chip. This is used as a + chained controller, routing all interrupt source in P-Chip to + the primary controller on C-Chip. 
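The SP7021 entry above, like the Loongson PCH controllers added in this series, describes a chained controller: one parent interrupt fans out into a child irq_domain. The demux handler follows a common shape; a minimal sketch with invented demo_* names, not the SP7021 implementation itself:

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

/* Placeholder for the controller's pending-status read (hypothetical). */
static u32 demo_read_pending(void)
{
	return 0; /* hardware access elided */
}

static void demo_chained_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	unsigned long pending = demo_read_pending();
	int bit;

	chained_irq_enter(chip, desc);
	/* Resolve each pending source into the child domain. */
	for_each_set_bit(bit, &pending, 32)
		generic_handle_domain_irq(domain, bit);
	chained_irq_exit(chip, desc);
}

/*
 * Wired up once at probe time:
 * irq_set_chained_handler_and_data(parent_irq, demo_chained_handler, domain);
 */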
+ endmenu diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 5b67450a9538..b6acbca2248b 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o obj-$(CONFIG_RENESAS_RZA1_IRQC) += irq-renesas-rza1.o +obj-$(CONFIG_RENESAS_RZG2L_IRQC) += irq-renesas-rzg2l.o obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o @@ -103,11 +104,14 @@ obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o +obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o +obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o obj-$(CONFIG_LOONGSON_HTVEC) += irq-loongson-htvec.o obj-$(CONFIG_LOONGSON_PCH_PIC) += irq-loongson-pch-pic.o obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o +obj-$(CONFIG_LOONGSON_PCH_LPC) += irq-loongson-pch-lpc.o obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o @@ -115,3 +119,4 @@ obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o obj-$(CONFIG_APPLE_AIC) += irq-apple-aic.o obj-$(CONFIG_MCHP_EIC) += irq-mchp-eic.o +obj-$(CONFIG_SUNPLUS_SP7021_INTC) += irq-sp7021-intc.o diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c index 142a7431745f..6899e37810a8 100644 --- a/drivers/irqchip/irq-bcm6345-l1.c +++ b/drivers/irqchip/irq-bcm6345-l1.c @@ -216,11 +216,11 @@ static int bcm6345_l1_set_affinity(struct irq_data *d, enabled = intc->cpus[old_cpu]->enable_cache[word] & mask; if (enabled) __bcm6345_l1_mask(d); - cpumask_copy(irq_data_get_affinity_mask(d), dest); + irq_data_update_affinity(d, dest); if (enabled) __bcm6345_l1_unmask(d); } else { - cpumask_copy(irq_data_get_affinity_mask(d), dest); + irq_data_update_affinity(d, dest); } raw_spin_unlock_irqrestore(&intc->lock, flags); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2d25bca63d2a..262658fd5f9e 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1783,7 +1783,7 @@ static void gic_enable_nmi_support(void) * the security state of the GIC (controlled by the GICD_CTRL.DS bit) * and if Group 0 interrupts can be delivered to Linux in the non-secure * world as FIQs (controlled by the SCR_EL3.FIQ bit). 
These affect the - * the ICC_PMR_EL1 register and the priority that software assigns to + * ICC_PMR_EL1 register and the priority that software assigns to * interrupts: * * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority @@ -2381,11 +2381,17 @@ static void __init gic_acpi_setup_kvm_info(void) vgic_set_kvm_info(&gic_v3_kvm_info); } +static struct fwnode_handle *gsi_domain_handle; + +static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi) +{ + return gsi_domain_handle; +} + static int __init gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_distributor *dist; - struct fwnode_handle *domain_handle; size_t size; int i, err; @@ -2417,18 +2423,18 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) if (err) goto out_redist_unmap; - domain_handle = irq_domain_alloc_fwnode(&dist->base_address); - if (!domain_handle) { + gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); + if (!gsi_domain_handle) { err = -ENOMEM; goto out_redist_unmap; } err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs, - acpi_data.nr_redist_regions, 0, domain_handle); + acpi_data.nr_redist_regions, 0, gsi_domain_handle); if (err) goto out_fwhandle_free; - acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id); if (static_branch_likely(&supports_deactivate_key)) gic_acpi_setup_kvm_info(); @@ -2436,7 +2442,7 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) return 0; out_fwhandle_free: - irq_domain_free_fwnode(domain_handle); + irq_domain_free_fwnode(gsi_domain_handle); out_redist_unmap: for (i = 0; i < acpi_data.nr_redist_regions; i++) if (acpi_data.redist_regs[i].redist_base) diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 820404cb56bc..4c7bae0ec8f9 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1682,11 +1682,17 @@ static void __init gic_acpi_setup_kvm_info(void) vgic_set_kvm_info(&gic_v2_kvm_info); } +static struct fwnode_handle *gsi_domain_handle; + +static struct fwnode_handle *gic_v2_get_gsi_domain_id(u32 gsi) +{ + return gsi_domain_handle; +} + static int __init gic_v2_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_distributor *dist; - struct fwnode_handle *domain_handle; struct gic_chip_data *gic = &gic_data[0]; int count, ret; @@ -1724,22 +1730,22 @@ static int __init gic_v2_acpi_init(union acpi_subtable_headers *header, /* * Initialize GIC instance zero (no multi-GIC support). 
*/ - domain_handle = irq_domain_alloc_fwnode(&dist->base_address); - if (!domain_handle) { + gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); + if (!gsi_domain_handle) { pr_err("Unable to allocate domain handle\n"); gic_teardown(gic); return -ENOMEM; } - ret = __gic_init_bases(gic, domain_handle); + ret = __gic_init_bases(gic, gsi_domain_handle); if (ret) { pr_err("Failed to initialise GIC\n"); - irq_domain_free_fwnode(domain_handle); + irq_domain_free_fwnode(gsi_domain_handle); gic_teardown(gic); return ret; } - acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v2_get_gsi_domain_id); if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) gicv2m_init(NULL, gic_data[0].domain); diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c new file mode 100644 index 000000000000..327f3ab62c03 --- /dev/null +++ b/drivers/irqchip/irq-loongarch-cpu.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> + +#include <asm/loongarch.h> +#include <asm/setup.h> + +static struct irq_domain *irq_domain; +struct fwnode_handle *cpuintc_handle; + +static u32 lpic_gsi_to_irq(u32 gsi) +{ + /* Only pch irqdomain transferring is required for LoongArch. */ + if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ) + return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH); + + return 0; +} + +static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi) +{ + int id; + struct fwnode_handle *domain_handle = NULL; + + switch (gsi) { + case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ: + if (liointc_handle) + domain_handle = liointc_handle; + break; + + case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ: + if (pch_lpc_handle) + domain_handle = pch_lpc_handle; + break; + + case GSI_MIN_PCH_IRQ ... 
GSI_MAX_PCH_IRQ: + id = find_pch_pic(gsi); + if (id >= 0 && pch_pic_handle[id]) + domain_handle = pch_pic_handle[id]; + break; + } + + return domain_handle; +} + +static void mask_loongarch_irq(struct irq_data *d) +{ + clear_csr_ecfg(ECFGF(d->hwirq)); +} + +static void unmask_loongarch_irq(struct irq_data *d) +{ + set_csr_ecfg(ECFGF(d->hwirq)); +} + +static struct irq_chip cpu_irq_controller = { + .name = "CPUINTC", + .irq_mask = mask_loongarch_irq, + .irq_unmask = unmask_loongarch_irq, +}; + +static void handle_cpu_irq(struct pt_regs *regs) +{ + int hwirq; + unsigned int estat = read_csr_estat() & CSR_ESTAT_IS; + + while ((hwirq = ffs(estat))) { + estat &= ~BIT(hwirq - 1); + generic_handle_domain_irq(irq_domain, hwirq - 1); + } +} + +static int loongarch_cpu_intc_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_noprobe(irq); + irq_set_chip_and_handler(irq, &cpu_irq_controller, handle_percpu_irq); + + return 0; +} + +static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = { + .map = loongarch_cpu_intc_map, + .xlate = irq_domain_xlate_onecell, +}; + +static int __init +liointc_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_lio_pic *liointc_entry = (struct acpi_madt_lio_pic *)header; + + return liointc_acpi_init(irq_domain, liointc_entry); +} + +static int __init +eiointc_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_eio_pic *eiointc_entry = (struct acpi_madt_eio_pic *)header; + + return eiointc_acpi_init(irq_domain, eiointc_entry); +} + +static int __init acpi_cascade_irqdomain_init(void) +{ + acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC, + liointc_parse_madt, 0); + acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, + eiointc_parse_madt, 0); + return 0; +} + +static int __init cpuintc_acpi_init(union acpi_subtable_headers *header, + const unsigned long end) +{ + if (irq_domain) + return 0; + + /* Mask interrupts. 
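Clearing ECFG0_IM drops every interrupt-enable bit in the ECFG CSR at once; individual lines are re-enabled later through unmask_loongarch_irq() via set_csr_ecfg().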
*/ + clear_csr_ecfg(ECFG0_IM); + clear_csr_estat(ESTATF_IP); + + cpuintc_handle = irq_domain_alloc_fwnode(NULL); + irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM, + &loongarch_cpu_intc_irq_domain_ops, NULL); + + if (!irq_domain) + panic("Failed to add irqdomain for LoongArch CPU"); + + set_handle_irq(&handle_cpu_irq); + acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, lpic_get_gsi_domain_id); + acpi_set_gsi_to_irq_fallback(lpic_gsi_to_irq); + acpi_cascade_irqdomain_init(); + + return 0; +} + +IRQCHIP_ACPI_DECLARE(cpuintc_v1, ACPI_MADT_TYPE_CORE_PIC, + NULL, ACPI_MADT_CORE_PIC_VERSION_V1, cpuintc_acpi_init); diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c new file mode 100644 index 000000000000..80d8ca6f2d46 --- /dev/null +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -0,0 +1,395 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Loongson Extend I/O Interrupt Controller support + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#define pr_fmt(fmt) "eiointc: " fmt + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> + +#define EIOINTC_REG_NODEMAP 0x14a0 +#define EIOINTC_REG_IPMAP 0x14c0 +#define EIOINTC_REG_ENABLE 0x1600 +#define EIOINTC_REG_BOUNCE 0x1680 +#define EIOINTC_REG_ISR 0x1800 +#define EIOINTC_REG_ROUTE 0x1c00 + +#define VEC_REG_COUNT 4 +#define VEC_COUNT_PER_REG 64 +#define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG) +#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG) +#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG) +#define EIOINTC_ALL_ENABLE 0xffffffff + +#define MAX_EIO_NODES (NR_CPUS / CORES_PER_EIO_NODE) + +static int nr_pics; + +struct eiointc_priv { + u32 node; + nodemask_t node_map; + cpumask_t cpuspan_map; + struct fwnode_handle *domain_handle; + struct irq_domain *eiointc_domain; +}; + +static struct eiointc_priv *eiointc_priv[MAX_IO_PICS]; + +static void eiointc_enable(void) +{ + uint64_t misc; + + misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); + misc |= IOCSR_MISC_FUNC_EXT_IOI_EN; + iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC); +} + +static int cpu_to_eio_node(int cpu) +{ + return cpu_logical_map(cpu) / CORES_PER_EIO_NODE; +} + +static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map) +{ + int i, node, cpu_node, route_node; + unsigned char coremap; + uint32_t pos_off, data, data_byte, data_mask; + + pos_off = pos & ~3; + data_byte = pos & 3; + data_mask = ~BIT_MASK(data_byte) & 0xf; + + /* Calculate node and coremap of target irq */ + cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE; + coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE); + + for_each_online_cpu(i) { + node = cpu_to_eio_node(i); + if (!node_isset(node, *node_map)) + continue; + + /* EIO node 0 is in charge of inter-node interrupt dispatch */ + route_node = (node == mnode) ? 
cpu_node : node; + data = ((coremap | (route_node << 4)) << (data_byte * 8)); + csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE); + } +} + +static DEFINE_RAW_SPINLOCK(affinity_lock); + +static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) +{ + unsigned int cpu; + unsigned long flags; + uint32_t vector, regaddr; + struct cpumask intersect_affinity; + struct eiointc_priv *priv = d->domain->host_data; + + raw_spin_lock_irqsave(&affinity_lock, flags); + + cpumask_and(&intersect_affinity, affinity, cpu_online_mask); + cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map); + + if (cpumask_empty(&intersect_affinity)) { + raw_spin_unlock_irqrestore(&affinity_lock, flags); + return -EINVAL; + } + cpu = cpumask_first(&intersect_affinity); + + vector = d->hwirq; + regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2); + + /* Mask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), 0x0, 0); + /* Set route for target vector */ + eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); + /* Unmask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE, 0x0, 0); + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + raw_spin_unlock_irqrestore(&affinity_lock, flags); + + return IRQ_SET_MASK_OK; +} + +static int eiointc_index(int node) +{ + int i; + + for (i = 0; i < nr_pics; i++) { + if (node_isset(node, eiointc_priv[i]->node_map)) + return i; + } + + return -1; +} + +static int eiointc_router_init(unsigned int cpu) +{ + int i, bit; + uint32_t data; + uint32_t node = cpu_to_eio_node(cpu); + uint32_t index = eiointc_index(node); + + if (index < 0) { + pr_err("Error: invalid nodemap!\n"); + return -1; + } + + if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) { + eiointc_enable(); + + for (i = 0; i < VEC_COUNT / 32; i++) { + data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2))); + iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4); + } + + for (i = 0; i < VEC_COUNT / 32 / 4; i++) { + bit = BIT(1 + index); /* Route to IP[1 + index] */ + data = bit | (bit << 8) | (bit << 16) | (bit << 24); + iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4); + } + + for (i = 0; i < VEC_COUNT / 4; i++) { + /* Route to Node-0 Core-0 */ + if (index == 0) + bit = BIT(cpu_logical_map(0)); + else + bit = (eiointc_priv[index]->node << 4) | 1; + + data = bit | (bit << 8) | (bit << 16) | (bit << 24); + iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4); + } + + for (i = 0; i < VEC_COUNT / 32; i++) { + data = 0xffffffff; + iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4); + iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4); + } + } + + return 0; +} + +static void eiointc_irq_dispatch(struct irq_desc *desc) +{ + int i; + u64 pending; + bool handled = false; + struct irq_chip *chip = irq_desc_get_chip(desc); + struct eiointc_priv *priv = irq_desc_get_handler_data(desc); + + chained_irq_enter(chip, desc); + + for (i = 0; i < VEC_REG_COUNT; i++) { + pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3)); + iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3)); + while (pending) { + int bit = __ffs(pending); + int irq = bit + VEC_COUNT_PER_REG * i; + + generic_handle_domain_irq(priv->eiointc_domain, irq); + pending &= ~BIT(bit); + handled = true; + } + } + + if (!handled) + spurious_interrupt(); + + chained_irq_exit(chip, desc); +} + +static void eiointc_ack_irq(struct irq_data *d) +{ +} + +static void eiointc_mask_irq(struct irq_data *d) +{ +} + +static void eiointc_unmask_irq(struct irq_data *d) +{ +} 
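/*
 * Illustrative note, not part of the original patch: the empty ack/mask/
 * unmask callbacks above are intentional. Per-vector masking is done in
 * eiointc_set_irq_affinity() by clearing the vector's enable bit while
 * the route is rewritten. The enable-register math used there, spelled
 * out as a standalone helper: 32 vectors per 32-bit register, so e.g.
 * vector 97 maps to EIOINTC_REG_ENABLE + 0xc, bit 1.
 */
static inline u32 eiointc_enable_reg_demo(u32 vector)
{
	return EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
}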
+ +static struct irq_chip eiointc_irq_chip = { + .name = "EIOINTC", + .irq_ack = eiointc_ack_irq, + .irq_mask = eiointc_mask_irq, + .irq_unmask = eiointc_unmask_irq, + .irq_set_affinity = eiointc_set_irq_affinity, +}; + +static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int ret; + unsigned int i, type; + unsigned long hwirq = 0; + struct eiointc_priv *priv = domain->host_data; + + ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip, + priv, handle_edge_irq, NULL, NULL); + } + + return 0; +} + +static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static const struct irq_domain_ops eiointc_domain_ops = { + .translate = irq_domain_translate_onecell, + .alloc = eiointc_domain_alloc, + .free = eiointc_domain_free, +}; + +static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group) +{ + int i; + + if (cpu_has_flatmode) + node = cpu_to_node(node * CORES_PER_EIO_NODE); + + for (i = 0; i < MAX_IO_PICS; i++) { + if (node == vec_group[i].node) { + vec_group[i].parent = parent; + return; + } + } +} + +struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group) +{ + int i; + + for (i = 0; i < MAX_IO_PICS; i++) { + if (node == vec_group[i].node) + return vec_group[i].parent; + } + return NULL; +} + +static int __init +pch_pic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header; + unsigned int node = (pchpic_entry->address >> 44) & 0xf; + struct irq_domain *parent = acpi_get_vec_parent(node, pch_group); + + if (parent) + return pch_pic_acpi_init(parent, pchpic_entry); + + return -EINVAL; +} + +static int __init +pch_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header; + struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group); + + if (parent) + return pch_msi_acpi_init(parent, pchmsi_entry); + + return -EINVAL; +} + +static int __init acpi_cascade_irqdomain_init(void) +{ + acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, + pch_pic_parse_madt, 0); + acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, + pch_msi_parse_madt, 1); + return 0; +} + +int __init eiointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_eio_pic *acpi_eiointc) +{ + int i, parent_irq; + unsigned long node_map; + struct eiointc_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_eiointc); + if (!priv->domain_handle) { + pr_err("Unable to allocate domain handle\n"); + goto out_free_priv; + } + + priv->node = acpi_eiointc->node; + node_map = acpi_eiointc->node_map ?
: -1ULL; + + for_each_possible_cpu(i) { + if (node_map & (1ULL << cpu_to_eio_node(i))) { + node_set(cpu_to_eio_node(i), priv->node_map); + cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i)); + } + } + + /* Setup IRQ domain */ + priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT, + &eiointc_domain_ops, priv); + if (!priv->eiointc_domain) { + pr_err("loongson-eiointc: cannot add IRQ domain\n"); + goto out_free_handle; + } + + eiointc_priv[nr_pics++] = priv; + + eiointc_router_init(0); + + parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade); + irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv); + + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING, + "irqchip/loongarch/intc:starting", + eiointc_router_init, NULL); + + acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group); + acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group); + acpi_cascade_irqdomain_init(); + + return 0; + +out_free_handle: + irq_domain_free_fwnode(priv->domain_handle); + priv->domain_handle = NULL; +out_free_priv: + kfree(priv); + + return -ENOMEM; +} diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index 8d05d8bcf56f..c4f3c886ad61 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -23,7 +23,7 @@ #endif #define LIOINTC_CHIP_IRQ 32 -#define LIOINTC_NUM_PARENT 4 +#define LIOINTC_NUM_PARENT 4 #define LIOINTC_NUM_CORES 4 #define LIOINTC_INTC_CHIP_START 0x20 @@ -58,6 +58,8 @@ struct liointc_priv { bool has_lpc_irq_errata; }; +struct fwnode_handle *liointc_handle; + static void liointc_chained_handle_irq(struct irq_desc *desc) { struct liointc_handler_data *handler = irq_desc_get_handler_data(desc); @@ -153,97 +155,79 @@ static void liointc_resume(struct irq_chip_generic *gc) irq_gc_unlock_irqrestore(gc, flags); } -static const char * const parent_names[] = {"int0", "int1", "int2", "int3"}; -static const char * const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"}; +static int parent_irq[LIOINTC_NUM_PARENT]; +static u32 parent_int_map[LIOINTC_NUM_PARENT]; +static const char *const parent_names[] = {"int0", "int1", "int2", "int3"}; +static const char *const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"}; -static void __iomem *liointc_get_reg_byname(struct device_node *node, - const char *name) +static int liointc_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) { - int index = of_property_match_string(node, "reg-names", name); - - if (index < 0) - return NULL; - - return of_iomap(node, index); + if (WARN_ON(intsize < 1)) + return -EINVAL; + *out_hwirq = intspec[0] - GSI_MIN_CPU_IRQ; + *out_type = IRQ_TYPE_NONE; + return 0; } -static int __init liointc_of_init(struct device_node *node, - struct device_node *parent) +static const struct irq_domain_ops acpi_irq_gc_ops = { + .map = irq_map_generic_chip, + .unmap = irq_unmap_generic_chip, + .xlate = liointc_domain_xlate, +}; + +static int liointc_init(phys_addr_t addr, unsigned long size, int revision, + struct fwnode_handle *domain_handle, struct device_node *node) { + int i, err; + void __iomem *base; + struct irq_chip_type *ct; struct irq_chip_generic *gc; struct irq_domain *domain; - struct irq_chip_type *ct; struct liointc_priv *priv; - void __iomem *base; - u32 of_parent_int_map[LIOINTC_NUM_PARENT]; - int parent_irq[LIOINTC_NUM_PARENT]; - bool 
have_parent = FALSE; - int sz, i, err = 0; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - if (of_device_is_compatible(node, "loongson,liointc-2.0")) { - base = liointc_get_reg_byname(node, "main"); - if (!base) { - err = -ENODEV; - goto out_free_priv; - } + base = ioremap(addr, size); + if (!base) + goto out_free_priv; - for (i = 0; i < LIOINTC_NUM_CORES; i++) - priv->core_isr[i] = liointc_get_reg_byname(node, core_reg_names[i]); - if (!priv->core_isr[0]) { - err = -ENODEV; - goto out_iounmap_base; - } - } else { - base = of_iomap(node, 0); - if (!base) { - err = -ENODEV; - goto out_free_priv; - } + for (i = 0; i < LIOINTC_NUM_CORES; i++) + priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS; - for (i = 0; i < LIOINTC_NUM_CORES; i++) - priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS; - } + for (i = 0; i < LIOINTC_NUM_PARENT; i++) + priv->handler[i].parent_int_map = parent_int_map[i]; - for (i = 0; i < LIOINTC_NUM_PARENT; i++) { - parent_irq[i] = of_irq_get_byname(node, parent_names[i]); - if (parent_irq[i] > 0) - have_parent = TRUE; - } - if (!have_parent) { - err = -ENODEV; - goto out_iounmap_isr; - } + if (revision > 1) { + for (i = 0; i < LIOINTC_NUM_CORES; i++) { + int index = of_property_match_string(node, + "reg-names", core_reg_names[i]); - sz = of_property_read_variable_u32_array(node, - "loongson,parent_int_map", - &of_parent_int_map[0], - LIOINTC_NUM_PARENT, - LIOINTC_NUM_PARENT); - if (sz < 4) { - pr_err("loongson-liointc: No parent_int_map\n"); - err = -ENODEV; - goto out_iounmap_isr; - } + if (index < 0) + return -EINVAL; - for (i = 0; i < LIOINTC_NUM_PARENT; i++) - priv->handler[i].parent_int_map = of_parent_int_map[i]; + priv->core_isr[i] = of_iomap(node, index); + } + } /* Setup IRQ domain */ - domain = irq_domain_add_linear(node, 32, + if (!acpi_disabled) + domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ, + &acpi_irq_gc_ops, priv); + else + domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ, &irq_generic_chip_ops, priv); if (!domain) { pr_err("loongson-liointc: cannot add IRQ domain\n"); - err = -EINVAL; - goto out_iounmap_isr; + goto out_iounmap; } - err = irq_alloc_domain_generic_chips(domain, 32, 1, - node->full_name, handle_level_irq, - IRQ_NOPROBE, 0, 0); + err = irq_alloc_domain_generic_chips(domain, LIOINTC_CHIP_IRQ, 1, + (node ? 
node->full_name : "LIOINTC"), + handle_level_irq, 0, IRQ_NOPROBE, 0); if (err) { pr_err("loongson-liointc: unable to register IRQ domain\n"); goto out_free_domain; @@ -299,24 +283,93 @@ static int __init liointc_of_init(struct device_node *node, liointc_chained_handle_irq, &priv->handler[i]); } + liointc_handle = domain_handle; return 0; out_free_domain: irq_domain_remove(domain); -out_iounmap_isr: - for (i = 0; i < LIOINTC_NUM_CORES; i++) { - if (!priv->core_isr[i]) - continue; - iounmap(priv->core_isr[i]); - } -out_iounmap_base: +out_iounmap: iounmap(base); out_free_priv: kfree(priv); - return err; + return -EINVAL; +} + +#ifdef CONFIG_OF + +static int __init liointc_of_init(struct device_node *node, + struct device_node *parent) +{ + bool have_parent = FALSE; + int sz, i, index, revision, err = 0; + struct resource res; + + if (!of_device_is_compatible(node, "loongson,liointc-2.0")) { + index = 0; + revision = 1; + } else { + index = of_property_match_string(node, "reg-names", "main"); + revision = 2; + } + + if (of_address_to_resource(node, index, &res)) + return -EINVAL; + + for (i = 0; i < LIOINTC_NUM_PARENT; i++) { + parent_irq[i] = of_irq_get_byname(node, parent_names[i]); + if (parent_irq[i] > 0) + have_parent = TRUE; + } + if (!have_parent) + return -ENODEV; + + sz = of_property_read_variable_u32_array(node, + "loongson,parent_int_map", + &parent_int_map[0], + LIOINTC_NUM_PARENT, + LIOINTC_NUM_PARENT); + if (sz < 4) { + pr_err("loongson-liointc: No parent_int_map\n"); + return -ENODEV; + } + + err = liointc_init(res.start, resource_size(&res), + revision, of_node_to_fwnode(node), node); + if (err < 0) + return err; + + return 0; } IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init); IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init); IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init); + +#endif + +#ifdef CONFIG_ACPI +int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic *acpi_liointc) +{ + int ret; + struct fwnode_handle *domain_handle; + + parent_int_map[0] = acpi_liointc->cascade_map[0]; + parent_int_map[1] = acpi_liointc->cascade_map[1]; + + parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]); + parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]); + + domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_liointc); + if (!domain_handle) { + pr_err("Unable to allocate domain handle\n"); + return -ENOMEM; + } + ret = liointc_init(acpi_liointc->address, acpi_liointc->size, + 1, domain_handle, NULL); + if (ret) + irq_domain_free_fwnode(domain_handle); + + return ret; +} +#endif diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c new file mode 100644 index 000000000000..bf2324910a75 --- /dev/null +++ b/drivers/irqchip/irq-loongson-pch-lpc.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Loongson LPC Interrupt Controller support + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#define pr_fmt(fmt) "lpc: " fmt + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> + +/* Registers */ +#define LPC_INT_CTL 0x00 +#define LPC_INT_ENA 0x04 +#define LPC_INT_STS 0x08 +#define LPC_INT_CLR 0x0c +#define LPC_INT_POL 0x10 +#define LPC_COUNT 16 + +/* LPC_INT_CTL */ +#define LPC_INT_CTL_EN BIT(31) + +struct pch_lpc { + void __iomem *base; + 
struct irq_domain *lpc_domain; + raw_spinlock_t lpc_lock; + u32 saved_reg_ctl; + u32 saved_reg_ena; + u32 saved_reg_pol; +}; + +struct fwnode_handle *pch_lpc_handle; + +static void lpc_irq_ack(struct irq_data *d) +{ + unsigned long flags; + struct pch_lpc *priv = d->domain->host_data; + + raw_spin_lock_irqsave(&priv->lpc_lock, flags); + writel(0x1 << d->hwirq, priv->base + LPC_INT_CLR); + raw_spin_unlock_irqrestore(&priv->lpc_lock, flags); +} + +static void lpc_irq_mask(struct irq_data *d) +{ + unsigned long flags; + struct pch_lpc *priv = d->domain->host_data; + + raw_spin_lock_irqsave(&priv->lpc_lock, flags); + writel(readl(priv->base + LPC_INT_ENA) & (~(0x1 << (d->hwirq))), + priv->base + LPC_INT_ENA); + raw_spin_unlock_irqrestore(&priv->lpc_lock, flags); +} + +static void lpc_irq_unmask(struct irq_data *d) +{ + unsigned long flags; + struct pch_lpc *priv = d->domain->host_data; + + raw_spin_lock_irqsave(&priv->lpc_lock, flags); + writel(readl(priv->base + LPC_INT_ENA) | (0x1 << (d->hwirq)), + priv->base + LPC_INT_ENA); + raw_spin_unlock_irqrestore(&priv->lpc_lock, flags); +} + +static int lpc_irq_set_type(struct irq_data *d, unsigned int type) +{ + u32 val; + u32 mask = 0x1 << (d->hwirq); + struct pch_lpc *priv = d->domain->host_data; + + if (!(type & IRQ_TYPE_LEVEL_MASK)) + return 0; + + val = readl(priv->base + LPC_INT_POL); + + if (type == IRQ_TYPE_LEVEL_HIGH) + val |= mask; + else + val &= ~mask; + + writel(val, priv->base + LPC_INT_POL); + + return 0; +} + +static const struct irq_chip pch_lpc_irq_chip = { + .name = "PCH LPC", + .irq_mask = lpc_irq_mask, + .irq_unmask = lpc_irq_unmask, + .irq_ack = lpc_irq_ack, + .irq_set_type = lpc_irq_set_type, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +static void lpc_irq_dispatch(struct irq_desc *desc) +{ + u32 pending, bit; + struct irq_chip *chip = irq_desc_get_chip(desc); + struct pch_lpc *priv = irq_desc_get_handler_data(desc); + + chained_irq_enter(chip, desc); + + pending = readl(priv->base + LPC_INT_ENA); + pending &= readl(priv->base + LPC_INT_STS); + if (!pending) + spurious_interrupt(); + + while (pending) { + bit = __ffs(pending); + + generic_handle_domain_irq(priv->lpc_domain, bit); + pending &= ~BIT(bit); + } + chained_irq_exit(chip, desc); +} + +static int pch_lpc_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(irq, &pch_lpc_irq_chip, handle_level_irq); + return 0; +} + +static const struct irq_domain_ops pch_lpc_domain_ops = { + .map = pch_lpc_map, + .translate = irq_domain_translate_twocell, +}; + +static void pch_lpc_reset(struct pch_lpc *priv) +{ + /* Enable the LPC interrupt, bit31: en bit30: edge */ + writel(LPC_INT_CTL_EN, priv->base + LPC_INT_CTL); + writel(0, priv->base + LPC_INT_ENA); + /* Clear all 18-bit interrpt bit */ + writel(GENMASK(17, 0), priv->base + LPC_INT_CLR); +} + +static int pch_lpc_disabled(struct pch_lpc *priv) +{ + return (readl(priv->base + LPC_INT_ENA) == 0xffffffff) && + (readl(priv->base + LPC_INT_STS) == 0xffffffff); +} + +int __init pch_lpc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lpc_pic *acpi_pchlpc) +{ + int parent_irq; + struct pch_lpc *priv; + struct irq_fwspec fwspec; + struct fwnode_handle *irq_handle; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + raw_spin_lock_init(&priv->lpc_lock); + + priv->base = ioremap(acpi_pchlpc->address, acpi_pchlpc->size); + if (!priv->base) + goto free_priv; + + if (pch_lpc_disabled(priv)) { + pr_err("Failed to get LPC status\n"); + goto iounmap_base; + } + + 
irq_handle = irq_domain_alloc_named_fwnode("lpcintc"); + if (!irq_handle) { + pr_err("Unable to allocate domain handle\n"); + goto iounmap_base; + } + + priv->lpc_domain = irq_domain_create_linear(irq_handle, LPC_COUNT, + &pch_lpc_domain_ops, priv); + if (!priv->lpc_domain) { + pr_err("Failed to create IRQ domain\n"); + goto free_irq_handle; + } + pch_lpc_reset(priv); + + fwspec.fwnode = parent->fwnode; + fwspec.param[0] = acpi_pchlpc->cascade + GSI_MIN_PCH_IRQ; + fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH; + fwspec.param_count = 2; + parent_irq = irq_create_fwspec_mapping(&fwspec); + irq_set_chained_handler_and_data(parent_irq, lpc_irq_dispatch, priv); + + pch_lpc_handle = irq_handle; + return 0; + +free_irq_handle: + irq_domain_free_fwnode(irq_handle); +iounmap_base: + iounmap(priv->base); +free_priv: + kfree(priv); + + return -ENOMEM; +} diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c index e3801c4a77ed..d0e8551bebfa 100644 --- a/drivers/irqchip/irq-loongson-pch-msi.c +++ b/drivers/irqchip/irq-loongson-pch-msi.c @@ -15,6 +15,8 @@ #include <linux/pci.h> #include <linux/slab.h> +static int nr_pics; + struct pch_msi_data { struct mutex msi_map_lock; phys_addr_t doorbell; @@ -23,6 +25,8 @@ struct pch_msi_data { unsigned long *msi_map; }; +static struct fwnode_handle *pch_msi_handle[MAX_IO_PICS]; + static void pch_msi_mask_msi_irq(struct irq_data *d) { pci_msi_mask_irq(d); @@ -154,12 +158,12 @@ static const struct irq_domain_ops pch_msi_middle_domain_ops = { }; static int pch_msi_init_domains(struct pch_msi_data *priv, - struct device_node *node, - struct irq_domain *parent) + struct irq_domain *parent, + struct fwnode_handle *domain_handle) { struct irq_domain *middle_domain, *msi_domain; - middle_domain = irq_domain_create_linear(of_node_to_fwnode(node), + middle_domain = irq_domain_create_linear(domain_handle, priv->num_irqs, &pch_msi_middle_domain_ops, priv); @@ -171,7 +175,7 @@ static int pch_msi_init_domains(struct pch_msi_data *priv, middle_domain->parent = parent; irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS); - msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), + msi_domain = pci_msi_create_irq_domain(domain_handle, &pch_msi_domain_info, middle_domain); if (!msi_domain) { @@ -183,19 +187,11 @@ static int pch_msi_init_domains(struct pch_msi_data *priv, return 0; } -static int pch_msi_init(struct device_node *node, - struct device_node *parent) +static int pch_msi_init(phys_addr_t msg_address, int irq_base, int irq_count, + struct irq_domain *parent_domain, struct fwnode_handle *domain_handle) { - struct pch_msi_data *priv; - struct irq_domain *parent_domain; - struct resource res; int ret; - - parent_domain = irq_find_host(parent); - if (!parent_domain) { - pr_err("Failed to find the parent domain\n"); - return -ENXIO; - } + struct pch_msi_data *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -203,48 +199,95 @@ static int pch_msi_init(struct device_node *node, mutex_init(&priv->msi_map_lock); - ret = of_address_to_resource(node, 0, &res); - if (ret) { - pr_err("Failed to allocate resource\n"); - goto err_priv; - } - - priv->doorbell = res.start; - - if (of_property_read_u32(node, "loongson,msi-base-vec", - &priv->irq_first)) { - pr_err("Unable to parse MSI vec base\n"); - ret = -EINVAL; - goto err_priv; - } - - if (of_property_read_u32(node, "loongson,msi-num-vecs", - &priv->num_irqs)) { - pr_err("Unable to parse MSI vec number\n"); - ret = -EINVAL; - goto err_priv; - } + priv->doorbell = msg_address; 
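	/*
	 * msg_address is the MSI doorbell that devices write their message
	 * to; vectors irq_first .. irq_first + num_irqs - 1 are handed out
	 * from the msi_map bitmap allocated below.
	 */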
+ priv->irq_first = irq_base; + priv->num_irqs = irq_count; priv->msi_map = bitmap_zalloc(priv->num_irqs, GFP_KERNEL); - if (!priv->msi_map) { - ret = -ENOMEM; + if (!priv->msi_map) goto err_priv; - } pr_debug("Registering %d MSIs, starting at %d\n", priv->num_irqs, priv->irq_first); - ret = pch_msi_init_domains(priv, node, parent_domain); + ret = pch_msi_init_domains(priv, parent_domain, domain_handle); if (ret) goto err_map; + pch_msi_handle[nr_pics++] = domain_handle; return 0; err_map: bitmap_free(priv->msi_map); err_priv: kfree(priv); - return ret; + + return -EINVAL; +} + +#ifdef CONFIG_OF +static int pch_msi_of_init(struct device_node *node, struct device_node *parent) +{ + int err; + int irq_base, irq_count; + struct resource res; + struct irq_domain *parent_domain; + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("Failed to find the parent domain\n"); + return -ENXIO; + } + + if (of_address_to_resource(node, 0, &res)) { + pr_err("Failed to allocate resource\n"); + return -EINVAL; + } + + if (of_property_read_u32(node, "loongson,msi-base-vec", &irq_base)) { + pr_err("Unable to parse MSI vec base\n"); + return -EINVAL; + } + + if (of_property_read_u32(node, "loongson,msi-num-vecs", &irq_count)) { + pr_err("Unable to parse MSI vec number\n"); + return -EINVAL; + } + + err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_node_to_fwnode(node)); + if (err < 0) + return err; + + return 0; } -IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_init); +IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init); +#endif + +#ifdef CONFIG_ACPI +struct fwnode_handle *get_pch_msi_handle(int pci_segment) +{ + int i; + + for (i = 0; i < MAX_IO_PICS; i++) { + if (msi_group[i].pci_segment == pci_segment) + return pch_msi_handle[i]; + } + return NULL; +} + +int __init pch_msi_acpi_init(struct irq_domain *parent, + struct acpi_madt_msi_pic *acpi_pchmsi) +{ + int ret; + struct fwnode_handle *domain_handle; + + domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchmsi); + ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start, + acpi_pchmsi->count, parent, domain_handle); + if (ret < 0) + irq_domain_free_fwnode(domain_handle); + + return ret; +} +#endif diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index a4eb8a2181c7..b6f1392964b1 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -33,13 +33,40 @@ #define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG) #define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG) +static int nr_pics; + struct pch_pic { void __iomem *base; struct irq_domain *pic_domain; u32 ht_vec_base; raw_spinlock_t pic_lock; + u32 vec_count; + u32 gsi_base; }; +static struct pch_pic *pch_pic_priv[MAX_IO_PICS]; + +struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; + +int find_pch_pic(u32 gsi) +{ + int i; + + /* Find the PCH_PIC that manages this GSI. 
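Each pch_pic instance owns the half-open GSI range [gsi_base, gsi_base + vec_count), recorded when pch_pic_init() registers it.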
*/ + for (i = 0; i < MAX_IO_PICS; i++) { + struct pch_pic *priv = pch_pic_priv[i]; + + if (!priv) + return -1; + + if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count)) + return i; + } + + pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi); + return -1; +} + static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit) { u32 reg; @@ -139,6 +166,28 @@ static struct irq_chip pch_pic_irq_chip = { .irq_set_type = pch_pic_set_type, }; +static int pch_pic_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct pch_pic *priv = d->host_data; + struct device_node *of_node = to_of_node(fwspec->fwnode); + + if (fwspec->param_count < 1) + return -EINVAL; + + if (of_node) { + *hwirq = fwspec->param[0] + priv->ht_vec_base; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + } else { + *hwirq = fwspec->param[0] - priv->gsi_base; + *type = IRQ_TYPE_NONE; + } + + return 0; +} + static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { @@ -149,13 +198,13 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, struct irq_fwspec parent_fwspec; struct pch_pic *priv = domain->host_data; - err = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type); + err = pch_pic_domain_translate(domain, fwspec, &hwirq, &type); if (err) return err; parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.param_count = 1; - parent_fwspec.param[0] = hwirq + priv->ht_vec_base; + parent_fwspec.param[0] = hwirq; err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec); if (err) @@ -170,7 +219,7 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, } static const struct irq_domain_ops pch_pic_domain_ops = { - .translate = irq_domain_translate_twocell, + .translate = pch_pic_domain_translate, .alloc = pch_pic_alloc, .free = irq_domain_free_irqs_parent, }; @@ -180,7 +229,7 @@ static void pch_pic_reset(struct pch_pic *priv) int i; for (i = 0; i < PIC_COUNT; i++) { - /* Write vectored ID */ + /* Write vector ID */ writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i)); /* Hardcode route to HT0 Lo */ writeb(1, priv->base + PCH_INT_ROUTE(i)); @@ -198,50 +247,37 @@ static void pch_pic_reset(struct pch_pic *priv) } } -static int pch_pic_of_init(struct device_node *node, - struct device_node *parent) +static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, + struct irq_domain *parent_domain, struct fwnode_handle *domain_handle, + u32 gsi_base) { struct pch_pic *priv; - struct irq_domain *parent_domain; - int err; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; raw_spin_lock_init(&priv->pic_lock); - priv->base = of_iomap(node, 0); - if (!priv->base) { - err = -ENOMEM; + priv->base = ioremap(addr, size); + if (!priv->base) goto free_priv; - } - parent_domain = irq_find_host(parent); - if (!parent_domain) { - pr_err("Failed to find the parent domain\n"); - err = -ENXIO; - goto iounmap_base; - } - - if (of_property_read_u32(node, "loongson,pic-base-vec", - &priv->ht_vec_base)) { - pr_err("Failed to determine pic-base-vec\n"); - err = -EINVAL; - goto iounmap_base; - } + priv->ht_vec_base = vec_base; + priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1; + priv->gsi_base = gsi_base; priv->pic_domain = irq_domain_create_hierarchy(parent_domain, 0, - PIC_COUNT, - of_node_to_fwnode(node), - &pch_pic_domain_ops, - priv); + priv->vec_count, domain_handle, + &pch_pic_domain_ops, priv); + if 
(!priv->pic_domain) { pr_err("Failed to create IRQ domain\n"); - err = -ENOMEM; goto iounmap_base; } pch_pic_reset(priv); + pch_pic_handle[nr_pics] = domain_handle; + pch_pic_priv[nr_pics++] = priv; return 0; @@ -250,7 +286,86 @@ iounmap_base: free_priv: kfree(priv); - return err; + return -EINVAL; +} + +#ifdef CONFIG_OF + +static int pch_pic_of_init(struct device_node *node, + struct device_node *parent) +{ + int err, vec_base; + struct resource res; + struct irq_domain *parent_domain; + + if (of_address_to_resource(node, 0, &res)) + return -EINVAL; + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("Failed to find the parent domain\n"); + return -ENXIO; + } + + if (of_property_read_u32(node, "loongson,pic-base-vec", &vec_base)) { + pr_err("Failed to determine pic-base-vec\n"); + return -EINVAL; + } + + err = pch_pic_init(res.start, resource_size(&res), vec_base, + parent_domain, of_node_to_fwnode(node), 0); + if (err < 0) + return err; + + return 0; } IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init); + +#endif + +#ifdef CONFIG_ACPI +static int __init +pch_lpc_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_lpc_pic *pchlpc_entry = (struct acpi_madt_lpc_pic *)header; + + return pch_lpc_acpi_init(pch_pic_priv[0]->pic_domain, pchlpc_entry); +} + +static int __init acpi_cascade_irqdomain_init(void) +{ + acpi_table_parse_madt(ACPI_MADT_TYPE_LPC_PIC, + pch_lpc_parse_madt, 0); + return 0; +} + +int __init pch_pic_acpi_init(struct irq_domain *parent, + struct acpi_madt_bio_pic *acpi_pchpic) +{ + int ret, vec_base; + struct fwnode_handle *domain_handle; + + vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ; + + domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchpic); + if (!domain_handle) { + pr_err("Unable to allocate domain handle\n"); + return -ENOMEM; + } + + ret = pch_pic_init(acpi_pchpic->address, acpi_pchpic->size, + vec_base, parent, domain_handle, acpi_pchpic->gsi_base); + + if (ret < 0) { + irq_domain_free_fwnode(domain_handle); + return ret; + } + + if (acpi_pchpic->id == 0) + acpi_cascade_irqdomain_init(); + + return ret; +} +#endif diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index ff89b36267dd..1ba0f1555c80 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -52,13 +52,15 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); static DEFINE_SPINLOCK(gic_lock); static struct irq_domain *gic_irq_domain; -static struct irq_domain *gic_ipi_domain; static int gic_shared_intrs; static unsigned int gic_cpu_pin; static unsigned int timer_cpu_pin; static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; + +#ifdef CONFIG_GENERIC_IRQ_IPI static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS); +#endif /* CONFIG_GENERIC_IRQ_IPI */ static struct gic_all_vpes_chip_data { u32 map; @@ -472,9 +474,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, u32 map; if (hwirq >= GIC_SHARED_HWIRQ_BASE) { +#ifdef CONFIG_GENERIC_IRQ_IPI /* verify that shared irqs don't conflict with an IPI irq */ if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv)) return -EBUSY; +#endif /* CONFIG_GENERIC_IRQ_IPI */ err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, &gic_level_irq_controller, @@ -567,6 +571,8 @@ static const struct irq_domain_ops gic_irq_domain_ops = { .map = gic_irq_domain_map, }; +#ifdef CONFIG_GENERIC_IRQ_IPI + static int 
gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, @@ -670,6 +676,48 @@ static const struct irq_domain_ops gic_ipi_domain_ops = { .match = gic_ipi_domain_match, }; +static int gic_register_ipi_domain(struct device_node *node) +{ + struct irq_domain *gic_ipi_domain; + unsigned int v[2], num_ipis; + + gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, + IRQ_DOMAIN_FLAG_IPI_PER_CPU, + GIC_NUM_LOCAL_INTRS + gic_shared_intrs, + node, &gic_ipi_domain_ops, NULL); + if (!gic_ipi_domain) { + pr_err("Failed to add IPI domain"); + return -ENXIO; + } + + irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); + + if (node && + !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { + bitmap_set(ipi_resrv, v[0], v[1]); + } else { + /* + * Reserve 2 interrupts per possible CPU/VP for use as IPIs, + * meeting the requirements of arch/mips SMP. + */ + num_ipis = 2 * num_possible_cpus(); + bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis); + } + + bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); + + return 0; +} + +#else /* !CONFIG_GENERIC_IRQ_IPI */ + +static inline int gic_register_ipi_domain(struct device_node *node) +{ + return 0; +} + +#endif /* !CONFIG_GENERIC_IRQ_IPI */ + static int gic_cpu_startup(unsigned int cpu) { /* Enable or disable EIC */ @@ -688,11 +736,12 @@ static int gic_cpu_startup(unsigned int cpu) static int __init gic_of_init(struct device_node *node, struct device_node *parent) { - unsigned int cpu_vec, i, gicconfig, v[2], num_ipis; + unsigned int cpu_vec, i, gicconfig; unsigned long reserved; phys_addr_t gic_base; struct resource res; size_t gic_len; + int ret; /* Find the first available CPU vector. */ i = 0; @@ -734,6 +783,10 @@ static int __init gic_of_init(struct device_node *node, } mips_gic_base = ioremap(gic_base, gic_len); + if (!mips_gic_base) { + pr_err("Failed to ioremap gic_base\n"); + return -ENOMEM; + } gicconfig = read_gic_config(); gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig); @@ -780,30 +833,9 @@ static int __init gic_of_init(struct device_node *node, return -ENXIO; } - gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, - IRQ_DOMAIN_FLAG_IPI_PER_CPU, - GIC_NUM_LOCAL_INTRS + gic_shared_intrs, - node, &gic_ipi_domain_ops, NULL); - if (!gic_ipi_domain) { - pr_err("Failed to add IPI domain"); - return -ENXIO; - } - - irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); - - if (node && - !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { - bitmap_set(ipi_resrv, v[0], v[1]); - } else { - /* - * Reserve 2 interrupts per possible CPU/VP for use as IPIs, - * meeting the requirements of arch/mips SMP. - */ - num_ipis = 2 * num_possible_cpus(); - bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis); - } - - bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); + ret = gic_register_ipi_domain(node); + if (ret) + return ret; board_bind_eic_interrupt = &gic_bind_eic_interrupt; diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c new file mode 100644 index 000000000000..25fd8ee66565 --- /dev/null +++ b/drivers/irqchip/irq-renesas-rzg2l.c @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RZ/G2L IRQC Driver + * + * Copyright (C) 2022 Renesas Electronics Corporation. 
+ * + * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/spinlock.h> + +#define IRQC_IRQ_START 1 +#define IRQC_IRQ_COUNT 8 +#define IRQC_TINT_START (IRQC_IRQ_START + IRQC_IRQ_COUNT) +#define IRQC_TINT_COUNT 32 +#define IRQC_NUM_IRQ (IRQC_TINT_START + IRQC_TINT_COUNT) + +#define ISCR 0x10 +#define IITSR 0x14 +#define TSCR 0x20 +#define TITSR0 0x24 +#define TITSR1 0x28 +#define TITSR0_MAX_INT 16 +#define TITSEL_WIDTH 0x2 +#define TSSR(n) (0x30 + ((n) * 4)) +#define TIEN BIT(7) +#define TSSEL_SHIFT(n) (8 * (n)) +#define TSSEL_MASK GENMASK(7, 0) +#define IRQ_MASK 0x3 + +#define TSSR_OFFSET(n) ((n) % 4) +#define TSSR_INDEX(n) ((n) / 4) + +#define TITSR_TITSEL_EDGE_RISING 0 +#define TITSR_TITSEL_EDGE_FALLING 1 +#define TITSR_TITSEL_LEVEL_HIGH 2 +#define TITSR_TITSEL_LEVEL_LOW 3 + +#define IITSR_IITSEL(n, sense) ((sense) << ((n) * 2)) +#define IITSR_IITSEL_LEVEL_LOW 0 +#define IITSR_IITSEL_EDGE_FALLING 1 +#define IITSR_IITSEL_EDGE_RISING 2 +#define IITSR_IITSEL_EDGE_BOTH 3 +#define IITSR_IITSEL_MASK(n) IITSR_IITSEL((n), 3) + +#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x)) +#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x)) + +struct rzg2l_irqc_priv { + void __iomem *base; + struct irq_fwspec fwspec[IRQC_NUM_IRQ]; + raw_spinlock_t lock; +}; + +static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data) +{ + return data->domain->host_data; +} + +static void rzg2l_irq_eoi(struct irq_data *d) +{ + unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START; + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + u32 bit = BIT(hw_irq); + u32 reg; + + reg = readl_relaxed(priv->base + ISCR); + if (reg & bit) + writel_relaxed(reg & ~bit, priv->base + ISCR); +} + +static void rzg2l_tint_eoi(struct irq_data *d) +{ + unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START; + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + u32 bit = BIT(hw_irq); + u32 reg; + + reg = readl_relaxed(priv->base + TSCR); + if (reg & bit) + writel_relaxed(reg & ~bit, priv->base + TSCR); +} + +static void rzg2l_irqc_eoi(struct irq_data *d) +{ + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hw_irq = irqd_to_hwirq(d); + + raw_spin_lock(&priv->lock); + if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT) + rzg2l_irq_eoi(d); + else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) + rzg2l_tint_eoi(d); + raw_spin_unlock(&priv->lock); + irq_chip_eoi_parent(d); +} + +static void rzg2l_irqc_irq_disable(struct irq_data *d) +{ + unsigned int hw_irq = irqd_to_hwirq(d); + + if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) { + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + u32 offset = hw_irq - IRQC_TINT_START; + u32 tssr_offset = TSSR_OFFSET(offset); + u8 tssr_index = TSSR_INDEX(offset); + u32 reg; + + raw_spin_lock(&priv->lock); + reg = readl_relaxed(priv->base + TSSR(tssr_index)); + reg &= ~(TSSEL_MASK << tssr_offset); + writel_relaxed(reg, priv->base + TSSR(tssr_index)); + raw_spin_unlock(&priv->lock); + } + irq_chip_disable_parent(d); +} + +static void rzg2l_irqc_irq_enable(struct irq_data *d) +{ + unsigned int hw_irq = irqd_to_hwirq(d); + + if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) { + struct rzg2l_irqc_priv *priv = 
irq_data_to_priv(d); + unsigned long tint = (uintptr_t)d->chip_data; + u32 offset = hw_irq - IRQC_TINT_START; + u32 tssr_offset = TSSR_OFFSET(offset); + u8 tssr_index = TSSR_INDEX(offset); + u32 reg; + + raw_spin_lock(&priv->lock); + reg = readl_relaxed(priv->base + TSSR(tssr_index)); + reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset); + writel_relaxed(reg, priv->base + TSSR(tssr_index)); + raw_spin_unlock(&priv->lock); + } + irq_chip_enable_parent(d); +} + +static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type) +{ + unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START; + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + u16 sense, tmp; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_LEVEL_LOW: + sense = IITSR_IITSEL_LEVEL_LOW; + break; + + case IRQ_TYPE_EDGE_FALLING: + sense = IITSR_IITSEL_EDGE_FALLING; + break; + + case IRQ_TYPE_EDGE_RISING: + sense = IITSR_IITSEL_EDGE_RISING; + break; + + case IRQ_TYPE_EDGE_BOTH: + sense = IITSR_IITSEL_EDGE_BOTH; + break; + + default: + return -EINVAL; + } + + raw_spin_lock(&priv->lock); + tmp = readl_relaxed(priv->base + IITSR); + tmp &= ~IITSR_IITSEL_MASK(hw_irq); + tmp |= IITSR_IITSEL(hw_irq, sense); + writel_relaxed(tmp, priv->base + IITSR); + raw_spin_unlock(&priv->lock); + + return 0; +} + +static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type) +{ + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hwirq = irqd_to_hwirq(d); + u32 titseln = hwirq - IRQC_TINT_START; + u32 offset; + u8 sense; + u32 reg; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_EDGE_RISING: + sense = TITSR_TITSEL_EDGE_RISING; + break; + + case IRQ_TYPE_EDGE_FALLING: + sense = TITSR_TITSEL_EDGE_FALLING; + break; + + default: + return -EINVAL; + } + + offset = TITSR0; + if (titseln >= TITSR0_MAX_INT) { + titseln -= TITSR0_MAX_INT; + offset = TITSR1; + } + + raw_spin_lock(&priv->lock); + reg = readl_relaxed(priv->base + offset); + reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH)); + reg |= sense << (titseln * TITSEL_WIDTH); + writel_relaxed(reg, priv->base + offset); + raw_spin_unlock(&priv->lock); + + return 0; +} + +static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type) +{ + unsigned int hw_irq = irqd_to_hwirq(d); + int ret = -EINVAL; + + if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT) + ret = rzg2l_irq_set_type(d, type); + else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) + ret = rzg2l_tint_set_edge(d, type); + if (ret) + return ret; + + return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH); +} + +static const struct irq_chip irqc_chip = { + .name = "rzg2l-irqc", + .irq_eoi = rzg2l_irqc_eoi, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_disable = rzg2l_irqc_irq_disable, + .irq_enable = rzg2l_irqc_irq_enable, + .irq_get_irqchip_state = irq_chip_get_parent_state, + .irq_set_irqchip_state = irq_chip_set_parent_state, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = rzg2l_irqc_set_type, + .flags = IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE, +}; + +static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct rzg2l_irqc_priv *priv = domain->host_data; + unsigned long tint = 0; + irq_hw_number_t hwirq; + unsigned int type; + int ret; + + ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type); + if (ret) + return ret; + + /* + * For TINT interrupts ie where pinctrl driver is child of irqc domain + * the hwirq and TINT are 
encoded in fwspec->param[0]. + * hwirq for TINT range from 9-40, hwirq is embedded 0-15 bits and TINT + * from 16-31 bits. TINT from the pinctrl driver needs to be programmed + * in IRQC registers to enable a given gpio pin as interrupt. + */ + if (hwirq > IRQC_IRQ_COUNT) { + tint = TINT_EXTRACT_GPIOINT(hwirq); + hwirq = TINT_EXTRACT_HWIRQ(hwirq); + + if (hwirq < IRQC_TINT_START) + return -EINVAL; + } + + if (hwirq > (IRQC_NUM_IRQ - 1)) + return -EINVAL; + + ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &irqc_chip, + (void *)(uintptr_t)tint); + if (ret) + return ret; + + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]); +} + +static const struct irq_domain_ops rzg2l_irqc_domain_ops = { + .alloc = rzg2l_irqc_alloc, + .free = irq_domain_free_irqs_common, + .translate = irq_domain_translate_twocell, +}; + +static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv, + struct device_node *np) +{ + struct of_phandle_args map; + unsigned int i; + int ret; + + for (i = 0; i < IRQC_NUM_IRQ; i++) { + ret = of_irq_parse_one(np, i, &map); + if (ret) + return ret; + of_phandle_args_to_fwspec(np, map.args, map.args_count, + &priv->fwspec[i]); + } + + return 0; +} + +static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent) +{ + struct irq_domain *irq_domain, *parent_domain; + struct platform_device *pdev; + struct reset_control *resetn; + struct rzg2l_irqc_priv *priv; + int ret; + + pdev = of_find_device_by_node(node); + if (!pdev) + return -ENODEV; + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + dev_err(&pdev->dev, "cannot find parent domain\n"); + return -ENODEV; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + ret = rzg2l_irqc_parse_interrupts(priv, node); + if (ret) { + dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret); + return ret; + } + + resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(resetn)) + return PTR_ERR(resetn); + + ret = reset_control_deassert(resetn); + if (ret) { + dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret); + return ret; + } + + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret); + goto pm_disable; + } + + raw_spin_lock_init(&priv->lock); + + irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ, + node, &rzg2l_irqc_domain_ops, + priv); + if (!irq_domain) { + dev_err(&pdev->dev, "failed to add irq domain\n"); + ret = -ENOMEM; + goto pm_put; + } + + return 0; + +pm_put: + pm_runtime_put(&pdev->dev); +pm_disable: + pm_runtime_disable(&pdev->dev); + reset_control_assert(resetn); + return ret; +} + +IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc) +IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init) +IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc) +MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>"); +MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index bb87e4c3b88e..ba4938188570 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -60,10 +60,13 @@ #define PLIC_DISABLE_THRESHOLD 0x7 #define PLIC_ENABLE_THRESHOLD 0 +#define PLIC_QUIRK_EDGE_INTERRUPT 0 + struct plic_priv { struct cpumask 
lmask; struct irq_domain *irqdomain; void __iomem *regs; + unsigned long plic_quirks; }; struct plic_handler { @@ -81,6 +84,8 @@ static int plic_parent_irq __ro_after_init; static bool plic_cpuhp_setup_done __ro_after_init; static DEFINE_PER_CPU(struct plic_handler, plic_handlers); +static int plic_irq_set_type(struct irq_data *d, unsigned int type); + static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable) { u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32); @@ -103,37 +108,43 @@ static inline void plic_irq_toggle(const struct cpumask *mask, struct irq_data *d, int enable) { int cpu; - struct plic_priv *priv = irq_data_get_irq_chip_data(d); - writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); for_each_cpu(cpu, mask) { struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); - if (handler->present && - cpumask_test_cpu(cpu, &handler->priv->lmask)) - plic_toggle(handler, d->hwirq, enable); + plic_toggle(handler, d->hwirq, enable); } } +static void plic_irq_enable(struct irq_data *d) +{ + plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1); +} + +static void plic_irq_disable(struct irq_data *d) +{ + plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0); +} + static void plic_irq_unmask(struct irq_data *d) { - struct cpumask amask; - unsigned int cpu; struct plic_priv *priv = irq_data_get_irq_chip_data(d); - cpumask_and(&amask, &priv->lmask, cpu_online_mask); - cpu = cpumask_any_and(irq_data_get_affinity_mask(d), - &amask); - if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) - return; - plic_irq_toggle(cpumask_of(cpu), d, 1); + writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); } static void plic_irq_mask(struct irq_data *d) { struct plic_priv *priv = irq_data_get_irq_chip_data(d); - plic_irq_toggle(&priv->lmask, d, 0); + writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); +} + +static void plic_irq_eoi(struct irq_data *d) +{ + struct plic_handler *handler = this_cpu_ptr(&plic_handlers); + + writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); } #ifdef CONFIG_SMP @@ -154,38 +165,68 @@ static int plic_set_affinity(struct irq_data *d, if (cpu >= nr_cpu_ids) return -EINVAL; - plic_irq_toggle(&priv->lmask, d, 0); - plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d)); + plic_irq_disable(d); irq_data_update_effective_affinity(d, cpumask_of(cpu)); + if (!irqd_irq_disabled(d)) + plic_irq_enable(d); + return IRQ_SET_MASK_OK_DONE; } #endif -static void plic_irq_eoi(struct irq_data *d) -{ - struct plic_handler *handler = this_cpu_ptr(&plic_handlers); - - if (irqd_irq_masked(d)) { - plic_irq_unmask(d); - writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); - plic_irq_mask(d); - } else { - writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); - } -} +static struct irq_chip plic_edge_chip = { + .name = "SiFive PLIC", + .irq_enable = plic_irq_enable, + .irq_disable = plic_irq_disable, + .irq_ack = plic_irq_eoi, + .irq_mask = plic_irq_mask, + .irq_unmask = plic_irq_unmask, +#ifdef CONFIG_SMP + .irq_set_affinity = plic_set_affinity, +#endif + .irq_set_type = plic_irq_set_type, + .flags = IRQCHIP_AFFINITY_PRE_STARTUP, +}; static struct irq_chip plic_chip = { .name = "SiFive PLIC", + .irq_enable = plic_irq_enable, + .irq_disable = plic_irq_disable, .irq_mask = plic_irq_mask, .irq_unmask = plic_irq_unmask, .irq_eoi = plic_irq_eoi, #ifdef CONFIG_SMP .irq_set_affinity = plic_set_affinity, #endif + .irq_set_type = plic_irq_set_type, + .flags = IRQCHIP_AFFINITY_PRE_STARTUP, }; +static int 
plic_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct plic_priv *priv = irq_data_get_irq_chip_data(d); + + if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks)) + return IRQ_SET_MASK_OK_NOCOPY; + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + irq_set_chip_handler_name_locked(d, &plic_edge_chip, + handle_edge_irq, NULL); + break; + case IRQ_TYPE_LEVEL_HIGH: + irq_set_chip_handler_name_locked(d, &plic_chip, + handle_fasteoi_irq, NULL); + break; + default: + return -EINVAL; + } + + return IRQ_SET_MASK_OK; +} + static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { @@ -198,6 +239,19 @@ static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, return 0; } +static int plic_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct plic_priv *priv = d->host_data; + + if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks)) + return irq_domain_translate_twocell(d, fwspec, hwirq, type); + + return irq_domain_translate_onecell(d, fwspec, hwirq, type); +} + static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { @@ -206,7 +260,7 @@ static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int type; struct irq_fwspec *fwspec = arg; - ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type); + ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type); if (ret) return ret; @@ -220,7 +274,7 @@ static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, } static const struct irq_domain_ops plic_irqdomain_ops = { - .translate = irq_domain_translate_onecell, + .translate = plic_irq_domain_translate, .alloc = plic_irq_domain_alloc, .free = irq_domain_free_irqs_top, }; @@ -281,8 +335,9 @@ static int plic_starting_cpu(unsigned int cpu) return 0; } -static int __init plic_init(struct device_node *node, - struct device_node *parent) +static int __init __plic_init(struct device_node *node, + struct device_node *parent, + unsigned long plic_quirks) { int error = 0, nr_contexts, nr_handlers = 0, i; u32 nr_irqs; @@ -293,6 +348,8 @@ static int __init plic_init(struct device_node *node, if (!priv) return -ENOMEM; + priv->plic_quirks = plic_quirks; + priv->regs = of_iomap(node, 0); if (WARN_ON(!priv->regs)) { error = -EIO; @@ -382,8 +439,11 @@ static int __init plic_init(struct device_node *node, i * CONTEXT_ENABLE_SIZE; handler->priv = priv; done: - for (hwirq = 1; hwirq <= nr_irqs; hwirq++) + for (hwirq = 1; hwirq <= nr_irqs; hwirq++) { plic_toggle(handler, hwirq, 0); + writel(1, priv->regs + PRIORITY_BASE + + hwirq * PRIORITY_PER_ID); + } nr_handlers++; } @@ -410,6 +470,20 @@ out_free_priv: return error; } +static int __init plic_init(struct device_node *node, + struct device_node *parent) +{ + return __plic_init(node, parent, 0); +} + IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init); IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */ -IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */ + +static int __init plic_edge_init(struct device_node *node, + struct device_node *parent) +{ + return __plic_init(node, parent, BIT(PLIC_QUIRK_EDGE_INTERRUPT)); +} + +IRQCHIP_DECLARE(andestech_nceplic100, "andestech,nceplic100", plic_edge_init); +IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_edge_init); diff --git a/drivers/irqchip/irq-sp7021-intc.c 
b/drivers/irqchip/irq-sp7021-intc.c new file mode 100644 index 000000000000..bed78d1def3d --- /dev/null +++ b/drivers/irqchip/irq-sp7021-intc.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* + * Copyright (C) Sunplus Technology Co., Ltd. + * All rights reserved. + */ +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/io.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#define SP_INTC_HWIRQ_MIN 0 +#define SP_INTC_HWIRQ_MAX 223 + +#define SP_INTC_NR_IRQS (SP_INTC_HWIRQ_MAX - SP_INTC_HWIRQ_MIN + 1) +#define SP_INTC_NR_GROUPS DIV_ROUND_UP(SP_INTC_NR_IRQS, 32) +#define SP_INTC_REG_SIZE (SP_INTC_NR_GROUPS * 4) + +/* REG_GROUP_0 regs */ +#define REG_INTR_TYPE (sp_intc.g0) +#define REG_INTR_POLARITY (REG_INTR_TYPE + SP_INTC_REG_SIZE) +#define REG_INTR_PRIORITY (REG_INTR_POLARITY + SP_INTC_REG_SIZE) +#define REG_INTR_MASK (REG_INTR_PRIORITY + SP_INTC_REG_SIZE) + +/* REG_GROUP_1 regs */ +#define REG_INTR_CLEAR (sp_intc.g1) +#define REG_MASKED_EXT1 (REG_INTR_CLEAR + SP_INTC_REG_SIZE) +#define REG_MASKED_EXT0 (REG_MASKED_EXT1 + SP_INTC_REG_SIZE) +#define REG_INTR_GROUP (REG_INTR_CLEAR + 31 * 4) + +#define GROUP_MASK (BIT(SP_INTC_NR_GROUPS) - 1) +#define GROUP_SHIFT_EXT1 (0) +#define GROUP_SHIFT_EXT0 (8) + +/* + * When GPIO_INT0~7 set to edge trigger, doesn't work properly. + * WORKAROUND: change it to level trigger, and toggle the polarity + * at ACK/Handler to make the HW work. + */ +#define GPIO_INT0_HWIRQ 120 +#define GPIO_INT7_HWIRQ 127 +#define IS_GPIO_INT(irq) \ +({ \ + u32 i = irq; \ + (i >= GPIO_INT0_HWIRQ) && (i <= GPIO_INT7_HWIRQ); \ +}) + +/* index of states */ +enum { + _IS_EDGE = 0, + _IS_LOW, + _IS_ACTIVE +}; + +#define STATE_BIT(irq, idx) (((irq) - GPIO_INT0_HWIRQ) * 3 + (idx)) +#define ASSIGN_STATE(irq, idx, v) assign_bit(STATE_BIT(irq, idx), sp_intc.states, v) +#define TEST_STATE(irq, idx) test_bit(STATE_BIT(irq, idx), sp_intc.states) + +static struct sp_intctl { + /* + * REG_GROUP_0: include type/polarity/priority/mask regs. + * REG_GROUP_1: include clear/masked_ext0/masked_ext1/group regs. 
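The GPIO_INT workaround above keeps three flags per line (_IS_EDGE, _IS_LOW, _IS_ACTIVE) in one flat bitmap, with STATE_BIT() linearizing the (hwirq, state) pair. A small stand-alone check of that packing, reusing the hwirq bounds from the defines above:

/* Model of the sp7021 STATE_BIT() packing: each of GPIO_INT0..7 stores
 * three flags in a flat bitmap. Pure arithmetic, runnable in user space. */
#include <assert.h>
#include <stdio.h>

#define GPIO_INT0_HWIRQ 120
#define GPIO_INT7_HWIRQ 127

enum { IS_EDGE = 0, IS_LOW, IS_ACTIVE };

static int state_bit(unsigned hwirq, int idx)
{
	return (hwirq - GPIO_INT0_HWIRQ) * 3 + idx;
}

int main(void)
{
	/* GPIO_INT0/_IS_EDGE is bit 0, GPIO_INT7/_IS_ACTIVE is bit 23. */
	assert(state_bit(GPIO_INT0_HWIRQ, IS_EDGE) == 0);
	assert(state_bit(GPIO_INT7_HWIRQ, IS_ACTIVE) == 23);
	printf("24 state bits cover 8 lines x 3 flags\n");
	return 0;
}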
+ */ + void __iomem *g0; // REG_GROUP_0 base + void __iomem *g1; // REG_GROUP_1 base + + struct irq_domain *domain; + raw_spinlock_t lock; + + /* + * store GPIO_INT states + * each interrupt has 3 states: is_edge, is_low, is_active + */ + DECLARE_BITMAP(states, (GPIO_INT7_HWIRQ - GPIO_INT0_HWIRQ + 1) * 3); +} sp_intc; + +static struct irq_chip sp_intc_chip; + +static void sp_intc_assign_bit(u32 hwirq, void __iomem *base, bool value) +{ + u32 offset, mask; + unsigned long flags; + void __iomem *reg; + + offset = (hwirq / 32) * 4; + reg = base + offset; + + raw_spin_lock_irqsave(&sp_intc.lock, flags); + mask = readl_relaxed(reg); + if (value) + mask |= BIT(hwirq % 32); + else + mask &= ~BIT(hwirq % 32); + writel_relaxed(mask, reg); + raw_spin_unlock_irqrestore(&sp_intc.lock, flags); +} + +static void sp_intc_ack_irq(struct irq_data *d) +{ + u32 hwirq = d->hwirq; + + if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_EDGE))) { // WORKAROUND + sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, !TEST_STATE(hwirq, _IS_LOW)); + ASSIGN_STATE(hwirq, _IS_ACTIVE, true); + } + + sp_intc_assign_bit(hwirq, REG_INTR_CLEAR, 1); +} + +static void sp_intc_mask_irq(struct irq_data *d) +{ + sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 0); +} + +static void sp_intc_unmask_irq(struct irq_data *d) +{ + sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 1); +} + +static int sp_intc_set_type(struct irq_data *d, unsigned int type) +{ + u32 hwirq = d->hwirq; + bool is_edge = !(type & IRQ_TYPE_LEVEL_MASK); + bool is_low = (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING); + + irq_set_handler_locked(d, is_edge ? handle_edge_irq : handle_level_irq); + + if (unlikely(IS_GPIO_INT(hwirq) && is_edge)) { // WORKAROUND + /* store states */ + ASSIGN_STATE(hwirq, _IS_EDGE, is_edge); + ASSIGN_STATE(hwirq, _IS_LOW, is_low); + ASSIGN_STATE(hwirq, _IS_ACTIVE, false); + /* change to level */ + is_edge = false; + } + + sp_intc_assign_bit(hwirq, REG_INTR_TYPE, is_edge); + sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, is_low); + + return 0; +} + +static int sp_intc_get_ext_irq(int ext_num) +{ + void __iomem *base = ext_num ? REG_MASKED_EXT1 : REG_MASKED_EXT0; + u32 shift = ext_num ? 
GROUP_SHIFT_EXT1 : GROUP_SHIFT_EXT0; + u32 groups; + u32 pending_group; + u32 group; + u32 pending_irq; + + groups = readl_relaxed(REG_INTR_GROUP); + pending_group = (groups >> shift) & GROUP_MASK; + if (!pending_group) + return -1; + + group = fls(pending_group) - 1; + pending_irq = readl_relaxed(base + group * 4); + if (!pending_irq) + return -1; + + return (group * 32) + fls(pending_irq) - 1; +} + +static void sp_intc_handle_ext_cascaded(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + int ext_num = (uintptr_t)irq_desc_get_handler_data(desc); + int hwirq; + + chained_irq_enter(chip, desc); + + while ((hwirq = sp_intc_get_ext_irq(ext_num)) >= 0) { + if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_ACTIVE))) { // WORKAROUND + ASSIGN_STATE(hwirq, _IS_ACTIVE, false); + sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, TEST_STATE(hwirq, _IS_LOW)); + } else { + generic_handle_domain_irq(sp_intc.domain, hwirq); + } + } + + chained_irq_exit(chip, desc); +} + +static struct irq_chip sp_intc_chip = { + .name = "sp_intc", + .irq_ack = sp_intc_ack_irq, + .irq_mask = sp_intc_mask_irq, + .irq_unmask = sp_intc_unmask_irq, + .irq_set_type = sp_intc_set_type, +}; + +static int sp_intc_irq_domain_map(struct irq_domain *domain, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &sp_intc_chip, handle_level_irq); + irq_set_chip_data(irq, &sp_intc_chip); + irq_set_noprobe(irq); + + return 0; +} + +static const struct irq_domain_ops sp_intc_dm_ops = { + .xlate = irq_domain_xlate_twocell, + .map = sp_intc_irq_domain_map, +}; + +static int sp_intc_irq_map(struct device_node *node, int i) +{ + unsigned int irq; + + irq = irq_of_parse_and_map(node, i); + if (!irq) + return -ENOENT; + + irq_set_chained_handler_and_data(irq, sp_intc_handle_ext_cascaded, (void *)(uintptr_t)i); + + return 0; +} + +static int __init sp_intc_init_dt(struct device_node *node, struct device_node *parent) +{ + int i, ret; + + sp_intc.g0 = of_iomap(node, 0); + if (!sp_intc.g0) + return -ENXIO; + + sp_intc.g1 = of_iomap(node, 1); + if (!sp_intc.g1) { + ret = -ENXIO; + goto out_unmap0; + } + + ret = sp_intc_irq_map(node, 0); // EXT_INT0 + if (ret) + goto out_unmap1; + + ret = sp_intc_irq_map(node, 1); // EXT_INT1 + if (ret) + goto out_unmap1; + + /* initial regs */ + for (i = 0; i < SP_INTC_NR_GROUPS; i++) { + /* all mask */ + writel_relaxed(0, REG_INTR_MASK + i * 4); + /* all edge */ + writel_relaxed(~0, REG_INTR_TYPE + i * 4); + /* all high-active */ + writel_relaxed(0, REG_INTR_POLARITY + i * 4); + /* all EXT_INT0 */ + writel_relaxed(~0, REG_INTR_PRIORITY + i * 4); + /* all clear */ + writel_relaxed(~0, REG_INTR_CLEAR + i * 4); + } + + sp_intc.domain = irq_domain_add_linear(node, SP_INTC_NR_IRQS, + &sp_intc_dm_ops, &sp_intc); + if (!sp_intc.domain) { + ret = -ENOMEM; + goto out_unmap1; + } + + raw_spin_lock_init(&sp_intc.lock); + + return 0; + +out_unmap1: + iounmap(sp_intc.g1); +out_unmap0: + iounmap(sp_intc.g0); + + return ret; +} + +IRQCHIP_DECLARE(sp_intc, "sunplus,sp7021-intc", sp_intc_init_dt); diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 9d18f47040eb..a73763d475f0 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -34,21 +34,15 @@ struct stm32_exti_bank { u32 swier_ofst; u32 rpr_ofst; u32 fpr_ofst; + u32 trg_ofst; }; #define UNDEF_REG ~0 -struct stm32_desc_irq { - u32 exti; - u32 irq_parent; - struct irq_chip *chip; -}; - struct stm32_exti_drv_data { const struct stm32_exti_bank **exti_banks; - 
const struct stm32_desc_irq *desc_irqs; + const u8 *desc_irqs; u32 bank_nr; - u32 irq_nr; }; struct stm32_exti_chip_data { @@ -78,6 +72,7 @@ static const struct stm32_exti_bank stm32f4xx_exti_b1 = { .swier_ofst = 0x10, .rpr_ofst = 0x14, .fpr_ofst = UNDEF_REG, + .trg_ofst = UNDEF_REG, }; static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = { @@ -97,6 +92,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b1 = { .swier_ofst = 0x08, .rpr_ofst = 0x88, .fpr_ofst = UNDEF_REG, + .trg_ofst = UNDEF_REG, }; static const struct stm32_exti_bank stm32h7xx_exti_b2 = { @@ -107,6 +103,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b2 = { .swier_ofst = 0x28, .rpr_ofst = 0x98, .fpr_ofst = UNDEF_REG, + .trg_ofst = UNDEF_REG, }; static const struct stm32_exti_bank stm32h7xx_exti_b3 = { @@ -117,6 +114,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b3 = { .swier_ofst = 0x48, .rpr_ofst = 0xA8, .fpr_ofst = UNDEF_REG, + .trg_ofst = UNDEF_REG, }; static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = { @@ -132,32 +130,35 @@ static const struct stm32_exti_drv_data stm32h7xx_drv_data = { static const struct stm32_exti_bank stm32mp1_exti_b1 = { .imr_ofst = 0x80, - .emr_ofst = 0x84, + .emr_ofst = UNDEF_REG, .rtsr_ofst = 0x00, .ftsr_ofst = 0x04, .swier_ofst = 0x08, .rpr_ofst = 0x0C, .fpr_ofst = 0x10, + .trg_ofst = 0x3EC, }; static const struct stm32_exti_bank stm32mp1_exti_b2 = { .imr_ofst = 0x90, - .emr_ofst = 0x94, + .emr_ofst = UNDEF_REG, .rtsr_ofst = 0x20, .ftsr_ofst = 0x24, .swier_ofst = 0x28, .rpr_ofst = 0x2C, .fpr_ofst = 0x30, + .trg_ofst = 0x3E8, }; static const struct stm32_exti_bank stm32mp1_exti_b3 = { .imr_ofst = 0xA0, - .emr_ofst = 0xA4, + .emr_ofst = UNDEF_REG, .rtsr_ofst = 0x40, .ftsr_ofst = 0x44, .swier_ofst = 0x48, .rpr_ofst = 0x4C, .fpr_ofst = 0x50, + .trg_ofst = 0x3E4, }; static const struct stm32_exti_bank *stm32mp1_exti_banks[] = { @@ -169,126 +170,114 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = { static struct irq_chip stm32_exti_h_chip; static struct irq_chip stm32_exti_h_chip_direct; -static const struct stm32_desc_irq stm32mp1_desc_irq[] = { - { .exti = 0, .irq_parent = 6, .chip = &stm32_exti_h_chip }, - { .exti = 1, .irq_parent = 7, .chip = &stm32_exti_h_chip }, - { .exti = 2, .irq_parent = 8, .chip = &stm32_exti_h_chip }, - { .exti = 3, .irq_parent = 9, .chip = &stm32_exti_h_chip }, - { .exti = 4, .irq_parent = 10, .chip = &stm32_exti_h_chip }, - { .exti = 5, .irq_parent = 23, .chip = &stm32_exti_h_chip }, - { .exti = 6, .irq_parent = 64, .chip = &stm32_exti_h_chip }, - { .exti = 7, .irq_parent = 65, .chip = &stm32_exti_h_chip }, - { .exti = 8, .irq_parent = 66, .chip = &stm32_exti_h_chip }, - { .exti = 9, .irq_parent = 67, .chip = &stm32_exti_h_chip }, - { .exti = 10, .irq_parent = 40, .chip = &stm32_exti_h_chip }, - { .exti = 11, .irq_parent = 42, .chip = &stm32_exti_h_chip }, - { .exti = 12, .irq_parent = 76, .chip = &stm32_exti_h_chip }, - { .exti = 13, .irq_parent = 77, .chip = &stm32_exti_h_chip }, - { .exti = 14, .irq_parent = 121, .chip = &stm32_exti_h_chip }, - { .exti = 15, .irq_parent = 127, .chip = &stm32_exti_h_chip }, - { .exti = 16, .irq_parent = 1, .chip = &stm32_exti_h_chip }, - { .exti = 19, .irq_parent = 3, .chip = &stm32_exti_h_chip_direct }, - { .exti = 21, .irq_parent = 31, .chip = &stm32_exti_h_chip_direct }, - { .exti = 22, .irq_parent = 33, .chip = &stm32_exti_h_chip_direct }, - { .exti = 23, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct }, - { .exti = 24, .irq_parent = 95, .chip = 
&stm32_exti_h_chip_direct }, - { .exti = 25, .irq_parent = 107, .chip = &stm32_exti_h_chip_direct }, - { .exti = 26, .irq_parent = 37, .chip = &stm32_exti_h_chip_direct }, - { .exti = 27, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct }, - { .exti = 28, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct }, - { .exti = 29, .irq_parent = 71, .chip = &stm32_exti_h_chip_direct }, - { .exti = 30, .irq_parent = 52, .chip = &stm32_exti_h_chip_direct }, - { .exti = 31, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct }, - { .exti = 32, .irq_parent = 82, .chip = &stm32_exti_h_chip_direct }, - { .exti = 33, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct }, - { .exti = 47, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct }, - { .exti = 48, .irq_parent = 138, .chip = &stm32_exti_h_chip_direct }, - { .exti = 50, .irq_parent = 139, .chip = &stm32_exti_h_chip_direct }, - { .exti = 52, .irq_parent = 140, .chip = &stm32_exti_h_chip_direct }, - { .exti = 53, .irq_parent = 141, .chip = &stm32_exti_h_chip_direct }, - { .exti = 54, .irq_parent = 135, .chip = &stm32_exti_h_chip_direct }, - { .exti = 61, .irq_parent = 100, .chip = &stm32_exti_h_chip_direct }, - { .exti = 65, .irq_parent = 144, .chip = &stm32_exti_h_chip }, - { .exti = 68, .irq_parent = 143, .chip = &stm32_exti_h_chip }, - { .exti = 70, .irq_parent = 62, .chip = &stm32_exti_h_chip_direct }, - { .exti = 73, .irq_parent = 129, .chip = &stm32_exti_h_chip }, +#define EXTI_INVALID_IRQ U8_MAX +#define STM32MP1_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp1_exti_banks) * IRQS_PER_BANK) + +static const u8 stm32mp1_desc_irq[] = { + /* default value */ + [0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ, + + [0] = 6, + [1] = 7, + [2] = 8, + [3] = 9, + [4] = 10, + [5] = 23, + [6] = 64, + [7] = 65, + [8] = 66, + [9] = 67, + [10] = 40, + [11] = 42, + [12] = 76, + [13] = 77, + [14] = 121, + [15] = 127, + [16] = 1, + [19] = 3, + [21] = 31, + [22] = 33, + [23] = 72, + [24] = 95, + [25] = 107, + [26] = 37, + [27] = 38, + [28] = 39, + [29] = 71, + [30] = 52, + [31] = 53, + [32] = 82, + [33] = 83, + [47] = 93, + [48] = 138, + [50] = 139, + [52] = 140, + [53] = 141, + [54] = 135, + [61] = 100, + [65] = 144, + [68] = 143, + [70] = 62, + [73] = 129, }; -static const struct stm32_desc_irq stm32mp13_desc_irq[] = { - { .exti = 0, .irq_parent = 6, .chip = &stm32_exti_h_chip }, - { .exti = 1, .irq_parent = 7, .chip = &stm32_exti_h_chip }, - { .exti = 2, .irq_parent = 8, .chip = &stm32_exti_h_chip }, - { .exti = 3, .irq_parent = 9, .chip = &stm32_exti_h_chip }, - { .exti = 4, .irq_parent = 10, .chip = &stm32_exti_h_chip }, - { .exti = 5, .irq_parent = 24, .chip = &stm32_exti_h_chip }, - { .exti = 6, .irq_parent = 65, .chip = &stm32_exti_h_chip }, - { .exti = 7, .irq_parent = 66, .chip = &stm32_exti_h_chip }, - { .exti = 8, .irq_parent = 67, .chip = &stm32_exti_h_chip }, - { .exti = 9, .irq_parent = 68, .chip = &stm32_exti_h_chip }, - { .exti = 10, .irq_parent = 41, .chip = &stm32_exti_h_chip }, - { .exti = 11, .irq_parent = 43, .chip = &stm32_exti_h_chip }, - { .exti = 12, .irq_parent = 77, .chip = &stm32_exti_h_chip }, - { .exti = 13, .irq_parent = 78, .chip = &stm32_exti_h_chip }, - { .exti = 14, .irq_parent = 106, .chip = &stm32_exti_h_chip }, - { .exti = 15, .irq_parent = 109, .chip = &stm32_exti_h_chip }, - { .exti = 16, .irq_parent = 1, .chip = &stm32_exti_h_chip }, - { .exti = 19, .irq_parent = 3, .chip = &stm32_exti_h_chip_direct }, - { .exti = 21, .irq_parent = 32, .chip = &stm32_exti_h_chip_direct }, - { .exti = 22, .irq_parent = 34, .chip = 
&stm32_exti_h_chip_direct }, - { .exti = 23, .irq_parent = 73, .chip = &stm32_exti_h_chip_direct }, - { .exti = 24, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct }, - { .exti = 25, .irq_parent = 114, .chip = &stm32_exti_h_chip_direct }, - { .exti = 26, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct }, - { .exti = 27, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct }, - { .exti = 28, .irq_parent = 40, .chip = &stm32_exti_h_chip_direct }, - { .exti = 29, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct }, - { .exti = 30, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct }, - { .exti = 31, .irq_parent = 54, .chip = &stm32_exti_h_chip_direct }, - { .exti = 32, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct }, - { .exti = 33, .irq_parent = 84, .chip = &stm32_exti_h_chip_direct }, - { .exti = 44, .irq_parent = 96, .chip = &stm32_exti_h_chip_direct }, - { .exti = 47, .irq_parent = 92, .chip = &stm32_exti_h_chip_direct }, - { .exti = 48, .irq_parent = 116, .chip = &stm32_exti_h_chip_direct }, - { .exti = 50, .irq_parent = 117, .chip = &stm32_exti_h_chip_direct }, - { .exti = 52, .irq_parent = 118, .chip = &stm32_exti_h_chip_direct }, - { .exti = 53, .irq_parent = 119, .chip = &stm32_exti_h_chip_direct }, - { .exti = 68, .irq_parent = 63, .chip = &stm32_exti_h_chip_direct }, - { .exti = 70, .irq_parent = 98, .chip = &stm32_exti_h_chip_direct }, +static const u8 stm32mp13_desc_irq[] = { + /* default value */ + [0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ, + + [0] = 6, + [1] = 7, + [2] = 8, + [3] = 9, + [4] = 10, + [5] = 24, + [6] = 65, + [7] = 66, + [8] = 67, + [9] = 68, + [10] = 41, + [11] = 43, + [12] = 77, + [13] = 78, + [14] = 106, + [15] = 109, + [16] = 1, + [19] = 3, + [21] = 32, + [22] = 34, + [23] = 73, + [24] = 93, + [25] = 114, + [26] = 38, + [27] = 39, + [28] = 40, + [29] = 72, + [30] = 53, + [31] = 54, + [32] = 83, + [33] = 84, + [44] = 96, + [47] = 92, + [48] = 116, + [50] = 117, + [52] = 118, + [53] = 119, + [68] = 63, + [70] = 98, }; static const struct stm32_exti_drv_data stm32mp1_drv_data = { .exti_banks = stm32mp1_exti_banks, .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks), .desc_irqs = stm32mp1_desc_irq, - .irq_nr = ARRAY_SIZE(stm32mp1_desc_irq), }; static const struct stm32_exti_drv_data stm32mp13_drv_data = { .exti_banks = stm32mp1_exti_banks, .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks), .desc_irqs = stm32mp13_desc_irq, - .irq_nr = ARRAY_SIZE(stm32mp13_desc_irq), }; -static const struct -stm32_desc_irq *stm32_exti_get_desc(const struct stm32_exti_drv_data *drv_data, - irq_hw_number_t hwirq) -{ - const struct stm32_desc_irq *desc = NULL; - int i; - - if (!drv_data->desc_irqs) - return NULL; - - for (i = 0; i < drv_data->irq_nr; i++) { - desc = &drv_data->desc_irqs[i]; - if (desc->exti == hwirq) - break; - } - - return desc; -} - static unsigned long stm32_exti_pending(struct irq_chip_generic *gc) { struct stm32_exti_chip_data *chip_data = gc->private; @@ -614,7 +603,7 @@ static int stm32_exti_h_set_affinity(struct irq_data *d, if (d->parent_data->chip) return irq_chip_set_affinity_parent(d, dest, force); - return -EINVAL; + return IRQ_SET_MASK_OK_DONE; } static int __maybe_unused stm32_exti_h_suspend(void) @@ -691,8 +680,8 @@ static struct irq_chip stm32_exti_h_chip_direct = { .name = "stm32-exti-h-direct", .irq_eoi = irq_chip_eoi_parent, .irq_ack = irq_chip_ack_parent, - .irq_mask = irq_chip_mask_parent, - .irq_unmask = irq_chip_unmask_parent, + .irq_mask = stm32_exti_h_mask, + .irq_unmask = stm32_exti_h_unmask, .irq_retrigger = 
irq_chip_retrigger_hierarchy, .irq_set_type = irq_chip_set_type_parent, .irq_set_wake = stm32_exti_h_set_wake, @@ -706,28 +695,36 @@ static int stm32_exti_h_domain_alloc(struct irq_domain *dm, { struct stm32_exti_host_data *host_data = dm->host_data; struct stm32_exti_chip_data *chip_data; - const struct stm32_desc_irq *desc; + u8 desc_irq; struct irq_fwspec *fwspec = data; struct irq_fwspec p_fwspec; irq_hw_number_t hwirq; int bank; + u32 event_trg; + struct irq_chip *chip; hwirq = fwspec->param[0]; + if (hwirq >= host_data->drv_data->bank_nr * IRQS_PER_BANK) + return -EINVAL; + bank = hwirq / IRQS_PER_BANK; chip_data = &host_data->chips_data[bank]; + event_trg = readl_relaxed(host_data->base + chip_data->reg_bank->trg_ofst); + chip = (event_trg & BIT(hwirq % IRQS_PER_BANK)) ? + &stm32_exti_h_chip : &stm32_exti_h_chip_direct; + + irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data); - desc = stm32_exti_get_desc(host_data->drv_data, hwirq); - if (!desc) + if (!host_data->drv_data || !host_data->drv_data->desc_irqs) return -EINVAL; - irq_domain_set_hwirq_and_chip(dm, virq, hwirq, desc->chip, - chip_data); - if (desc->irq_parent) { + desc_irq = host_data->drv_data->desc_irqs[hwirq]; + if (desc_irq != EXTI_INVALID_IRQ) { p_fwspec.fwnode = dm->parent->fwnode; p_fwspec.param_count = 3; p_fwspec.param[0] = GIC_SPI; - p_fwspec.param[1] = desc->irq_parent; + p_fwspec.param[1] = desc_irq; p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH; return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec); @@ -792,7 +789,8 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, * clear registers to avoid residue */ writel_relaxed(0, base + stm32_bank->imr_ofst); - writel_relaxed(0, base + stm32_bank->emr_ofst); + if (stm32_bank->emr_ofst != UNDEF_REG) + writel_relaxed(0, base + stm32_bank->emr_ofst); pr_info("%pOF: bank%d\n", node, bank_idx); diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 0454b0885b01..84291e38dca8 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -5,7 +5,7 @@ dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \ dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o \ - dm-rq.o + dm-rq.o dm-io-rewind.o dm-multipath-y += dm-path-selector.o dm-mpath.o dm-historical-service-time-y += dm-ps-historical-service-time.o dm-io-affinity-y += dm-ps-io-affinity.o @@ -83,6 +83,7 @@ obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o obj-$(CONFIG_DM_INTEGRITY) += dm-integrity.o obj-$(CONFIG_DM_ZONED) += dm-zoned.o obj-$(CONFIG_DM_WRITECACHE) += dm-writecache.o +obj-$(CONFIG_SECURITY_LOADPIN_VERITY) += dm-verity-loadpin.o ifeq ($(CONFIG_DM_INIT),y) dm-mod-objs += dm-init.o diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 3563d15dbaf2..ba3909bb6bea 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -414,8 +414,8 @@ static void uuid_io_unlock(struct closure *cl) up(&c->uuid_write_mutex); } -static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, - struct bkey *k, struct closure *parent) +static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k, + struct closure *parent) { struct closure *cl = &c->uuid_write; struct uuid_entry *u; @@ -429,22 +429,22 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, for (i = 0; i < KEY_PTRS(k); i++) { struct bio *bio = bch_bbio_alloc(c); - bio->bi_opf = REQ_SYNC | REQ_META | op_flags; + bio->bi_opf = opf | REQ_SYNC | REQ_META; bio->bi_iter.bi_size = KEY_SIZE(k) << 9; bio->bi_end_io = uuid_endio; bio->bi_private = 
cl; - bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); bch_bio_map(bio, c->uuids); bch_submit_bbio(bio, c, k, i); - if (op != REQ_OP_WRITE) + if ((opf & REQ_OP_MASK) != REQ_OP_WRITE) break; } bch_extent_to_text(buf, sizeof(buf), k); - pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf); + pr_debug("%s UUIDs at %s\n", (opf & REQ_OP_MASK) == REQ_OP_WRITE ? + "wrote" : "read", buf); for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) if (!bch_is_zero(u->uuid, 16)) @@ -463,7 +463,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) return "bad uuid pointer"; bkey_copy(&c->uuid_bucket, k); - uuid_io(c, REQ_OP_READ, 0, k, cl); + uuid_io(c, REQ_OP_READ, k, cl); if (j->version < BCACHE_JSET_VERSION_UUIDv1) { struct uuid_entry_v0 *u0 = (void *) c->uuids; @@ -511,7 +511,7 @@ static int __uuid_write(struct cache_set *c) size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS; SET_KEY_SIZE(&k.key, size); - uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); + uuid_io(c, REQ_OP_WRITE, &k.key, &cl); closure_sync(&cl); /* Only one bucket used for uuid write */ @@ -587,8 +587,7 @@ static void prio_endio(struct bio *bio) closure_put(&ca->prio); } -static void prio_io(struct cache *ca, uint64_t bucket, int op, - unsigned long op_flags) +static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf) { struct closure *cl = &ca->prio; struct bio *bio = bch_bbio_alloc(ca->set); @@ -601,7 +600,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op, bio->bi_end_io = prio_endio; bio->bi_private = ca; - bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); + bio->bi_opf = opf | REQ_SYNC | REQ_META; bch_bio_map(bio, ca->disk_buckets); closure_bio_submit(ca->set, bio, &ca->prio); @@ -661,7 +660,7 @@ int bch_prio_write(struct cache *ca, bool wait) BUG_ON(bucket == -1); mutex_unlock(&ca->set->bucket_lock); - prio_io(ca, bucket, REQ_OP_WRITE, 0); + prio_io(ca, bucket, REQ_OP_WRITE); mutex_lock(&ca->set->bucket_lock); ca->prio_buckets[i] = bucket; @@ -705,7 +704,7 @@ static int prio_read(struct cache *ca, uint64_t bucket) ca->prio_last_buckets[bucket_nr] = bucket; bucket_nr++; - prio_io(ca, bucket, REQ_OP_READ, 0); + prio_io(ca, bucket, REQ_OP_READ); if (p->csum != bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) { @@ -884,7 +883,7 @@ static void bcache_device_free(struct bcache_device *d) if (disk) { ida_simple_remove(&bcache_device_idx, first_minor_to_idx(disk->first_minor)); - blk_cleanup_disk(disk); + put_disk(disk); } bioset_exit(&d->bio_split); diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 5ffa1dcf84cf..dc01ce33265b 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -577,13 +577,12 @@ static void dmio_complete(unsigned long error, void *context) b->end_io(b, unlikely(error != 0) ? 
BLK_STS_IOERR : 0); } -static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, +static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector, unsigned n_sectors, unsigned offset) { int r; struct dm_io_request io_req = { - .bi_op = rw, - .bi_op_flags = 0, + .bi_opf = op, .notify.fn = dmio_complete, .notify.context = b, .client = b->c->dm_io, @@ -616,7 +615,7 @@ static void bio_complete(struct bio *bio) b->end_io(b, status); } -static void use_bio(struct dm_buffer *b, int rw, sector_t sector, +static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector, unsigned n_sectors, unsigned offset) { struct bio *bio; @@ -630,10 +629,10 @@ static void use_bio(struct dm_buffer *b, int rw, sector_t sector, bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN); if (!bio) { dmio: - use_dmio(b, rw, sector, n_sectors, offset); + use_dmio(b, op, sector, n_sectors, offset); return; } - bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, rw); + bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op); bio->bi_iter.bi_sector = sector; bio->bi_end_io = bio_complete; bio->bi_private = b; @@ -669,7 +668,8 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block return sector; } -static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t)) +static void submit_io(struct dm_buffer *b, enum req_op op, + void (*end_io)(struct dm_buffer *, blk_status_t)) { unsigned n_sectors; sector_t sector; @@ -679,7 +679,7 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff sector = block_to_sector(b->c, b->block); - if (rw != REQ_OP_WRITE) { + if (op != REQ_OP_WRITE) { n_sectors = b->c->block_size >> SECTOR_SHIFT; offset = 0; } else { @@ -698,9 +698,9 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff } if (b->data_mode != DATA_MODE_VMALLOC) - use_bio(b, rw, sector, n_sectors, offset); + use_bio(b, op, sector, n_sectors, offset); else - use_dmio(b, rw, sector, n_sectors, offset); + use_dmio(b, op, sector, n_sectors, offset); } /*---------------------------------------------------------------- @@ -1341,8 +1341,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers); int dm_bufio_issue_flush(struct dm_bufio_client *c) { struct dm_io_request io_req = { - .bi_op = REQ_OP_WRITE, - .bi_op_flags = REQ_PREFLUSH | REQ_SYNC, + .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC, .mem.type = DM_IO_KMEM, .mem.ptr.addr = NULL, .client = c->dm_io, @@ -1365,8 +1364,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_issue_flush); int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count) { struct dm_io_request io_req = { - .bi_op = REQ_OP_DISCARD, - .bi_op_flags = REQ_SYNC, + .bi_opf = REQ_OP_DISCARD | REQ_SYNC, .mem.type = DM_IO_KMEM, .mem.ptr.addr = NULL, .client = c->dm_io, diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h index 179ed5bf81a3..0905f2c1615e 100644 --- a/drivers/md/dm-cache-metadata.h +++ b/drivers/md/dm-cache-metadata.h @@ -131,7 +131,7 @@ void dm_cache_dump(struct dm_cache_metadata *cmd); * hints will be lost. * * The hints are indexed by the cblock, but many policies will not - * neccessarily have a fast way of accessing efficiently via cblock. So + * necessarily have a fast way of accessing efficiently via cblock. So * rather than querying the policy for each cblock, we let it walk its data * structures and fill in the hints in whatever order it wishes. 
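The dm-bufio hunks above follow this series' recurring conversion: a single blk_opf_t now carries both the opcode and the request flags, and callers recover the opcode with opf & REQ_OP_MASK. A user-space sketch of that encoding; the bit positions mirror the kernel's layout but are illustrative here, not the block layer's definitions:

/* Sketch of the blk_opf_t convention used in the hunks above: low bits
 * hold the opcode, high bits hold request flags, and the opcode is
 * extracted with (opf & REQ_OP_MASK). */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t blk_opf_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1u << REQ_OP_BITS) - 1)
#define REQ_OP_READ	0
#define REQ_OP_WRITE	1
#define REQ_SYNC	(1u << 11)
#define REQ_META	(1u << 12)

static const char *op_name(blk_opf_t opf)
{
	return (opf & REQ_OP_MASK) == REQ_OP_WRITE ? "write" : "read";
}

int main(void)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;

	/* One word now answers both questions the old (op, op_flags)
	 * pair answered separately. */
	printf("%s, sync=%d\n", op_name(opf), !!(opf & REQ_SYNC));
	return 0;
}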
*/ diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 28c5de8eca4a..54a8d5c9a44e 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2775,7 +2775,7 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock, /* * The discard block size in the on disk metadata is not - * neccessarily the same as we're currently using. So we have to + * necessarily the same as we're currently using. So we have to * be careful to only set the discarded attribute if we know it * covers a complete block of the new size. */ diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index c954ff91870e..6c6bd24774f2 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -22,6 +22,8 @@ #define DM_RESERVED_MAX_IOS 1024 +struct dm_io; + struct dm_kobject_holder { struct kobject kobj; struct completion completion; @@ -91,6 +93,14 @@ struct mapped_device { spinlock_t deferred_lock; struct bio_list deferred; + /* + * requeue work context is needed for cloning one new bio + * to represent the dm_io to be requeued, since each + * dm_io may point to the original bio from FS. + */ + struct work_struct requeue_work; + struct dm_io *requeue_list; + void *interface_ptr; /* @@ -216,6 +226,13 @@ struct dm_table { #endif }; +static inline struct dm_target *dm_table_get_target(struct dm_table *t, + unsigned int index) +{ + BUG_ON(index >= t->num_targets); + return t->targets + index; +} + /* * One of these is allocated per clone bio. */ @@ -230,6 +247,9 @@ struct dm_target_io { sector_t old_sector; struct bio clone; }; +#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone)) +#define DM_IO_BIO_OFFSET \ + (offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio)) /* * dm_target_io flags @@ -272,7 +292,6 @@ struct dm_io { atomic_t io_count; struct mapped_device *md; - struct bio *split_bio; /* The three fields represent mapped part of original bio */ struct bio *orig_bio; unsigned int sector_offset; /* offset to end of orig_bio */ @@ -300,6 +319,8 @@ static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit) io->flags |= (1U << bit); } +void dm_io_rewind(struct dm_io *io, struct bio_set *bs); + static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj) { return &container_of(kobj, struct dm_kobject_holder, kobj)->completion; diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c index 0221fa63f888..223e8e1a7a13 100644 --- a/drivers/md/dm-ebs-target.c +++ b/drivers/md/dm-ebs-target.c @@ -61,7 +61,8 @@ static inline bool __ebs_check_bs(unsigned int bs) * * copy blocks between bufio blocks and bio vector's (partial/overlapping) pages. */ -static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bvec_iter *iter) +static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv, + struct bvec_iter *iter) { int r = 0; unsigned char *ba, *pa; @@ -81,7 +82,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len); /* Avoid reading for writes in case bio vector's page overwrites block completely. 
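The new DM_IO_BIO_OFFSET in dm-core.h above composes two offsetof() values so that code holding the embedded clone bio can step back to the enclosing dm_io. An illustration of that pointer arithmetic with toy stand-in types (the struct names below are not the kernel's):

/* Illustration of the offsetof() composition behind DM_IO_BIO_OFFSET:
 * a bio embedded as outer.inner.clone is mapped back to the outer
 * structure by subtracting the summed member offsets. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct bio { int dummy; };

struct tio {			/* stand-in for dm_target_io */
	int flags;
	struct bio clone;	/* must stay the last member */
};

struct io {			/* stand-in for dm_io */
	int magic;
	struct tio tio;
};

#define IO_BIO_OFFSET (offsetof(struct tio, clone) + offsetof(struct io, tio))

int main(void)
{
	struct io io = { .magic = 42 };
	struct bio *bio = &io.tio.clone;
	struct io *back = (struct io *)((char *)bio - IO_BIO_OFFSET);

	assert(back == &io && back->magic == 42);
	printf("recovered outer struct from embedded bio\n");
	return 0;
}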
*/ - if (rw == READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio)) + if (op == REQ_OP_READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio)) ba = dm_bufio_read(ec->bufio, block, &b); else ba = dm_bufio_new(ec->bufio, block, &b); @@ -95,7 +96,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv } else { /* Copy data to/from bio to buffer if read/new was successful above. */ ba += buf_off; - if (rw == READ) { + if (op == REQ_OP_READ) { memcpy(pa, ba, cur_len); flush_dcache_page(bv->bv_page); } else { @@ -117,14 +118,14 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv } /* READ/WRITE: iterate bio vector's copying between (partial) pages and bufio blocks. */ -static int __ebs_rw_bio(struct ebs_c *ec, int rw, struct bio *bio) +static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio) { int r = 0, rr; struct bio_vec bv; struct bvec_iter iter; bio_for_each_bvec(bv, bio, iter) { - rr = __ebs_rw_bvec(ec, rw, &bv, &iter); + rr = __ebs_rw_bvec(ec, op, &bv, &iter); if (rr) r = rr; } @@ -205,10 +206,10 @@ static void __ebs_process_bios(struct work_struct *ws) bio_list_for_each(bio, &bios) { r = -EIO; if (bio_op(bio) == REQ_OP_READ) - r = __ebs_rw_bio(ec, READ, bio); + r = __ebs_rw_bio(ec, REQ_OP_READ, bio); else if (bio_op(bio) == REQ_OP_WRITE) { write = true; - r = __ebs_rw_bio(ec, WRITE, bio); + r = __ebs_rw_bio(ec, REQ_OP_WRITE, bio); } else if (bio_op(bio) == REQ_OP_DISCARD) { __ebs_forget_bio(ec, bio); r = __ebs_discard_bio(ec, bio); diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index f2305eb758a2..89fa7a68c6c4 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -32,7 +32,7 @@ struct flakey_c { unsigned corrupt_bio_byte; unsigned corrupt_bio_rw; unsigned corrupt_bio_value; - unsigned corrupt_bio_flags; + blk_opf_t corrupt_bio_flags; }; enum feature_flag_bits { @@ -145,7 +145,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, /* * Only corrupt bios with these flags set. 
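The dm-flakey hunk above widens corrupt_bio_flags to blk_opf_t yet still parses it through dm_read_arg(), which writes an unsigned int; the BUILD_BUG_ON pins the assumption that the two types share a size. A compact user-space analogue of that pattern, with a stand-in typedef and parser:

/* Analogue of the dm-flakey parse above: read input through a helper
 * that only knows "unsigned int", then store it in a stronger flag
 * type, with a compile-time check that the sizes agree. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint32_t blk_opf_t;	/* stand-in for the kernel typedef */

static int read_arg(const char *s, unsigned int *out)
{
	*out = (unsigned int)strtoul(s, NULL, 0);
	return 0;
}

int main(void)
{
	blk_opf_t corrupt_bio_flags;

	/* Mirrors the BUILD_BUG_ON in the hunk: the cast below is only
	 * safe while both types have the same width. */
	_Static_assert(sizeof(corrupt_bio_flags) == sizeof(unsigned int),
		       "flag type must match parser type");

	read_arg("0x800", (unsigned int *)&corrupt_bio_flags);
	printf("flags = 0x%x\n", (unsigned int)corrupt_bio_flags);
	return 0;
}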
*/ - r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error); + BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) != + sizeof(unsigned int)); + r = dm_read_arg(_args + 3, as, + (__force unsigned *)&fc->corrupt_bio_flags, + &ti->error); if (r) return r; argc--; diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c index 1842d3a958ef..a1bd7cd52b1b 100644 --- a/drivers/md/dm-ima.c +++ b/drivers/md/dm-ima.c @@ -208,7 +208,7 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl if (!target_data_buf) goto error; - num_targets = dm_table_get_num_targets(table); + num_targets = table->num_targets; if (dm_ima_alloc_and_copy_device_data(table->md, &device_data_buf, num_targets, noio)) goto error; @@ -237,9 +237,6 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl for (i = 0; i < num_targets; i++) { struct dm_target *ti = dm_table_get_target(table, i); - if (!ti) - goto error; - last_target_measured = 0; /* diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 3d5a0ce123c9..c60f9b2ece2d 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -298,7 +298,7 @@ struct dm_integrity_io { struct work_struct work; struct dm_integrity_c *ic; - enum req_opf op; + enum req_op op; bool fua; struct dm_integrity_range range; @@ -551,14 +551,14 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr) return 0; } -static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags) +static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf) { struct dm_io_request io_req; struct dm_io_region io_loc; + const enum req_op op = opf & REQ_OP_MASK; int r; - io_req.bi_op = op; - io_req.bi_op_flags = op_flags; + io_req.bi_opf = opf; io_req.mem.type = DM_IO_KMEM; io_req.mem.ptr.addr = ic->sb; io_req.notify.fn = NULL; @@ -1050,8 +1050,9 @@ static void complete_journal_io(unsigned long error, void *context) complete_journal_op(comp); } -static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags, - unsigned sector, unsigned n_sectors, struct journal_completion *comp) +static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf, + unsigned sector, unsigned n_sectors, + struct journal_completion *comp) { struct dm_io_request io_req; struct dm_io_region io_loc; @@ -1067,8 +1068,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags, pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); - io_req.bi_op = op; - io_req.bi_op_flags = op_flags; + io_req.bi_opf = opf; io_req.mem.type = DM_IO_PAGE_LIST; if (ic->journal_io) io_req.mem.ptr.pl = &ic->journal_io[pl_index]; @@ -1088,7 +1088,8 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags, r = dm_io(&io_req, 1, &io_loc, NULL); if (unlikely(r)) { - dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r); + dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ? 
+ "reading journal" : "writing journal", r); if (comp) { WARN_ONCE(1, "asynchronous dm_io failed: %d", r); complete_journal_io(-1UL, comp); @@ -1096,15 +1097,16 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags, } } -static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section, - unsigned n_sections, struct journal_completion *comp) +static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf, + unsigned section, unsigned n_sections, + struct journal_completion *comp) { unsigned sector, n_sectors; sector = section * ic->journal_section_sectors; n_sectors = n_sections * ic->journal_section_sectors; - rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp); + rw_journal_sectors(ic, opf, sector, n_sectors, comp); } static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections) @@ -1129,7 +1131,7 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi for (i = 0; i < commit_sections; i++) rw_section_mac(ic, commit_start + i, true); } - rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start, + rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start, commit_sections, &io_comp); } else { unsigned to_end; @@ -1141,7 +1143,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); if (try_wait_for_completion(&crypt_comp_1.comp)) { - rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); + rw_journal(ic, REQ_OP_WRITE | REQ_FUA, + commit_start, to_end, &io_comp); reinit_completion(&crypt_comp_1.comp); crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); @@ -1152,17 +1155,17 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); wait_for_completion_io(&crypt_comp_1.comp); - rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); + rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp); wait_for_completion_io(&crypt_comp_2.comp); } } else { for (i = 0; i < to_end; i++) rw_section_mac(ic, commit_start + i, true); - rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); + rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp); for (i = 0; i < commit_sections - to_end; i++) rw_section_mac(ic, i, true); } - rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp); + rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp); } wait_for_completion_io(&io_comp.comp); @@ -1188,8 +1191,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); - io_req.bi_op = REQ_OP_WRITE; - io_req.bi_op_flags = 0; + io_req.bi_opf = REQ_OP_WRITE; io_req.mem.type = DM_IO_PAGE_LIST; io_req.mem.ptr.pl = &ic->journal[pl_index]; io_req.mem.offset = pl_offset; @@ -1516,8 +1518,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat if (!ic->meta_dev) flush_data = false; if (flush_data) { - fr.io_req.bi_op = REQ_OP_WRITE, - fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC, + fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC, fr.io_req.mem.type = 
DM_IO_KMEM, fr.io_req.mem.ptr.addr = NULL, fr.io_req.notify.fn = flush_notify, @@ -2626,7 +2627,7 @@ static void recalc_write_super(struct dm_integrity_c *ic) if (dm_integrity_failed(ic)) return; - r = sync_rw_sb(ic, REQ_OP_WRITE, 0); + r = sync_rw_sb(ic, REQ_OP_WRITE); if (unlikely(r)) dm_integrity_io_error(ic, "writing superblock", r); } @@ -2706,8 +2707,7 @@ next_chunk: if (unlikely(dm_integrity_failed(ic))) goto err; - io_req.bi_op = REQ_OP_READ; - io_req.bi_op_flags = 0; + io_req.bi_opf = REQ_OP_READ; io_req.mem.type = DM_IO_VMA; io_req.mem.ptr.addr = ic->recalc_buffer; io_req.notify.fn = NULL; @@ -2800,7 +2800,7 @@ static void bitmap_block_work(struct work_struct *w) if (bio_list_empty(&waiting)) return; - rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, + rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL); @@ -2846,7 +2846,7 @@ static void bitmap_flush_work(struct work_struct *work) block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR); block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR); - rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, + rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); spin_lock_irq(&ic->endio_wait.lock); @@ -2918,7 +2918,7 @@ static void replay_journal(struct dm_integrity_c *ic) if (!ic->just_formatted) { DEBUG_print("reading journal\n"); - rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL); + rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL); if (ic->journal_io) DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal"); if (ic->journal_io) { @@ -3113,7 +3113,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti) /* set to 0 to test bitmap replay code */ init_journal(ic, 0, ic->journal_sections, 0); ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); - r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); + r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); if (unlikely(r)) dm_integrity_io_error(ic, "writing superblock", r); #endif @@ -3136,23 +3136,23 @@ static void dm_integrity_resume(struct dm_target *ti) if (ic->provided_data_sectors > old_provided_data_sectors && ic->mode == 'B' && ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) { - rw_journal_sectors(ic, REQ_OP_READ, 0, 0, + rw_journal_sectors(ic, REQ_OP_READ, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); block_bitmap_op(ic, ic->journal, old_provided_data_sectors, ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET); - rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, + rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); } ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); - r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); + r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); if (unlikely(r)) dm_integrity_io_error(ic, "writing superblock", r); } if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) { DEBUG_print("resume dirty_bitmap\n"); - rw_journal_sectors(ic, REQ_OP_READ, 0, 0, + rw_journal_sectors(ic, REQ_OP_READ, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); if (ic->mode == 'B') { if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit && @@ -3171,7 +3171,7 @@ static void dm_integrity_resume(struct dm_target *ti) block_bitmap_op(ic, ic->recalc_bitmap, 0, 
ic->provided_data_sectors, BITMAP_OP_SET); block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET); - rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, + rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); ic->sb->recalc_sector = cpu_to_le64(0); @@ -3187,7 +3187,7 @@ static void dm_integrity_resume(struct dm_target *ti) replay_journal(ic); ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); } - r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); + r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); if (unlikely(r)) dm_integrity_io_error(ic, "writing superblock", r); } else { @@ -3199,7 +3199,7 @@ static void dm_integrity_resume(struct dm_target *ti) if (ic->mode == 'B') { ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP); ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; - r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); + r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); if (unlikely(r)) dm_integrity_io_error(ic, "writing superblock", r); @@ -3215,7 +3215,7 @@ static void dm_integrity_resume(struct dm_target *ti) block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector), ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); } - rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, + rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); } } @@ -4256,7 +4256,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } - r = sync_rw_sb(ic, REQ_OP_READ, 0); + r = sync_rw_sb(ic, REQ_OP_READ); if (r) { ti->error = "Error reading superblock"; goto bad; @@ -4500,7 +4500,7 @@ try_smaller_buffer: ti->error = "Error initializing journal"; goto bad; } - r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); + r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); if (r) { ti->error = "Error initializing superblock"; goto bad; } diff --git a/drivers/md/dm-io-rewind.c b/drivers/md/dm-io-rewind.c new file mode 100644 index 000000000000..0db53ccb94ba --- /dev/null +++ b/drivers/md/dm-io-rewind.c @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2022 Red Hat, Inc. + */ + +#include <linux/bio.h> +#include <linux/blk-crypto.h> +#include <linux/blk-integrity.h> + +#include "dm-core.h" + +static inline bool dm_bvec_iter_rewind(const struct bio_vec *bv, + struct bvec_iter *iter, + unsigned int bytes) +{ + int idx; + + iter->bi_size += bytes; + if (bytes <= iter->bi_bvec_done) { + iter->bi_bvec_done -= bytes; + return true; + } + + bytes -= iter->bi_bvec_done; + idx = iter->bi_idx - 1; + + while (idx >= 0 && bytes && bytes > bv[idx].bv_len) { + bytes -= bv[idx].bv_len; + idx--; + } + + if (WARN_ONCE(idx < 0 && bytes, + "Attempted to rewind iter beyond bvec's boundaries\n")) { + iter->bi_size -= bytes; + iter->bi_bvec_done = 0; + iter->bi_idx = 0; + return false; + } + + iter->bi_idx = idx; + iter->bi_bvec_done = bv[idx].bv_len - bytes; + return true; +} + +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +/** + * dm_bio_integrity_rewind - Rewind integrity vector + * @bio: bio whose integrity vector to update + * @bytes_done: number of data bytes to rewind + * + * Description: This function calculates how many integrity bytes the + * number of completed data bytes corresponds to and rewinds the + * integrity vector accordingly.
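The dm_bvec_iter_rewind() helper just added is bvec_iter_advance() run backwards: it grows bi_size, steps bi_idx back across earlier bvecs, and recomputes bi_bvec_done. A minimal userspace sketch of the same bookkeeping, using simplified hypothetical types rather than the kernel structs:

    #include <assert.h>
    #include <stdio.h>

    struct vec { unsigned int len; };
    struct iter {
        unsigned int size;  /* bytes remaining, like bi_size */
        unsigned int done;  /* bytes consumed in vec[idx], like bi_bvec_done */
        int idx;            /* current vector index, like bi_idx */
    };

    static int rewind_iter(const struct vec *v, struct iter *it, unsigned int bytes)
    {
        int idx;

        it->size += bytes;
        if (bytes <= it->done) {    /* still inside the current vector */
            it->done -= bytes;
            return 1;
        }
        bytes -= it->done;
        idx = it->idx - 1;
        while (idx >= 0 && bytes && bytes > v[idx].len) {
            bytes -= v[idx].len;
            idx--;
        }
        if (idx < 0 && bytes)       /* rewound past the start: give up */
            return 0;
        it->idx = idx;
        it->done = v[idx].len - bytes;
        return 1;
    }

    int main(void)
    {
        struct vec v[2] = { { 512 }, { 512 } };
        /* iter advanced 768 bytes into a 1024-byte payload */
        struct iter it = { .size = 256, .done = 256, .idx = 1 };

        assert(rewind_iter(v, &it, 768));
        printf("size=%u idx=%d done=%u\n", it.size, it.idx, it.done);
        /* prints size=1024 idx=0 done=0: back at the start */
        return 0;
    }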
+ */ +static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); + unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); + + bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9); + dm_bvec_iter_rewind(bip->bip_vec, &bip->bip_iter, bytes); +} + +#else /* CONFIG_BLK_DEV_INTEGRITY */ + +static inline void dm_bio_integrity_rewind(struct bio *bio, + unsigned int bytes_done) +{ + return; +} + +#endif + +#if defined(CONFIG_BLK_INLINE_ENCRYPTION) + +/* Decrements @dun by @dec, treating @dun as a multi-limb integer. */ +static void dm_bio_crypt_dun_decrement(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], + unsigned int dec) +{ + int i; + + for (i = 0; dec && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) { + u64 prev = dun[i]; + + dun[i] -= dec; + if (dun[i] > prev) + dec = 1; + else + dec = 0; + } +} + +static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes) +{ + struct bio_crypt_ctx *bc = bio->bi_crypt_context; + + dm_bio_crypt_dun_decrement(bc->bc_dun, + bytes >> bc->bc_key->data_unit_size_bits); +} + +#else /* CONFIG_BLK_INLINE_ENCRYPTION */ + +static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes) +{ + return; +} + +#endif + +static inline void dm_bio_rewind_iter(const struct bio *bio, + struct bvec_iter *iter, unsigned int bytes) +{ + iter->bi_sector -= bytes >> 9; + + /* No advance means no rewind */ + if (bio_no_advance_iter(bio)) + iter->bi_size += bytes; + else + dm_bvec_iter_rewind(bio->bi_io_vec, iter, bytes); +} + +/** + * dm_bio_rewind - update ->bi_iter of @bio by rewinding @bytes. + * @bio: bio to rewind + * @bytes: how many bytes to rewind + * + * WARNING: + * Caller must ensure that @bio has a fixed end sector, to allow + * rewinding from end of bio and restoring its original position. + * Caller is also responsible for restoring bio's size. + */ +static void dm_bio_rewind(struct bio *bio, unsigned bytes) +{ + if (bio_integrity(bio)) + dm_bio_integrity_rewind(bio, bytes); + + if (bio_has_crypt_ctx(bio)) + dm_bio_crypt_rewind(bio, bytes); + + dm_bio_rewind_iter(bio, &bio->bi_iter, bytes); +} + +void dm_io_rewind(struct dm_io *io, struct bio_set *bs) +{ + struct bio *orig = io->orig_bio; + struct bio *new_orig = bio_alloc_clone(orig->bi_bdev, orig, + GFP_NOIO, bs); + /* + * dm_bio_rewind can restore to previous position since the + * end sector is fixed for original bio, but we still need + * to restore bio's size manually (using io->sectors). + */ + dm_bio_rewind(new_orig, ((io->sector_offset << 9) - + orig->bi_iter.bi_size)); + bio_trim(new_orig, 0, io->sectors); + + bio_chain(new_orig, orig); + /* + * __bi_remaining was increased (by dm_split_and_process_bio), + * so must drop the one added in bio_chain. + */ + atomic_dec(&orig->__bi_remaining); + io->orig_bio = new_orig; +} diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index e4b95eaeec8c..783564533459 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -293,7 +293,7 @@ static void km_dp_init(struct dpages *dp, void *data) /*----------------------------------------------------------------- * IO routines that accept a list of pages.
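dm_bio_crypt_dun_decrement() above is schoolbook subtraction over little-endian base-2^64 limbs: when a limb wraps around, one is borrowed from the next. A self-contained sketch with illustrative values (LIMBS stands in for BLK_CRYPTO_DUN_ARRAY_SIZE):

    #include <assert.h>
    #include <stdint.h>

    #define LIMBS 4

    /* Subtract dec from a little-endian multi-limb integer, propagating borrow. */
    static void dun_decrement(uint64_t dun[LIMBS], unsigned int dec)
    {
        for (int i = 0; dec && i < LIMBS; i++) {
            uint64_t prev = dun[i];

            dun[i] -= dec;
            dec = dun[i] > prev;    /* wrapped -> borrow one from next limb */
        }
    }

    int main(void)
    {
        uint64_t dun[LIMBS] = { 0, 1, 0, 0 };   /* the value 2^64 */

        dun_decrement(dun, 1);
        assert(dun[0] == UINT64_MAX && dun[1] == 0);    /* 2^64 - 1 */
        return 0;
    }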
*---------------------------------------------------------------*/ -static void do_region(int op, int op_flags, unsigned region, +static void do_region(const blk_opf_t opf, unsigned region, struct dm_io_region *where, struct dpages *dp, struct io *io) { @@ -306,6 +306,7 @@ static void do_region(int op, int op_flags, unsigned region, struct request_queue *q = bdev_get_queue(where->bdev); sector_t num_sectors; unsigned int special_cmd_max_sectors; + const enum req_op op = opf & REQ_OP_MASK; /* * Reject unsupported discard and write same requests. @@ -339,8 +340,8 @@ static void do_region(int op, int op_flags, unsigned region, (PAGE_SIZE >> SECTOR_SHIFT))); } - bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags, - GFP_NOIO, &io->client->bios); + bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO, + &io->client->bios); bio->bi_iter.bi_sector = where->sector + (where->count - remaining); bio->bi_end_io = endio; store_io_and_region_in_bio(bio, io, region); @@ -368,7 +369,7 @@ static void do_region(int op, int op_flags, unsigned region, } while (remaining); } -static void dispatch_io(int op, int op_flags, unsigned int num_regions, +static void dispatch_io(blk_opf_t opf, unsigned int num_regions, struct dm_io_region *where, struct dpages *dp, struct io *io, int sync) { @@ -378,7 +379,7 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions, BUG_ON(num_regions > DM_IO_MAX_REGIONS); if (sync) - op_flags |= REQ_SYNC; + opf |= REQ_SYNC; /* * For multiple regions we need to be careful to rewind @@ -386,8 +387,8 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions, */ for (i = 0; i < num_regions; i++) { *dp = old_pages; - if (where[i].count || (op_flags & REQ_PREFLUSH)) - do_region(op, op_flags, i, where + i, dp, io); + if (where[i].count || (opf & REQ_PREFLUSH)) + do_region(opf, i, where + i, dp, io); } /* @@ -411,13 +412,13 @@ static void sync_io_complete(unsigned long error, void *context) } static int sync_io(struct dm_io_client *client, unsigned int num_regions, - struct dm_io_region *where, int op, int op_flags, - struct dpages *dp, unsigned long *error_bits) + struct dm_io_region *where, blk_opf_t opf, struct dpages *dp, + unsigned long *error_bits) { struct io *io; struct sync_io sio; - if (num_regions > 1 && !op_is_write(op)) { + if (num_regions > 1 && !op_is_write(opf)) { WARN_ON(1); return -EIO; } @@ -434,7 +435,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, io->vma_invalidate_address = dp->vma_invalidate_address; io->vma_invalidate_size = dp->vma_invalidate_size; - dispatch_io(op, op_flags, num_regions, where, dp, io, 1); + dispatch_io(opf, num_regions, where, dp, io, 1); wait_for_completion_io(&sio.wait); @@ -445,12 +446,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, } static int async_io(struct dm_io_client *client, unsigned int num_regions, - struct dm_io_region *where, int op, int op_flags, + struct dm_io_region *where, blk_opf_t opf, struct dpages *dp, io_notify_fn fn, void *context) { struct io *io; - if (num_regions > 1 && !op_is_write(op)) { + if (num_regions > 1 && !op_is_write(opf)) { WARN_ON(1); fn(1, context); return -EIO; @@ -466,7 +467,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, io->vma_invalidate_address = dp->vma_invalidate_address; io->vma_invalidate_size = dp->vma_invalidate_size; - dispatch_io(op, op_flags, num_regions, where, dp, io, 0); + dispatch_io(opf, num_regions, where, dp, io, 0); return 0; } @@ -489,7 
+490,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp, case DM_IO_VMA: flush_kernel_vmap_range(io_req->mem.ptr.vma, size); - if (io_req->bi_op == REQ_OP_READ) { + if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) { dp->vma_invalidate_address = io_req->mem.ptr.vma; dp->vma_invalidate_size = size; } @@ -519,11 +520,10 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions, if (!io_req->notify.fn) return sync_io(io_req->client, num_regions, where, - io_req->bi_op, io_req->bi_op_flags, &dp, - sync_error_bits); + io_req->bi_opf, &dp, sync_error_bits); - return async_io(io_req->client, num_regions, where, io_req->bi_op, - io_req->bi_op_flags, &dp, io_req->notify.fn, + return async_io(io_req->client, num_regions, where, + io_req->bi_opf, &dp, io_req->notify.fn, io_req->notify.context); } EXPORT_SYMBOL(dm_io); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 87310fceb0d8..98976aaa9db9 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -832,7 +832,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param) if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) { if (get_disk_ro(disk)) param->flags |= DM_READONLY_FLAG; - param->target_count = dm_table_get_num_targets(table); + param->target_count = table->num_targets; } param->flags |= DM_ACTIVE_PRESENT_FLAG; @@ -845,7 +845,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param) if (table) { if (!(dm_table_get_mode(table) & FMODE_WRITE)) param->flags |= DM_READONLY_FLAG; - param->target_count = dm_table_get_num_targets(table); + param->target_count = table->num_targets; } dm_put_live_table(md, srcu_idx); } @@ -1248,7 +1248,7 @@ static void retrieve_status(struct dm_table *table, type = STATUSTYPE_INFO; /* Get all the target info */ - num_targets = dm_table_get_num_targets(table); + num_targets = table->num_targets; for (i = 0; i < num_targets; i++) { struct dm_target *ti = dm_table_get_target(table, i); size_t l; diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 37b03ab7e5c9..4d3bbbea2e9a 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -219,7 +219,7 @@ static struct page_list *alloc_pl(gfp_t gfp) if (!pl) return NULL; - pl->page = alloc_page(gfp); + pl->page = alloc_page(gfp | __GFP_HIGHMEM); if (!pl->page) { kfree(pl); return NULL; @@ -350,9 +350,9 @@ struct kcopyd_job { unsigned long write_err; /* - * Either READ or WRITE + * REQ_OP_READ, REQ_OP_WRITE or REQ_OP_WRITE_ZEROES. */ - int rw; + enum req_op op; struct dm_io_region source; /* @@ -418,7 +418,8 @@ static struct kcopyd_job *pop_io_job(struct list_head *jobs, * constraint and sequential writes that are at the right position. 
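The dm-io conversion shows the pattern the whole series follows: the old (bi_op, bi_op_flags) pair collapses into one blk_opf_t whose low bits name the operation and whose high bits carry flags, so the op is recovered with opf & REQ_OP_MASK, as dp_init() now does. A toy model of that encoding; the constants are illustrative only, not the kernel's actual bit layout:

    #include <assert.h>

    /* Toy blk_opf_t: low bits hold the op, high bits hold flags. */
    typedef unsigned int opf_t;

    #define OP_MASK   0xffu
    #define OP_READ   0x00u
    #define OP_WRITE  0x01u
    #define F_SYNC    (1u << 11)
    #define F_FUA     (1u << 12)

    int main(void)
    {
        opf_t opf = OP_WRITE | F_FUA | F_SYNC;  /* one value carries both */

        assert((opf & OP_MASK) == OP_WRITE);    /* recover the op */
        assert(opf & F_FUA);                    /* flags still visible */
        return 0;
    }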
*/ list_for_each_entry(job, jobs, list) { - if (job->rw == READ || !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) { + if (job->op == REQ_OP_READ || + !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) { list_del(&job->list); return job; } @@ -518,7 +519,7 @@ static void complete_io(unsigned long error, void *context) io_job_finish(kc->throttle); if (error) { - if (op_is_write(job->rw)) + if (op_is_write(job->op)) job->write_err |= error; else job->read_err = 1; @@ -530,11 +531,11 @@ static void complete_io(unsigned long error, void *context) } } - if (op_is_write(job->rw)) + if (op_is_write(job->op)) push(&kc->complete_jobs, job); else { - job->rw = WRITE; + job->op = REQ_OP_WRITE; push(&kc->io_jobs, job); } @@ -549,8 +550,7 @@ static int run_io_job(struct kcopyd_job *job) { int r; struct dm_io_request io_req = { - .bi_op = job->rw, - .bi_op_flags = 0, + .bi_opf = job->op, .mem.type = DM_IO_PAGE_LIST, .mem.ptr.pl = job->pages, .mem.offset = 0, @@ -571,7 +571,7 @@ static int run_io_job(struct kcopyd_job *job) io_job_start(job->kc->throttle); - if (job->rw == READ) + if (job->op == REQ_OP_READ) r = dm_io(&io_req, 1, &job->source, NULL); else r = dm_io(&io_req, job->num_dests, job->dests, NULL); @@ -614,7 +614,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, if (r < 0) { /* error this rogue job */ - if (op_is_write(job->rw)) + if (op_is_write(job->op)) job->write_err = (unsigned long) -1L; else job->read_err = 1; @@ -817,7 +817,7 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, if (from) { job->source = *from; job->pages = NULL; - job->rw = READ; + job->op = REQ_OP_READ; } else { memset(&job->source, 0, sizeof job->source); job->source.count = job->dests[0].count; @@ -826,10 +826,10 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, /* * Use WRITE ZEROES to optimize zeroing if all dests support it. 
*/ - job->rw = REQ_OP_WRITE_ZEROES; + job->op = REQ_OP_WRITE_ZEROES; for (i = 0; i < job->num_dests; i++) if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) { - job->rw = WRITE; + job->op = REQ_OP_WRITE; break; } } diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 0c6620e7b7bf..cf10fa667797 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -291,10 +291,9 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis core->nr_regions = le64_to_cpu(disk->nr_regions); } -static int rw_header(struct log_c *lc, int op) +static int rw_header(struct log_c *lc, enum req_op op) { - lc->io_req.bi_op = op; - lc->io_req.bi_op_flags = 0; + lc->io_req.bi_opf = op; return dm_io(&lc->io_req, 1, &lc->header_location, NULL); } @@ -307,8 +306,7 @@ static int flush_header(struct log_c *lc) .count = 0, }; - lc->io_req.bi_op = REQ_OP_WRITE; - lc->io_req.bi_op_flags = REQ_PREFLUSH; + lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; return dm_io(&lc->io_req, 1, &null_location, NULL); } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 80c9f7134e9b..1ec17c32867f 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -1369,7 +1369,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as, } rs->md.bitmap_info.daemon_sleep = value; } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) { - /* Userspace passes new data_offset after having extended the the data image LV */ + /* Userspace passes new data_offset after having extended the data image LV */ if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) { rs->ti->error = "Only one data_offset argument pair allowed"; return -EINVAL; @@ -2038,7 +2038,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload) rdev->sb_loaded = 0; - if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) { + if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) { DMERR("Failed to read superblock of device at position %d", rdev->raid_disk); md_error(rdev->mddev, rdev); @@ -3097,6 +3097,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) INIT_WORK(&rs->md.event_work, do_table_event); ti->private = rs; ti->num_flush_bios = 1; + ti->needs_bio_set_dev = true; /* Restore any requested new layout for conversion decision */ rs_config_restore(rs, &rs_layout); @@ -3509,7 +3510,7 @@ static void raid_status(struct dm_target *ti, status_type_t type, { struct raid_set *rs = ti->private; struct mddev *mddev = &rs->md; - struct r5conf *conf = mddev->private; + struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL; int i, max_nr_stripes = conf ? 
conf->max_nr_stripes : 0; unsigned long recovery; unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */ @@ -3819,7 +3820,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs) memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices)); - for (i = 0; i < mddev->raid_disks; i++) { + for (i = 0; i < rs->raid_disks; i++) { r = &rs->dev[i].rdev; /* HM FIXME: enhance journal device recovery processing */ if (test_bit(Journal, &r->flags)) diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 8811d484fdd1..06a38dc32025 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -260,8 +260,7 @@ static int mirror_flush(struct dm_target *ti) struct dm_io_region io[MAX_NR_MIRRORS]; struct mirror *m; struct dm_io_request io_req = { - .bi_op = REQ_OP_WRITE, - .bi_op_flags = REQ_PREFLUSH | REQ_SYNC, + .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC, .mem.type = DM_IO_KMEM, .mem.ptr.addr = NULL, .client = ms->io_client, @@ -535,8 +534,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio) { struct dm_io_region io; struct dm_io_request io_req = { - .bi_op = REQ_OP_READ, - .bi_op_flags = 0, + .bi_opf = REQ_OP_READ, .mem.type = DM_IO_BIO, .mem.ptr.bio = bio, .notify.fn = read_callback, @@ -648,9 +646,9 @@ static void do_write(struct mirror_set *ms, struct bio *bio) unsigned int i; struct dm_io_region io[MAX_NR_MIRRORS], *dest = io; struct mirror *m; + blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH); struct dm_io_request io_req = { - .bi_op = REQ_OP_WRITE, - .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH), + .bi_opf = REQ_OP_WRITE | op_flags, .mem.type = DM_IO_BIO, .mem.ptr.bio = bio, .notify.fn = write_callback, @@ -659,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio) }; if (bio_op(bio) == REQ_OP_DISCARD) { - io_req.bi_op = REQ_OP_DISCARD; + io_req.bi_opf = REQ_OP_DISCARD | op_flags; io_req.mem.type = DM_IO_KMEM; io_req.mem.ptr.addr = NULL; } diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index a83b98a8d2a9..4f49bbcce4f1 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -43,7 +43,6 @@ unsigned dm_get_reserved_rq_based_ios(void) return __dm_get_module_param(&reserved_rq_based_ios, RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS); } -EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios); static unsigned dm_get_blk_mq_nr_hw_queues(void) { diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 3bb5cff5d6fc..f46f930eedf9 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -226,8 +226,8 @@ static void do_metadata(struct work_struct *work) /* * Read or write a chunk aligned and sized block of data from a device. */ -static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op, - int op_flags, int metadata) +static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf, + int metadata) { struct dm_io_region where = { .bdev = dm_snap_cow(ps->store->snap)->bdev, @@ -235,8 +235,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op, .count = ps->store->chunk_size, }; struct dm_io_request io_req = { - .bi_op = op, - .bi_op_flags = op_flags, + .bi_opf = opf, .mem.type = DM_IO_VMA, .mem.ptr.vma = area, .client = ps->io_client, @@ -282,11 +281,11 @@ static void skip_metadata(struct pstore *ps) * Read or write a metadata area. Remembering to skip the first * chunk which holds the header. 
*/ -static int area_io(struct pstore *ps, int op, int op_flags) +static int area_io(struct pstore *ps, blk_opf_t opf) { chunk_t chunk = area_location(ps, ps->current_area); - return chunk_io(ps, ps->area, chunk, op, op_flags, 0); + return chunk_io(ps, ps->area, chunk, opf, 0); } static void zero_memory_area(struct pstore *ps) @@ -297,7 +296,7 @@ static void zero_memory_area(struct pstore *ps) static int zero_disk_area(struct pstore *ps, chunk_t area) { return chunk_io(ps, ps->zero_area, area_location(ps, area), - REQ_OP_WRITE, 0, 0); + REQ_OP_WRITE, 0); } static int read_header(struct pstore *ps, int *new_snapshot) @@ -329,7 +328,7 @@ static int read_header(struct pstore *ps, int *new_snapshot) if (r) return r; - r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1); + r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1); if (r) goto bad; @@ -390,7 +389,7 @@ static int write_header(struct pstore *ps) dh->version = cpu_to_le32(ps->version); dh->chunk_size = cpu_to_le32(ps->store->chunk_size); - return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1); + return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 1); } /* @@ -734,8 +733,8 @@ static void persistent_commit_exception(struct dm_exception_store *store, /* * Commit exceptions to disk. */ - if (ps->valid && area_io(ps, REQ_OP_WRITE, - REQ_PREFLUSH | REQ_FUA | REQ_SYNC)) + if (ps->valid && area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | + REQ_SYNC)) ps->valid = 0; /* @@ -775,7 +774,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store, return 0; ps->current_area--; - r = area_io(ps, REQ_OP_READ, 0); + r = area_io(ps, REQ_OP_READ); if (r < 0) return r; ps->current_committed = ps->exceptions_per_area; @@ -812,7 +811,7 @@ static int persistent_commit_merge(struct dm_exception_store *store, for (i = 0; i < nr_merged; i++) clear_exception(ps, ps->current_committed - 1 - i); - r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA); + r = area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA); if (r < 0) return r; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 0d336b5ec571..d1c2f84d27e3 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -2026,7 +2026,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) /* * Write to snapshot - higher level takes care of RW/RO * flags so we should only get this if we are - * writeable. + * writable. */ if (bio_data_dir(bio) == WRITE) { pe = __lookup_pending_exception(s, chunk); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index bd539afbfe88..332f96b58252 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -6,6 +6,7 @@ */ #include "dm-core.h" +#include "dm-rq.h" #include <linux/module.h> #include <linux/vmalloc.h> @@ -174,8 +175,6 @@ static void dm_table_destroy_crypto_profile(struct dm_table *t); void dm_table_destroy(struct dm_table *t) { - unsigned int i; - if (!t) return; @@ -184,13 +183,13 @@ void dm_table_destroy(struct dm_table *t) kvfree(t->index[t->depth - 2]); /* free the targets */ - for (i = 0; i < t->num_targets; i++) { - struct dm_target *tgt = t->targets + i; + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); - if (tgt->type->dtr) - tgt->type->dtr(tgt); + if (ti->type->dtr) + ti->type->dtr(ti); - dm_put_target_type(tgt->type); + dm_put_target_type(ti->type); } kvfree(t->highs); @@ -450,14 +449,14 @@ EXPORT_SYMBOL(dm_put_device); /* * Checks to see if the target joins onto the end of the table. 
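The adjoin() check that follows enforces that targets tile the table with no holes: the first target must start at sector 0 and each later one must begin exactly where its predecessor ends, otherwise the load fails with "Gap in table". The rule in isolation, with hypothetical types:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    struct target { sector_t begin, len; };

    /* A new target fits only if it starts where the previous one ends. */
    static int adjoins(const struct target *prev, const struct target *next)
    {
        if (!prev)      /* first target must start at sector 0 */
            return next->begin == 0;
        return next->begin == prev->begin + prev->len;
    }

    int main(void)
    {
        struct target a = { 0, 100 }, b = { 100, 50 }, c = { 200, 50 };

        assert(adjoins(NULL, &a));
        assert(adjoins(&a, &b));    /* contiguous */
        assert(!adjoins(&b, &c));   /* "Gap in table" */
        return 0;
    }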
*/ -static int adjoin(struct dm_table *table, struct dm_target *ti) +static int adjoin(struct dm_table *t, struct dm_target *ti) { struct dm_target *prev; - if (!table->num_targets) + if (!t->num_targets) return !ti->begin; - prev = &table->targets[table->num_targets - 1]; + prev = &t->targets[t->num_targets - 1]; return (ti->begin == (prev->begin + prev->len)); } @@ -564,8 +563,8 @@ int dm_split_args(int *argc, char ***argvp, char *input) * two or more targets, the size of each piece it gets split into must * be compatible with the logical_block_size of the target processing it. */ -static int validate_hardware_logical_block_alignment(struct dm_table *table, - struct queue_limits *limits) +static int validate_hardware_logical_block_alignment(struct dm_table *t, + struct queue_limits *limits) { /* * This function uses arithmetic modulo the logical_block_size @@ -587,13 +586,13 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table, struct dm_target *ti; struct queue_limits ti_limits; - unsigned i; + unsigned int i; /* * Check each entry in the table in turn. */ - for (i = 0; i < dm_table_get_num_targets(table); i++) { - ti = dm_table_get_target(table, i); + for (i = 0; i < t->num_targets; i++) { + ti = dm_table_get_target(t, i); blk_set_stacking_limits(&ti_limits); @@ -621,7 +620,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table, if (remaining) { DMWARN("%s: table line %u (start sect %llu len %llu) " "not aligned to h/w logical block size %u", - dm_device_name(table->md), i, + dm_device_name(t->md), i, (unsigned long long) ti->begin, (unsigned long long) ti->len, limits->logical_block_size); @@ -636,7 +635,7 @@ int dm_table_add_target(struct dm_table *t, const char *type, { int r = -EINVAL, argc; char **argv; - struct dm_target *tgt; + struct dm_target *ti; if (t->singleton) { DMERR("%s: target type %s must appear alone in table", @@ -646,87 +645,87 @@ int dm_table_add_target(struct dm_table *t, const char *type, BUG_ON(t->num_targets >= t->num_allocated); - tgt = t->targets + t->num_targets; - memset(tgt, 0, sizeof(*tgt)); + ti = t->targets + t->num_targets; + memset(ti, 0, sizeof(*ti)); if (!len) { DMERR("%s: zero-length target", dm_device_name(t->md)); return -EINVAL; } - tgt->type = dm_get_target_type(type); - if (!tgt->type) { + ti->type = dm_get_target_type(type); + if (!ti->type) { DMERR("%s: %s: unknown target type", dm_device_name(t->md), type); return -EINVAL; } - if (dm_target_needs_singleton(tgt->type)) { + if (dm_target_needs_singleton(ti->type)) { if (t->num_targets) { - tgt->error = "singleton target type must appear alone in table"; + ti->error = "singleton target type must appear alone in table"; goto bad; } t->singleton = true; } - if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) { - tgt->error = "target type may not be included in a read-only table"; + if (dm_target_always_writeable(ti->type) && !(t->mode & FMODE_WRITE)) { + ti->error = "target type may not be included in a read-only table"; goto bad; } if (t->immutable_target_type) { - if (t->immutable_target_type != tgt->type) { - tgt->error = "immutable target type cannot be mixed with other target types"; + if (t->immutable_target_type != ti->type) { + ti->error = "immutable target type cannot be mixed with other target types"; goto bad; } - } else if (dm_target_is_immutable(tgt->type)) { + } else if (dm_target_is_immutable(ti->type)) { if (t->num_targets) { - tgt->error = "immutable target type cannot be mixed with other target types"; + 
ti->error = "immutable target type cannot be mixed with other target types"; goto bad; } - t->immutable_target_type = tgt->type; + t->immutable_target_type = ti->type; } - if (dm_target_has_integrity(tgt->type)) + if (dm_target_has_integrity(ti->type)) t->integrity_added = 1; - tgt->table = t; - tgt->begin = start; - tgt->len = len; - tgt->error = "Unknown error"; + ti->table = t; + ti->begin = start; + ti->len = len; + ti->error = "Unknown error"; /* * Does this target adjoin the previous one ? */ - if (!adjoin(t, tgt)) { - tgt->error = "Gap in table"; + if (!adjoin(t, ti)) { + ti->error = "Gap in table"; goto bad; } r = dm_split_args(&argc, &argv, params); if (r) { - tgt->error = "couldn't split parameters"; + ti->error = "couldn't split parameters"; goto bad; } - r = tgt->type->ctr(tgt, argc, argv); + r = ti->type->ctr(ti, argc, argv); kfree(argv); if (r) goto bad; - t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; + t->highs[t->num_targets++] = ti->begin + ti->len - 1; - if (!tgt->num_discard_bios && tgt->discards_supported) + if (!ti->num_discard_bios && ti->discards_supported) DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.", dm_device_name(t->md), type); - if (tgt->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key)) + if (ti->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key)) static_branch_enable(&swap_bios_enabled); return 0; bad: - DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, tgt->error, ERR_PTR(r)); - dm_put_target_type(tgt->type); + DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, ti->error, ERR_PTR(r)); + dm_put_target_type(ti->type); return r; } @@ -825,14 +824,11 @@ static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_de } static bool dm_table_supports_dax(struct dm_table *t, - iterate_devices_callout_fn iterate_fn) + iterate_devices_callout_fn iterate_fn) { - struct dm_target *ti; - unsigned i; - /* Ensure that all targets support DAX. 
*/ - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); if (!ti->type->direct_access) return false; @@ -860,9 +856,8 @@ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev, static int dm_table_determine_type(struct dm_table *t) { - unsigned i; unsigned bio_based = 0, request_based = 0, hybrid = 0; - struct dm_target *tgt; + struct dm_target *ti; struct list_head *devices = dm_table_get_devices(t); enum dm_queue_mode live_md_type = dm_get_md_type(t->md); @@ -876,11 +871,11 @@ static int dm_table_determine_type(struct dm_table *t) goto verify_rq_based; } - for (i = 0; i < t->num_targets; i++) { - tgt = t->targets + i; - if (dm_target_hybrid(tgt)) + for (unsigned int i = 0; i < t->num_targets; i++) { + ti = dm_table_get_target(t, i); + if (dm_target_hybrid(ti)) hybrid = 1; - else if (dm_target_request_based(tgt)) + else if (dm_target_request_based(ti)) request_based = 1; else bio_based = 1; @@ -942,18 +937,18 @@ verify_rq_based: return 0; } - tgt = dm_table_get_immutable_target(t); - if (!tgt) { + ti = dm_table_get_immutable_target(t); + if (!ti) { DMERR("table load rejected: immutable target is required"); return -EINVAL; - } else if (tgt->max_io_len) { + } else if (ti->max_io_len) { DMERR("table load rejected: immutable target that splits IO is not supported"); return -EINVAL; } /* Non-request-stackable devices can't be used for request-based dm */ - if (!tgt->type->iterate_devices || - !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) { + if (!ti->type->iterate_devices || + !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) { DMERR("table load rejected: including non-request-stackable devices"); return -EINVAL; } @@ -983,11 +978,9 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t) struct dm_target *dm_table_get_wildcard_target(struct dm_table *t) { - struct dm_target *ti; - unsigned i; + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); if (dm_target_is_wildcard(ti->type)) return ti; } @@ -1010,32 +1003,56 @@ static bool dm_table_supports_poll(struct dm_table *t); static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) { enum dm_queue_mode type = dm_table_get_type(t); - unsigned per_io_data_size = 0; - unsigned min_pool_size = 0; - struct dm_target *ti; - unsigned i; - bool poll_supported = false; + unsigned int per_io_data_size = 0, front_pad, io_front_pad; + unsigned int min_pool_size = 0, pool_size; + struct dm_md_mempools *pools; if (unlikely(type == DM_TYPE_NONE)) { DMWARN("no table type is set, can't allocate mempools"); return -EINVAL; } - if (__table_type_bio_based(type)) { - for (i = 0; i < t->num_targets; i++) { - ti = t->targets + i; - per_io_data_size = max(per_io_data_size, ti->per_io_data_size); - min_pool_size = max(min_pool_size, ti->num_flush_bios); - } - poll_supported = dm_table_supports_poll(t); + pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); + if (!pools) + return -ENOMEM; + + if (type == DM_TYPE_REQUEST_BASED) { + pool_size = dm_get_reserved_rq_based_ios(); + front_pad = offsetof(struct dm_rq_clone_bio_info, clone); + goto init_bs; } - t->mempools = dm_alloc_md_mempools(md, type, per_io_data_size, min_pool_size, - t->integrity_supported, poll_supported); - if (!t->mempools) 
- return -ENOMEM; + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); + per_io_data_size = max(per_io_data_size, ti->per_io_data_size); + min_pool_size = max(min_pool_size, ti->num_flush_bios); + } + pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); + front_pad = roundup(per_io_data_size, + __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET; + + io_front_pad = roundup(per_io_data_size, + __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET; + if (bioset_init(&pools->io_bs, pool_size, io_front_pad, + dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0)) + goto out_free_pools; + if (t->integrity_supported && + bioset_integrity_create(&pools->io_bs, pool_size)) + goto out_free_pools; +init_bs: + if (bioset_init(&pools->bs, pool_size, front_pad, 0)) + goto out_free_pools; + if (t->integrity_supported && + bioset_integrity_create(&pools->bs, pool_size)) + goto out_free_pools; + + t->mempools = pools; return 0; + +out_free_pools: + dm_free_md_mempools(pools); + return -ENOMEM; } static int setup_indexes(struct dm_table *t) @@ -1100,10 +1117,10 @@ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t) struct list_head *devices = dm_table_get_devices(t); struct dm_dev_internal *dd = NULL; struct gendisk *prev_disk = NULL, *template_disk = NULL; - unsigned i; - for (i = 0; i < dm_table_get_num_targets(t); i++) { + for (unsigned int i = 0; i < t->num_targets; i++) { struct dm_target *ti = dm_table_get_target(t, i); + if (!dm_target_passes_integrity(ti->type)) goto no_integrity; } @@ -1217,18 +1234,19 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile, struct dm_keyslot_evict_args args = { key }; struct dm_table *t; int srcu_idx; - int i; - struct dm_target *ti; t = dm_get_live_table(md, &srcu_idx); if (!t) return 0; - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); + + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); + if (!ti->type->iterate_devices) continue; ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args); } + dm_put_live_table(md, srcu_idx); return args.err; } @@ -1277,7 +1295,6 @@ static int dm_table_construct_crypto_profile(struct dm_table *t) { struct dm_crypto_profile *dmcp; struct blk_crypto_profile *profile; - struct dm_target *ti; unsigned int i; bool empty_profile = true; @@ -1293,8 +1310,8 @@ static int dm_table_construct_crypto_profile(struct dm_table *t) memset(profile->modes_supported, 0xFF, sizeof(profile->modes_supported)); - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); + for (i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); if (!dm_target_passes_crypto(ti->type)) { blk_crypto_intersect_capabilities(profile, NULL); @@ -1444,14 +1461,6 @@ inline sector_t dm_table_get_size(struct dm_table *t) } EXPORT_SYMBOL(dm_table_get_size); -struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) -{ - if (index >= t->num_targets) - return NULL; - - return t->targets + index; -} - /* * Search the btree for the correct target. 
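The open-coded mempool setup above sizes a front_pad so per-io state is allocated in front of the embedded bio, letting the wrapper be recovered by constant-offset pointer arithmetic. A toy model of that layout trick; the names are illustrative, not the dm-core definitions:

    #include <assert.h>
    #include <stddef.h>
    #include <stdlib.h>

    struct bio_stub { int dummy; };

    struct tio {
        long per_io_state;      /* stands in for per_io_data_size bytes */
        struct bio_stub clone;  /* the bio lives at a fixed offset */
    };

    #define TIO_BIO_OFFSET offsetof(struct tio, clone)

    /* Recover the wrapper from the embedded bio, like clone-to-tio helpers do. */
    static struct tio *tio_from_bio(struct bio_stub *bio)
    {
        return (struct tio *)((char *)bio - TIO_BIO_OFFSET);
    }

    int main(void)
    {
        struct tio *t = malloc(sizeof(*t));

        assert(t && tio_from_bio(&t->clone) == t);
        free(t);
        return 0;
    }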
* @@ -1512,11 +1521,8 @@ static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev, static bool dm_table_any_dev_attr(struct dm_table *t, iterate_devices_callout_fn func, void *data) { - struct dm_target *ti; - unsigned int i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); if (ti->type->iterate_devices && ti->type->iterate_devices(ti, func, data)) @@ -1538,11 +1544,8 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev, static bool dm_table_supports_poll(struct dm_table *t) { - struct dm_target *ti; - unsigned i = 0; - - while (i < dm_table_get_num_targets(t)) { - ti = dm_table_get_target(t, i++); + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); if (!ti->type->iterate_devices || ti->type->iterate_devices(ti, device_not_poll_capable, NULL)) @@ -1558,18 +1561,15 @@ static bool dm_table_supports_poll(struct dm_table *t) * Returns false if the result is unknown because a target doesn't * support iterate_devices. */ -bool dm_table_has_no_data_devices(struct dm_table *table) +bool dm_table_has_no_data_devices(struct dm_table *t) { - struct dm_target *ti; - unsigned i, num_devices; - - for (i = 0; i < dm_table_get_num_targets(table); i++) { - ti = dm_table_get_target(table, i); + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); + unsigned num_devices = 0; if (!ti->type->iterate_devices) return false; - num_devices = 0; ti->type->iterate_devices(ti, count_device, &num_devices); if (num_devices) return false; @@ -1597,11 +1597,8 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev, static bool dm_table_supports_zoned_model(struct dm_table *t, enum blk_zoned_model zoned_model) { - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); if (dm_target_supports_zoned_hm(ti->type)) { if (!ti->type->iterate_devices || @@ -1620,13 +1617,11 @@ static bool dm_table_supports_zoned_model(struct dm_table *t, static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - struct request_queue *q = bdev_get_queue(dev->bdev); unsigned int *zone_sectors = data; - if (!blk_queue_is_zoned(q)) + if (!bdev_is_zoned(dev->bdev)) return 0; - - return blk_queue_zone_sectors(q) != *zone_sectors; + return bdev_zone_sectors(dev->bdev) != *zone_sectors; } /* @@ -1634,16 +1629,16 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev * * zone sectors, if the destination device is a zoned block device, it shall * have the specified zone_sectors. 
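dm_table_any_dev_attr() and the dm_table_supports_*() helpers converted here all share one shape: loop over the targets, and let each target run a small predicate over its underlying devices through iterate_devices. A schematic sketch with hypothetical names, using flush capability as the example predicate:

    #include <assert.h>
    #include <stdbool.h>

    struct dev { bool flush_capable; };
    struct target {
        struct dev *devs;
        unsigned int ndevs;
    };

    typedef int (*callout_fn)(struct target *t, struct dev *d);

    /* Returns nonzero as soon as the callback matches one device. */
    static int iterate_devices(struct target *t, callout_fn fn)
    {
        for (unsigned int i = 0; i < t->ndevs; i++)
            if (fn(t, &t->devs[i]))
                return 1;
        return 0;
    }

    static int device_flush_capable(struct target *t, struct dev *d)
    {
        return d->flush_capable;
    }

    int main(void)
    {
        struct dev devs[2] = { { false }, { true } };
        struct target t = { devs, 2 };

        /* "at least one underlying device supports flushes" */
        assert(iterate_devices(&t, device_flush_capable));
        return 0;
    }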
*/ -static int validate_hardware_zoned_model(struct dm_table *table, +static int validate_hardware_zoned_model(struct dm_table *t, enum blk_zoned_model zoned_model, unsigned int zone_sectors) { if (zoned_model == BLK_ZONED_NONE) return 0; - if (!dm_table_supports_zoned_model(table, zoned_model)) { + if (!dm_table_supports_zoned_model(t, zoned_model)) { DMERR("%s: zoned model is not consistent across all devices", - dm_device_name(table->md)); + dm_device_name(t->md)); return -EINVAL; } @@ -1651,9 +1646,9 @@ static int validate_hardware_zoned_model(struct dm_table *table, if (!zone_sectors || !is_power_of_2(zone_sectors)) return -EINVAL; - if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) { + if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) { DMERR("%s: zone sectors is not consistent across all zoned devices", - dm_device_name(table->md)); + dm_device_name(t->md)); return -EINVAL; } @@ -1663,21 +1658,19 @@ static int validate_hardware_zoned_model(struct dm_table *table, /* * Establish the new table's queue_limits and validate them. */ -int dm_calculate_queue_limits(struct dm_table *table, +int dm_calculate_queue_limits(struct dm_table *t, struct queue_limits *limits) { - struct dm_target *ti; struct queue_limits ti_limits; - unsigned i; enum blk_zoned_model zoned_model = BLK_ZONED_NONE; unsigned int zone_sectors = 0; blk_set_stacking_limits(limits); - for (i = 0; i < dm_table_get_num_targets(table); i++) { - blk_set_stacking_limits(&ti_limits); + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); - ti = dm_table_get_target(table, i); + blk_set_stacking_limits(&ti_limits); if (!ti->type->iterate_devices) goto combine_limits; @@ -1718,7 +1711,7 @@ combine_limits: DMWARN("%s: adding target device " "(start sect %llu len %llu) " "caused an alignment inconsistency", - dm_device_name(table->md), + dm_device_name(t->md), (unsigned long long) ti->begin, (unsigned long long) ti->len); } @@ -1738,10 +1731,10 @@ combine_limits: zoned_model = limits->zoned; zone_sectors = limits->chunk_sectors; } - if (validate_hardware_zoned_model(table, zoned_model, zone_sectors)) + if (validate_hardware_zoned_model(t, zoned_model, zone_sectors)) return -EINVAL; - return validate_hardware_logical_block_alignment(table, limits); + return validate_hardware_logical_block_alignment(t, limits); } /* @@ -1785,17 +1778,14 @@ static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) { - struct dm_target *ti; - unsigned i; - /* * Require at least one underlying device to support flushes. * t->devices includes internal dm devices such as mirror logs * so we need to use iterate_devices here, which targets * supporting flushes must provide. 
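The zone-size validation above relies on is_power_of_2(); for reference, the classic single-bit test behind it:

    #include <assert.h>

    /* The usual single-bit test: exactly one bit set. */
    static int is_pow2(unsigned long n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
        assert(is_pow2(524288));    /* e.g. 256 MiB zones in 512-byte sectors */
        assert(!is_pow2(0));
        assert(!is_pow2(524289));
        return 0;
    }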
*/ - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); if (!ti->num_flush_bios) continue; @@ -1849,11 +1839,8 @@ static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev * static bool dm_table_supports_write_zeroes(struct dm_table *t) { - struct dm_target *ti; - unsigned i = 0; - - while (i < dm_table_get_num_targets(t)) { - ti = dm_table_get_target(t, i++); + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); if (!ti->num_write_zeroes_bios) return false; @@ -1876,11 +1863,8 @@ static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev, static bool dm_table_supports_nowait(struct dm_table *t) { - struct dm_target *ti; - unsigned i = 0; - - while (i < dm_table_get_num_targets(t)) { - ti = dm_table_get_target(t, i++); + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); if (!dm_target_supports_nowait(ti->type)) return false; @@ -1901,11 +1885,8 @@ static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev, static bool dm_table_supports_discards(struct dm_table *t) { - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); if (!ti->num_discard_bios) return false; @@ -1933,11 +1914,8 @@ static int device_not_secure_erase_capable(struct dm_target *ti, static bool dm_table_supports_secure_erase(struct dm_table *t) { - struct dm_target *ti; - unsigned int i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); if (!ti->num_secure_erase_bios) return false; @@ -2067,11 +2045,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, return 0; } -unsigned int dm_table_get_num_targets(struct dm_table *t) -{ - return t->num_targets; -} - struct list_head *dm_table_get_devices(struct dm_table *t) { return &t->devices; @@ -2091,12 +2064,11 @@ enum suspend_mode { static void suspend_targets(struct dm_table *t, enum suspend_mode mode) { - int i = t->num_targets; - struct dm_target *ti = t->targets; - lockdep_assert_held(&t->md->suspend_lock); - while (i--) { + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); + switch (mode) { case PRESUSPEND: if (ti->type->presuspend) @@ -2111,7 +2083,6 @@ static void suspend_targets(struct dm_table *t, enum suspend_mode mode) ti->type->postsuspend(ti); break; } - ti++; } } @@ -2141,12 +2112,13 @@ void dm_table_postsuspend_targets(struct dm_table *t) int dm_table_resume_targets(struct dm_table *t) { - int i, r = 0; + unsigned int i; + int r = 0; lockdep_assert_held(&t->md->suspend_lock); for (i = 0; i < t->num_targets; i++) { - struct dm_target *ti = t->targets + i; + struct dm_target *ti = dm_table_get_target(t, i); if (!ti->type->preresume) continue; @@ -2160,7 +2132,7 @@ int dm_table_resume_targets(struct dm_table *t) } for (i = 0; i < t->num_targets; i++) { - struct dm_target *ti = t->targets + i; + struct dm_target *ti = dm_table_get_target(t, i); if (ti->type->resume) ti->type->resume(ti); diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 
2db7030aba00..a27395c8621f 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -2045,10 +2045,13 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, dm_sm_threshold_fn fn, void *context) { - int r; + int r = -EINVAL; pmd_write_lock_in_core(pmd); - r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context); + if (!pmd->fail_io) { + r = dm_sm_register_threshold_callback(pmd->metadata_sm, + threshold, fn, context); + } pmd_write_unlock(pmd); return r; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 84c083f76673..e76c96c760a9 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3375,8 +3375,10 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) calc_metadata_threshold(pt), metadata_low_callback, pool); - if (r) + if (r) { + ti->error = "Error registering metadata threshold"; goto out_flags_changed; + } dm_pool_register_pre_commit_callback(pool->pmd, metadata_pre_commit_callback, pool); diff --git a/drivers/md/dm-verity-loadpin.c b/drivers/md/dm-verity-loadpin.c new file mode 100644 index 000000000000..387ec43aef72 --- /dev/null +++ b/drivers/md/dm-verity-loadpin.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/list.h> +#include <linux/kernel.h> +#include <linux/dm-verity-loadpin.h> + +#include "dm.h" +#include "dm-core.h" +#include "dm-verity.h" + +#define DM_MSG_PREFIX "verity-loadpin" + +LIST_HEAD(dm_verity_loadpin_trusted_root_digests); + +static bool is_trusted_verity_target(struct dm_target *ti) +{ + u8 *root_digest; + unsigned int digest_size; + struct dm_verity_loadpin_trusted_root_digest *trd; + bool trusted = false; + + if (!dm_is_verity_target(ti)) + return false; + + if (dm_verity_get_root_digest(ti, &root_digest, &digest_size)) + return false; + + list_for_each_entry(trd, &dm_verity_loadpin_trusted_root_digests, node) { + if ((trd->len == digest_size) && + !memcmp(trd->data, root_digest, digest_size)) { + trusted = true; + break; + } + } + + kfree(root_digest); + + return trusted; +} + +/* + * Determines whether the file system of a superblock is located on + * a verity device that is trusted by LoadPin. 
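is_trusted_verity_target() above matches the target's root digest against the trusted list by comparing length and then memcmp() on the bytes. The same match reduced to essentials, with a fixed array standing in for the kernel list and made-up digest values:

    #include <assert.h>
    #include <stdbool.h>
    #include <string.h>

    struct trusted_digest {
        const unsigned char *data;
        unsigned int len;
    };

    static bool digest_is_trusted(const unsigned char *digest, unsigned int len,
                                  const struct trusted_digest *trusted, int n)
    {
        for (int i = 0; i < n; i++)
            if (trusted[i].len == len &&
                !memcmp(trusted[i].data, digest, len))
                return true;
        return false;
    }

    int main(void)
    {
        static const unsigned char root[4] = { 0xde, 0xad, 0xbe, 0xef };
        struct trusted_digest list[1] = { { root, sizeof(root) } };

        assert(digest_is_trusted(root, sizeof(root), list, 1));
        return 0;
    }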
+ */ +bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev) +{ + struct mapped_device *md; + struct dm_table *table; + struct dm_target *ti; + int srcu_idx; + bool trusted = false; + + if (list_empty(&dm_verity_loadpin_trusted_root_digests)) + return false; + + md = dm_get_md(bdev->bd_dev); + if (!md) + return false; + + table = dm_get_live_table(md, &srcu_idx); + + if (table->num_targets != 1) + goto out; + + ti = dm_table_get_target(table, 0); + + if (is_trusted_verity_target(ti)) + trusted = true; + +out: + dm_put_live_table(md, srcu_idx); + dm_put(md); + + return trusted; +} diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index d6dbd47492a8..4fd853a56b1a 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/reboot.h> #include <linux/scatterlist.h> +#include <linux/string.h> #define DM_MSG_PREFIX "verity" @@ -527,11 +528,10 @@ static int verity_verify_io(struct dm_verity_io *io) if (v->validated_blocks) set_bit(cur_block, v->validated_blocks); continue; - } - else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, - cur_block, NULL, &start) == 0) + } else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, + cur_block, NULL, &start) == 0) { continue; - else { + } else { if (bio->bi_status) { /* * Error correction failed; Just return error @@ -1310,10 +1310,40 @@ bad: return r; } +/* + * Check whether a DM target is a verity target. + */ +bool dm_is_verity_target(struct dm_target *ti) +{ + return ti->type->module == THIS_MODULE; +} + +/* + * Get the root digest of a verity target. + * + * Returns a copy of the root digest; the caller is responsible for + * freeing the memory of the digest. + */ +int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned int *digest_size) +{ + struct dm_verity *v = ti->private; + + if (!dm_is_verity_target(ti)) + return -EINVAL; + + *root_digest = kmemdup(v->root_digest, v->digest_size, GFP_KERNEL); + if (*root_digest == NULL) + return -ENOMEM; + + *digest_size = v->digest_size; + + return 0; +} + static struct target_type verity_target = { .name = "verity", .features = DM_TARGET_IMMUTABLE, - .version = {1, 8, 0}, + .version = {1, 8, 1}, .module = THIS_MODULE, .ctr = verity_ctr, .dtr = verity_dtr, diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index 4e769d13473a..c832cc3e3d24 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -129,4 +129,8 @@ extern int verity_hash(struct dm_verity *v, struct ahash_request *req, extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, sector_t block, u8 *digest, bool *is_zero); +extern bool dm_is_verity_target(struct dm_target *ti); +extern int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, + unsigned int *digest_size); + #endif /* DM_VERITY_H */ diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index d74c5a7a0ab4..1fc161d65673 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -22,7 +22,7 @@ #define HIGH_WATERMARK 50 #define LOW_WATERMARK 45 -#define MAX_WRITEBACK_JOBS 0 +#define MAX_WRITEBACK_JOBS min(0x10000000 / PAGE_SIZE, totalram_pages() / 16) #define ENDIO_LATENCY 16 #define WRITEBACK_LATENCY 64 #define AUTOCOMMIT_BLOCKS_SSD 65536 @@ -523,8 +523,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios) region.sector += wc->start_sector; atomic_inc(&endio.count); - req.bi_op = REQ_OP_WRITE; - req.bi_op_flags = REQ_SYNC; +
req.bi_opf = REQ_OP_WRITE | REQ_SYNC; req.mem.type = DM_IO_VMA; req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY; req.client = wc->dm_io; @@ -562,8 +561,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc) region.sector += wc->start_sector; - req.bi_op = REQ_OP_WRITE; - req.bi_op_flags = REQ_SYNC | REQ_FUA; + req.bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA; req.mem.type = DM_IO_VMA; req.mem.ptr.vma = (char *)wc->memory_map; req.client = wc->dm_io; @@ -592,8 +590,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev) region.bdev = dev->bdev; region.sector = 0; region.count = 0; - req.bi_op = REQ_OP_WRITE; - req.bi_op_flags = REQ_PREFLUSH; + req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; req.mem.type = DM_IO_KMEM; req.mem.ptr.addr = NULL; req.client = wc->dm_io; @@ -981,8 +978,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors region.bdev = wc->ssd_dev->bdev; region.sector = wc->start_sector; region.count = n_sectors; - req.bi_op = REQ_OP_READ; - req.bi_op_flags = REQ_SYNC; + req.bi_opf = REQ_OP_READ | REQ_SYNC; req.mem.type = DM_IO_VMA; req.mem.ptr.vma = (char *)wc->memory_map; req.client = wc->dm_io; @@ -1329,8 +1325,8 @@ enum wc_map_op { WC_MAP_ERROR, }; -static enum wc_map_op writecache_map_remap_origin(struct dm_writecache *wc, struct bio *bio, - struct wc_entry *e) +static void writecache_map_remap_origin(struct dm_writecache *wc, struct bio *bio, + struct wc_entry *e) { if (e) { sector_t next_boundary = @@ -1338,8 +1334,6 @@ static enum wc_map_op writecache_map_remap_origin(struct dm_writecache *wc, stru if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) dm_accept_partial_bio(bio, next_boundary); } - - return WC_MAP_REMAP_ORIGIN; } static enum wc_map_op writecache_map_read(struct dm_writecache *wc, struct bio *bio) @@ -1366,14 +1360,16 @@ read_next_block: map_op = WC_MAP_REMAP; } } else { - map_op = writecache_map_remap_origin(wc, bio, e); + writecache_map_remap_origin(wc, bio, e); + wc->stats.reads += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits; + map_op = WC_MAP_REMAP_ORIGIN; } return map_op; } -static enum wc_map_op writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio, - struct wc_entry *e, bool search_used) +static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio, + struct wc_entry *e, bool search_used) { unsigned bio_size = wc->block_size; sector_t start_cache_sec = cache_sector(wc, e); @@ -1413,14 +1409,15 @@ static enum wc_map_op writecache_bio_copy_ssd(struct dm_writecache *wc, struct b bio->bi_iter.bi_sector = start_cache_sec; dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT); + wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits; + wc->stats.writes_allocate += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits; + if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) { wc->uncommitted_blocks = 0; queue_work(wc->writeback_wq, &wc->flush_work); } else { writecache_schedule_autocommit(wc); } - - return WC_MAP_REMAP; } static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio *bio) @@ -1430,9 +1427,10 @@ static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio do { bool found_entry = false; bool search_used = false; - wc->stats.writes++; - if (writecache_has_error(wc)) + if (writecache_has_error(wc)) { + wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits; return WC_MAP_ERROR; + } e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 
0); if (e) { if (!writecache_entry_is_committed(wc, e)) { @@ -1456,9 +1454,11 @@ static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio if (unlikely(!e)) { if (!WC_MODE_PMEM(wc) && !found_entry) { direct_write: - wc->stats.writes_around++; e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING); - return writecache_map_remap_origin(wc, bio, e); + writecache_map_remap_origin(wc, bio, e); + wc->stats.writes_around += bio->bi_iter.bi_size >> wc->block_size_bits; + wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits; + return WC_MAP_REMAP_ORIGIN; } wc->stats.writes_blocked_on_freelist++; writecache_wait_on_freelist(wc); @@ -1469,10 +1469,13 @@ direct_write: wc->uncommitted_blocks++; wc->stats.writes_allocate++; bio_copy: - if (WC_MODE_PMEM(wc)) + if (WC_MODE_PMEM(wc)) { bio_copy_block(wc, bio, memory_data(wc, e)); - else - return writecache_bio_copy_ssd(wc, bio, e, search_used); + wc->stats.writes++; + } else { + writecache_bio_copy_ssd(wc, bio, e, search_used); + return WC_MAP_REMAP; + } } while (bio->bi_iter.bi_size); if (unlikely(bio->bi_opf & REQ_FUA || wc->uncommitted_blocks >= wc->autocommit_blocks)) @@ -1507,7 +1510,7 @@ static enum wc_map_op writecache_map_flush(struct dm_writecache *wc, struct bio static enum wc_map_op writecache_map_discard(struct dm_writecache *wc, struct bio *bio) { - wc->stats.discards++; + wc->stats.discards += bio->bi_iter.bi_size >> wc->block_size_bits; if (writecache_has_error(wc)) return WC_MAP_ERROR; diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 3e7b1fe1580b..3dafc0e8b7a9 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -139,13 +139,11 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio) void dm_cleanup_zoned_dev(struct mapped_device *md) { - struct request_queue *q = md->queue; - - if (q) { - kfree(q->conv_zones_bitmap); - q->conv_zones_bitmap = NULL; - kfree(q->seq_zones_wlock); - q->seq_zones_wlock = NULL; + if (md->disk) { + kfree(md->disk->conv_zones_bitmap); + md->disk->conv_zones_bitmap = NULL; + kfree(md->disk->seq_zones_wlock); + md->disk->seq_zones_wlock = NULL; } kvfree(md->zwp_offset); @@ -179,31 +177,31 @@ static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx, void *data) { struct mapped_device *md = data; - struct request_queue *q = md->queue; + struct gendisk *disk = md->disk; switch (zone->type) { case BLK_ZONE_TYPE_CONVENTIONAL: - if (!q->conv_zones_bitmap) { - q->conv_zones_bitmap = - kcalloc(BITS_TO_LONGS(q->nr_zones), + if (!disk->conv_zones_bitmap) { + disk->conv_zones_bitmap = + kcalloc(BITS_TO_LONGS(disk->nr_zones), sizeof(unsigned long), GFP_NOIO); - if (!q->conv_zones_bitmap) + if (!disk->conv_zones_bitmap) return -ENOMEM; } - set_bit(idx, q->conv_zones_bitmap); + set_bit(idx, disk->conv_zones_bitmap); break; case BLK_ZONE_TYPE_SEQWRITE_REQ: case BLK_ZONE_TYPE_SEQWRITE_PREF: - if (!q->seq_zones_wlock) { - q->seq_zones_wlock = - kcalloc(BITS_TO_LONGS(q->nr_zones), + if (!disk->seq_zones_wlock) { + disk->seq_zones_wlock = + kcalloc(BITS_TO_LONGS(disk->nr_zones), sizeof(unsigned long), GFP_NOIO); - if (!q->seq_zones_wlock) + if (!disk->seq_zones_wlock) return -ENOMEM; } if (!md->zwp_offset) { md->zwp_offset = - kvcalloc(q->nr_zones, sizeof(unsigned int), + kvcalloc(disk->nr_zones, sizeof(unsigned int), GFP_KERNEL); if (!md->zwp_offset) return -ENOMEM; @@ -228,7 +226,7 @@ static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx, */ static int dm_revalidate_zones(struct mapped_device *md, struct 
dm_table *t) { - struct request_queue *q = md->queue; + struct gendisk *disk = md->disk; unsigned int noio_flag; int ret; @@ -236,7 +234,7 @@ static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t) * Check if something changed. If yes, cleanup the current resources * and reallocate everything. */ - if (!q->nr_zones || q->nr_zones != md->nr_zones) + if (!disk->nr_zones || disk->nr_zones != md->nr_zones) dm_cleanup_zoned_dev(md); if (md->nr_zones) return 0; @@ -246,17 +244,17 @@ static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t) * operations in this context are done as if GFP_NOIO was specified. */ noio_flag = memalloc_noio_save(); - ret = dm_blk_do_report_zones(md, t, 0, q->nr_zones, + ret = dm_blk_do_report_zones(md, t, 0, disk->nr_zones, dm_zone_revalidate_cb, md); memalloc_noio_restore(noio_flag); if (ret < 0) goto err; - if (ret != q->nr_zones) { + if (ret != disk->nr_zones) { ret = -EIO; goto err; } - md->nr_zones = q->nr_zones; + md->nr_zones = disk->nr_zones; return 0; @@ -270,16 +268,13 @@ static int device_not_zone_append_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - return !blk_queue_is_zoned(bdev_get_queue(dev->bdev)); + return !bdev_is_zoned(dev->bdev); } static bool dm_table_supports_zone_append(struct dm_table *t) { - struct dm_target *ti; - unsigned int i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); if (ti->emulate_zone_append) return false; @@ -301,7 +296,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q) * correct value to be exposed in sysfs queue/nr_zones. */ WARN_ON_ONCE(queue_is_mq(q)); - q->nr_zones = blkdev_nr_zones(md->disk); + md->disk->nr_zones = bdev_nr_zones(md->disk->part0); /* Check if zone append is natively supported */ if (dm_table_supports_zone_append(t)) { @@ -334,7 +329,7 @@ static int dm_update_zone_wp_offset_cb(struct blk_zone *zone, unsigned int idx, static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno, unsigned int *wp_ofst) { - sector_t sector = zno * blk_queue_zone_sectors(md->queue); + sector_t sector = zno * bdev_zone_sectors(md->disk->part0); unsigned int noio_flag; struct dm_table *t; int srcu_idx, ret; @@ -361,7 +356,7 @@ static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno, } struct orig_bio_details { - unsigned int op; + enum req_op op; unsigned int nr_sectors; }; @@ -373,7 +368,7 @@ struct orig_bio_details { static bool dm_zone_map_bio_begin(struct mapped_device *md, unsigned int zno, struct bio *clone) { - sector_t zsectors = blk_queue_zone_sectors(md->queue); + sector_t zsectors = bdev_zone_sectors(md->disk->part0); unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]); /* @@ -443,7 +438,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int z return BLK_STS_OK; case REQ_OP_ZONE_FINISH: WRITE_ONCE(md->zwp_offset[zno], - blk_queue_zone_sectors(md->queue)); + bdev_zone_sectors(md->disk->part0)); return BLK_STS_OK; case REQ_OP_WRITE_ZEROES: case REQ_OP_WRITE: @@ -466,26 +461,26 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int z } } -static inline void dm_zone_lock(struct request_queue *q, - unsigned int zno, struct bio *clone) +static inline void dm_zone_lock(struct gendisk *disk, unsigned int zno, + struct bio *clone) { if 
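The dm-zone conversion above moves conv_zones_bitmap and seq_zones_wlock from the request queue to the gendisk; both are ordinary unsigned-long bitmaps sized with BITS_TO_LONGS(nr_zones). A self-contained userspace rendition of that sizing and the bit operations (these helpers mimic, but are not, the kernel's set_bit/test_bit):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace take on the kernel's BITS_TO_LONGS() sizing used for
     * conv_zones_bitmap / seq_zones_wlock above. */
    #define BITS_PER_LONG (CHAR_BIT * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static void set_bit_(unsigned int nr, unsigned long *map)
    { map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); }

    static int test_bit_(unsigned int nr, const unsigned long *map)
    { return !!(map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG))); }

    int main(void)
    {
        unsigned int nr_zones = 200;            /* hypothetical disk->nr_zones */
        unsigned long *conv = calloc(BITS_TO_LONGS(nr_zones), sizeof(*conv));

        if (!conv)
            return 1;
        set_bit_(42, conv);                     /* zone 42 is conventional */
        printf("zone 42 conventional: %d\n", test_bit_(42, conv));
        free(conv);
        return 0;
    }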
(WARN_ON_ONCE(bio_flagged(clone, BIO_ZONE_WRITE_LOCKED))) return; - wait_on_bit_lock_io(q->seq_zones_wlock, zno, TASK_UNINTERRUPTIBLE); + wait_on_bit_lock_io(disk->seq_zones_wlock, zno, TASK_UNINTERRUPTIBLE); bio_set_flag(clone, BIO_ZONE_WRITE_LOCKED); } -static inline void dm_zone_unlock(struct request_queue *q, - unsigned int zno, struct bio *clone) +static inline void dm_zone_unlock(struct gendisk *disk, unsigned int zno, + struct bio *clone) { if (!bio_flagged(clone, BIO_ZONE_WRITE_LOCKED)) return; - WARN_ON_ONCE(!test_bit(zno, q->seq_zones_wlock)); - clear_bit_unlock(zno, q->seq_zones_wlock); + WARN_ON_ONCE(!test_bit(zno, disk->seq_zones_wlock)); + clear_bit_unlock(zno, disk->seq_zones_wlock); smp_mb__after_atomic(); - wake_up_bit(q->seq_zones_wlock, zno); + wake_up_bit(disk->seq_zones_wlock, zno); bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED); } @@ -520,7 +515,6 @@ int dm_zone_map_bio(struct dm_target_io *tio) struct dm_io *io = tio->io; struct dm_target *ti = tio->ti; struct mapped_device *md = io->md; - struct request_queue *q = md->queue; struct bio *clone = &tio->clone; struct orig_bio_details orig_bio_details; unsigned int zno; @@ -536,7 +530,7 @@ int dm_zone_map_bio(struct dm_target_io *tio) /* Lock the target zone */ zno = bio_zone_no(clone); - dm_zone_lock(q, zno, clone); + dm_zone_lock(md->disk, zno, clone); orig_bio_details.nr_sectors = bio_sectors(clone); orig_bio_details.op = bio_op(clone); @@ -546,7 +540,7 @@ int dm_zone_map_bio(struct dm_target_io *tio) * both valid, and if the bio is a zone append, remap it to a write. */ if (!dm_zone_map_bio_begin(md, zno, clone)) { - dm_zone_unlock(q, zno, clone); + dm_zone_unlock(md->disk, zno, clone); return DM_MAPIO_KILL; } @@ -570,12 +564,12 @@ int dm_zone_map_bio(struct dm_target_io *tio) sts = dm_zone_map_bio_end(md, zno, &orig_bio_details, *tio->len_ptr); if (sts != BLK_STS_OK) - dm_zone_unlock(q, zno, clone); + dm_zone_unlock(md->disk, zno, clone); break; case DM_MAPIO_REQUEUE: case DM_MAPIO_KILL: default: - dm_zone_unlock(q, zno, clone); + dm_zone_unlock(md->disk, zno, clone); sts = BLK_STS_IOERR; break; } @@ -592,7 +586,7 @@ int dm_zone_map_bio(struct dm_target_io *tio) void dm_zone_endio(struct dm_io *io, struct bio *clone) { struct mapped_device *md = io->md; - struct request_queue *q = md->queue; + struct gendisk *disk = md->disk; struct bio *orig_bio = io->orig_bio; unsigned int zwp_offset; unsigned int zno; @@ -608,7 +602,8 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone) */ if (clone->bi_status == BLK_STS_OK && bio_op(clone) == REQ_OP_ZONE_APPEND) { - sector_t mask = (sector_t)blk_queue_zone_sectors(q) - 1; + sector_t mask = + (sector_t)bdev_zone_sectors(disk->part0) - 1; orig_bio->bi_iter.bi_sector += clone->bi_iter.bi_sector & mask; @@ -649,5 +644,5 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone) zwp_offset - bio_sectors(orig_bio); } - dm_zone_unlock(q, zno, clone); + dm_zone_unlock(disk, zno, clone); } diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index d1ea66114d14..34db364c23a8 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -737,7 +737,7 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, /* * Read/write a metadata block. 
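dm_zone_lock()/dm_zone_unlock() above serialize writers per zone by taking one bit in seq_zones_wlock. A rough userspace analogue using C11 atomics; note that the kernel sleeps on a wait queue (wait_on_bit_lock_io/wake_up_bit) where this sketch merely spins, and it assumes fewer zones than bits in a long:

    #include <stdatomic.h>
    #include <stdio.h>

    /* One lock bit per zone in a shared word. */
    static _Atomic unsigned long seq_zones_wlock;

    static void zone_lock(unsigned int zno)
    {
        unsigned long bit = 1UL << zno;

        while (atomic_fetch_or(&seq_zones_wlock, bit) & bit)
            ; /* another writer owns this zone; spin (kernel: sleep) */
    }

    static void zone_unlock(unsigned int zno)
    {
        atomic_fetch_and(&seq_zones_wlock, ~(1UL << zno));
        /* the kernel additionally does wake_up_bit() for sleepers */
    }

    int main(void)
    {
        zone_lock(3);
        printf("zone 3 locked for writing\n");
        zone_unlock(3);
        return 0;
    }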
*/ -static int dmz_rdwr_block(struct dmz_dev *dev, int op, +static int dmz_rdwr_block(struct dmz_dev *dev, enum req_op op, sector_t block, struct page *page) { struct bio *bio; @@ -2045,7 +2045,8 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd, * allocated and used to map the chunk. * The zone returned will be set to the active state. */ -struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op) +struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, + unsigned int chunk, enum req_op op) { struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT]; struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data; diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 0ec5d8b9b1a4..95b132b52f33 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -764,8 +764,7 @@ static void dmz_put_zoned_device(struct dm_target *ti) static int dmz_fixup_devices(struct dm_target *ti) { struct dmz_target *dmz = ti->private; - struct dmz_dev *reg_dev, *zoned_dev; - struct request_queue *q; + struct dmz_dev *reg_dev = NULL; sector_t zone_nr_sectors = 0; int i; @@ -780,32 +779,32 @@ static int dmz_fixup_devices(struct dm_target *ti) return -EINVAL; } for (i = 1; i < dmz->nr_ddevs; i++) { - zoned_dev = &dmz->dev[i]; + struct dmz_dev *zoned_dev = &dmz->dev[i]; + struct block_device *bdev = zoned_dev->bdev; + if (zoned_dev->flags & DMZ_BDEV_REGULAR) { ti->error = "Secondary disk is not a zoned device"; return -EINVAL; } - q = bdev_get_queue(zoned_dev->bdev); if (zone_nr_sectors && - zone_nr_sectors != blk_queue_zone_sectors(q)) { + zone_nr_sectors != bdev_zone_sectors(bdev)) { ti->error = "Zone nr sectors mismatch"; return -EINVAL; } - zone_nr_sectors = blk_queue_zone_sectors(q); + zone_nr_sectors = bdev_zone_sectors(bdev); zoned_dev->zone_nr_sectors = zone_nr_sectors; - zoned_dev->nr_zones = - blkdev_nr_zones(zoned_dev->bdev->bd_disk); + zoned_dev->nr_zones = bdev_nr_zones(bdev); } } else { - reg_dev = NULL; - zoned_dev = &dmz->dev[0]; + struct dmz_dev *zoned_dev = &dmz->dev[0]; + struct block_device *bdev = zoned_dev->bdev; + if (zoned_dev->flags & DMZ_BDEV_REGULAR) { ti->error = "Disk is not a zoned device"; return -EINVAL; } - q = bdev_get_queue(zoned_dev->bdev); - zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q); - zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk); + zoned_dev->zone_nr_sectors = bdev_zone_sectors(bdev); + zoned_dev->nr_zones = bdev_nr_zones(bdev); } if (reg_dev) { diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h index a02744a0846c..265494d3f711 100644 --- a/drivers/md/dm-zoned.h +++ b/drivers/md/dm-zoned.h @@ -248,7 +248,7 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd, unsigned int dev_idx, bool idle); struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, - unsigned int chunk, int op); + unsigned int chunk, enum req_op op); void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *zone); struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd, struct dm_zone *dzone); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2b75f1ef7386..99642f69bfa7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -88,10 +88,6 @@ struct clone_info { bool submit_as_polled:1; }; -#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone)) -#define DM_IO_BIO_OFFSET \ - (offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio)) - static inline struct dm_target_io *clone_to_tio(struct bio 
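Several hunks above replace blk_queue_zone_sectors(q) with bdev_zone_sectors(bdev); either way the zone geometry math is the same, and with the usual power-of-two zone size the in-zone offset is a simple mask, which is also how dm_zone_endio() fixes up a zone-append completion sector. A sketch with an assumed 256 MiB zone size:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
        sector_t zone_sectors = 1u << 19;            /* 256 MiB zones of 512 B sectors */
        sector_t sector = (sector_t)5 * zone_sectors + 1234;

        sector_t zno    = sector / zone_sectors;         /* zone number */
        sector_t offset = sector & (zone_sectors - 1);   /* offset inside the zone */

        /* The mask is the same trick dm_zone_endio() uses on a
         * zone-append completion. */
        printf("zno=%llu offset=%llu\n", zno, offset);
        return 0;
    }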
*clone) { return container_of(clone, struct dm_target_io, clone); @@ -415,7 +411,7 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, struct block_device **bdev) { - struct dm_target *tgt; + struct dm_target *ti; struct dm_table *map; int r; @@ -426,17 +422,17 @@ retry: return r; /* We only support devices that have a single target */ - if (dm_table_get_num_targets(map) != 1) + if (map->num_targets != 1) return r; - tgt = dm_table_get_target(map, 0); - if (!tgt->type->prepare_ioctl) + ti = dm_table_get_target(map, 0); + if (!ti->type->prepare_ioctl) return r; if (dm_suspended_md(md)) return -EAGAIN; - r = tgt->type->prepare_ioctl(tgt, bdev); + r = ti->type->prepare_ioctl(ti, bdev); if (r == -ENOTCONN && !fatal_signal_pending(current)) { dm_put_live_table(md, *srcu_idx); msleep(10); @@ -578,9 +574,6 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) struct bio *clone; clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); - /* Set default bdev, but target must bio_set_dev() before issuing IO */ - clone->bi_bdev = md->disk->part0; - tio = clone_to_tio(clone); tio->flags = 0; dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); @@ -594,7 +587,6 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) atomic_set(&io->io_count, 2); this_cpu_inc(*md->pending_io); io->orig_bio = bio; - io->split_bio = NULL; io->md = md; spin_lock_init(&io->lock); io->start_time = jiffies; @@ -614,6 +606,7 @@ static void free_io(struct dm_io *io) static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask) { + struct mapped_device *md = ci->io->md; struct dm_target_io *tio; struct bio *clone; @@ -623,14 +616,10 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, /* alloc_io() already initialized embedded clone */ clone = &tio->clone; } else { - struct mapped_device *md = ci->io->md; - clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, &md->mempools->bs); if (!clone) return NULL; - /* Set default bdev, but target must bio_set_dev() before issuing IO */ - clone->bi_bdev = md->disk->part0; /* REQ_DM_POLL_LIST shouldn't be inherited */ clone->bi_opf &= ~REQ_DM_POLL_LIST; @@ -646,6 +635,11 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, tio->len_ptr = len; tio->old_sector = 0; + /* Set default bdev, but target must bio_set_dev() before issuing IO */ + clone->bi_bdev = md->disk->part0; + if (unlikely(ti->needs_bio_set_dev)) + bio_set_dev(clone, md->disk->part0); + if (len) { clone->bi_iter.bi_size = to_bytes(*len); if (bio_integrity(clone)) @@ -716,7 +710,7 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) } static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md, - int *srcu_idx, unsigned bio_opf) + int *srcu_idx, blk_opf_t bio_opf) { if (bio_opf & REQ_NOWAIT) return dm_get_live_table_fast(md); @@ -725,7 +719,7 @@ static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md, } static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx, - unsigned bio_opf) + blk_opf_t bio_opf) { if (bio_opf & REQ_NOWAIT) dm_put_live_table_fast(md); @@ -884,22 +878,63 @@ static int __noflush_suspending(struct mapped_device *md) return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); } -static void dm_io_complete(struct dm_io *io) +static void dm_requeue_add_io(struct dm_io *io, bool 
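clone_to_tio() above is the standard container_of() idiom: given a pointer to a struct member, subtract the member's offset to recover the enclosing structure. A compilable miniature (the struct layouts here are stand-ins, not dm's real definitions):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bio { int dummy; };

    struct dm_target_io {
        int flags;
        struct bio clone;   /* embedded member */
    };

    static struct dm_target_io *clone_to_tio(struct bio *clone)
    {
        return container_of(clone, struct dm_target_io, clone);
    }

    int main(void)
    {
        struct dm_target_io tio = { .flags = 7 };
        struct bio *clone = &tio.clone;

        printf("flags via container_of: %d\n", clone_to_tio(clone)->flags);
        return 0;
    }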
first_stage) { - blk_status_t io_error; struct mapped_device *md = io->md; - struct bio *bio = io->split_bio ? io->split_bio : io->orig_bio; - if (io->status == BLK_STS_DM_REQUEUE) { + if (first_stage) { + struct dm_io *next = md->requeue_list; + + md->requeue_list = io; + io->next = next; + } else { + bio_list_add_head(&md->deferred, io->orig_bio); + } +} + +static void dm_kick_requeue(struct mapped_device *md, bool first_stage) +{ + if (first_stage) + queue_work(md->wq, &md->requeue_work); + else + queue_work(md->wq, &md->work); +} + +/* + * Return true if the dm_io's original bio is requeued. + * io->status is updated with error if requeue disallowed. + */ +static bool dm_handle_requeue(struct dm_io *io, bool first_stage) +{ + struct bio *bio = io->orig_bio; + bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE); + bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) && + (bio->bi_opf & REQ_POLLED)); + struct mapped_device *md = io->md; + bool requeued = false; + + if (handle_requeue || handle_polled_eagain) { unsigned long flags; + + if (bio->bi_opf & REQ_POLLED) { + /* + * Upper layer won't help us poll split bio + * (io->orig_bio may only reflect a subset of the + * pre-split original) so clear REQ_POLLED. + */ + bio_clear_polled(bio); + } + /* - * Target requested pushing back the I/O. + * Target requested pushing back the I/O or + * polled IO hit BLK_STS_AGAIN. */ spin_lock_irqsave(&md->deferred_lock, flags); - if (__noflush_suspending(md) && - !WARN_ON_ONCE(dm_is_zone_write(md, bio))) { - /* NOTE early return due to BLK_STS_DM_REQUEUE below */ - bio_list_add_head(&md->deferred, bio); + if ((__noflush_suspending(md) && + !WARN_ON_ONCE(dm_is_zone_write(md, bio))) || + handle_polled_eagain || first_stage) { + dm_requeue_add_io(io, first_stage); + requeued = true; } else { /* * noflush suspend was interrupted or this is @@ -910,6 +945,23 @@ static void dm_io_complete(struct dm_io *io) spin_unlock_irqrestore(&md->deferred_lock, flags); } + if (requeued) + dm_kick_requeue(md, first_stage); + + return requeued; +} + +static void __dm_io_complete(struct dm_io *io, bool first_stage) +{ + struct bio *bio = io->orig_bio; + struct mapped_device *md = io->md; + blk_status_t io_error; + bool requeued; + + requeued = dm_handle_requeue(io, first_stage); + if (requeued && first_stage) + return; + io_error = io->status; if (dm_io_flagged(io, DM_IO_ACCOUNTED)) dm_end_io_acct(io); @@ -929,23 +981,9 @@ static void dm_io_complete(struct dm_io *io) if (unlikely(wq_has_sleeper(&md->wait))) wake_up(&md->wait); - if (io_error == BLK_STS_DM_REQUEUE || io_error == BLK_STS_AGAIN) { - if (bio->bi_opf & REQ_POLLED) { - /* - * Upper layer won't help us poll split bio (io->orig_bio - * may only reflect a subset of the pre-split original) - * so clear REQ_POLLED in case of requeue. 
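dm_requeue_add_io() and dm_kick_requeue() above implement the first stage of the new requeue path: failed dm_io's are head-inserted into an intrusive singly linked list under the deferred lock, and a work item later drains the whole list. A simplified userspace model of that producer/drain pattern (no dm_io_rewind() here, and a pthread mutex stands in for the spinlock):

    #include <pthread.h>
    #include <stdio.h>

    struct dm_io {
        int id;
        struct dm_io *next;
    };

    static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct dm_io *requeue_list;

    static void requeue_add(struct dm_io *io)     /* cf. dm_requeue_add_io() */
    {
        pthread_mutex_lock(&deferred_lock);
        io->next = requeue_list;                  /* head insert */
        requeue_list = io;
        pthread_mutex_unlock(&deferred_lock);
    }

    static void requeue_work(void)                /* cf. dm_wq_requeue_work() */
    {
        pthread_mutex_lock(&deferred_lock);
        struct dm_io *io = requeue_list;          /* grab the whole list */
        requeue_list = NULL;
        pthread_mutex_unlock(&deferred_lock);

        while (io) {
            struct dm_io *next = io->next;
            io->next = NULL;
            printf("completing io %d\n", io->id); /* kernel: __dm_io_complete() */
            io = next;
        }
    }

    int main(void)
    {
        struct dm_io a = { .id = 1 }, b = { .id = 2 };
        requeue_add(&a);
        requeue_add(&b);
        requeue_work();
        return 0;
    }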
- */ - bio_clear_polled(bio); - if (io_error == BLK_STS_AGAIN) { - /* io_uring doesn't handle BLK_STS_AGAIN (yet) */ - queue_io(md, bio); - return; - } - } - if (io_error == BLK_STS_DM_REQUEUE) - return; - } + /* Return early if the original bio was requeued */ + if (requeued) + return; if (bio_is_flush_with_data(bio)) { /* @@ -962,6 +1000,58 @@ } } +static void dm_wq_requeue_work(struct work_struct *work) +{ + struct mapped_device *md = container_of(work, struct mapped_device, + requeue_work); + unsigned long flags; + struct dm_io *io; + + /* reuse deferred lock to simplify dm_handle_requeue */ + spin_lock_irqsave(&md->deferred_lock, flags); + io = md->requeue_list; + md->requeue_list = NULL; + spin_unlock_irqrestore(&md->deferred_lock, flags); + + while (io) { + struct dm_io *next = io->next; + + dm_io_rewind(io, &md->queue->bio_split); + + io->next = NULL; + __dm_io_complete(io, false); + io = next; + } +} + +/* + * Two-stage requeue: + * + * 1) io->orig_bio points to the real original bio, and the part mapped to + * this io must be requeued, instead of other parts of the original bio. + * + * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io. + */ +static void dm_io_complete(struct dm_io *io) +{ + bool first_requeue; + + /* + * Only dm_io that has been split needs two-stage requeue, otherwise + * we may run into long bio clone chain during suspend and OOM could + * be triggered. + * + * Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they + * also aren't handled via the first stage requeue. + */ + if (dm_io_flagged(io, DM_IO_WAS_SPLIT)) + first_requeue = true; + else + first_requeue = false; + + __dm_io_complete(io, first_requeue); +} + /* * Decrements the number of outstanding ios that a bio has been * cloned into, completing the original io if necessary. @@ -1033,7 +1123,7 @@ static void clone_endio(struct bio *bio) } if (static_branch_unlikely(&zoned_enabled) && - unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev)))) + unlikely(bdev_is_zoned(bio->bi_bdev))) dm_zone_endio(io, bio); if (endio) { @@ -1086,23 +1176,18 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector) { sector_t target_offset = dm_target_offset(ti, sector); sector_t len = max_io_len_target_boundary(ti, target_offset); - sector_t max_len; /* * Does the target need to split IO even further? * - varied (per target) IO splitting is a tenet of DM; this * explains why stacked chunk_sectors based splitting via - * blk_max_size_offset() isn't possible here. So pass in - * ti->max_io_len to override stacked chunk_sectors. + * blk_queue_split() isn't possible here.
*/ - if (ti->max_io_len) { - max_len = blk_max_size_offset(ti->table->md->queue, - target_offset, ti->max_io_len); - if (len > max_len) - len = max_len; - } - - return len; + if (!ti->max_io_len) + return len; + return min_t(sector_t, len, + min(queue_max_sectors(ti->table->md->queue), + blk_chunk_sectors_left(target_offset, ti->max_io_len))); } int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) @@ -1245,6 +1330,7 @@ out: void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) { struct dm_target_io *tio = clone_to_tio(bio); + struct dm_io *io = tio->io; unsigned bio_sectors = bio_sectors(bio); BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO)); @@ -1260,8 +1346,9 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) * __split_and_process_bio() may have already saved mapped part * for accounting but it is being reduced so update accordingly. */ - dm_io_set_flag(tio->io, DM_IO_WAS_SPLIT); - tio->io->sectors = n_sectors; + dm_io_set_flag(io, DM_IO_WAS_SPLIT); + io->sectors = n_sectors; + io->sector_offset = bio_sectors(io->orig_bio); } EXPORT_SYMBOL_GPL(dm_accept_partial_bio); @@ -1384,17 +1471,7 @@ static void setup_split_accounting(struct clone_info *ci, unsigned len) */ dm_io_set_flag(io, DM_IO_WAS_SPLIT); io->sectors = len; - } - - if (static_branch_unlikely(&stats_enabled) && - unlikely(dm_stats_used(&io->md->stats))) { - /* - * Save bi_sector in terms of its offset from end of - * original bio, only needed for DM-stats' benefit. - * - saved regardless of whether split needed so that - * dm_accept_partial_bio() doesn't need to. - */ - io->sector_offset = bio_end_sector(ci->bio) - ci->sector; + io->sector_offset = bio_sectors(ci->bio); } } @@ -1428,11 +1505,11 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, } static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, - unsigned num_bios, unsigned *len) + unsigned int num_bios, unsigned *len) { struct bio_list blist = BIO_EMPTY_LIST; struct bio *clone; - int ret = 0; + unsigned int ret = 0; switch (num_bios) { case 0: @@ -1460,8 +1537,7 @@ static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, static void __send_empty_flush(struct clone_info *ci) { - unsigned target_nr = 0; - struct dm_target *ti; + struct dm_table *t = ci->map; struct bio flush_bio; /* @@ -1476,8 +1552,9 @@ static void __send_empty_flush(struct clone_info *ci) ci->sector_count = 0; ci->io->tio.clone.bi_iter.bi_size = 0; - while ((ti = dm_table_get_target(ci->map, target_nr++))) { - int bios; + for (unsigned int i = 0; i < t->num_targets; i++) { + unsigned int bios; + struct dm_target *ti = dm_table_get_target(t, i); atomic_add(ti->num_flush_bios, &ci->io->io_count); bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); @@ -1497,7 +1574,7 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target unsigned num_bios) { unsigned len; - int bios; + unsigned int bios; len = min_t(sector_t, ci->sector_count, max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); @@ -1516,7 +1593,7 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target static bool is_abnormal_io(struct bio *bio) { - unsigned int op = bio_op(bio); + enum req_op op = bio_op(bio); if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) { switch (op) { @@ -1547,6 +1624,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci, case REQ_OP_WRITE_ZEROES: num_bios = ti->num_write_zeroes_bios; break; + default: + 
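The new max_io_len() above clamps a target's I/O to the next ti->max_io_len boundary via blk_chunk_sectors_left(). For the common power-of-two case that reduces to a mask; a sketch (the kernel also has a division fallback for non-power-of-two chunk sizes):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Sectors remaining before the next chunk boundary; power-of-two
     * fast path only. */
    static unsigned int chunk_sectors_left(sector_t offset, unsigned int chunk_sectors)
    {
        return chunk_sectors - (offset & (chunk_sectors - 1));
    }

    int main(void)
    {
        unsigned int chunk = 128;          /* hypothetical ti->max_io_len */

        /* 10 sectors into a chunk: 118 left before the boundary. */
        printf("left=%u\n", chunk_sectors_left(10, chunk));
        /* Exactly on a boundary: a full chunk is available. */
        printf("left=%u\n", chunk_sectors_left(256, chunk));
        return 0;
    }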
break; } /* @@ -1628,7 +1707,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) * Only support bio polling for normal IO, and the target io is * exactly inside the dm_io instance (verified in dm_poll_dm_io) */ - ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED; + ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED); len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); setup_split_accounting(ci, len); @@ -1694,11 +1773,9 @@ static void dm_split_and_process_bio(struct mapped_device *md, * Remainder must be passed to submit_bio_noacct() so it gets handled * *after* bios already submitted have been completely processed. */ - WARN_ON_ONCE(!dm_io_flagged(io, DM_IO_WAS_SPLIT)); - io->split_bio = bio_split(bio, io->sectors, GFP_NOIO, - &md->queue->bio_split); - bio_chain(io->split_bio, bio); - trace_block_split(io->split_bio, bio->bi_iter.bi_sector); + bio_trim(bio, io->sectors, ci.sector_count); + trace_block_split(bio, bio->bi_iter.bi_sector); + bio_inc_remaining(bio); submit_bio_noacct(bio); out: /* @@ -1725,7 +1802,7 @@ static void dm_submit_bio(struct bio *bio) struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; int srcu_idx; struct dm_table *map; - unsigned bio_opf = bio->bi_opf; + blk_opf_t bio_opf = bio->bi_opf; map = dm_get_live_table_bio(md, &srcu_idx, bio_opf); @@ -1899,7 +1976,7 @@ static void cleanup_mapped_device(struct mapped_device *md) del_gendisk(md->disk); } dm_queue_destroy_crypto_profile(md->queue); - blk_cleanup_disk(md->disk); + put_disk(md->disk); } if (md->pending_io) { @@ -1974,9 +2051,11 @@ static struct mapped_device *alloc_dev(int minor) init_waitqueue_head(&md->wait); INIT_WORK(&md->work, dm_wq_work); + INIT_WORK(&md->requeue_work, dm_wq_requeue_work); init_waitqueue_head(&md->eventq); init_completion(&md->kobj_holder.completion); + md->requeue_list = NULL; md->swap_bios = get_swap_bios(); sema_init(&md->swap_bios_semaphore, md->swap_bios); mutex_init(&md->swap_bios_lock); @@ -2983,54 +3062,6 @@ int dm_noflush_suspending(struct dm_target *ti) } EXPORT_SYMBOL_GPL(dm_noflush_suspending); -struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, - unsigned per_io_data_size, unsigned min_pool_size, - bool integrity, bool poll) -{ - struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); - unsigned int pool_size = 0; - unsigned int front_pad, io_front_pad; - int ret; - - if (!pools) - return NULL; - - switch (type) { - case DM_TYPE_BIO_BASED: - case DM_TYPE_DAX_BIO_BASED: - pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); - front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET; - io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET; - ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, poll ? 
BIOSET_PERCPU_CACHE : 0); - if (ret) - goto out; - if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) - goto out; - break; - case DM_TYPE_REQUEST_BASED: - pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); - front_pad = offsetof(struct dm_rq_clone_bio_info, clone); - /* per_io_data_size is used for blk-mq pdu at queue allocation */ - break; - default: - BUG(); - } - - ret = bioset_init(&pools->bs, pool_size, front_pad, 0); - if (ret) - goto out; - - if (integrity && bioset_integrity_create(&pools->bs, pool_size)) - goto out; - - return pools; - -out: - dm_free_md_mempools(pools); - - return NULL; -} - void dm_free_md_mempools(struct dm_md_mempools *pools) { if (!pools) @@ -3046,11 +3077,14 @@ struct dm_pr { u64 old_key; u64 new_key; u32 flags; + bool abort; bool fail_early; + int ret; + enum pr_type type; }; static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, - void *data) + struct dm_pr *pr) { struct mapped_device *md = bdev->bd_disk->private_data; struct dm_table *table; @@ -3062,15 +3096,21 @@ static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, goto out; /* We only support devices that have a single target */ - if (dm_table_get_num_targets(table) != 1) + if (table->num_targets != 1) goto out; ti = dm_table_get_target(table, 0); + if (dm_suspended_md(md)) { + ret = -EAGAIN; + goto out; + } + ret = -EINVAL; if (!ti->type->iterate_devices) goto out; - ret = ti->type->iterate_devices(ti, fn, data); + ti->type->iterate_devices(ti, fn, pr); + ret = 0; out: dm_put_live_table(md, srcu_idx); return ret; @@ -3084,10 +3124,24 @@ static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, { struct dm_pr *pr = data; const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; + int ret; - if (!ops || !ops->pr_register) - return -EOPNOTSUPP; - return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); + if (!ops || !ops->pr_register) { + pr->ret = -EOPNOTSUPP; + return -1; + } + + ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); + if (!ret) + return 0; + + if (!pr->ret) + pr->ret = ret; + + if (pr->fail_early) + return -1; + + return 0; } static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, @@ -3098,82 +3152,145 @@ static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, .new_key = new_key, .flags = flags, .fail_early = true, + .ret = 0, }; int ret; ret = dm_call_pr(bdev, __dm_pr_register, &pr); - if (ret && new_key) { - /* unregister all paths if we failed to register any path */ - pr.old_key = new_key; - pr.new_key = 0; - pr.flags = 0; - pr.fail_early = false; - dm_call_pr(bdev, __dm_pr_register, &pr); + if (ret) { + /* Didn't even get to register a path */ + return ret; } + if (!pr.ret) + return 0; + ret = pr.ret; + + if (!new_key) + return ret; + + /* unregister all paths if we failed to register any path */ + pr.old_key = new_key; + pr.new_key = 0; + pr.flags = 0; + pr.fail_early = false; + (void) dm_call_pr(bdev, __dm_pr_register, &pr); return ret; } + +static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_pr *pr = data; + const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; + + if (!ops || !ops->pr_reserve) { + pr->ret = -EOPNOTSUPP; + return -1; + } + + pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags); + if (!pr->ret) + return -1; + + return 0; +} + static int dm_pr_reserve(struct block_device *bdev, u64 key, 
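The persistent-reservation rework above adopts a convention worth spelling out: the iterate_devices callback's return value only means "keep going" (0) or "stop" (-1), while the first real errno is parked in pr->ret for the caller to report. A generic sketch of that pattern (the device numbers and the -5/-EIO failure are made up):

    #include <stdio.h>

    struct dm_pr {
        int ret;          /* first real error, reported to the caller */
        int fail_early;   /* stop on the first failing device? */
    };

    static int do_register(int dev)   /* stand-in for ops->pr_register() */
    {
        return dev == 2 ? -5 /* -EIO */ : 0;
    }

    static int register_cb(int dev, struct dm_pr *pr)
    {
        int ret = do_register(dev);

        if (!ret)
            return 0;
        if (!pr->ret)
            pr->ret = ret;            /* remember the first failure */
        return pr->fail_early ? -1 : 0;
    }

    int main(void)
    {
        struct dm_pr pr = { .fail_early = 1 };

        for (int dev = 0; dev < 4; dev++)     /* cf. ti->type->iterate_devices() */
            if (register_cb(dev, &pr))
                break;
        printf("pr.ret=%d\n", pr.ret);
        return 0;
    }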
enum pr_type type, u32 flags) { - struct mapped_device *md = bdev->bd_disk->private_data; - const struct pr_ops *ops; - int r, srcu_idx; + struct dm_pr pr = { + .old_key = key, + .flags = flags, + .type = type, + .fail_early = false, + .ret = 0, + }; + int ret; - r = dm_prepare_ioctl(md, &srcu_idx, &bdev); - if (r < 0) - goto out; + ret = dm_call_pr(bdev, __dm_pr_reserve, &pr); + if (ret) + return ret; - ops = bdev->bd_disk->fops->pr_ops; - if (ops && ops->pr_reserve) - r = ops->pr_reserve(bdev, key, type, flags); - else - r = -EOPNOTSUPP; -out: - dm_unprepare_ioctl(md, srcu_idx); - return r; + return pr.ret; +} + +/* + * If there is a non-All Registrants type of reservation, the release must be + * sent down the holding path. For the cases where there is no reservation or + * the path is not the holder the device will also return success, so we must + * try each path to make sure we got the correct path. + */ +static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_pr *pr = data; + const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; + + if (!ops || !ops->pr_release) { + pr->ret = -EOPNOTSUPP; + return -1; + } + + pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type); + if (pr->ret) + return -1; + + return 0; } static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) { - struct mapped_device *md = bdev->bd_disk->private_data; - const struct pr_ops *ops; - int r, srcu_idx; + struct dm_pr pr = { + .old_key = key, + .type = type, + .fail_early = false, + }; + int ret; - r = dm_prepare_ioctl(md, &srcu_idx, &bdev); - if (r < 0) - goto out; + ret = dm_call_pr(bdev, __dm_pr_release, &pr); + if (ret) + return ret; - ops = bdev->bd_disk->fops->pr_ops; - if (ops && ops->pr_release) - r = ops->pr_release(bdev, key, type); - else - r = -EOPNOTSUPP; -out: - dm_unprepare_ioctl(md, srcu_idx); - return r; + return pr.ret; +} + +static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_pr *pr = data; + const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; + + if (!ops || !ops->pr_preempt) { + pr->ret = -EOPNOTSUPP; + return -1; + } + + pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type, + pr->abort); + if (!pr->ret) + return -1; + + return 0; } static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, enum pr_type type, bool abort) { - struct mapped_device *md = bdev->bd_disk->private_data; - const struct pr_ops *ops; - int r, srcu_idx; + struct dm_pr pr = { + .new_key = new_key, + .old_key = old_key, + .type = type, + .fail_early = false, + }; + int ret; - r = dm_prepare_ioctl(md, &srcu_idx, &bdev); - if (r < 0) - goto out; + ret = dm_call_pr(bdev, __dm_pr_preempt, &pr); + if (ret) + return ret; - ops = bdev->bd_disk->fops->pr_ops; - if (ops && ops->pr_preempt) - r = ops->pr_preempt(bdev, old_key, new_key, type, abort); - else - r = -EOPNOTSUPP; -out: - dm_unprepare_ioctl(md, srcu_idx); - return r; + return pr.ret; } static int dm_pr_clear(struct block_device *bdev, u64 key) diff --git a/drivers/md/dm.h b/drivers/md/dm.h index a8405ce305a9..5201df03ce40 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -53,7 +53,6 @@ struct dm_io; *---------------------------------------------------------------*/ void dm_table_event_callback(struct dm_table *t, void (*fn)(void *), void *context); -struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index); struct dm_target 
*dm_table_find_target(struct dm_table *t, sector_t sector); bool dm_table_has_no_data_devices(struct dm_table *table); int dm_calculate_queue_limits(struct dm_table *table, @@ -218,9 +217,6 @@ void dm_kcopyd_exit(void); /* * Mempool operations */ -struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, - unsigned per_io_data_size, unsigned min_pool_size, - bool integrity, bool poll); void dm_free_md_mempools(struct dm_md_mempools *pools); /* diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index d87f674ab762..bf6dffadbe6f 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -165,7 +165,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset, if (sync_page_io(rdev, target, roundup(size, bdev_logical_block_size(rdev->bdev)), - page, REQ_OP_READ, 0, true)) { + page, REQ_OP_READ, true)) { page->index = index; return 0; } @@ -302,7 +302,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait) atomic_inc(&bitmap->pending_writes); set_buffer_locked(bh); set_buffer_mapped(bh); - submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); + submit_bh(REQ_OP_WRITE | REQ_SYNC, bh); bh = bh->b_this_page; } @@ -394,7 +394,7 @@ static int read_page(struct file *file, unsigned long index, atomic_inc(&bitmap->pending_writes); set_buffer_locked(bh); set_buffer_mapped(bh); - submit_bh(REQ_OP_READ, 0, bh); + submit_bh(REQ_OP_READ, bh); } blk_cur++; bh = bh->b_this_page; diff --git a/drivers/md/md.c b/drivers/md/md.c index c7ecb0bffda0..4df78e30b76a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -993,15 +993,15 @@ int md_super_wait(struct mddev *mddev) } int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, - struct page *page, int op, int op_flags, bool metadata_op) + struct page *page, blk_opf_t opf, bool metadata_op) { struct bio bio; struct bio_vec bvec; if (metadata_op && rdev->meta_bdev) - bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags); + bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf); else - bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags); + bio_init(&bio, rdev->bdev, &bvec, 1, opf); if (metadata_op) bio.bi_iter.bi_sector = sector + rdev->sb_start; @@ -1024,7 +1024,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size) if (rdev->sb_loaded) return 0; - if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) + if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) goto fail; rdev->sb_loaded = 1; return 0; @@ -1722,7 +1722,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ return -EINVAL; bb_sector = (long long)offset; if (!sync_page_io(rdev, bb_sector, sectors << 9, - rdev->bb_page, REQ_OP_READ, 0, true)) + rdev->bb_page, REQ_OP_READ, true)) return -EIO; bbp = (__le64 *)page_address(rdev->bb_page); rdev->badblocks.shift = sb->bblog_shift; @@ -2438,7 +2438,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) mdname(mddev), mddev->max_disks); return -EBUSY; } - bdevname(rdev->bdev,b); + snprintf(b, sizeof(b), "%pg", rdev->bdev); strreplace(b, '/', '!'); rdev->mddev = mddev; @@ -5579,7 +5579,7 @@ static void md_free(struct kobject *ko) if (mddev->gendisk) { del_gendisk(mddev->gendisk); - blk_cleanup_disk(mddev->gendisk); + put_disk(mddev->gendisk); } percpu_ref_exit(&mddev->writes_pending); @@ -5718,7 +5718,7 @@ static int md_alloc(dev_t dev, char *name) out_del_gendisk: del_gendisk(disk); out_cleanup_disk: - blk_cleanup_disk(disk); + put_disk(disk); out_unlock_disks_mutex: mutex_unlock(&disks_mutex); 
mddev_put(mddev); diff --git a/drivers/md/md.h b/drivers/md/md.h index cf2cbb17acbd..b4f84b27bdef 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -738,8 +738,7 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, sector_t sector, int size, struct page *page); extern int md_super_wait(struct mddev *mddev); extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, - struct page *page, int op, int op_flags, - bool metadata_op); + struct page *page, blk_opf_t opf, bool metadata_op); extern void md_do_sync(struct md_thread *thread); extern void md_new_event(void); extern void md_allow_write(struct mddev *mddev); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 258d4eb2d63c..05d8438cfec8 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1220,8 +1220,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, struct raid1_info *mirror; struct bio *read_bio; struct bitmap *bitmap = mddev->bitmap; - const int op = bio_op(bio); - const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); + const enum req_op op = bio_op(bio); + const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC; int max_sectors; int rdisk; bool r1bio_existed = !!r1_bio; @@ -1240,7 +1240,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, rcu_read_lock(); rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev); if (rdev) - bdevname(rdev->bdev, b); + snprintf(b, sizeof(b), "%pg", rdev->bdev); else strcpy(b, "???"); rcu_read_unlock(); @@ -1988,9 +1988,9 @@ static void end_sync_write(struct bio *bio) } static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, - int sectors, struct page *page, int rw) + int sectors, struct page *page, int rw) { - if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) + if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) /* success */ return 1; if (rw == WRITE) { @@ -2057,7 +2057,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) rdev = conf->mirrors[d].rdev; if (sync_page_io(rdev, sect, s<<9, pages[idx], - REQ_OP_READ, 0, false)) { + REQ_OP_READ, false)) { success = 1; break; } @@ -2305,7 +2305,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, atomic_inc(&rdev->nr_pending); rcu_read_unlock(); if (sync_page_io(rdev, sect, s<<9, - conf->tmppage, REQ_OP_READ, 0, false)) + conf->tmppage, REQ_OP_READ, false)) success = 1; rdev_dec_pending(rdev, mddev); if (success) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d589f823feb1..26545950ca42 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1136,8 +1136,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, { struct r10conf *conf = mddev->private; struct bio *read_bio; - const int op = bio_op(bio); - const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); + const enum req_op op = bio_op(bio); + const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC; int max_sectors; struct md_rdev *rdev; char b[BDEVNAME_SIZE]; @@ -1164,7 +1164,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, disk = r10_bio->devs[slot].devnum; err_rdev = rcu_dereference(conf->mirrors[disk].rdev); if (err_rdev) - bdevname(err_rdev->bdev, b); + snprintf(b, sizeof(b), "%pg", err_rdev->bdev); else { strcpy(b, "???"); /* This never gets dereferenced */ @@ -1230,9 +1230,9 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, struct bio *bio, bool replacement, int n_copy) { - const int op = bio_op(bio); - const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 
- const unsigned long do_fua = (bio->bi_opf & REQ_FUA); + const enum req_op op = bio_op(bio); + const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC; + const blk_opf_t do_fua = bio->bi_opf & REQ_FUA; unsigned long flags; struct blk_plug_cb *cb; struct raid1_plug_cb *plug = NULL; @@ -2512,7 +2512,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio) addr, s << 9, pages[idx], - REQ_OP_READ, 0, false); + REQ_OP_READ, false); if (ok) { rdev = conf->mirrors[dw].rdev; addr = r10_bio->devs[1].addr + sect; @@ -2520,7 +2520,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio) addr, s << 9, pages[idx], - REQ_OP_WRITE, 0, false); + REQ_OP_WRITE, false); if (!ok) { set_bit(WriteErrorSeen, &rdev->flags); if (!test_and_set_bit(WantReplacement, @@ -2644,7 +2644,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) return -1; - if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) + if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) /* success */ return 1; if (rw == WRITE) { @@ -2726,7 +2726,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 sect, s<<9, conf->tmppage, - REQ_OP_READ, 0, false); + REQ_OP_READ, false); rdev_dec_pending(rdev, mddev); rcu_read_lock(); if (success) @@ -5107,7 +5107,7 @@ static int handle_reshape_read_error(struct mddev *mddev, addr, s << 9, pages[idx], - REQ_OP_READ, 0, false); + REQ_OP_READ, false); rdev_dec_pending(rdev, mddev); rcu_read_lock(); if (success) diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 83c184eddbda..6f2dd73128b0 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1788,7 +1788,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, mb = page_address(page); mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, mb, PAGE_SIZE)); - if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, + if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false)) { __free_page(page); return -EIO; @@ -1898,7 +1898,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf, atomic_inc(&rdev->nr_pending); rcu_read_unlock(); sync_page_io(rdev, sh->sector, PAGE_SIZE, - sh->dev[disk_index].page, REQ_OP_WRITE, 0, + sh->dev[disk_index].page, REQ_OP_WRITE, false); rdev_dec_pending(rdev, rdev->mddev); rcu_read_lock(); @@ -1908,7 +1908,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf, atomic_inc(&rrdev->nr_pending); rcu_read_unlock(); sync_page_io(rrdev, sh->sector, PAGE_SIZE, - sh->dev[disk_index].page, REQ_OP_WRITE, 0, + sh->dev[disk_index].page, REQ_OP_WRITE, false); rdev_dec_pending(rrdev, rrdev->mddev); rcu_read_lock(); @@ -2394,7 +2394,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, PAGE_SIZE)); kunmap_atomic(addr); sync_page_io(log->rdev, write_pos, PAGE_SIZE, - dev->page, REQ_OP_WRITE, 0, false); + dev->page, REQ_OP_WRITE, false); write_pos = r5l_ring_add(log, write_pos, BLOCK_SECTORS); offset += sizeof(__le32) + @@ -2406,7 +2406,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, mb, PAGE_SIZE)); sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, - REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false); + REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false); sh->log_start = ctx->pos; list_add_tail(&sh->r5c, &log->stripe_in_journal_list); atomic_inc(&log->stripe_in_journal_count); @@ 
-2971,7 +2971,7 @@ static int r5l_load_log(struct r5l_log *log) if (!page) return -ENOMEM; - if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) { + if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) { ret = -EIO; goto ioerr; } diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 0a2e4806b1ec..98988cb26295 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -897,7 +897,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, __func__, indent, "", rdev->bdev, (unsigned long long)sector); if (!sync_page_io(rdev, sector, block_size, page2, - REQ_OP_READ, 0, false)) { + REQ_OP_READ, false)) { md_error(mddev, rdev); pr_debug("%s:%*s read failed!\n", __func__, indent, ""); @@ -919,7 +919,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, (unsigned long long)(ppl_sector + i)); if (!sync_page_io(log->rdev, ppl_sector - log->rdev->data_offset + i, - block_size, page2, REQ_OP_READ, 0, + block_size, page2, REQ_OP_READ, false)) { pr_debug("%s:%*s read failed!\n", __func__, indent, ""); @@ -946,7 +946,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, (unsigned long long)parity_sector, parity_rdev->bdev); if (!sync_page_io(parity_rdev, parity_sector, block_size, - page1, REQ_OP_WRITE, 0, false)) { + page1, REQ_OP_WRITE, false)) { pr_debug("%s:%*s parity write error!\n", __func__, indent, ""); md_error(mddev, parity_rdev); @@ -998,7 +998,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr, int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size; if (!sync_page_io(rdev, sector - rdev->data_offset, - s, page, REQ_OP_READ, 0, false)) { + s, page, REQ_OP_READ, false)) { md_error(mddev, rdev); ret = -EIO; goto out; @@ -1062,7 +1062,7 @@ static int ppl_write_empty_header(struct ppl_log *log) if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset, PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC | - REQ_FUA, 0, false)) { + REQ_FUA, false)) { md_error(rdev->mddev, rdev); ret = -EIO; } @@ -1100,7 +1100,7 @@ static int ppl_load_distributed(struct ppl_log *log) if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset + pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ, - 0, false)) { + false)) { md_error(mddev, rdev); ret = -EIO; /* if not able to read - don't recover any PPL */ diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index c8539d0e12dd..5cabdbbac48b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1082,7 +1082,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) should_defer = conf->batch_bio_dispatch && conf->group_cnt; for (i = disks; i--; ) { - int op, op_flags = 0; + enum req_op op; + blk_opf_t op_flags = 0; int replace_only = 0; struct bio *bi, *rbi; struct md_rdev *rdev, *rrdev = NULL; diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 4c5154e0bf00..d7cb7ead2ac7 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -21,11 +21,13 @@ /* SMI COMMON */ #define SMI_L1LEN 0x100 +#define SMI_L1_ARB 0x200 #define SMI_BUS_SEL 0x220 #define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1) /* All are MMU0 by default. Only specialize mmu1 here.
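The MT6795 support added below follows the existing mtk-smi pattern of a { register, value } table replayed at probe time (cf. mtk_smi_common_mt6795_init in the following hunks). A userspace sketch of the replay loop, writing into a fake MMIO window instead of using writel():

    #include <stdio.h>
    #include <stdint.h>

    struct reg_pair { uint32_t offset, value; };

    static const struct reg_pair mt6795_init[] = {   /* values from the hunks below */
        { 0x200, 0x1b },        /* SMI_L1_ARB */
        { 0x234, 0xce810c85 },  /* SMI_M4U_TH */
        { 0x238, 0x43214c8 },   /* SMI_FIFO_TH1 */
        { 0x230, 0x191f },      /* SMI_READ_FIFO_TH */
    };

    static void apply_init(volatile uint32_t *base,
                           const struct reg_pair *p, unsigned int n)
    {
        for (unsigned int i = 0; i < n; i++)
            base[p[i].offset / 4] = p[i].value;      /* kernel: writel() */
    }

    int main(void)
    {
        static uint32_t regs[0x100];                 /* fake MMIO window */

        apply_init(regs, mt6795_init, sizeof(mt6795_init) / sizeof(*mt6795_init));
        printf("SMI_M4U_TH = %#x\n", (unsigned int)regs[0x234 / 4]);
        return 0;
    }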
*/ #define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid)) +#define SMI_READ_FIFO_TH 0x230 #define SMI_M4U_TH 0x234 #define SMI_FIFO_TH1 0x238 #define SMI_FIFO_TH2 0x23c @@ -360,6 +362,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = { {.compatible = "mediatek,mt2701-smi-larb", .data = &mtk_smi_larb_mt2701}, {.compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712}, {.compatible = "mediatek,mt6779-smi-larb", .data = &mtk_smi_larb_mt6779}, + {.compatible = "mediatek,mt6795-smi-larb", .data = &mtk_smi_larb_mt8173}, {.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167}, {.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173}, {.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183}, @@ -544,6 +547,13 @@ static struct platform_driver mtk_smi_larb_driver = { } }; +static const struct mtk_smi_reg_pair mtk_smi_common_mt6795_init[SMI_COMMON_INIT_REGS_NR] = { + {SMI_L1_ARB, 0x1b}, + {SMI_M4U_TH, 0xce810c85}, + {SMI_FIFO_TH1, 0x43214c8}, + {SMI_READ_FIFO_TH, 0x191f}, +}; + static const struct mtk_smi_reg_pair mtk_smi_common_mt8195_init[SMI_COMMON_INIT_REGS_NR] = { {SMI_L1LEN, 0xb}, {SMI_M4U_TH, 0xe100e10}, @@ -568,6 +578,12 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = { F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7), }; +static const struct mtk_smi_common_plat mtk_smi_common_mt6795 = { + .type = MTK_SMI_GEN2, + .bus_sel = F_MMU1_LARB(0), + .init = mtk_smi_common_mt6795_init, +}; + static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = { .type = MTK_SMI_GEN2, .has_gals = true, @@ -612,6 +628,7 @@ static const struct of_device_id mtk_smi_common_of_ids[] = { {.compatible = "mediatek,mt2701-smi-common", .data = &mtk_smi_common_gen1}, {.compatible = "mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt6779-smi-common", .data = &mtk_smi_common_mt6779}, + {.compatible = "mediatek,mt6795-smi-common", .data = &mtk_smi_common_mt6795}, {.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183}, diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c index e23ebd421f17..a9e8fd99730f 100644 --- a/drivers/memory/tegra/tegra234.c +++ b/drivers/memory/tegra/tegra234.c @@ -11,6 +11,76 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { { + .id = TEGRA234_MEMORY_CLIENT_MGBEARD, + .name = "mgbeard", + .sid = TEGRA234_SID_MGBE, + .regs = { + .sid = { + .override = 0x2c0, + .security = 0x2c4, + }, + }, + }, { + .id = TEGRA234_MEMORY_CLIENT_MGBEBRD, + .name = "mgbebrd", + .sid = TEGRA234_SID_MGBE_VF1, + .regs = { + .sid = { + .override = 0x2c8, + .security = 0x2cc, + }, + }, + }, { + .id = TEGRA234_MEMORY_CLIENT_MGBECRD, + .name = "mgbecrd", + .sid = TEGRA234_SID_MGBE_VF2, + .regs = { + .sid = { + .override = 0x2d0, + .security = 0x2d4, + }, + }, + }, { + .id = TEGRA234_MEMORY_CLIENT_MGBEDRD, + .name = "mgbedrd", + .sid = TEGRA234_SID_MGBE_VF3, + .regs = { + .sid = { + .override = 0x2d8, + .security = 0x2dc, + }, + }, + }, { + .id = TEGRA234_MEMORY_CLIENT_MGBEAWR, + .name = "mgbeawr", + .sid = TEGRA234_SID_MGBE, + .regs = { + .sid = { + .override = 0x2e0, + .security = 0x2e4, + }, + }, + }, { + .id = TEGRA234_MEMORY_CLIENT_MGBEBWR, + .name = "mgbebwr", + .sid = TEGRA234_SID_MGBE_VF1, + .regs = { + .sid = { + .override = 0x2f8, + .security = 0x2fc, + }, + }, + }, { + 
.id = TEGRA234_MEMORY_CLIENT_MGBECWR, + .name = "mgbecwr", + .sid = TEGRA234_SID_MGBE_VF2, + .regs = { + .sid = { + .override = 0x308, + .security = 0x30c, + }, + }, + }, { .id = TEGRA234_MEMORY_CLIENT_SDMMCRAB, .name = "sdmmcrab", .sid = TEGRA234_SID_SDMMC4, @@ -21,6 +91,16 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, }, }, { + .id = TEGRA234_MEMORY_CLIENT_MGBEDWR, + .name = "mgbedwr", + .sid = TEGRA234_SID_MGBE_VF3, + .regs = { + .sid = { + .override = 0x328, + .security = 0x32c, + }, + }, + }, { .id = TEGRA234_MEMORY_CLIENT_SDMMCWAB, .name = "sdmmcwab", .sid = TEGRA234_SID_SDMMC4, diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index 3993bdd4b519..ed9a683b3ca8 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c @@ -2129,7 +2129,7 @@ static int msb_init_disk(struct memstick_dev *card) return 0; out_cleanup_disk: - blk_cleanup_disk(msb->disk); + put_disk(msb->disk); out_free_tag_set: blk_mq_free_tag_set(&msb->tag_set); out_release_id: @@ -2187,7 +2187,6 @@ static void msb_remove(struct memstick_dev *card) /* Remove the disk */ del_gendisk(msb->disk); - blk_cleanup_queue(msb->queue); blk_mq_free_tag_set(&msb->tag_set); msb->queue = NULL; diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index 725ba74ded30..61cf75d4a01e 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -1209,7 +1209,7 @@ static int mspro_block_init_disk(struct memstick_dev *card) return 0; out_cleanup_disk: - blk_cleanup_disk(msb->disk); + put_disk(msb->disk); out_free_tag_set: blk_mq_free_tag_set(&msb->tag_set); out_release_id: @@ -1294,7 +1294,6 @@ static void mspro_block_remove(struct memstick_dev *card) del_gendisk(msb->disk); dev_dbg(&card->dev, "mspro block remove\n"); - blk_cleanup_queue(msb->queue); blk_mq_free_tag_set(&msb->tag_set); msb->queue = NULL; diff --git a/drivers/mfd/bcm2835-pm.c b/drivers/mfd/bcm2835-pm.c index 42fe67f1538e..49cd1f03884a 100644 --- a/drivers/mfd/bcm2835-pm.c +++ b/drivers/mfd/bcm2835-pm.c @@ -25,9 +25,52 @@ static const struct mfd_cell bcm2835_power_devs[] = { { .name = "bcm2835-power" }, }; +static int bcm2835_pm_get_pdata(struct platform_device *pdev, + struct bcm2835_pm *pm) +{ + if (of_find_property(pm->dev->of_node, "reg-names", NULL)) { + struct resource *res; + + pm->base = devm_platform_ioremap_resource_byname(pdev, "pm"); + if (IS_ERR(pm->base)) + return PTR_ERR(pm->base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "asb"); + if (res) { + pm->asb = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pm->asb)) + pm->asb = NULL; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "rpivid_asb"); + if (res) { + pm->rpivid_asb = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pm->rpivid_asb)) + pm->rpivid_asb = NULL; + } + + return 0; + } + + /* If no 'reg-names' property is found we can assume we're using old DTB. 
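bcm2835_pm_get_pdata(), added in the hunks around this point, prefers resources looked up by name when the DT provides reg-names and falls back to positional indices so old DTBs keep working. The shape of that lookup, with a hypothetical resource table standing in for the platform device:

    #include <stdio.h>
    #include <string.h>

    struct resource { const char *name; unsigned long start; };

    static const struct resource res[] = {
        { "pm",  0x7e100000 }, { "asb", 0x7e00a000 }, { "rpivid_asb", 0x7ec11000 },
    };

    static const struct resource *get_res(const char *name, unsigned int index,
                                          int have_names)
    {
        if (have_names) {
            for (unsigned int i = 0; i < sizeof(res) / sizeof(*res); i++)
                if (!strcmp(res[i].name, name))
                    return &res[i];
            return NULL;          /* optional block not described */
        }
        /* legacy DTB: fall back to positional lookup */
        return index < sizeof(res) / sizeof(*res) ? &res[index] : NULL;
    }

    int main(void)
    {
        const struct resource *r = get_res("asb", 1, 1);

        if (r)
            printf("asb at %#lx\n", r->start);
        return 0;
    }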
*/ + pm->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pm->base)) + return PTR_ERR(pm->base); + + pm->asb = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(pm->asb)) + pm->asb = NULL; + + pm->rpivid_asb = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(pm->rpivid_asb)) + pm->rpivid_asb = NULL; + + return 0; +} + static int bcm2835_pm_probe(struct platform_device *pdev) { - struct resource *res; struct device *dev = &pdev->dev; struct bcm2835_pm *pm; int ret; @@ -39,10 +82,9 @@ static int bcm2835_pm_probe(struct platform_device *pdev) pm->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pm->base = devm_ioremap_resource(dev, res); - if (IS_ERR(pm->base)) - return PTR_ERR(pm->base); + ret = bcm2835_pm_get_pdata(pdev, pm); + if (ret) + return ret; ret = devm_mfd_add_devices(dev, -1, bcm2835_pm_devs, ARRAY_SIZE(bcm2835_pm_devs), @@ -50,30 +92,22 @@ static int bcm2835_pm_probe(struct platform_device *pdev) if (ret) return ret; - /* We'll use the presence of the AXI ASB regs in the + /* + * We'll use the presence of the AXI ASB regs in the * bcm2835-pm binding as the key for whether we can reference * the full PM register range and support power domains. */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res) { - pm->asb = devm_ioremap_resource(dev, res); - if (IS_ERR(pm->asb)) - return PTR_ERR(pm->asb); - - ret = devm_mfd_add_devices(dev, -1, - bcm2835_power_devs, - ARRAY_SIZE(bcm2835_power_devs), - NULL, 0, NULL); - if (ret) - return ret; - } - + if (pm->asb) + return devm_mfd_add_devices(dev, -1, bcm2835_power_devs, + ARRAY_SIZE(bcm2835_power_devs), + NULL, 0, NULL); return 0; } static const struct of_device_id bcm2835_pm_of_match[] = { { .compatible = "brcm,bcm2835-pm-wdt", }, { .compatible = "brcm,bcm2835-pm", }, + { .compatible = "brcm,bcm2711-pm", }, {}, }; MODULE_DEVICE_TABLE(of, bcm2835_pm_of_match); diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 684a011a6396..8b058200d5ad 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -60,12 +60,29 @@ int mfd_cell_disable(struct platform_device *pdev) EXPORT_SYMBOL(mfd_cell_disable); #if IS_ENABLED(CONFIG_ACPI) +struct match_ids_walk_data { + struct acpi_device_id *ids; + struct acpi_device *adev; +}; + +static int match_device_ids(struct acpi_device *adev, void *data) +{ + struct match_ids_walk_data *wd = data; + + if (!acpi_match_device_ids(adev, wd->ids)) { + wd->adev = adev; + return 1; + } + + return 0; +} + static void mfd_acpi_add_device(const struct mfd_cell *cell, struct platform_device *pdev) { const struct mfd_cell_acpi_match *match = cell->acpi_match; - struct acpi_device *parent, *child; struct acpi_device *adev = NULL; + struct acpi_device *parent; parent = ACPI_COMPANION(pdev->dev.parent); if (!parent) @@ -83,14 +100,14 @@ static void mfd_acpi_add_device(const struct mfd_cell *cell, if (match) { if (match->pnpid) { struct acpi_device_id ids[2] = {}; + struct match_ids_walk_data wd = { + .adev = NULL, + .ids = ids, + }; strlcpy(ids[0].id, match->pnpid, sizeof(ids[0].id)); - list_for_each_entry(child, &parent->children, node) { - if (!acpi_match_device_ids(child, ids)) { - adev = child; - break; - } - } + acpi_dev_for_each_child(parent, match_device_ids, &wd); + adev = wd.adev; } else { adev = acpi_find_child_device(parent, match->adr, false); } diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index 009239ad1d8a..48821f4c2b21 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -29,7 +29,7 @@ struct 
lkdtm_list { #if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0) #define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2) #else -#define REC_STACK_SIZE (THREAD_SIZE / 8) +#define REC_STACK_SIZE (THREAD_SIZE / 8UL) #endif #define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2) diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 086ce77d9074..85dd6aa33df6 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -29,8 +29,6 @@ #include <linux/rwsem.h> #include <linux/slab.h> #include <linux/spinlock.h> -#include <linux/mount.h> -#include <linux/pseudo_fs.h> #include <linux/balloon_compaction.h> #include <linux/vmw_vmci_defs.h> #include <linux/vmw_vmci_api.h> @@ -1730,20 +1728,6 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b) #ifdef CONFIG_BALLOON_COMPACTION - -static int vmballoon_init_fs_context(struct fs_context *fc) -{ - return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM; -} - -static struct file_system_type vmballoon_fs = { - .name = "balloon-vmware", - .init_fs_context = vmballoon_init_fs_context, - .kill_sb = kill_anon_super, -}; - -static struct vfsmount *vmballoon_mnt; - /** * vmballoon_migratepage() - migrates a balloon page. * @b_dev_info: balloon device information descriptor. @@ -1863,21 +1847,6 @@ out_unlock: } /** - * vmballoon_compaction_deinit() - removes compaction related data. - * - * @b: pointer to the balloon. - */ -static void vmballoon_compaction_deinit(struct vmballoon *b) -{ - if (!IS_ERR(b->b_dev_info.inode)) - iput(b->b_dev_info.inode); - - b->b_dev_info.inode = NULL; - kern_unmount(vmballoon_mnt); - vmballoon_mnt = NULL; -} - -/** * vmballoon_compaction_init() - initialized compaction for the balloon. * * @b: pointer to the balloon. @@ -1888,33 +1857,15 @@ static void vmballoon_compaction_deinit(struct vmballoon *b) * * Return: zero on success or error code on failure. */ -static __init int vmballoon_compaction_init(struct vmballoon *b) +static __init void vmballoon_compaction_init(struct vmballoon *b) { - vmballoon_mnt = kern_mount(&vmballoon_fs); - if (IS_ERR(vmballoon_mnt)) - return PTR_ERR(vmballoon_mnt); - b->b_dev_info.migratepage = vmballoon_migratepage; - b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb); - - if (IS_ERR(b->b_dev_info.inode)) - return PTR_ERR(b->b_dev_info.inode); - - b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops; - return 0; } #else /* CONFIG_BALLOON_COMPACTION */ - -static void vmballoon_compaction_deinit(struct vmballoon *b) -{ -} - -static int vmballoon_compaction_init(struct vmballoon *b) +static inline void vmballoon_compaction_init(struct vmballoon *b) { - return 0; } - #endif /* CONFIG_BALLOON_COMPACTION */ static int __init vmballoon_init(void) @@ -1939,9 +1890,7 @@ static int __init vmballoon_init(void) * balloon_devinfo_init() . 
*/ balloon_devinfo_init(&balloon.b_dev_info); - error = vmballoon_compaction_init(&balloon); - if (error) - goto fail; + vmballoon_compaction_init(&balloon); INIT_LIST_HEAD(&balloon.huge_pages); spin_lock_init(&balloon.comm_lock); @@ -1958,7 +1907,6 @@ static int __init vmballoon_init(void) return 0; fail: vmballoon_unregister_shrinker(&balloon); - vmballoon_compaction_deinit(&balloon); return error; } @@ -1985,8 +1933,5 @@ static void __exit vmballoon_exit(void) */ vmballoon_send_start(&balloon, 0); vmballoon_pop(&balloon); - - /* Only once we popped the balloon, compaction can be deinit */ - vmballoon_compaction_deinit(&balloon); } module_exit(vmballoon_exit); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index f4a1281658db..e08e22f0a7c5 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2505,11 +2505,11 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, dev_set_drvdata(&card->dev, md); ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups); if (ret) - goto err_cleanup_queue; + goto err_put_disk; return md; - err_cleanup_queue: - blk_cleanup_queue(md->disk->queue); + err_put_disk: + put_disk(md->disk); blk_mq_free_tag_set(&md->queue.tag_set); err_kfree: kfree(md); diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index fa5324ceeebe..fefaa901b50f 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -116,8 +116,7 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req) } } -static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req, - bool reserved) +static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req) { struct request_queue *q = req->q; struct mmc_queue *mq = q->queuedata; @@ -494,7 +493,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq) if (blk_queue_quiesced(q)) blk_mq_unquiesce_queue(q); - blk_cleanup_queue(q); blk_mq_free_tag_set(&mq->tag_set); /* diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index d6144978e32d..10c563999d3d 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -169,8 +169,9 @@ config MMC_SDHCI_OF_ASPEED If unsure, say N. config MMC_SDHCI_OF_ASPEED_TEST - bool "Tests for the ASPEED SDHCI driver" - depends on MMC_SDHCI_OF_ASPEED && KUNIT=y + bool "Tests for the ASPEED SDHCI driver" if !KUNIT_ALL_TESTS + depends on MMC_SDHCI_OF_ASPEED && KUNIT + default KUNIT_ALL_TESTS help Enable KUnit tests for the ASPEED SDHCI driver. 
Select this option only if you will boot the kernel for the purpose of running diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index c0350e9c03f3..4cca4c90769b 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -775,8 +775,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct sdhci_acpi_slot *slot; - struct acpi_device *device, *child; const struct dmi_system_id *id; + struct acpi_device *device; struct sdhci_acpi_host *c; struct sdhci_host *host; struct resource *iomem; @@ -796,10 +796,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) slot = sdhci_acpi_get_slot(device); /* Power on the SDHCI controller and its children */ - acpi_device_fix_up_power(device); - list_for_each_entry(child, &device->children, node) - if (child->status.present && child->status.enabled) - acpi_device_fix_up_power(child); + acpi_device_fix_up_power_extended(device); if (sdhci_acpi_byt_defer(dev)) return -EPROBE_DEFER; diff --git a/drivers/mmc/host/sdhci-of-aspeed-test.c b/drivers/mmc/host/sdhci-of-aspeed-test.c index 1ed4f86291f2..ecb502606c53 100644 --- a/drivers/mmc/host/sdhci-of-aspeed-test.c +++ b/drivers/mmc/host/sdhci-of-aspeed-test.c @@ -96,10 +96,4 @@ static struct kunit_suite aspeed_sdhci_test_suite = { .test_cases = aspeed_sdhci_test_cases, }; -static struct kunit_suite *aspeed_sdc_test_suite_array[] = { - &aspeed_sdhci_test_suite, - NULL, -}; - -static struct kunit_suite **aspeed_sdc_test_suites - __used __section(".kunit_test_suites") = aspeed_sdc_test_suite_array; +kunit_test_suite(aspeed_sdhci_test_suite); diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c index 6e4e132903a6..ba6677bf7372 100644 --- a/drivers/mmc/host/sdhci-of-aspeed.c +++ b/drivers/mmc/host/sdhci-of-aspeed.c @@ -606,25 +606,6 @@ static struct platform_driver aspeed_sdc_driver = { #if defined(CONFIG_MMC_SDHCI_OF_ASPEED_TEST) #include "sdhci-of-aspeed-test.c" - -static inline int aspeed_sdc_tests_init(void) -{ - return __kunit_test_suites_init(aspeed_sdc_test_suites); -} - -static inline void aspeed_sdc_tests_exit(void) -{ - __kunit_test_suites_exit(aspeed_sdc_test_suites); -} -#else -static inline int aspeed_sdc_tests_init(void) -{ - return 0; -} - -static inline void aspeed_sdc_tests_exit(void) -{ -} #endif static int __init aspeed_sdc_init(void) @@ -637,18 +618,7 @@ static int __init aspeed_sdc_init(void) rc = platform_driver_register(&aspeed_sdc_driver); if (rc < 0) - goto cleanup_sdhci; - - rc = aspeed_sdc_tests_init(); - if (rc < 0) { - platform_driver_unregister(&aspeed_sdc_driver); - goto cleanup_sdhci; - } - - return 0; - -cleanup_sdhci: - platform_driver_unregister(&aspeed_sdhci_driver); + platform_driver_unregister(&aspeed_sdhci_driver); return rc; } @@ -656,8 +626,6 @@ module_init(aspeed_sdc_init); static void __exit aspeed_sdc_exit(void) { - aspeed_sdc_tests_exit(); - platform_driver_unregister(&aspeed_sdc_driver); platform_driver_unregister(&aspeed_sdhci_driver); } diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index ed53276f6ad9..622b7de96c7f 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1240,16 +1240,11 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { #ifdef CONFIG_ACPI static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) { - struct acpi_device *device, *child; + struct acpi_device *device; device = ACPI_COMPANION(&slot->chip->pdev->dev); - if 
(!device) - return; - - acpi_device_fix_up_power(device); - list_for_each_entry(child, &device->children, node) - if (child->status.present && child->status.enabled) - acpi_device_fix_up_power(child); + if (device) + acpi_device_fix_up_power_extended(device); } #else static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {} diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index f73172111465..60b222799871 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -29,7 +29,7 @@ static void blktrans_dev_release(struct kref *kref) struct mtd_blktrans_dev *dev = container_of(kref, struct mtd_blktrans_dev, ref); - blk_cleanup_disk(dev->disk); + put_disk(dev->disk); blk_mq_free_tag_set(dev->tag_set); kfree(dev->tag_set); list_del(&dev->list); @@ -398,7 +398,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) return 0; out_cleanup_disk: - blk_cleanup_disk(new->disk); + put_disk(new->disk); out_free_tag_set: blk_mq_free_tag_set(new->tag_set); out_kfree_tag_set: diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index a78fdf3b30f7..4cf67a2a0d04 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -467,7 +467,7 @@ out_destroy_wq: out_remove_minor: idr_remove(&ubiblock_minor_idr, gd->first_minor); out_cleanup_disk: - blk_cleanup_disk(dev->gd); + put_disk(dev->gd); out_free_tags: blk_mq_free_tag_set(&dev->tag_set); out_free_dev: @@ -486,7 +486,7 @@ static void ubiblock_cleanup(struct ubiblock *dev) destroy_workqueue(dev->wq); /* Finally destroy the blk queue */ dev_info(disk_to_dev(dev->gd), "released"); - blk_cleanup_disk(dev->gd); + put_disk(dev->gd); blk_mq_free_tag_set(&dev->tag_set); idr_remove(&ubiblock_minor_idr, dev->gd->first_minor); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index dcbe55b56e43..b8379e4034a4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -459,43 +459,34 @@ static void brcmf_fw_fix_efi_nvram_ccode(char *data, unsigned long data_len) static u8 *brcmf_fw_nvram_from_efi(size_t *data_len_ret) { - const u16 name[] = { 'n', 'v', 'r', 'a', 'm', 0 }; - struct efivar_entry *nvram_efivar; + efi_guid_t guid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61, 0xb5, 0x1f, + 0x43, 0x26, 0x81, 0x23, 0xd1, 0x13); unsigned long data_len = 0; + efi_status_t status; u8 *data = NULL; - int err; - nvram_efivar = kzalloc(sizeof(*nvram_efivar), GFP_KERNEL); - if (!nvram_efivar) + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) return NULL; - memcpy(&nvram_efivar->var.VariableName, name, sizeof(name)); - nvram_efivar->var.VendorGuid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61, - 0xb5, 0x1f, 0x43, 0x26, - 0x81, 0x23, 0xd1, 0x13); - - err = efivar_entry_size(nvram_efivar, &data_len); - if (err) + status = efi.get_variable(L"nvram", &guid, NULL, &data_len, NULL); + if (status != EFI_BUFFER_TOO_SMALL) goto fail; data = kmalloc(data_len, GFP_KERNEL); if (!data) goto fail; - err = efivar_entry_get(nvram_efivar, NULL, &data_len, data); - if (err) + status = efi.get_variable(L"nvram", &guid, NULL, &data_len, data); + if (status != EFI_SUCCESS) goto fail; brcmf_fw_fix_efi_nvram_ccode(data, data_len); brcmf_info("Using nvram EFI variable\n"); - kfree(nvram_efivar); *data_len_ret = data_len; return data; - fail: kfree(data); - kfree(nvram_efivar); return NULL; } #else diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c 
b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c index 23b1d689ba7b..6d408cd0f517 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c @@ -19,20 +19,14 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) { - struct efivar_entry *pnvm_efivar; void *data; unsigned long package_size; - int err; + efi_status_t status; *len = 0; - pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL); - if (!pnvm_efivar) - return ERR_PTR(-ENOMEM); - - memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME, - sizeof(IWL_UEFI_OEM_PNVM_NAME)); - pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID; + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) + return ERR_PTR(-ENODEV); /* * TODO: we hardcode a maximum length here, because reading @@ -42,27 +36,22 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) package_size = IWL_HARDCODED_PNVM_SIZE; data = kmalloc(package_size, GFP_KERNEL); - if (!data) { - data = ERR_PTR(-ENOMEM); - goto out; - } + if (!data) + return ERR_PTR(-ENOMEM); - err = efivar_entry_get(pnvm_efivar, NULL, &package_size, data); - if (err) { + status = efi.get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_VAR_GUID, + NULL, &package_size, data); + if (status != EFI_SUCCESS) { IWL_DEBUG_FW(trans, - "PNVM UEFI variable not found %d (len %lu)\n", - err, package_size); + "PNVM UEFI variable not found 0x%lx (len %lu)\n", + status, package_size); kfree(data); - data = ERR_PTR(err); - goto out; + return ERR_PTR(-ENOENT); } IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %lu\n", package_size); *len = package_size; -out: - kfree(pnvm_efivar); - return data; } @@ -211,21 +200,15 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans, void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) { - struct efivar_entry *reduce_power_efivar; struct pnvm_sku_package *package; void *data = NULL; unsigned long package_size; - int err; + efi_status_t status; *len = 0; - reduce_power_efivar = kzalloc(sizeof(*reduce_power_efivar), GFP_KERNEL); - if (!reduce_power_efivar) - return ERR_PTR(-ENOMEM); - - memcpy(&reduce_power_efivar->var.VariableName, IWL_UEFI_REDUCED_POWER_NAME, - sizeof(IWL_UEFI_REDUCED_POWER_NAME)); - reduce_power_efivar->var.VendorGuid = IWL_EFI_VAR_GUID; + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) + return ERR_PTR(-ENODEV); /* * TODO: we hardcode a maximum length here, because reading @@ -235,19 +218,17 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) package_size = IWL_HARDCODED_REDUCE_POWER_SIZE; package = kmalloc(package_size, GFP_KERNEL); - if (!package) { - package = ERR_PTR(-ENOMEM); - goto out; - } + if (!package) + return ERR_PTR(-ENOMEM); - err = efivar_entry_get(reduce_power_efivar, NULL, &package_size, package); - if (err) { + status = efi.get_variable(IWL_UEFI_REDUCED_POWER_NAME, &IWL_EFI_VAR_GUID, + NULL, &package_size, package); + if (status != EFI_SUCCESS) { IWL_DEBUG_FW(trans, - "Reduced Power UEFI variable not found %d (len %lu)\n", - err, package_size); + "Reduced Power UEFI variable not found 0x%lx (len %lu)\n", + status, package_size); kfree(package); - data = ERR_PTR(err); - goto out; + return ERR_PTR(-ENOENT); } IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n", @@ -262,9 +243,6 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) kfree(package); -out: - kfree(reduce_power_efivar); - return data; } @@ -304,22 +282,15 @@ static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data
*sgom_data, void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) { - struct efivar_entry *sgom_efivar; struct uefi_cnv_wlan_sgom_data *data; unsigned long package_size; - int err, ret; - - if (!fwrt->geo_enabled) - return; + efi_status_t status; + int ret; - sgom_efivar = kzalloc(sizeof(*sgom_efivar), GFP_KERNEL); - if (!sgom_efivar) + if (!fwrt->geo_enabled || + !efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) return; - memcpy(&sgom_efivar->var.VariableName, IWL_UEFI_SGOM_NAME, - sizeof(IWL_UEFI_SGOM_NAME)); - sgom_efivar->var.VendorGuid = IWL_EFI_VAR_GUID; - /* TODO: we hardcode a maximum length here, because reading * from the UEFI is not working. To implement this properly, * we have to call efivar_entry_size(). @@ -327,15 +298,14 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans, package_size = IWL_HARDCODED_SGOM_SIZE; data = kmalloc(package_size, GFP_KERNEL); - if (!data) { - data = ERR_PTR(-ENOMEM); - goto out; - } + if (!data) + return; - err = efivar_entry_get(sgom_efivar, NULL, &package_size, data); - if (err) { + status = efi.get_variable(IWL_UEFI_SGOM_NAME, &IWL_EFI_VAR_GUID, + NULL, &package_size, data); + if (status != EFI_SUCCESS) { IWL_DEBUG_FW(trans, - "SGOM UEFI variable not found %d\n", err); + "SGOM UEFI variable not found 0x%lx\n", status); goto out_free; } @@ -349,8 +319,6 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans, out_free: kfree(data); -out: - kfree(sgom_efivar); } IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table); #endif /* CONFIG_ACPI */ diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 9613e54c7a67..0297b7882e33 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1422,7 +1422,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip, struct page *page, unsigned int len, unsigned int off, - unsigned int op, sector_t sector) + enum req_op op, sector_t sector) { int ret; @@ -1483,7 +1483,7 @@ static void btt_submit_bio(struct bio *bio) } static int btt_rw_page(struct block_device *bdev, sector_t sector, - struct page *page, unsigned int op) + struct page *page, enum req_op op) { struct btt *btt = bdev->bd_disk->private_data; int rc; @@ -1548,14 +1548,14 @@ static int btt_blk_init(struct btt *btt) return 0; out_cleanup_disk: - blk_cleanup_disk(btt->btt_disk); + put_disk(btt->btt_disk); return rc; } static void btt_blk_cleanup(struct btt *btt) { del_gendisk(btt->btt_disk); - blk_cleanup_disk(btt->btt_disk); + put_disk(btt->btt_disk); } /** diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 629d10fcf53b..f36efcc11f67 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -239,7 +239,7 @@ static void pmem_submit_bio(struct bio *bio) } static int pmem_rw_page(struct block_device *bdev, sector_t sector, - struct page *page, unsigned int op) + struct page *page, enum req_op op) { struct pmem_device *pmem = bdev->bd_disk->private_data; blk_status_t rc; @@ -450,7 +450,7 @@ static void pmem_release_disk(void *__pmem) put_dax(pmem->dax_dev); del_gendisk(pmem->disk); - blk_cleanup_disk(pmem->disk); + put_disk(pmem->disk); } static int pmem_attach_disk(struct device *dev, @@ -596,7 +596,7 @@ out_cleanup_dax: kill_dax(pmem->dax_dev); put_dax(pmem->dax_dev); out: - blk_cleanup_disk(pmem->disk); + put_disk(pmem->disk); return rc; } diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index d702d7d60235..5c352d5d8ee6 100644 --- a/drivers/nvme/host/apple.c +++ 
b/drivers/nvme/host/apple.c @@ -862,8 +862,7 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown) } } -static enum blk_eh_timer_return apple_nvme_timeout(struct request *req, - bool reserved) +static enum blk_eh_timer_return apple_nvme_timeout(struct request *req) { struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); struct apple_nvme_queue *q = iod->q; @@ -1502,7 +1501,7 @@ static int apple_nvme_probe(struct platform_device *pdev) if (!blk_get_queue(anv->ctrl.admin_q)) { nvme_start_admin_queue(&anv->ctrl); - blk_cleanup_queue(anv->ctrl.admin_q); + blk_mq_destroy_queue(anv->ctrl.admin_q); anv->ctrl.admin_q = NULL; ret = -ENODEV; goto put_dev; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 6a12a906a11e..2533b88e66d5 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -418,7 +418,7 @@ blk_status_t nvme_host_path_error(struct request *req) } EXPORT_SYMBOL_GPL(nvme_host_path_error); -bool nvme_cancel_request(struct request *req, void *data, bool reserved) +bool nvme_cancel_request(struct request *req, void *data) { dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, "Cancelling I/O %d", req->tag); @@ -4061,7 +4061,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid, mutex_unlock(&ctrl->subsys->lock); nvme_put_ns_head(ns->head); out_cleanup_disk: - blk_cleanup_disk(disk); + put_disk(disk); out_free_ns: kfree(ns); out_free_id: @@ -4103,7 +4103,6 @@ static void nvme_ns_remove(struct nvme_ns *ns) if (!nvme_ns_head_multipath(ns->head)) nvme_cdev_del(&ns->cdev, &ns->cdev_device); del_gendisk(ns->disk); - blk_cleanup_queue(ns->queue); down_write(&ns->ctrl->namespaces_rwsem); list_del_init(&ns->list); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 3c778bb0c294..9987797620b6 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -2392,7 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref) unsigned long flags; if (ctrl->ctrl.tagset) { - blk_cleanup_queue(ctrl->ctrl.connect_q); + blk_mq_destroy_queue(ctrl->ctrl.connect_q); blk_mq_free_tag_set(&ctrl->tag_set); } @@ -2402,8 +2402,8 @@ nvme_fc_ctrl_free(struct kref *ref) spin_unlock_irqrestore(&ctrl->rport->lock, flags); nvme_start_admin_queue(&ctrl->ctrl); - blk_cleanup_queue(ctrl->ctrl.admin_q); - blk_cleanup_queue(ctrl->ctrl.fabrics_q); + blk_mq_destroy_queue(ctrl->ctrl.admin_q); + blk_mq_destroy_queue(ctrl->ctrl.fabrics_q); blk_mq_free_tag_set(&ctrl->admin_tag_set); kfree(ctrl->queues); @@ -2456,8 +2456,7 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) * status. The done path will return the io request back to the block * layer with an error status. 
*/ -static bool -nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved) +static bool nvme_fc_terminate_exchange(struct request *req, void *data) { struct nvme_ctrl *nctrl = data; struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); @@ -2565,8 +2564,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) nvme_reset_ctrl(&ctrl->ctrl); } -static enum blk_eh_timer_return -nvme_fc_timeout(struct request *rq, bool reserved) +static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq) { struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); struct nvme_fc_ctrl *ctrl = op->ctrl; @@ -2953,7 +2951,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) out_delete_hw_queues: nvme_fc_delete_hw_io_queues(ctrl); out_cleanup_blk_queue: - blk_cleanup_queue(ctrl->ctrl.connect_q); + blk_mq_destroy_queue(ctrl->ctrl.connect_q); out_free_tag_set: blk_mq_free_tag_set(&ctrl->tag_set); nvme_fc_free_io_queues(ctrl); @@ -3642,9 +3640,9 @@ fail_ctrl: return ERR_PTR(-EIO); out_cleanup_admin_q: - blk_cleanup_queue(ctrl->ctrl.admin_q); + blk_mq_destroy_queue(ctrl->ctrl.admin_q); out_cleanup_fabrics_q: - blk_cleanup_queue(ctrl->ctrl.fabrics_q); + blk_mq_destroy_queue(ctrl->ctrl.fabrics_q); out_free_admin_tag_set: blk_mq_free_tag_set(&ctrl->admin_tag_set); out_free_queues: diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index a2e89db1cd63..27614bee7380 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -68,7 +68,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q, struct nvme_command *cmd, void __user *ubuffer, unsigned bufflen, void __user *meta_buffer, unsigned meta_len, u32 meta_seed, void **metap, unsigned timeout, bool vec, - unsigned int rq_flags, blk_mq_req_flags_t blk_flags) + blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags) { bool write = nvme_is_write(cmd); struct nvme_ns *ns = q->queuedata; @@ -407,7 +407,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct nvme_uring_data d; struct nvme_command c; struct request *req; - unsigned int rq_flags = 0; + blk_opf_t rq_flags = 0; blk_mq_req_flags_t blk_flags = 0; void *meta = NULL; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index d3e2440d8abb..f26640ccb955 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -830,7 +830,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) ns->head->disk->queue); #ifdef CONFIG_BLK_DEV_ZONED if (blk_queue_is_zoned(ns->queue) && ns->head->disk) - ns->head->disk->queue->nr_zones = ns->queue->nr_zones; + ns->head->disk->nr_zones = ns->disk->nr_zones; #endif } @@ -853,7 +853,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); - blk_cleanup_disk(head->disk); + put_disk(head->disk); } void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 5558f8812157..7e0a925bf3be 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -698,7 +698,7 @@ static __always_inline void nvme_complete_batch(struct io_comp_batch *iob, } blk_status_t nvme_host_path_error(struct request *req); -bool nvme_cancel_request(struct request *req, void *data, bool reserved); +bool nvme_cancel_request(struct request *req, void *data); void nvme_cancel_tagset(struct nvme_ctrl *ctrl); void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl); bool 
nvme_change_ctrl_state(struct nvme_ctrl *ctrl, @@ -734,7 +734,7 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl); int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); void nvme_start_freeze(struct nvme_ctrl *ctrl); -static inline unsigned int nvme_req_op(struct nvme_command *cmd) +static inline enum req_op nvme_req_op(struct nvme_command *cmd) { return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 58c72d55769a..7e7d4802ac6b 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1344,7 +1344,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n"); } -static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) +static enum blk_eh_timer_return nvme_timeout(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct nvme_queue *nvmeq = iod->nvmeq; @@ -1760,7 +1760,7 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev) * queue to flush these to completion. */ nvme_start_admin_queue(&dev->ctrl); - blk_cleanup_queue(dev->ctrl.admin_q); + blk_mq_destroy_queue(dev->ctrl.admin_q); blk_mq_free_tag_set(&dev->admin_tagset); } } @@ -3515,6 +3515,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 46c2dcf72f7e..4665aebd944d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -840,8 +840,8 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remove) { if (remove) { - blk_cleanup_queue(ctrl->ctrl.admin_q); - blk_cleanup_queue(ctrl->ctrl.fabrics_q); + blk_mq_destroy_queue(ctrl->ctrl.admin_q); + blk_mq_destroy_queue(ctrl->ctrl.fabrics_q); blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); } if (ctrl->async_event_sqe.data) { @@ -935,10 +935,10 @@ out_stop_queue: nvme_cancel_admin_tagset(&ctrl->ctrl); out_cleanup_queue: if (new) - blk_cleanup_queue(ctrl->ctrl.admin_q); + blk_mq_destroy_queue(ctrl->ctrl.admin_q); out_cleanup_fabrics_q: if (new) - blk_cleanup_queue(ctrl->ctrl.fabrics_q); + blk_mq_destroy_queue(ctrl->ctrl.fabrics_q); out_free_tagset: if (new) blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); @@ -957,7 +957,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { if (remove) { - blk_cleanup_queue(ctrl->ctrl.connect_q); + blk_mq_destroy_queue(ctrl->ctrl.connect_q); blk_mq_free_tag_set(ctrl->ctrl.tagset); } nvme_rdma_free_io_queues(ctrl); @@ -1012,7 +1012,7 @@ out_wait_freeze_timed_out: out_cleanup_connect_q: nvme_cancel_tagset(&ctrl->ctrl); if (new) - blk_cleanup_queue(ctrl->ctrl.connect_q); + blk_mq_destroy_queue(ctrl->ctrl.connect_q); out_free_tag_set: if (new) blk_mq_free_tag_set(ctrl->ctrl.tagset); @@ -2021,8 +2021,7 @@ static void nvme_rdma_complete_timed_out(struct request *rq) nvmf_complete_timed_out_request(rq); } -static enum blk_eh_timer_return -nvme_rdma_timeout(struct request *rq, bool reserved) +static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct 
nvme_rdma_queue *queue = req->queue; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 7a9e6ffa2342..b95ee85053e3 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1884,7 +1884,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) { nvme_tcp_stop_io_queues(ctrl); if (remove) { - blk_cleanup_queue(ctrl->connect_q); + blk_mq_destroy_queue(ctrl->connect_q); blk_mq_free_tag_set(ctrl->tagset); } nvme_tcp_free_io_queues(ctrl); @@ -1939,7 +1939,7 @@ out_wait_freeze_timed_out: out_cleanup_connect_q: nvme_cancel_tagset(ctrl); if (new) - blk_cleanup_queue(ctrl->connect_q); + blk_mq_destroy_queue(ctrl->connect_q); out_free_tag_set: if (new) blk_mq_free_tag_set(ctrl->tagset); @@ -1952,8 +1952,8 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) { nvme_tcp_stop_queue(ctrl, 0); if (remove) { - blk_cleanup_queue(ctrl->admin_q); - blk_cleanup_queue(ctrl->fabrics_q); + blk_mq_destroy_queue(ctrl->admin_q); + blk_mq_destroy_queue(ctrl->fabrics_q); blk_mq_free_tag_set(ctrl->admin_tagset); } nvme_tcp_free_admin_queue(ctrl); @@ -2011,10 +2011,10 @@ out_stop_queue: nvme_cancel_admin_tagset(ctrl); out_cleanup_queue: if (new) - blk_cleanup_queue(ctrl->admin_q); + blk_mq_destroy_queue(ctrl->admin_q); out_cleanup_fabrics_q: if (new) - blk_cleanup_queue(ctrl->fabrics_q); + blk_mq_destroy_queue(ctrl->fabrics_q); out_free_tagset: if (new) blk_mq_free_tag_set(ctrl->admin_tagset); @@ -2323,8 +2323,7 @@ static void nvme_tcp_complete_timed_out(struct request *rq) nvmf_complete_timed_out_request(rq); } -static enum blk_eh_timer_return -nvme_tcp_timeout(struct request *rq, bool reserved) +static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 9f81beb4df4e..12316ab51bda 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -109,10 +109,10 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf) goto free_data; } - blk_queue_set_zoned(ns->disk, BLK_ZONED_HM); + disk_set_zoned(ns->disk, BLK_ZONED_HM); blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); - blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1); - blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1); + disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1); + disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1); free_data: kfree(id); return status; diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 27a72504d31c..2dc1c1035626 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -246,7 +246,8 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) struct scatterlist *sg; struct blk_plug plug; sector_t sector; - int op, i, rc; + blk_opf_t opf; + int i, rc; struct sg_mapping_iter prot_miter; unsigned int iter_flags; unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len; @@ -260,26 +261,26 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) } if (req->cmd->rw.opcode == nvme_cmd_write) { - op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; + opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) - op |= REQ_FUA; + opf |= REQ_FUA; iter_flags = SG_MITER_TO_SG; } else { - op = REQ_OP_READ; + opf = REQ_OP_READ; iter_flags = SG_MITER_FROM_SG; } if (is_pci_p2pdma_page(sg_page(req->sg))) - op |= REQ_NOMERGE; + opf |= REQ_NOMERGE; 
sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); if (nvmet_use_inline_bvec(req)) { bio = &req->b.inline_bio; bio_init(bio, req->ns->bdev, req->inline_bvec, - ARRAY_SIZE(req->inline_bvec), op); + ARRAY_SIZE(req->inline_bvec), opf); } else { - bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op, + bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf, GFP_KERNEL); } bio->bi_iter.bi_sector = sector; @@ -306,7 +307,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) } bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), - op, GFP_KERNEL); + opf, GFP_KERNEL); bio->bi_iter.bi_sector = sector; bio_chain(bio, prev); diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index f3d58abf11e0..64b47e2a4633 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -112,7 +112,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos, iocb->ki_pos = pos; iocb->ki_filp = req->ns->file; - iocb->ki_flags = ki_flags | iocb_flags(req->ns->file); + iocb->ki_flags = ki_flags | iocb->ki_filp->f_iocb_flags; return call_iter(iocb, &iter); } diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 59024af2da2e..0f5c77e22a0a 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -266,8 +266,8 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) return; nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); - blk_cleanup_queue(ctrl->ctrl.admin_q); - blk_cleanup_queue(ctrl->ctrl.fabrics_q); + blk_mq_destroy_queue(ctrl->ctrl.admin_q); + blk_mq_destroy_queue(ctrl->ctrl.fabrics_q); blk_mq_free_tag_set(&ctrl->admin_tag_set); } @@ -283,7 +283,7 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) mutex_unlock(&nvme_loop_ctrl_mutex); if (nctrl->tagset) { - blk_cleanup_queue(ctrl->ctrl.connect_q); + blk_mq_destroy_queue(ctrl->ctrl.connect_q); blk_mq_free_tag_set(&ctrl->tag_set); } kfree(ctrl->queues); @@ -410,9 +410,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) out_cleanup_queue: clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); - blk_cleanup_queue(ctrl->ctrl.admin_q); + blk_mq_destroy_queue(ctrl->ctrl.admin_q); out_cleanup_fabrics_q: - blk_cleanup_queue(ctrl->ctrl.fabrics_q); + blk_mq_destroy_queue(ctrl->ctrl.fabrics_q); out_free_tagset: blk_mq_free_tag_set(&ctrl->admin_tag_set); out_free_sq: @@ -554,7 +554,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) return 0; out_cleanup_connect_q: - blk_cleanup_queue(ctrl->ctrl.connect_q); + blk_mq_destroy_queue(ctrl->ctrl.connect_q); out_free_tagset: blk_mq_free_tag_set(&ctrl->tag_set); out_destroy_queues: diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 82b61acf7a72..c7ef69f29fe4 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -57,10 +57,10 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns) * zones, reject the device. Otherwise, use report zones to detect if * the device has conventional zones. 
*/ - if (ns->bdev->bd_disk->queue->conv_zones_bitmap) + if (ns->bdev->bd_disk->conv_zones_bitmap) return false; - ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk), + ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev), validate_conv_zones_cb, NULL); if (ret < 0) return false; @@ -241,7 +241,7 @@ static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req) { unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); - return blkdev_nr_zones(req->ns->bdev->bd_disk) - + return bdev_nr_zones(req->ns->bdev) - (sect >> ilog2(bdev_zone_sectors(req->ns->bdev))); } @@ -308,7 +308,7 @@ void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req) queue_work(zbd_wq, &req->z.zmgmt_work); } -static inline enum req_opf zsa_req_op(u8 zsa) +static inline enum req_op zsa_req_op(u8 zsa) { switch (zsa) { case NVME_ZONE_OPEN: @@ -386,7 +386,7 @@ static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d) static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req) { struct block_device *bdev = req->ns->bdev; - unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk); + unsigned int nr_zones = bdev_nr_zones(bdev); struct request_queue *q = bdev_get_queue(bdev); struct bio *bio = NULL; sector_t sector = 0; @@ -413,8 +413,8 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req) ret = 0; } - while (sector < get_capacity(bdev->bd_disk)) { - if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) { + while (sector < bdev_nr_sectors(bdev)) { + if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) { bio = blk_next_bio(bio, bdev, 0, zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC, GFP_KERNEL); @@ -422,7 +422,7 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req) /* This may take a while, so be nice to others */ cond_resched(); } - sector += blk_queue_zone_sectors(q); + sector += bdev_zone_sectors(bdev); } if (bio) { @@ -465,7 +465,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w) { struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work); sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba); - enum req_opf op = zsa_req_op(req->cmd->zms.zsa); + enum req_op op = zsa_req_op(req->cmd->zms.zsa); struct block_device *bdev = req->ns->bdev; sector_t zone_sectors = bdev_zone_sectors(bdev); u16 status = NVME_SC_SUCCESS; @@ -525,7 +525,7 @@ static void nvmet_bdev_zone_append_bio_done(struct bio *bio) void nvmet_bdev_execute_zone_append(struct nvmet_req *req) { sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); - const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE; + const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE; u16 status = NVME_SC_SUCCESS; unsigned int total_len = 0; struct scatterlist *sg; @@ -556,9 +556,9 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) if (nvmet_use_inline_bvec(req)) { bio = &req->z.inline_bio; bio_init(bio, req->ns->bdev, req->inline_bvec, - ARRAY_SIZE(req->inline_bvec), op); + ARRAY_SIZE(req->inline_bvec), opf); } else { - bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL); + bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL); } bio->bi_end_io = nvmet_bdev_zone_append_bio_done; diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c index 8d374cc552be..f2e58ddfaed2 100644 --- a/drivers/of/kexec.c +++ b/drivers/of/kexec.c @@ -9,6 +9,7 @@ * Copyright (C) 2016 IBM Corporation */ +#include <linux/ima.h> #include <linux/kernel.h> #include <linux/kexec.h> #include <linux/memblock.h> @@ -115,6 +116,7 @@ 
static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr, return 0; } +#ifdef CONFIG_HAVE_IMA_KEXEC /** * ima_get_kexec_buffer - get IMA buffer from the previous kernel * @addr: On successful return, set to point to the buffer contents. @@ -122,16 +124,13 @@ static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr, * * Return: 0 on success, negative errno on error. */ -int ima_get_kexec_buffer(void **addr, size_t *size) +int __init ima_get_kexec_buffer(void **addr, size_t *size) { int ret, len; unsigned long tmp_addr; size_t tmp_size; const void *prop; - if (!IS_ENABLED(CONFIG_HAVE_IMA_KEXEC)) - return -ENOTSUPP; - prop = of_get_property(of_chosen, "linux,ima-kexec-buffer", &len); if (!prop) return -ENOENT; @@ -149,16 +148,13 @@ int ima_get_kexec_buffer(void **addr, size_t *size) /** * ima_free_kexec_buffer - free memory used by the IMA buffer */ -int ima_free_kexec_buffer(void) +int __init ima_free_kexec_buffer(void) { int ret; unsigned long addr; size_t size; struct property *prop; - if (!IS_ENABLED(CONFIG_HAVE_IMA_KEXEC)) - return -ENOTSUPP; - prop = of_find_property(of_chosen, "linux,ima-kexec-buffer", NULL); if (!prop) return -ENOENT; @@ -173,6 +169,7 @@ int ima_free_kexec_buffer(void) return memblock_phys_free(addr, size); } +#endif /** * remove_ima_buffer - remove the IMA buffer property and reservation from @fdt diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 30394929d700..eb89c9a75985 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -1443,12 +1443,12 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node); * It provides the power used by @dev at @kHz if it is the frequency of an * existing OPP, or at the frequency of the first OPP above @kHz otherwise * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled - * frequency and @mW to the associated power. + * frequency and @uW to the associated power. * * Returns 0 on success or a proper -EINVAL value in case of error. */ static int __maybe_unused -_get_dt_power(struct device *dev, unsigned long *mW, unsigned long *kHz) +_get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz) { struct dev_pm_opp *opp; unsigned long opp_freq, opp_power; @@ -1465,7 +1465,7 @@ _get_dt_power(struct device *dev, unsigned long *mW, unsigned long *kHz) return -EINVAL; *kHz = opp_freq / 1000; - *mW = opp_power / 1000; + *uW = opp_power; return 0; } @@ -1475,14 +1475,14 @@ _get_dt_power(struct device *dev, unsigned long *mW, unsigned long *kHz) * This computes the power estimated by @dev at @kHz if it is the frequency * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled - * frequency and @mW to the associated power. The power is estimated as + * frequency and @uW to the associated power. The power is estimated as * P = C * V^2 * f with C being the device's capacitance and V and f * respectively the voltage and frequency of the OPP. * * Returns -EINVAL if the power calculation failed because of missing * parameters, 0 otherwise. 
*/ -static int __maybe_unused _get_power(struct device *dev, unsigned long *mW, +static int __maybe_unused _get_power(struct device *dev, unsigned long *uW, unsigned long *kHz) { struct dev_pm_opp *opp; @@ -1512,9 +1512,10 @@ static int __maybe_unused _get_power(struct device *dev, unsigned long *mW, return -EINVAL; tmp = (u64)cap * mV * mV * (Hz / 1000000); - do_div(tmp, 1000000000); + /* Provide power in micro-Watts */ + do_div(tmp, 1000000); - *mW = (unsigned long)tmp; + *uW = (unsigned long)tmp; *kHz = Hz / 1000; return 0; diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 8a3b0c3a1e92..3a8c98615634 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -677,7 +677,7 @@ static int iosapic_set_affinity_irq(struct irq_data *d, if (dest_cpu < 0) return -1; - cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(dest_cpu)); + irq_data_update_affinity(d, cpumask_of(dest_cpu)); vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu); spin_lock_irqsave(&iosapic_lock, flags); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index db814f7b93ba..e7c6f6629e7c 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -642,7 +642,7 @@ static void hv_arch_irq_unmask(struct irq_data *data) struct hv_retarget_device_interrupt *params; struct tran_int_desc *int_desc; struct hv_pcibus_device *hbus; - struct cpumask *dest; + const struct cpumask *dest; cpumask_var_t tmp; struct pci_bus *pbus; struct pci_dev *pdev; @@ -1613,7 +1613,7 @@ out: } static u32 hv_compose_msi_req_v1( - struct pci_create_interrupt *int_pkt, struct cpumask *affinity, + struct pci_create_interrupt *int_pkt, const struct cpumask *affinity, u32 slot, u8 vector, u8 vector_count) { int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; @@ -1635,13 +1635,13 @@ static u32 hv_compose_msi_req_v1( * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten * by subsequent retarget in hv_irq_unmask(). */ -static int hv_compose_msi_req_get_cpu(struct cpumask *affinity) +static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity) { return cpumask_first_and(affinity, cpu_online_mask); } static u32 hv_compose_msi_req_v2( - struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, + struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity, u32 slot, u8 vector, u8 vector_count) { int cpu; @@ -1660,7 +1660,7 @@ static u32 hv_compose_msi_req_v2( } static u32 hv_compose_msi_req_v3( - struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity, + struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity, u32 slot, u32 vector, u8 vector_count) { int cpu; @@ -1697,7 +1697,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct hv_pci_dev *hpdev; struct pci_bus *pbus; struct pci_dev *pdev; - struct cpumask *dest; + const struct cpumask *dest; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; struct msi_desc *msi_desc; diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 96e09fa40909..03b1309875ae 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -1139,7 +1139,7 @@ static void cci_pmu_start(struct perf_event *event, int pmu_flags) /* * To handle interrupt latency, we always reprogram the period - * regardlesss of PERF_EF_RELOAD. + * regardless of PERF_EF_RELOAD. 
*/ if (pmu_flags & PERF_EF_RELOAD) WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); @@ -1261,7 +1261,7 @@ static int validate_group(struct perf_event *event) */ .used_mask = mask, }; - memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long)); + bitmap_zero(mask, cci_pmu->num_cntrs); if (!validate_event(event->pmu, &fake_pmu, leader)) return -EINVAL; @@ -1629,10 +1629,9 @@ static struct cci_pmu *cci_pmu_alloc(struct device *dev) GFP_KERNEL); if (!cci_pmu->hw_events.events) return ERR_PTR(-ENOMEM); - cci_pmu->hw_events.used_mask = devm_kcalloc(dev, - BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)), - sizeof(*cci_pmu->hw_events.used_mask), - GFP_KERNEL); + cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev, + CCI_PMU_MAX_HW_CNTRS(model), + GFP_KERNEL); if (!cci_pmu->hw_events.used_mask) return ERR_PTR(-ENOMEM); diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index 40b352e8aa7f..728d13d8e98a 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -1250,7 +1250,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9); /* Get a convenient /sys/event_source/devices/ name */ - ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL); + ccn->dt.id = ida_alloc(&arm_ccn_pmu_ida, GFP_KERNEL); if (ccn->dt.id == 0) { name = "ccn"; } else { @@ -1312,7 +1312,7 @@ error_pmu_register: &ccn->dt.node); error_set_affinity: error_choose_name: - ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); + ida_free(&arm_ccn_pmu_ida, ccn->dt.id); for (i = 0; i < ccn->num_xps; i++) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); writel(0, ccn->dt.base + CCN_DT_PMCR); @@ -1329,7 +1329,7 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); writel(0, ccn->dt.base + CCN_DT_PMCR); perf_pmu_unregister(&ccn->dt.pmu); - ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); + ida_free(&arm_ccn_pmu_ida, ccn->dt.id); } static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn, diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index db670b265897..b65a7d9640e1 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -39,6 +39,24 @@ #include <asm/mmu.h> #include <asm/sysreg.h> +/* + * Cache if the event is allowed to trace Context information. + * This allows us to perform the check, i.e, perfmon_capable(), + * in the context of the event owner, once, during the event_init(). 
+ */ +#define SPE_PMU_HW_FLAGS_CX BIT(0) + +static void set_spe_event_has_cx(struct perf_event *event) +{ + if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable()) + event->hw.flags |= SPE_PMU_HW_FLAGS_CX; +} + +static bool get_spe_event_has_cx(struct perf_event *event) +{ + return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX); +} + #define ARM_SPE_BUF_PAD_BYTE 0 struct arm_spe_pmu_buf { @@ -272,7 +290,7 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event) if (!attr->exclude_kernel) reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT); - if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable()) + if (get_spe_event_has_cx(event)) reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT); return reg; @@ -709,10 +727,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event) !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT)) return -EOPNOTSUPP; + set_spe_event_has_cx(event); reg = arm_spe_event_to_pmscr(event); if (!perfmon_capable() && (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) | - BIT(SYS_PMSCR_EL1_CX_SHIFT) | BIT(SYS_PMSCR_EL1_PCT_SHIFT)))) return -EACCES; diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index b1b2a55de77f..8e058e08fe81 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -611,7 +611,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base, .dev = dev, }; - pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL); + pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL); return pmu->id; } @@ -765,7 +765,7 @@ ddr_perf_err: cpuhp_instance_err: cpuhp_remove_multi_state(pmu->cpuhp_state); cpuhp_state_err: - ida_simple_remove(&ddr_ida, pmu->id); + ida_free(&ddr_ida, pmu->id); dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret); return ret; } @@ -779,7 +779,7 @@ static int ddr_perf_remove(struct platform_device *pdev) perf_pmu_unregister(&pmu->pmu); - ida_simple_remove(&ddr_ida, pmu->id); + ida_free(&ddr_ida, pmu->id); return 0; } diff --git a/drivers/perf/hisilicon/Kconfig b/drivers/perf/hisilicon/Kconfig index 5546218b5598..171bfc1b6bc2 100644 --- a/drivers/perf/hisilicon/Kconfig +++ b/drivers/perf/hisilicon/Kconfig @@ -14,3 +14,13 @@ config HISI_PCIE_PMU RCiEP devices. Adds the PCIe PMU into perf events system for monitoring latency, bandwidth etc. + +config HNS3_PMU + tristate "HNS3 PERF PMU" + depends on ARM64 || COMPILE_TEST + depends on PCI + help + Provide support for HNS3 performance monitoring unit (PMU) RCiEP + devices. + Adds the HNS3 PMU into perf events system for monitoring latency, + bandwidth etc. 
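The arm_spe_pmu change above caches the result of perfmon_capable() in the event's hw.flags during event_init(), because event_init() runs in the context of the task that opens the event; the PMSCR value is computed later, possibly from another task's context, where re-evaluating the capability would test the wrong credentials. This is also why the -EACCES check in arm_spe_pmu_event_init() no longer tests the CX bit: set_spe_event_has_cx() only sets the cached flag when the owner passed the check. A minimal, self-contained C sketch of the pattern follows; all names here (privileged_stub, EVENT_FLAG_CX, the control-bit position) are illustrative stand-ins, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

#define EVENT_FLAG_CX (1u << 0)	/* plays the role of SPE_PMU_HW_FLAGS_CX */

struct event {
	unsigned int flags;	/* mirrors event->hw.flags */
};

/* Stand-in for perfmon_capable(); in the kernel its result depends on the caller. */
static bool privileged_stub(void)
{
	return true;
}

/* Init path: runs as the event owner, so check once and cache the result. */
static void event_init(struct event *ev)
{
	if (privileged_stub())
		ev->flags |= EVENT_FLAG_CX;
}

/* Hot path: may run in another task's context; only reads the cached bit. */
static unsigned long event_to_ctrl(const struct event *ev)
{
	unsigned long reg = 0;

	if (ev->flags & EVENT_FLAG_CX)
		reg |= 1ul << 10;	/* illustrative position of the CX control bit */

	return reg;
}

int main(void)
{
	struct event ev = { 0 };

	event_init(&ev);
	printf("ctrl=%#lx\n", event_to_ctrl(&ev));
	return 0;
}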
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile index 6be83517acaa..4d2c9abe3372 100644 --- a/drivers/perf/hisilicon/Makefile +++ b/drivers/perf/hisilicon/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \ hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o +obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index 62299ab5a9be..50d0c0a2f1fe 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -516,21 +516,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev) "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id, ddrc_pmu->index_id); - ddrc_pmu->pmu = (struct pmu) { - .name = name, - .module = THIS_MODULE, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = ddrc_pmu->pmu_events.attr_groups, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, - }; + hisi_pmu_init(&ddrc_pmu->pmu, name, ddrc_pmu->pmu_events.attr_groups, THIS_MODULE); ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1); if (ret) { diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index 393513150106..13017b3412a5 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -519,21 +519,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev) name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u", hha_pmu->sccl_id, hha_pmu->index_id); - hha_pmu->pmu = (struct pmu) { - .name = name, - .module = THIS_MODULE, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = hha_pmu->pmu_events.attr_groups, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, - }; + hisi_pmu_init(&hha_pmu->pmu, name, hha_pmu->pmu_events.attr_groups, THIS_MODULE); ret = perf_pmu_register(&hha_pmu->pmu, name, -1); if (ret) { diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 560ab964c8b5..2995f3630d49 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -557,21 +557,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev) */ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u", l3c_pmu->sccl_id, l3c_pmu->ccl_id); - l3c_pmu->pmu = (struct pmu) { - .name = name, - .module = THIS_MODULE, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = l3c_pmu->pmu_events.attr_groups, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, - }; + hisi_pmu_init(&l3c_pmu->pmu, name, l3c_pmu->pmu_events.attr_groups, THIS_MODULE); ret = 
perf_pmu_register(&l3c_pmu->pmu, name, -1); if (ret) { diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c index a0ee84d97c41..47d3cc9b6eec 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c @@ -412,21 +412,7 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev) return ret; } - pa_pmu->pmu = (struct pmu) { - .module = THIS_MODULE, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = pa_pmu->pmu_events.attr_groups, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, - }; - + hisi_pmu_init(&pa_pmu->pmu, name, pa_pmu->pmu_events.attr_groups, THIS_MODULE); ret = perf_pmu_register(&pa_pmu->pmu, name, -1); if (ret) { dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 980b9ee6eb14..fbc8a93d5eac 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -531,4 +531,23 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) } EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu); +void hisi_pmu_init(struct pmu *pmu, const char *name, + const struct attribute_group **attr_groups, struct module *module) +{ + pmu->name = name; + pmu->module = module; + pmu->task_ctx_nr = perf_invalid_context; + pmu->event_init = hisi_uncore_pmu_event_init; + pmu->pmu_enable = hisi_uncore_pmu_enable; + pmu->pmu_disable = hisi_uncore_pmu_disable; + pmu->add = hisi_uncore_pmu_add; + pmu->del = hisi_uncore_pmu_del; + pmu->start = hisi_uncore_pmu_start; + pmu->stop = hisi_uncore_pmu_stop; + pmu->read = hisi_uncore_pmu_read; + pmu->attr_groups = attr_groups; + pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE; +} +EXPORT_SYMBOL_GPL(hisi_pmu_init); + MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h index 96eeddad55ff..b59de33cd059 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.h +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h @@ -121,4 +121,6 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev, int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu, struct platform_device *pdev); +void hisi_pmu_init(struct pmu *pmu, const char *name, + const struct attribute_group **attr_groups, struct module *module); #endif /* __HISI_UNCORE_PMU_H__ */ diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c index 6aedc303ff56..b9c79f17230c 100644 --- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c @@ -445,20 +445,7 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev) return ret; } - sllc_pmu->pmu = (struct pmu) { - .module = THIS_MODULE, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = sllc_pmu->pmu_events.attr_groups, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, - }; + hisi_pmu_init(&sllc_pmu->pmu, name, sllc_pmu->pmu_events.attr_groups,
THIS_MODULE); ret = perf_pmu_register(&sllc_pmu->pmu, name, -1); if (ret) { diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c new file mode 100644 index 000000000000..e0457d84af6b --- /dev/null +++ b/drivers/perf/hisilicon/hns3_pmu.c @@ -0,0 +1,1671 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This driver adds support for HNS3 PMU iEP device. Related perf events are + * bandwidth, latency, packet rate, interrupt rate etc. + * + * Copyright (C) 2022 HiSilicon Limited + */ +#include <linux/bitfield.h> +#include <linux/bitmap.h> +#include <linux/bug.h> +#include <linux/cpuhotplug.h> +#include <linux/cpumask.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/io-64-nonatomic-hi-lo.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/pci-epf.h> +#include <linux/perf_event.h> +#include <linux/smp.h> + +/* registers offset address */ +#define HNS3_PMU_REG_GLOBAL_CTRL 0x0000 +#define HNS3_PMU_REG_CLOCK_FREQ 0x0020 +#define HNS3_PMU_REG_BDF 0x0fe0 +#define HNS3_PMU_REG_VERSION 0x0fe4 +#define HNS3_PMU_REG_DEVICE_ID 0x0fe8 + +#define HNS3_PMU_REG_EVENT_OFFSET 0x1000 +#define HNS3_PMU_REG_EVENT_SIZE 0x1000 +#define HNS3_PMU_REG_EVENT_CTRL_LOW 0x00 +#define HNS3_PMU_REG_EVENT_CTRL_HIGH 0x04 +#define HNS3_PMU_REG_EVENT_INTR_STATUS 0x08 +#define HNS3_PMU_REG_EVENT_INTR_MASK 0x0c +#define HNS3_PMU_REG_EVENT_COUNTER 0x10 +#define HNS3_PMU_REG_EVENT_EXT_COUNTER 0x18 +#define HNS3_PMU_REG_EVENT_QID_CTRL 0x28 +#define HNS3_PMU_REG_EVENT_QID_PARA 0x2c + +#define HNS3_PMU_FILTER_SUPPORT_GLOBAL BIT(0) +#define HNS3_PMU_FILTER_SUPPORT_PORT BIT(1) +#define HNS3_PMU_FILTER_SUPPORT_PORT_TC BIT(2) +#define HNS3_PMU_FILTER_SUPPORT_FUNC BIT(3) +#define HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE BIT(4) +#define HNS3_PMU_FILTER_SUPPORT_FUNC_INTR BIT(5) + +#define HNS3_PMU_FILTER_ALL_TC 0xf +#define HNS3_PMU_FILTER_ALL_QUEUE 0xffff + +#define HNS3_PMU_CTRL_SUBEVENT_S 4 +#define HNS3_PMU_CTRL_FILTER_MODE_S 24 + +#define HNS3_PMU_GLOBAL_START BIT(0) + +#define HNS3_PMU_EVENT_STATUS_RESET BIT(11) +#define HNS3_PMU_EVENT_EN BIT(12) +#define HNS3_PMU_EVENT_OVERFLOW_RESTART BIT(15) + +#define HNS3_PMU_QID_PARA_FUNC_S 0 +#define HNS3_PMU_QID_PARA_QUEUE_S 16 + +#define HNS3_PMU_QID_CTRL_REQ_ENABLE BIT(0) +#define HNS3_PMU_QID_CTRL_DONE BIT(1) +#define HNS3_PMU_QID_CTRL_MISS BIT(2) + +#define HNS3_PMU_INTR_MASK_OVERFLOW BIT(1) + +#define HNS3_PMU_MAX_HW_EVENTS 8 + +/* + * Each hardware event contains two registers (counter and ext_counter) for + * bandwidth, packet rate, latency and interrupt rate. These two registers will + * be triggered to run at the same when a hardware event is enabled. 
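The comment continuing below defines the counter/ext_counter pairing precisely; the practical upshot is that each metric is the ratio of the two registers, scaled by the timer clock read from the hw_clk_freq sysfs file it mentions. A hedged userspace-style sketch of the bandwidth arithmetic (names are illustrative, not driver code):

    #include <stdint.h>

    /* bytes/s over one sampling interval for a bandwidth event pair:
     * counter counts bytes, ext_counter counts timer cycles. */
    static uint64_t bw_bytes_per_sec(uint64_t byte_delta, uint64_t cycle_delta,
                                     uint32_t hw_clk_freq)
    {
            if (!cycle_delta)
                    return 0; /* idle interval; avoid dividing by zero */
            return byte_delta * hw_clk_freq / cycle_delta;
    }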
The meaning + * of counter and ext_counter of different event type are different, their + * meaning show as follow: + * + * +----------------+------------------+---------------+ + * | event type | counter | ext_counter | + * +----------------+------------------+---------------+ + * | bandwidth | byte number | cycle number | + * +----------------+------------------+---------------+ + * | packet rate | packet number | cycle number | + * +----------------+------------------+---------------+ + * | latency | cycle number | packet number | + * +----------------+------------------+---------------+ + * | interrupt rate | interrupt number | cycle number | + * +----------------+------------------+---------------+ + * + * The cycle number indicates increment of counter of hardware timer, the + * frequency of hardware timer can be read from hw_clk_freq file. + * + * Performance of each hardware event is calculated by: counter / ext_counter. + * + * Since processing of data is preferred to be done in userspace, we expose + * ext_counter as a separate event for userspace and use bit 16 to indicate it. + * For example, event 0x00001 and 0x10001 are actually one event for hardware + * because bit 0-15 are same. If the bit 16 of one event is 0 means to read + * counter register, otherwise means to read ext_counter register. + */ +/* bandwidth events */ +#define HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM 0x00001 +#define HNS3_PMU_EVT_BW_SSU_EGU_TIME 0x10001 +#define HNS3_PMU_EVT_BW_SSU_RPU_BYTE_NUM 0x00002 +#define HNS3_PMU_EVT_BW_SSU_RPU_TIME 0x10002 +#define HNS3_PMU_EVT_BW_SSU_ROCE_BYTE_NUM 0x00003 +#define HNS3_PMU_EVT_BW_SSU_ROCE_TIME 0x10003 +#define HNS3_PMU_EVT_BW_ROCE_SSU_BYTE_NUM 0x00004 +#define HNS3_PMU_EVT_BW_ROCE_SSU_TIME 0x10004 +#define HNS3_PMU_EVT_BW_TPU_SSU_BYTE_NUM 0x00005 +#define HNS3_PMU_EVT_BW_TPU_SSU_TIME 0x10005 +#define HNS3_PMU_EVT_BW_RPU_RCBRX_BYTE_NUM 0x00006 +#define HNS3_PMU_EVT_BW_RPU_RCBRX_TIME 0x10006 +#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_BYTE_NUM 0x00008 +#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_TIME 0x10008 +#define HNS3_PMU_EVT_BW_WR_FBD_BYTE_NUM 0x00009 +#define HNS3_PMU_EVT_BW_WR_FBD_TIME 0x10009 +#define HNS3_PMU_EVT_BW_WR_EBD_BYTE_NUM 0x0000a +#define HNS3_PMU_EVT_BW_WR_EBD_TIME 0x1000a +#define HNS3_PMU_EVT_BW_RD_FBD_BYTE_NUM 0x0000b +#define HNS3_PMU_EVT_BW_RD_FBD_TIME 0x1000b +#define HNS3_PMU_EVT_BW_RD_EBD_BYTE_NUM 0x0000c +#define HNS3_PMU_EVT_BW_RD_EBD_TIME 0x1000c +#define HNS3_PMU_EVT_BW_RD_PAY_M0_BYTE_NUM 0x0000d +#define HNS3_PMU_EVT_BW_RD_PAY_M0_TIME 0x1000d +#define HNS3_PMU_EVT_BW_RD_PAY_M1_BYTE_NUM 0x0000e +#define HNS3_PMU_EVT_BW_RD_PAY_M1_TIME 0x1000e +#define HNS3_PMU_EVT_BW_WR_PAY_M0_BYTE_NUM 0x0000f +#define HNS3_PMU_EVT_BW_WR_PAY_M0_TIME 0x1000f +#define HNS3_PMU_EVT_BW_WR_PAY_M1_BYTE_NUM 0x00010 +#define HNS3_PMU_EVT_BW_WR_PAY_M1_TIME 0x10010 + +/* packet rate events */ +#define HNS3_PMU_EVT_PPS_IGU_SSU_PACKET_NUM 0x00100 +#define HNS3_PMU_EVT_PPS_IGU_SSU_TIME 0x10100 +#define HNS3_PMU_EVT_PPS_SSU_EGU_PACKET_NUM 0x00101 +#define HNS3_PMU_EVT_PPS_SSU_EGU_TIME 0x10101 +#define HNS3_PMU_EVT_PPS_SSU_RPU_PACKET_NUM 0x00102 +#define HNS3_PMU_EVT_PPS_SSU_RPU_TIME 0x10102 +#define HNS3_PMU_EVT_PPS_SSU_ROCE_PACKET_NUM 0x00103 +#define HNS3_PMU_EVT_PPS_SSU_ROCE_TIME 0x10103 +#define HNS3_PMU_EVT_PPS_ROCE_SSU_PACKET_NUM 0x00104 +#define HNS3_PMU_EVT_PPS_ROCE_SSU_TIME 0x10104 +#define HNS3_PMU_EVT_PPS_TPU_SSU_PACKET_NUM 0x00105 +#define HNS3_PMU_EVT_PPS_TPU_SSU_TIME 0x10105 +#define HNS3_PMU_EVT_PPS_RPU_RCBRX_PACKET_NUM 0x00106 +#define HNS3_PMU_EVT_PPS_RPU_RCBRX_TIME 
0x10106 +#define HNS3_PMU_EVT_PPS_RCBTX_TPU_PACKET_NUM 0x00107 +#define HNS3_PMU_EVT_PPS_RCBTX_TPU_TIME 0x10107 +#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_PACKET_NUM 0x00108 +#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_TIME 0x10108 +#define HNS3_PMU_EVT_PPS_WR_FBD_PACKET_NUM 0x00109 +#define HNS3_PMU_EVT_PPS_WR_FBD_TIME 0x10109 +#define HNS3_PMU_EVT_PPS_WR_EBD_PACKET_NUM 0x0010a +#define HNS3_PMU_EVT_PPS_WR_EBD_TIME 0x1010a +#define HNS3_PMU_EVT_PPS_RD_FBD_PACKET_NUM 0x0010b +#define HNS3_PMU_EVT_PPS_RD_FBD_TIME 0x1010b +#define HNS3_PMU_EVT_PPS_RD_EBD_PACKET_NUM 0x0010c +#define HNS3_PMU_EVT_PPS_RD_EBD_TIME 0x1010c +#define HNS3_PMU_EVT_PPS_RD_PAY_M0_PACKET_NUM 0x0010d +#define HNS3_PMU_EVT_PPS_RD_PAY_M0_TIME 0x1010d +#define HNS3_PMU_EVT_PPS_RD_PAY_M1_PACKET_NUM 0x0010e +#define HNS3_PMU_EVT_PPS_RD_PAY_M1_TIME 0x1010e +#define HNS3_PMU_EVT_PPS_WR_PAY_M0_PACKET_NUM 0x0010f +#define HNS3_PMU_EVT_PPS_WR_PAY_M0_TIME 0x1010f +#define HNS3_PMU_EVT_PPS_WR_PAY_M1_PACKET_NUM 0x00110 +#define HNS3_PMU_EVT_PPS_WR_PAY_M1_TIME 0x10110 +#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_PACKET_NUM 0x00111 +#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_TIME 0x10111 +#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_PACKET_NUM 0x00112 +#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_TIME 0x10112 + +/* latency events */ +#define HNS3_PMU_EVT_DLY_TX_PUSH_TIME 0x00202 +#define HNS3_PMU_EVT_DLY_TX_PUSH_PACKET_NUM 0x10202 +#define HNS3_PMU_EVT_DLY_TX_TIME 0x00204 +#define HNS3_PMU_EVT_DLY_TX_PACKET_NUM 0x10204 +#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_TIME 0x00206 +#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_PACKET_NUM 0x10206 +#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_TIME 0x00207 +#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_PACKET_NUM 0x10207 +#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_TIME 0x00208 +#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_PACKET_NUM 0x10208 +#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_TIME 0x00209 +#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_PACKET_NUM 0x10209 +#define HNS3_PMU_EVT_DLY_RPU_TIME 0x0020e +#define HNS3_PMU_EVT_DLY_RPU_PACKET_NUM 0x1020e +#define HNS3_PMU_EVT_DLY_TPU_TIME 0x0020f +#define HNS3_PMU_EVT_DLY_TPU_PACKET_NUM 0x1020f +#define HNS3_PMU_EVT_DLY_RPE_TIME 0x00210 +#define HNS3_PMU_EVT_DLY_RPE_PACKET_NUM 0x10210 +#define HNS3_PMU_EVT_DLY_TPE_TIME 0x00211 +#define HNS3_PMU_EVT_DLY_TPE_PACKET_NUM 0x10211 +#define HNS3_PMU_EVT_DLY_TPE_PUSH_TIME 0x00212 +#define HNS3_PMU_EVT_DLY_TPE_PUSH_PACKET_NUM 0x10212 +#define HNS3_PMU_EVT_DLY_WR_FBD_TIME 0x00213 +#define HNS3_PMU_EVT_DLY_WR_FBD_PACKET_NUM 0x10213 +#define HNS3_PMU_EVT_DLY_WR_EBD_TIME 0x00214 +#define HNS3_PMU_EVT_DLY_WR_EBD_PACKET_NUM 0x10214 +#define HNS3_PMU_EVT_DLY_RD_FBD_TIME 0x00215 +#define HNS3_PMU_EVT_DLY_RD_FBD_PACKET_NUM 0x10215 +#define HNS3_PMU_EVT_DLY_RD_EBD_TIME 0x00216 +#define HNS3_PMU_EVT_DLY_RD_EBD_PACKET_NUM 0x10216 +#define HNS3_PMU_EVT_DLY_RD_PAY_M0_TIME 0x00217 +#define HNS3_PMU_EVT_DLY_RD_PAY_M0_PACKET_NUM 0x10217 +#define HNS3_PMU_EVT_DLY_RD_PAY_M1_TIME 0x00218 +#define HNS3_PMU_EVT_DLY_RD_PAY_M1_PACKET_NUM 0x10218 +#define HNS3_PMU_EVT_DLY_WR_PAY_M0_TIME 0x00219 +#define HNS3_PMU_EVT_DLY_WR_PAY_M0_PACKET_NUM 0x10219 +#define HNS3_PMU_EVT_DLY_WR_PAY_M1_TIME 0x0021a +#define HNS3_PMU_EVT_DLY_WR_PAY_M1_PACKET_NUM 0x1021a +#define HNS3_PMU_EVT_DLY_MSIX_WRITE_TIME 0x0021c +#define HNS3_PMU_EVT_DLY_MSIX_WRITE_PACKET_NUM 0x1021c + +/* interrupt rate events */ +#define HNS3_PMU_EVT_PPS_MSIX_NIC_INTR_NUM 0x00300 +#define HNS3_PMU_EVT_PPS_MSIX_NIC_TIME 0x10300 + +/* filter mode supported by each bandwidth event */ +#define HNS3_PMU_FILTER_BW_SSU_EGU 0x07 +#define HNS3_PMU_FILTER_BW_SSU_RPU 0x1f +#define 
HNS3_PMU_FILTER_BW_SSU_ROCE 0x0f +#define HNS3_PMU_FILTER_BW_ROCE_SSU 0x0f +#define HNS3_PMU_FILTER_BW_TPU_SSU 0x1f +#define HNS3_PMU_FILTER_BW_RPU_RCBRX 0x11 +#define HNS3_PMU_FILTER_BW_RCBTX_TXSCH 0x11 +#define HNS3_PMU_FILTER_BW_WR_FBD 0x1b +#define HNS3_PMU_FILTER_BW_WR_EBD 0x11 +#define HNS3_PMU_FILTER_BW_RD_FBD 0x01 +#define HNS3_PMU_FILTER_BW_RD_EBD 0x1b +#define HNS3_PMU_FILTER_BW_RD_PAY_M0 0x01 +#define HNS3_PMU_FILTER_BW_RD_PAY_M1 0x01 +#define HNS3_PMU_FILTER_BW_WR_PAY_M0 0x01 +#define HNS3_PMU_FILTER_BW_WR_PAY_M1 0x01 + +/* filter mode supported by each packet rate event */ +#define HNS3_PMU_FILTER_PPS_IGU_SSU 0x07 +#define HNS3_PMU_FILTER_PPS_SSU_EGU 0x07 +#define HNS3_PMU_FILTER_PPS_SSU_RPU 0x1f +#define HNS3_PMU_FILTER_PPS_SSU_ROCE 0x0f +#define HNS3_PMU_FILTER_PPS_ROCE_SSU 0x0f +#define HNS3_PMU_FILTER_PPS_TPU_SSU 0x1f +#define HNS3_PMU_FILTER_PPS_RPU_RCBRX 0x11 +#define HNS3_PMU_FILTER_PPS_RCBTX_TPU 0x1f +#define HNS3_PMU_FILTER_PPS_RCBTX_TXSCH 0x11 +#define HNS3_PMU_FILTER_PPS_WR_FBD 0x1b +#define HNS3_PMU_FILTER_PPS_WR_EBD 0x11 +#define HNS3_PMU_FILTER_PPS_RD_FBD 0x01 +#define HNS3_PMU_FILTER_PPS_RD_EBD 0x1b +#define HNS3_PMU_FILTER_PPS_RD_PAY_M0 0x01 +#define HNS3_PMU_FILTER_PPS_RD_PAY_M1 0x01 +#define HNS3_PMU_FILTER_PPS_WR_PAY_M0 0x01 +#define HNS3_PMU_FILTER_PPS_WR_PAY_M1 0x01 +#define HNS3_PMU_FILTER_PPS_NICROH_TX_PRE 0x01 +#define HNS3_PMU_FILTER_PPS_NICROH_RX_PRE 0x01 + +/* filter mode supported by each latency event */ +#define HNS3_PMU_FILTER_DLY_TX_PUSH 0x01 +#define HNS3_PMU_FILTER_DLY_TX 0x01 +#define HNS3_PMU_FILTER_DLY_SSU_TX_NIC 0x07 +#define HNS3_PMU_FILTER_DLY_SSU_TX_ROCE 0x07 +#define HNS3_PMU_FILTER_DLY_SSU_RX_NIC 0x07 +#define HNS3_PMU_FILTER_DLY_SSU_RX_ROCE 0x07 +#define HNS3_PMU_FILTER_DLY_RPU 0x11 +#define HNS3_PMU_FILTER_DLY_TPU 0x1f +#define HNS3_PMU_FILTER_DLY_RPE 0x01 +#define HNS3_PMU_FILTER_DLY_TPE 0x0b +#define HNS3_PMU_FILTER_DLY_TPE_PUSH 0x1b +#define HNS3_PMU_FILTER_DLY_WR_FBD 0x1b +#define HNS3_PMU_FILTER_DLY_WR_EBD 0x11 +#define HNS3_PMU_FILTER_DLY_RD_FBD 0x01 +#define HNS3_PMU_FILTER_DLY_RD_EBD 0x1b +#define HNS3_PMU_FILTER_DLY_RD_PAY_M0 0x01 +#define HNS3_PMU_FILTER_DLY_RD_PAY_M1 0x01 +#define HNS3_PMU_FILTER_DLY_WR_PAY_M0 0x01 +#define HNS3_PMU_FILTER_DLY_WR_PAY_M1 0x01 +#define HNS3_PMU_FILTER_DLY_MSIX_WRITE 0x01 + +/* filter mode supported by each interrupt rate event */ +#define HNS3_PMU_FILTER_INTR_MSIX_NIC 0x01 + +enum hns3_pmu_hw_filter_mode { + HNS3_PMU_HW_FILTER_GLOBAL, + HNS3_PMU_HW_FILTER_PORT, + HNS3_PMU_HW_FILTER_PORT_TC, + HNS3_PMU_HW_FILTER_FUNC, + HNS3_PMU_HW_FILTER_FUNC_QUEUE, + HNS3_PMU_HW_FILTER_FUNC_INTR, +}; + +struct hns3_pmu_event_attr { + u32 event; + u16 filter_support; +}; + +struct hns3_pmu { + struct perf_event *hw_events[HNS3_PMU_MAX_HW_EVENTS]; + struct hlist_node node; + struct pci_dev *pdev; + struct pmu pmu; + void __iomem *base; + int irq; + int on_cpu; + u32 identifier; + u32 hw_clk_freq; /* hardware clock frequency of PMU */ + /* maximum and minimum bdf allowed by PMU */ + u16 bdf_min; + u16 bdf_max; +}; + +#define to_hns3_pmu(p) (container_of((p), struct hns3_pmu, pmu)) + +#define GET_PCI_DEVFN(bdf) ((bdf) & 0xff) + +#define FILTER_CONDITION_PORT(port) ((1 << (port)) & 0xff) +#define FILTER_CONDITION_PORT_TC(port, tc) (((port) << 3) | ((tc) & 0x07)) +#define FILTER_CONDITION_FUNC_INTR(func, intr) (((intr) << 8) | (func)) + +#define HNS3_PMU_FILTER_ATTR(_name, _config, _start, _end) \ + static inline u64 hns3_pmu_get_##_name(struct perf_event *event) \ + { \ + return FIELD_GET(GENMASK_ULL(_end, 
_start), \ + event->attr._config); \ + } + +HNS3_PMU_FILTER_ATTR(subevent, config, 0, 7); +HNS3_PMU_FILTER_ATTR(event_type, config, 8, 15); +HNS3_PMU_FILTER_ATTR(ext_counter_used, config, 16, 16); +HNS3_PMU_FILTER_ATTR(port, config1, 0, 3); +HNS3_PMU_FILTER_ATTR(tc, config1, 4, 7); +HNS3_PMU_FILTER_ATTR(bdf, config1, 8, 23); +HNS3_PMU_FILTER_ATTR(queue, config1, 24, 39); +HNS3_PMU_FILTER_ATTR(intr, config1, 40, 51); +HNS3_PMU_FILTER_ATTR(global, config1, 52, 52); + +#define HNS3_BW_EVT_BYTE_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_BW_##_name##_BYTE_NUM, \ + HNS3_PMU_FILTER_BW_##_name}) +#define HNS3_BW_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_BW_##_name##_TIME, \ + HNS3_PMU_FILTER_BW_##_name}) +#define HNS3_PPS_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_PACKET_NUM, \ + HNS3_PMU_FILTER_PPS_##_name}) +#define HNS3_PPS_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_TIME, \ + HNS3_PMU_FILTER_PPS_##_name}) +#define HNS3_DLY_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_DLY_##_name##_TIME, \ + HNS3_PMU_FILTER_DLY_##_name}) +#define HNS3_DLY_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_DLY_##_name##_PACKET_NUM, \ + HNS3_PMU_FILTER_DLY_##_name}) +#define HNS3_INTR_EVT_INTR_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_INTR_NUM, \ + HNS3_PMU_FILTER_INTR_##_name}) +#define HNS3_INTR_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_TIME, \ + HNS3_PMU_FILTER_INTR_##_name}) + +static ssize_t hns3_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sysfs_emit(buf, "%s\n", (char *)eattr->var); +} + +static ssize_t hns3_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns3_pmu_event_attr *event; + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + event = eattr->var; + + return sysfs_emit(buf, "config=0x%x\n", event->event); +} + +static ssize_t hns3_pmu_filter_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu_event_attr *event; + struct dev_ext_attribute *eattr; + int len; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + event = eattr->var; + + len = sysfs_emit_at(buf, 0, "filter mode supported: "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL) + len += sysfs_emit_at(buf, len, "global "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT) + len += sysfs_emit_at(buf, len, "port "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC) + len += sysfs_emit_at(buf, len, "port-tc "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC) + len += sysfs_emit_at(buf, len, "func "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE) + len += sysfs_emit_at(buf, len, "func-queue "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR) + len += sysfs_emit_at(buf, len, "func-intr "); + + len += sysfs_emit_at(buf, len, "\n"); + + return len; +} + +#define HNS3_PMU_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]) { \ + { __ATTR(_name, 0444, _func, NULL), (void *)_config } \ + })[0].attr.attr) + +#define HNS3_PMU_FORMAT_ATTR(_name, _format) \ + HNS3_PMU_ATTR(_name, hns3_pmu_format_show, (void *)_format) +#define 
HNS3_PMU_EVENT_ATTR(_name, _event) \ + HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event) +#define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \ + HNS3_PMU_ATTR(_name, hns3_pmu_filter_mode_show, (void *)_event) + +#define HNS3_PMU_BW_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro)) +#define HNS3_PMU_PPS_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro)) +#define HNS3_PMU_DLY_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro)) +#define HNS3_PMU_INTR_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro)) + +#define HNS3_PMU_BW_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro)) +#define HNS3_PMU_PPS_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro)) +#define HNS3_PMU_DLY_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro)) +#define HNS3_PMU_INTR_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro)) + +static u8 hns3_pmu_hw_filter_modes[] = { + HNS3_PMU_HW_FILTER_GLOBAL, + HNS3_PMU_HW_FILTER_PORT, + HNS3_PMU_HW_FILTER_PORT_TC, + HNS3_PMU_HW_FILTER_FUNC, + HNS3_PMU_HW_FILTER_FUNC_QUEUE, + HNS3_PMU_HW_FILTER_FUNC_INTR, +}; + +#define HNS3_PMU_SET_HW_FILTER(_hwc, _mode) \ + ((_hwc)->addr_filters = (void *)&hns3_pmu_hw_filter_modes[(_mode)]) + +static ssize_t identifier_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "0x%x\n", hns3_pmu->identifier); +} +static DEVICE_ATTR_RO(identifier); + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%d\n", hns3_pmu->on_cpu); +} +static DEVICE_ATTR_RO(cpumask); + +static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + u16 bdf = hns3_pmu->bdf_min; + + return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf), + PCI_SLOT(bdf), PCI_FUNC(bdf)); +} +static DEVICE_ATTR_RO(bdf_min); + +static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + u16 bdf = hns3_pmu->bdf_max; + + return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf), + PCI_SLOT(bdf), PCI_FUNC(bdf)); +} +static DEVICE_ATTR_RO(bdf_max); + +static ssize_t hw_clk_freq_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%u\n", hns3_pmu->hw_clk_freq); +} +static DEVICE_ATTR_RO(hw_clk_freq); + +static struct 
attribute *hns3_pmu_events_attr[] = { + /* bandwidth events */ + HNS3_PMU_BW_EVT_PAIR(bw_ssu_egu, SSU_EGU), + HNS3_PMU_BW_EVT_PAIR(bw_ssu_rpu, SSU_RPU), + HNS3_PMU_BW_EVT_PAIR(bw_ssu_roce, SSU_ROCE), + HNS3_PMU_BW_EVT_PAIR(bw_roce_ssu, ROCE_SSU), + HNS3_PMU_BW_EVT_PAIR(bw_tpu_ssu, TPU_SSU), + HNS3_PMU_BW_EVT_PAIR(bw_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_BW_EVT_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_BW_EVT_PAIR(bw_wr_fbd, WR_FBD), + HNS3_PMU_BW_EVT_PAIR(bw_wr_ebd, WR_EBD), + HNS3_PMU_BW_EVT_PAIR(bw_rd_fbd, RD_FBD), + HNS3_PMU_BW_EVT_PAIR(bw_rd_ebd, RD_EBD), + HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m1, WR_PAY_M1), + + /* packet rate events */ + HNS3_PMU_PPS_EVT_PAIR(pps_igu_ssu, IGU_SSU), + HNS3_PMU_PPS_EVT_PAIR(pps_ssu_egu, SSU_EGU), + HNS3_PMU_PPS_EVT_PAIR(pps_ssu_rpu, SSU_RPU), + HNS3_PMU_PPS_EVT_PAIR(pps_ssu_roce, SSU_ROCE), + HNS3_PMU_PPS_EVT_PAIR(pps_roce_ssu, ROCE_SSU), + HNS3_PMU_PPS_EVT_PAIR(pps_tpu_ssu, TPU_SSU), + HNS3_PMU_PPS_EVT_PAIR(pps_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_tpu, RCBTX_TPU), + HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_fbd, WR_FBD), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_ebd, WR_EBD), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_fbd, RD_FBD), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_ebd, RD_EBD), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE), + HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE), + + /* latency events */ + HNS3_PMU_DLY_EVT_PAIR(dly_tx_push_to_mac, TX_PUSH), + HNS3_PMU_DLY_EVT_PAIR(dly_tx_normal_to_mac, TX), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE), + HNS3_PMU_DLY_EVT_PAIR(dly_rpu, RPU), + HNS3_PMU_DLY_EVT_PAIR(dly_tpu, TPU), + HNS3_PMU_DLY_EVT_PAIR(dly_rpe, RPE), + HNS3_PMU_DLY_EVT_PAIR(dly_tpe_normal, TPE), + HNS3_PMU_DLY_EVT_PAIR(dly_tpe_push, TPE_PUSH), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_fbd, WR_FBD), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_ebd, WR_EBD), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_fbd, RD_FBD), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_ebd, RD_EBD), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_DLY_EVT_PAIR(dly_msix_write, MSIX_WRITE), + + /* interrupt rate events */ + HNS3_PMU_INTR_EVT_PAIR(pps_intr_msix_nic, MSIX_NIC), + + NULL +}; + +static struct attribute *hns3_pmu_filter_mode_attr[] = { + /* bandwidth events */ + HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_egu, SSU_EGU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_rpu, SSU_RPU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_roce, SSU_ROCE), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_roce_ssu, ROCE_SSU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_tpu_ssu, TPU_SSU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_fbd, WR_FBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_ebd, WR_EBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_fbd, RD_FBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_ebd, RD_EBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m0, RD_PAY_M0), + 
HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m1, WR_PAY_M1), + + /* packet rate events */ + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_igu_ssu, IGU_SSU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_egu, SSU_EGU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_rpu, SSU_RPU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_roce, SSU_ROCE), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_roce_ssu, ROCE_SSU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_tpu_ssu, TPU_SSU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_tpu, RCBTX_TPU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_fbd, WR_FBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_ebd, WR_EBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_fbd, RD_FBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_ebd, RD_EBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE), + + /* latency events */ + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_push_to_mac, TX_PUSH), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_normal_to_mac, TX), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpu, RPU), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpu, TPU), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpe, RPE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_normal, TPE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_push, TPE_PUSH), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_fbd, WR_FBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_ebd, WR_EBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_fbd, RD_FBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_ebd, RD_EBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_msix_write, MSIX_WRITE), + + /* interrupt rate events */ + HNS3_PMU_INTR_FLT_MODE_PAIR(pps_intr_msix_nic, MSIX_NIC), + + NULL +}; + +static struct attribute_group hns3_pmu_events_group = { + .name = "events", + .attrs = hns3_pmu_events_attr, +}; + +static struct attribute_group hns3_pmu_filter_mode_group = { + .name = "filtermode", + .attrs = hns3_pmu_filter_mode_attr, +}; + +static struct attribute *hns3_pmu_format_attr[] = { + HNS3_PMU_FORMAT_ATTR(subevent, "config:0-7"), + HNS3_PMU_FORMAT_ATTR(event_type, "config:8-15"), + HNS3_PMU_FORMAT_ATTR(ext_counter_used, "config:16"), + HNS3_PMU_FORMAT_ATTR(port, "config1:0-3"), + HNS3_PMU_FORMAT_ATTR(tc, "config1:4-7"), + HNS3_PMU_FORMAT_ATTR(bdf, "config1:8-23"), + HNS3_PMU_FORMAT_ATTR(queue, "config1:24-39"), + HNS3_PMU_FORMAT_ATTR(intr, "config1:40-51"), + HNS3_PMU_FORMAT_ATTR(global, "config1:52"), + NULL +}; + +static struct attribute_group hns3_pmu_format_group = { + .name = "format", + .attrs = hns3_pmu_format_attr, +}; + +static struct attribute *hns3_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group hns3_pmu_cpumask_attr_group = { + .attrs = hns3_pmu_cpumask_attrs, +}; + +static struct attribute 
*hns3_pmu_identifier_attrs[] = { + &dev_attr_identifier.attr, + NULL +}; + +static struct attribute_group hns3_pmu_identifier_attr_group = { + .attrs = hns3_pmu_identifier_attrs, +}; + +static struct attribute *hns3_pmu_bdf_range_attrs[] = { + &dev_attr_bdf_min.attr, + &dev_attr_bdf_max.attr, + NULL +}; + +static struct attribute_group hns3_pmu_bdf_range_attr_group = { + .attrs = hns3_pmu_bdf_range_attrs, +}; + +static struct attribute *hns3_pmu_hw_clk_freq_attrs[] = { + &dev_attr_hw_clk_freq.attr, + NULL +}; + +static struct attribute_group hns3_pmu_hw_clk_freq_attr_group = { + .attrs = hns3_pmu_hw_clk_freq_attrs, +}; + +static const struct attribute_group *hns3_pmu_attr_groups[] = { + &hns3_pmu_events_group, + &hns3_pmu_filter_mode_group, + &hns3_pmu_format_group, + &hns3_pmu_cpumask_attr_group, + &hns3_pmu_identifier_attr_group, + &hns3_pmu_bdf_range_attr_group, + &hns3_pmu_hw_clk_freq_attr_group, + NULL +}; + +static u32 hns3_pmu_get_event(struct perf_event *event) +{ + return hns3_pmu_get_ext_counter_used(event) << 16 | + hns3_pmu_get_event_type(event) << 8 | + hns3_pmu_get_subevent(event); +} + +static u32 hns3_pmu_get_real_event(struct perf_event *event) +{ + return hns3_pmu_get_event_type(event) << 8 | + hns3_pmu_get_subevent(event); +} + +static u32 hns3_pmu_get_offset(u32 offset, u32 idx) +{ + return offset + HNS3_PMU_REG_EVENT_OFFSET + + HNS3_PMU_REG_EVENT_SIZE * idx; +} + +static u32 hns3_pmu_readl(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + return readl(hns3_pmu->base + offset); +} + +static void hns3_pmu_writel(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx, + u32 val) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + writel(val, hns3_pmu->base + offset); +} + +static u64 hns3_pmu_readq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + return readq(hns3_pmu->base + offset); +} + +static void hns3_pmu_writeq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx, + u64 val) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + writeq(val, hns3_pmu->base + offset); +} + +static bool hns3_pmu_cmp_event(struct perf_event *target, + struct perf_event *event) +{ + return hns3_pmu_get_real_event(target) == hns3_pmu_get_real_event(event); +} + +static int hns3_pmu_find_related_event_idx(struct hns3_pmu *hns3_pmu, + struct perf_event *event) +{ + struct perf_event *sibling; + int hw_event_used = 0; + int idx; + + for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { + sibling = hns3_pmu->hw_events[idx]; + if (!sibling) + continue; + + hw_event_used++; + + if (!hns3_pmu_cmp_event(sibling, event)) + continue; + + /* Related events is used in group */ + if (sibling->group_leader == event->group_leader) + return idx; + } + + /* No related event and all hardware events are used up */ + if (hw_event_used >= HNS3_PMU_MAX_HW_EVENTS) + return -EBUSY; + + /* No related event and there is extra hardware events can be use */ + return -ENOENT; +} + +static int hns3_pmu_get_event_idx(struct hns3_pmu *hns3_pmu) +{ + int idx; + + for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { + if (!hns3_pmu->hw_events[idx]) + return idx; + } + + return -EBUSY; +} + +static bool hns3_pmu_valid_bdf(struct hns3_pmu *hns3_pmu, u16 bdf) +{ + struct pci_dev *pdev; + + if (bdf < hns3_pmu->bdf_min || bdf > hns3_pmu->bdf_max) { + pci_err(hns3_pmu->pdev, "Invalid EP device: %#x!\n", bdf); + return false; + } + + pdev = 
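The readl/writel/readq/writeq wrappers above rely on a simple window layout: each of the eight hardware events owns a 0x1000-byte register block starting at HNS3_PMU_REG_EVENT_OFFSET, so an event register lives at base + 0x1000 + 0x1000 * idx + reg. A worked example mirroring the constants (standalone, not driver code):

    #include <stdint.h>
    #include <assert.h>

    #define EVENT_OFFSET 0x1000u /* HNS3_PMU_REG_EVENT_OFFSET */
    #define EVENT_SIZE   0x1000u /* HNS3_PMU_REG_EVENT_SIZE */
    #define EVENT_COUNTER  0x10u /* HNS3_PMU_REG_EVENT_COUNTER */

    static uint32_t event_reg_offset(uint32_t reg, uint32_t idx)
    {
            return reg + EVENT_OFFSET + EVENT_SIZE * idx;
    }

    int main(void)
    {
            /* 64-bit counter of hardware event slot 3 sits at 0x4010. */
            assert(event_reg_offset(EVENT_COUNTER, 3) == 0x4010);
            return 0;
    }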
pci_get_domain_bus_and_slot(pci_domain_nr(hns3_pmu->pdev->bus), + PCI_BUS_NUM(bdf), + GET_PCI_DEVFN(bdf)); + if (!pdev) { + pci_err(hns3_pmu->pdev, "Nonexistent EP device: %#x!\n", bdf); + return false; + } + + pci_dev_put(pdev); + return true; +} + +static void hns3_pmu_set_qid_para(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf, + u16 queue) +{ + u32 val; + + val = GET_PCI_DEVFN(bdf); + val |= (u32)queue << HNS3_PMU_QID_PARA_QUEUE_S; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_PARA, idx, val); +} + +static bool hns3_pmu_qid_req_start(struct hns3_pmu *hns3_pmu, u32 idx) +{ + bool queue_id_valid = false; + u32 reg_qid_ctrl, val; + int err; + + /* enable queue id request */ + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, + HNS3_PMU_QID_CTRL_REQ_ENABLE); + + reg_qid_ctrl = hns3_pmu_get_offset(HNS3_PMU_REG_EVENT_QID_CTRL, idx); + err = readl_poll_timeout(hns3_pmu->base + reg_qid_ctrl, val, + val & HNS3_PMU_QID_CTRL_DONE, 1, 1000); + if (err == -ETIMEDOUT) { + pci_err(hns3_pmu->pdev, "QID request timeout!\n"); + goto out; + } + + queue_id_valid = !(val & HNS3_PMU_QID_CTRL_MISS); + +out: + /* disable qid request and clear status */ + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, 0); + + return queue_id_valid; +} + +static bool hns3_pmu_valid_queue(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf, + u16 queue) +{ + hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue); + + return hns3_pmu_qid_req_start(hns3_pmu, idx); +} + +static struct hns3_pmu_event_attr *hns3_pmu_get_pmu_event(u32 event) +{ + struct hns3_pmu_event_attr *pmu_event; + struct dev_ext_attribute *eattr; + struct device_attribute *dattr; + struct attribute *attr; + u32 i; + + for (i = 0; i < ARRAY_SIZE(hns3_pmu_events_attr) - 1; i++) { + attr = hns3_pmu_events_attr[i]; + dattr = container_of(attr, struct device_attribute, attr); + eattr = container_of(dattr, struct dev_ext_attribute, attr); + pmu_event = eattr->var; + + if (event == pmu_event->event) + return pmu_event; + } + + return NULL; +} + +static int hns3_pmu_set_func_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu) +{ + struct hw_perf_event *hwc = &event->hw; + u16 bdf = hns3_pmu_get_bdf(event); + + if (!hns3_pmu_valid_bdf(hns3_pmu, bdf)) + return -ENOENT; + + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC); + + return 0; +} + +static int hns3_pmu_set_func_queue_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu) +{ + u16 queue_id = hns3_pmu_get_queue(event); + struct hw_perf_event *hwc = &event->hw; + u16 bdf = hns3_pmu_get_bdf(event); + + if (!hns3_pmu_valid_bdf(hns3_pmu, bdf)) + return -ENOENT; + + if (!hns3_pmu_valid_queue(hns3_pmu, hwc->idx, bdf, queue_id)) { + pci_err(hns3_pmu->pdev, "Invalid queue: %u\n", queue_id); + return -ENOENT; + } + + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_QUEUE); + + return 0; +} + +static bool +hns3_pmu_is_enabled_global_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u8 global = hns3_pmu_get_global(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)) + return false; + + return global; +} + +static bool hns3_pmu_is_enabled_func_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u16 queue_id = hns3_pmu_get_queue(event); + u16 bdf = hns3_pmu_get_bdf(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)) + return false; + else if (queue_id != HNS3_PMU_FILTER_ALL_QUEUE) + return false; + + return bdf; +} + +static bool +hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event, + 
struct hns3_pmu_event_attr *pmu_event) +{ + u16 queue_id = hns3_pmu_get_queue(event); + u16 bdf = hns3_pmu_get_bdf(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)) + return false; + else if (queue_id == HNS3_PMU_FILTER_ALL_QUEUE) + return false; + + return bdf; +} + +static bool hns3_pmu_is_enabled_port_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u8 tc_id = hns3_pmu_get_tc(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)) + return false; + + return tc_id == HNS3_PMU_FILTER_ALL_TC; +} + +static bool +hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u8 tc_id = hns3_pmu_get_tc(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)) + return false; + + return tc_id != HNS3_PMU_FILTER_ALL_TC; +} + +static bool +hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu, + struct hns3_pmu_event_attr *pmu_event) +{ + u16 bdf = hns3_pmu_get_bdf(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)) + return false; + + return hns3_pmu_valid_bdf(hns3_pmu, bdf); +} + +static int hns3_pmu_select_filter_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu) +{ + u32 event_id = hns3_pmu_get_event(event); + struct hw_perf_event *hwc = &event->hw; + struct hns3_pmu_event_attr *pmu_event; + + pmu_event = hns3_pmu_get_pmu_event(event_id); + if (!pmu_event) { + pci_err(hns3_pmu->pdev, "Invalid pmu event\n"); + return -ENOENT; + } + + if (hns3_pmu_is_enabled_global_mode(event, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_GLOBAL); + return 0; + } + + if (hns3_pmu_is_enabled_func_mode(event, pmu_event)) + return hns3_pmu_set_func_mode(event, hns3_pmu); + + if (hns3_pmu_is_enabled_func_queue_mode(event, pmu_event)) + return hns3_pmu_set_func_queue_mode(event, hns3_pmu); + + if (hns3_pmu_is_enabled_port_mode(event, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT); + return 0; + } + + if (hns3_pmu_is_enabled_port_tc_mode(event, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT_TC); + return 0; + } + + if (hns3_pmu_is_enabled_func_intr_mode(event, hns3_pmu, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_INTR); + return 0; + } + + return -ENOENT; +} + +static bool hns3_pmu_validate_event_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct perf_event *event_group[HNS3_PMU_MAX_HW_EVENTS]; + int counters = 1; + int num; + + event_group[0] = leader; + if (!is_software_event(leader)) { + if (leader->pmu != event->pmu) + return false; + + if (leader != event && !hns3_pmu_cmp_event(leader, event)) + event_group[counters++] = event; + } + + for_each_sibling_event(sibling, event->group_leader) { + if (is_software_event(sibling)) + continue; + + if (sibling->pmu != event->pmu) + return false; + + for (num = 0; num < counters; num++) { + if (hns3_pmu_cmp_event(event_group[num], sibling)) + break; + } + + if (num == counters) + event_group[counters++] = sibling; + } + + return counters <= HNS3_PMU_MAX_HW_EVENTS; +} + +static u32 hns3_pmu_get_filter_condition(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u16 intr_id = hns3_pmu_get_intr(event); + u8 port_id = hns3_pmu_get_port(event); + u16 bdf = hns3_pmu_get_bdf(event); + u8 tc_id = hns3_pmu_get_tc(event); + u8 filter_mode; + + filter_mode = *(u8 *)hwc->addr_filters; + switch (filter_mode) { + 
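The switch that follows packs the chosen filter into the high control word using the FILTER_CONDITION_* macros defined earlier. A worked example of the bit packing (standalone, for illustration only):

    #include <assert.h>

    #define COND_PORT(port)            ((1u << (port)) & 0xff) /* one-hot */
    #define COND_PORT_TC(port, tc)     (((port) << 3) | ((tc) & 0x07))
    #define COND_FUNC_INTR(func, intr) (((intr) << 8) | (func))

    int main(void)
    {
            assert(COND_PORT(2) == 0x04);             /* port 2 -> bit 2 */
            assert(COND_PORT_TC(2, 5) == 0x15);       /* port above 3-bit tc */
            assert(COND_FUNC_INTR(0x21, 3) == 0x321); /* intr above devfn */
            return 0;
    }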
case HNS3_PMU_HW_FILTER_PORT: + return FILTER_CONDITION_PORT(port_id); + case HNS3_PMU_HW_FILTER_PORT_TC: + return FILTER_CONDITION_PORT_TC(port_id, tc_id); + case HNS3_PMU_HW_FILTER_FUNC: + case HNS3_PMU_HW_FILTER_FUNC_QUEUE: + return GET_PCI_DEVFN(bdf); + case HNS3_PMU_HW_FILTER_FUNC_INTR: + return FILTER_CONDITION_FUNC_INTR(GET_PCI_DEVFN(bdf), intr_id); + default: + break; + } + + return 0; +} + +static void hns3_pmu_config_filter(struct perf_event *event) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + u8 event_type = hns3_pmu_get_event_type(event); + u8 subevent_id = hns3_pmu_get_subevent(event); + u16 queue_id = hns3_pmu_get_queue(event); + struct hw_perf_event *hwc = &event->hw; + u8 filter_mode = *(u8 *)hwc->addr_filters; + u16 bdf = hns3_pmu_get_bdf(event); + u32 idx = hwc->idx; + u32 val; + + val = event_type; + val |= subevent_id << HNS3_PMU_CTRL_SUBEVENT_S; + val |= filter_mode << HNS3_PMU_CTRL_FILTER_MODE_S; + val |= HNS3_PMU_EVENT_OVERFLOW_RESTART; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); + + val = hns3_pmu_get_filter_condition(event); + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_HIGH, idx, val); + + if (filter_mode == HNS3_PMU_HW_FILTER_FUNC_QUEUE) + hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue_id); +} + +static void hns3_pmu_enable_counter(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val |= HNS3_PMU_EVENT_EN; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); +} + +static void hns3_pmu_disable_counter(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val &= ~HNS3_PMU_EVENT_EN; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); +} + +static void hns3_pmu_enable_intr(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx); + val &= ~HNS3_PMU_INTR_MASK_OVERFLOW; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val); +} + +static void hns3_pmu_disable_intr(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx); + val |= HNS3_PMU_INTR_MASK_OVERFLOW; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val); +} + +static void hns3_pmu_clear_intr_status(struct hns3_pmu *hns3_pmu, u32 idx) +{ + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val |= HNS3_PMU_EVENT_STATUS_RESET; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val &= ~HNS3_PMU_EVENT_STATUS_RESET; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); +} + +static u64 hns3_pmu_read_counter(struct perf_event *event) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + + return hns3_pmu_readq(hns3_pmu, event->hw.event_base, event->hw.idx); +} + +static void hns3_pmu_write_counter(struct perf_event *event, u64 value) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + u32 idx = event->hw.idx; + + hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx, value); + hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER, idx, value); +} + +static void hns3_pmu_init_counter(struct perf_event *event) +{ + struct hw_perf_event *hwc 
= &event->hw; + + local64_set(&hwc->prev_count, 0); + hns3_pmu_write_counter(event, 0); +} + +static int hns3_pmu_event_init(struct perf_event *event) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + int ret; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Sampling is not supported */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + event->cpu = hns3_pmu->on_cpu; + + idx = hns3_pmu_get_event_idx(hns3_pmu); + if (idx < 0) { + pci_err(hns3_pmu->pdev, "Up to %u events are supported!\n", + HNS3_PMU_MAX_HW_EVENTS); + return -EBUSY; + } + + hwc->idx = idx; + + ret = hns3_pmu_select_filter_mode(event, hns3_pmu); + if (ret) { + pci_err(hns3_pmu->pdev, "Invalid filter, ret = %d.\n", ret); + return ret; + } + + if (!hns3_pmu_validate_event_group(event)) { + pci_err(hns3_pmu->pdev, "Invalid event group.\n"); + return -EINVAL; + } + + if (hns3_pmu_get_ext_counter_used(event)) + hwc->event_base = HNS3_PMU_REG_EVENT_EXT_COUNTER; + else + hwc->event_base = HNS3_PMU_REG_EVENT_COUNTER; + + return 0; +} + +static void hns3_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 new_cnt, prev_cnt, delta; + + do { + prev_cnt = local64_read(&hwc->prev_count); + new_cnt = hns3_pmu_read_counter(event); + } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) != + prev_cnt); + + delta = new_cnt - prev_cnt; + local64_add(delta, &event->count); +} + +static void hns3_pmu_start(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + hwc->state = 0; + + hns3_pmu_config_filter(event); + hns3_pmu_init_counter(event); + hns3_pmu_enable_intr(hns3_pmu, hwc); + hns3_pmu_enable_counter(hns3_pmu, hwc); + + perf_event_update_userpage(event); +} + +static void hns3_pmu_stop(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hns3_pmu_disable_counter(hns3_pmu, hwc); + hns3_pmu_disable_intr(hns3_pmu, hwc); + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + if (hwc->state & PERF_HES_UPTODATE) + return; + + /* Read hardware counter and update the perf counter statistics */ + hns3_pmu_read(event); + hwc->state |= PERF_HES_UPTODATE; +} + +static int hns3_pmu_add(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + /* Check all working events to find a related event. 
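Events whose low 16 config bits match describe the same hardware event (bit 16 merely selects counter vs ext_counter), and when they share a group leader they share one of the eight hardware slots; the lookup below returns that slot, -ENOENT when a fresh slot should be allocated, or -EBUSY when all slots are taken. A hedged sketch of how userspace pairs the two views in one group (configs taken from this file; the helper itself is illustrative):

    #include <linux/perf_event.h>
    #include <string.h>

    /* Fill attrs for the byte and cycle halves of bw_ssu_egu; 'type' must
     * be read from the PMU's sysfs "type" attribute. Open 'bytes' first,
     * then pass its fd as group_fd when opening 'cycles'. */
    static void hns3_pair_attrs(struct perf_event_attr *bytes,
                                struct perf_event_attr *cycles,
                                unsigned int type)
    {
            memset(bytes, 0, sizeof(*bytes));
            bytes->size = sizeof(*bytes);
            bytes->type = type;
            bytes->config = 0x00001;  /* counter: byte number */

            *cycles = *bytes;
            cycles->config = 0x10001; /* ext_counter: cycle number */
    }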
*/ + idx = hns3_pmu_find_related_event_idx(hns3_pmu, event); + if (idx < 0 && idx != -ENOENT) + return idx; + + /* Current event shares an enabled hardware event with related event */ + if (idx >= 0 && idx < HNS3_PMU_MAX_HW_EVENTS) { + hwc->idx = idx; + goto start_count; + } + + idx = hns3_pmu_get_event_idx(hns3_pmu); + if (idx < 0) + return idx; + + hwc->idx = idx; + hns3_pmu->hw_events[idx] = event; + +start_count: + if (flags & PERF_EF_START) + hns3_pmu_start(event, PERF_EF_RELOAD); + + return 0; +} + +static void hns3_pmu_del(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hns3_pmu_stop(event, PERF_EF_UPDATE); + hns3_pmu->hw_events[hwc->idx] = NULL; + perf_event_update_userpage(event); +} + +static void hns3_pmu_enable(struct pmu *pmu) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu); + u32 val; + + val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); + val |= HNS3_PMU_GLOBAL_START; + writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); +} + +static void hns3_pmu_disable(struct pmu *pmu) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu); + u32 val; + + val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); + val &= ~HNS3_PMU_GLOBAL_START; + writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); +} + +static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu) +{ + u16 device_id; + char *name; + u32 val; + + hns3_pmu->base = pcim_iomap_table(pdev)[BAR_2]; + if (!hns3_pmu->base) { + pci_err(pdev, "ioremap failed\n"); + return -ENOMEM; + } + + hns3_pmu->hw_clk_freq = readl(hns3_pmu->base + HNS3_PMU_REG_CLOCK_FREQ); + + val = readl(hns3_pmu->base + HNS3_PMU_REG_BDF); + hns3_pmu->bdf_min = val & 0xffff; + hns3_pmu->bdf_max = val >> 16; + + val = readl(hns3_pmu->base + HNS3_PMU_REG_DEVICE_ID); + device_id = val & 0xffff; + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hns3_pmu_sicl_%u", device_id); + if (!name) + return -ENOMEM; + + hns3_pmu->pdev = pdev; + hns3_pmu->on_cpu = -1; + hns3_pmu->identifier = readl(hns3_pmu->base + HNS3_PMU_REG_VERSION); + hns3_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .event_init = hns3_pmu_event_init, + .pmu_enable = hns3_pmu_enable, + .pmu_disable = hns3_pmu_disable, + .add = hns3_pmu_add, + .del = hns3_pmu_del, + .start = hns3_pmu_start, + .stop = hns3_pmu_stop, + .read = hns3_pmu_read, + .task_ctx_nr = perf_invalid_context, + .attr_groups = hns3_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + return 0; +} + +static irqreturn_t hns3_pmu_irq(int irq, void *data) +{ + struct hns3_pmu *hns3_pmu = data; + u32 intr_status, idx; + + for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { + intr_status = hns3_pmu_readl(hns3_pmu, + HNS3_PMU_REG_EVENT_INTR_STATUS, + idx); + + /* + * As each counter will restart from 0 when it is overflowed, + * extra processing is no need, just clear interrupt status. 
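Because HNS3_PMU_EVENT_OVERFLOW_RESTART is set when the event is configured, an overflowed counter wraps to zero and keeps running, so the interrupt handler only acknowledges the status bit. The read path stays correct across a wrap as long as the counters wrap at the full 64-bit width: hns3_pmu_read() computes new - prev in unsigned arithmetic. A one-line illustration of that property (assuming 64-bit wraparound):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
            uint64_t prev = UINT64_MAX - 5, now = 10;
            /* modular subtraction recovers the true increment of 16 */
            assert(now - prev == 16);
            return 0;
    }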
+ */ + if (intr_status) + hns3_pmu_clear_intr_status(hns3_pmu, idx); + } + + return IRQ_HANDLED; +} + +static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hns3_pmu *hns3_pmu; + + hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node); + if (!hns3_pmu) + return -ENODEV; + + if (hns3_pmu->on_cpu == -1) { + hns3_pmu->on_cpu = cpu; + irq_set_affinity(hns3_pmu->irq, cpumask_of(cpu)); + } + + return 0; +} + +static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hns3_pmu *hns3_pmu; + unsigned int target; + + hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node); + if (!hns3_pmu) + return -ENODEV; + + /* Nothing to do if this CPU doesn't own the PMU */ + if (hns3_pmu->on_cpu != cpu) + return 0; + + /* Choose a new CPU from all online cpus */ + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target); + hns3_pmu->on_cpu = target; + irq_set_affinity(hns3_pmu->irq, cpumask_of(target)); + + return 0; +} + +static void hns3_pmu_free_irq(void *data) +{ + struct pci_dev *pdev = data; + + pci_free_irq_vectors(pdev); +} + +static int hns3_pmu_irq_register(struct pci_dev *pdev, + struct hns3_pmu *hns3_pmu) +{ + int irq, ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) { + pci_err(pdev, "failed to enable MSI vectors, ret = %d.\n", ret); + return ret; + } + + ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev); + if (ret) { + pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret); + return ret; + } + + irq = pci_irq_vector(pdev, 0); + ret = devm_request_irq(&pdev->dev, irq, hns3_pmu_irq, 0, + hns3_pmu->pmu.name, hns3_pmu); + if (ret) { + pci_err(pdev, "failed to register irq, ret = %d.\n", ret); + return ret; + } + + hns3_pmu->irq = irq; + + return 0; +} + +static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu) +{ + int ret; + + ret = hns3_pmu_alloc_pmu(pdev, hns3_pmu); + if (ret) + return ret; + + ret = hns3_pmu_irq_register(pdev, hns3_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); + if (ret) { + pci_err(pdev, "failed to register hotplug, ret = %d.\n", ret); + return ret; + } + + ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1); + if (ret) { + pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); + } + + return ret; +} + +static void hns3_pmu_uninit_pmu(struct pci_dev *pdev) +{ + struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev); + + perf_pmu_unregister(&hns3_pmu->pmu); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); +} + +static int hns3_pmu_init_dev(struct pci_dev *pdev) +{ + int ret; + + ret = pcim_enable_device(pdev); + if (ret) { + pci_err(pdev, "failed to enable pci device, ret = %d.\n", ret); + return ret; + } + + ret = pcim_iomap_regions(pdev, BIT(BAR_2), "hns3_pmu"); + if (ret < 0) { + pci_err(pdev, "failed to request pci region, ret = %d.\n", ret); + return ret; + } + + pci_set_master(pdev); + + return 0; +} + +static int hns3_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hns3_pmu *hns3_pmu; + int ret; + + hns3_pmu = devm_kzalloc(&pdev->dev, sizeof(*hns3_pmu), GFP_KERNEL); + if (!hns3_pmu) + return -ENOMEM; + + ret = hns3_pmu_init_dev(pdev); + if (ret) + return ret; + + ret = hns3_pmu_init_pmu(pdev, hns3_pmu); + if (ret) { + 
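As an uncore PMU, this driver pins all events to one owner CPU: event_init() above overrides event->cpu with hns3_pmu->on_cpu, per-task attachment is rejected, and the hotplug callbacks migrate the perf context and IRQ affinity when the owner goes offline. Userspace therefore opens events on the CPU advertised by the cpumask attribute; a hedged perf_event_open(2) sketch (error handling trimmed):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    /* Open one HNS3 PMU event on the owner CPU read from sysfs; pid is -1
     * because the PMU counts system-wide on that CPU, not per task. */
    static int hns3_open(unsigned int type, unsigned long long config,
                         int owner_cpu)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = type;     /* the PMU's sysfs "type" value */
            attr.config = config; /* e.g. 0x00001 for bw_ssu_egu_byte_num */
            return syscall(SYS_perf_event_open, &attr, -1, owner_cpu, -1, 0);
    }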
pci_clear_master(pdev); + return ret; + } + + pci_set_drvdata(pdev, hns3_pmu); + + return ret; +} + +static void hns3_pmu_remove(struct pci_dev *pdev) +{ + hns3_pmu_uninit_pmu(pdev); + pci_clear_master(pdev); + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id hns3_pmu_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa22b) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, hns3_pmu_ids); + +static struct pci_driver hns3_pmu_driver = { + .name = "hns3_pmu", + .id_table = hns3_pmu_ids, + .probe = hns3_pmu_probe, + .remove = hns3_pmu_remove, +}; + +static int __init hns3_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + "AP_PERF_ARM_HNS3_PMU_ONLINE", + hns3_pmu_online_cpu, + hns3_pmu_offline_cpu); + if (ret) { + pr_err("failed to setup HNS3 PMU hotplug, ret = %d.\n", ret); + return ret; + } + + ret = pci_register_driver(&hns3_pmu_driver); + if (ret) { + pr_err("failed to register pci driver, ret = %d.\n", ret); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE); + } + + return ret; +} +module_init(hns3_pmu_module_init); + +static void __exit hns3_pmu_module_exit(void) +{ + pci_unregister_driver(&hns3_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE); +} +module_exit(hns3_pmu_module_exit); + +MODULE_DESCRIPTION("HNS3 PMU driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c index 282d3a071a67..69c3050a4348 100644 --- a/drivers/perf/marvell_cn10k_tad_pmu.c +++ b/drivers/perf/marvell_cn10k_tad_pmu.c @@ -2,10 +2,6 @@ /* Marvell CN10K LLC-TAD perf driver * * Copyright (C) 2021 Marvell - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #define pr_fmt(fmt) "tad_pmu: " fmt @@ -18,9 +14,9 @@ #include <linux/perf_event.h> #include <linux/platform_device.h> -#define TAD_PFC_OFFSET 0x0 +#define TAD_PFC_OFFSET 0x800 #define TAD_PFC(counter) (TAD_PFC_OFFSET | (counter << 3)) -#define TAD_PRF_OFFSET 0x100 +#define TAD_PRF_OFFSET 0x900 #define TAD_PRF(counter) (TAD_PRF_OFFSET | (counter << 3)) #define TAD_PRF_CNTSEL_MASK 0xFF #define TAD_MAX_COUNTERS 8 @@ -100,9 +96,7 @@ static void tad_pmu_event_counter_start(struct perf_event *event, int flags) * which sets TAD()_PRF()[CNTSEL] != 0 */ for (i = 0; i < tad_pmu->region_cnt; i++) { - reg_val = readq_relaxed(tad_pmu->regions[i].base + - TAD_PRF(counter_idx)); - reg_val |= (event_idx & 0xFF); + reg_val = event_idx & 0xFF; writeq_relaxed(reg_val, tad_pmu->regions[i].base + TAD_PRF(counter_idx)); } diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c index b2b8d2074ed0..2c961839903d 100644 --- a/drivers/perf/riscv_pmu.c +++ b/drivers/perf/riscv_pmu.c @@ -121,7 +121,7 @@ u64 riscv_pmu_event_update(struct perf_event *event) return delta; } -static void riscv_pmu_stop(struct perf_event *event, int flags) +void riscv_pmu_stop(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); @@ -175,7 +175,7 @@ int riscv_pmu_event_set_period(struct perf_event *event) return overflow; } -static void riscv_pmu_start(struct perf_event *event, int flags) +void riscv_pmu_start(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index dca3537a8dcc..79a3de515ece 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -17,10 +17,30 @@ #include <linux/irqdomain.h> #include <linux/of_irq.h> #include <linux/of.h> +#include <linux/cpu_pm.h> #include <asm/sbi.h> #include <asm/hwcap.h> +PMU_FORMAT_ATTR(event, "config:0-47"); +PMU_FORMAT_ATTR(firmware, "config:63"); + +static struct attribute *riscv_arch_formats_attr[] = { + &format_attr_event.attr, + &format_attr_firmware.attr, + NULL, +}; + +static struct attribute_group riscv_pmu_format_group = { + .name = "format", + .attrs = riscv_arch_formats_attr, +}; + +static const struct attribute_group *riscv_pmu_attr_groups[] = { + &riscv_pmu_format_group, + NULL, +}; + union sbi_pmu_ctr_info { unsigned long value; struct { @@ -666,12 +686,15 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde child = of_get_compatible_child(cpu, "riscv,cpu-intc"); if (!child) { pr_err("Failed to find INTC node\n"); + of_node_put(cpu); return -ENODEV; } domain = irq_find_host(child); of_node_put(child); - if (domain) + if (domain) { + of_node_put(cpu); break; + } } if (!domain) { pr_err("Failed to find INTC IRQ root domain\n"); @@ -693,6 +716,73 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde return 0; } +#ifdef CONFIG_CPU_PM +static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd, + void *v) +{ + struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb); + struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); + int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS); + struct perf_event *event; + int idx; + + if (!enabled) + return NOTIFY_OK; + + for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) { + event = cpuc->events[idx]; + if (!event) + continue; + + switch (cmd) { + case CPU_PM_ENTER: + /* + * Stop and 
update the counter + */ + riscv_pmu_stop(event, PERF_EF_UPDATE); + break; + case CPU_PM_EXIT: + case CPU_PM_ENTER_FAILED: + /* + * Restore and enable the counter. + * + * Requires RCU read locking to be functional, + * wrap the call within RCU_NONIDLE to make the + * RCU subsystem aware this cpu is not idle from + * an RCU perspective for the riscv_pmu_start() call + * duration. + */ + RCU_NONIDLE(riscv_pmu_start(event, PERF_EF_RELOAD)); + break; + default: + break; + } + } + + return NOTIFY_OK; +} + +static int riscv_pm_pmu_register(struct riscv_pmu *pmu) +{ + pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify; + return cpu_pm_register_notifier(&pmu->riscv_pm_nb); +} + +static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) +{ + cpu_pm_unregister_notifier(&pmu->riscv_pm_nb); +} +#else +static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; } +static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { } +#endif + +static void riscv_pmu_destroy(struct riscv_pmu *pmu) +{ + riscv_pm_pmu_unregister(pmu); + cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); +} + static int pmu_sbi_device_probe(struct platform_device *pdev) { struct riscv_pmu *pmu = NULL; @@ -720,6 +810,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; } + pmu->pmu.attr_groups = riscv_pmu_attr_groups; pmu->num_counters = num_counters; pmu->ctr_start = pmu_sbi_ctr_start; pmu->ctr_stop = pmu_sbi_ctr_stop; @@ -733,14 +824,19 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) if (ret) return ret; + ret = riscv_pm_pmu_register(pmu); + if (ret) + goto out_unregister; + ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW); - if (ret) { - cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); - return ret; - } + if (ret) + goto out_unregister; return 0; +out_unregister: + riscv_pmu_destroy(pmu); + out_free: kfree(pmu); return ret; diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig index 849c4204f550..93a6a8ee4716 100644 --- a/drivers/phy/broadcom/Kconfig +++ b/drivers/phy/broadcom/Kconfig @@ -83,7 +83,7 @@ config PHY_NS2_USB_DRD config PHY_BRCM_SATA tristate "Broadcom SATA PHY driver" depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || \ - ARCH_BCM_63XX || COMPILE_TEST + ARCH_BCMBCA || COMPILE_TEST depends on OF select GENERIC_PHY default ARCH_BCM_IPROC diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index dfc8ea9f3843..771dd1f4fbe0 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -1810,6 +1810,7 @@ static void ocelot_irq_mask(struct irq_data *data) regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio), BIT(gpio % 32), 0); + gpiochip_disable_irq(chip, gpio); } static void ocelot_irq_unmask(struct irq_data *data) @@ -1818,6 +1819,7 @@ static void ocelot_irq_unmask(struct irq_data *data) struct ocelot_pinctrl *info = gpiochip_get_data(chip); unsigned int gpio = irqd_to_hwirq(data); + gpiochip_enable_irq(chip, gpio); regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio), BIT(gpio % 32), BIT(gpio % 32)); } @@ -1839,8 +1841,10 @@ static struct irq_chip ocelot_eoi_irqchip = { .irq_mask = ocelot_irq_mask, .irq_eoi = ocelot_irq_ack, .irq_unmask = ocelot_irq_unmask, - .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED, + .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED | + IRQCHIP_IMMUTABLE, 
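The ocelot changes here follow the immutable-irqchip conversion pattern: mask/unmask gain explicit gpiochip_disable_irq()/gpiochip_enable_irq() calls, the chips are flagged IRQCHIP_IMMUTABLE with the GPIOCHIP_IRQ_RESOURCE_HELPERS entries, and the chip is attached via gpio_irq_chip_set_chip() rather than assigning girq->chip. A condensed, generic sketch of the shape (not ocelot-specific):

    #include <linux/gpio/driver.h>
    #include <linux/irq.h>

    static void foo_irq_mask(struct irq_data *d)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

            /* ...hardware mask write... */
            gpiochip_disable_irq(gc, irqd_to_hwirq(d)); /* release ref */
    }

    static void foo_irq_unmask(struct irq_data *d)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

            gpiochip_enable_irq(gc, irqd_to_hwirq(d)); /* take ref first */
            /* ...hardware unmask write... */
    }

    static const struct irq_chip foo_irqchip = {
            .name = "foo",
            .irq_mask = foo_irq_mask,
            .irq_unmask = foo_irq_unmask,
            .flags = IRQCHIP_IMMUTABLE,
            GPIOCHIP_IRQ_RESOURCE_HELPERS,
    };
    /* at probe time: gpio_irq_chip_set_chip(&gc->irq, &foo_irqchip); */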
.irq_set_type = ocelot_irq_set_type, + GPIOCHIP_IRQ_RESOURCE_HELPERS }; static struct irq_chip ocelot_irqchip = { @@ -1849,6 +1853,8 @@ static struct irq_chip ocelot_irqchip = { .irq_ack = ocelot_irq_ack, .irq_unmask = ocelot_irq_unmask, .irq_set_type = ocelot_irq_set_type, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS }; static int ocelot_irq_set_type(struct irq_data *data, unsigned int type) @@ -1912,7 +1918,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev, irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { girq = &gc->irq; - girq->chip = &ocelot_irqchip; + gpio_irq_chip_set_chip(girq, &ocelot_irqchip); girq->parent_handler = ocelot_irq_handler; girq->num_parents = 1; girq->parents = devm_kcalloc(&pdev->dev, 1, diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index fd5fff9adff0..3be2a08ae3a6 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -966,16 +966,13 @@ static int pmic_gpio_child_to_parent_hwirq(struct gpio_chip *chip, return 0; } -static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip, - unsigned int parent_hwirq, - unsigned int parent_type) +static int pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { struct pmic_gpio_state *state = gpiochip_get_data(chip); - struct irq_fwspec *fwspec; - - fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = chip->irq.parent_domain->fwnode; @@ -985,7 +982,7 @@ static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip, /* param[2] must be left as 0 */ fwspec->param[3] = parent_type; - return fwspec; + return 0; } static int pmic_gpio_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index a48cac55152c..c47eed9d948f 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -9,8 +9,10 @@ #include <linux/clk.h> #include <linux/gpio/driver.h> #include <linux/io.h> +#include <linux/interrupt.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinctrl.h> @@ -89,6 +91,7 @@ #define PIN(n) (0x0800 + 0x10 + (n)) #define IOLH(n) (0x1000 + (n) * 8) #define IEN(n) (0x1800 + (n) * 8) +#define ISEL(n) (0x2c80 + (n) * 8) #define PWPR (0x3014) #define SD_CH(n) (0x3000 + (n) * 4) #define QSPI (0x3008) @@ -112,6 +115,10 @@ #define RZG2L_PIN_ID_TO_PORT_OFFSET(id) (RZG2L_PIN_ID_TO_PORT(id) + 0x10) #define RZG2L_PIN_ID_TO_PIN(id) ((id) % RZG2L_PINS_PER_PORT) +#define RZG2L_TINT_MAX_INTERRUPT 32 +#define RZG2L_TINT_IRQ_START_INDEX 9 +#define RZG2L_PACK_HWIRQ(t, i) (((t) << 16) | (i)) + struct rzg2l_dedicated_configs { const char *name; u32 config; @@ -137,6 +144,9 @@ struct rzg2l_pinctrl { struct gpio_chip gpio_chip; struct pinctrl_gpio_range gpio_range; + DECLARE_BITMAP(tint_slot, RZG2L_TINT_MAX_INTERRUPT); + spinlock_t bitmap_lock; + unsigned int hwirq[RZG2L_TINT_MAX_INTERRUPT]; spinlock_t lock; }; @@ -883,8 +893,14 @@ static int rzg2l_gpio_get(struct gpio_chip *chip, unsigned int offset) static void rzg2l_gpio_free(struct gpio_chip *chip, unsigned int offset) { + unsigned int virq; + pinctrl_gpio_free(chip->base + offset); + virq = 
irq_find_mapping(chip->irq.domain, offset); + if (virq) + irq_dispose_mapping(virq); + /* * Set the GPIO as an input to ensure that the next GPIO request won't * drive the GPIO pin as an output. @@ -1104,14 +1120,221 @@ static struct { } }; +static int rzg2l_gpio_get_gpioint(unsigned int virq) +{ + unsigned int gpioint; + unsigned int i; + u32 port, bit; + + port = virq / 8; + bit = virq % 8; + + if (port >= ARRAY_SIZE(rzg2l_gpio_configs) || + bit >= RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[port])) + return -EINVAL; + + gpioint = bit; + for (i = 0; i < port; i++) + gpioint += RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[i]); + + return gpioint; +} + +static void rzg2l_gpio_irq_disable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip); + unsigned int hwirq = irqd_to_hwirq(d); + unsigned long flags; + void __iomem *addr; + u32 port; + u8 bit; + + port = RZG2L_PIN_ID_TO_PORT(hwirq); + bit = RZG2L_PIN_ID_TO_PIN(hwirq); + + addr = pctrl->base + ISEL(port); + if (bit >= 4) { + bit -= 4; + addr += 4; + } + + spin_lock_irqsave(&pctrl->lock, flags); + writel(readl(addr) & ~BIT(bit * 8), addr); + spin_unlock_irqrestore(&pctrl->lock, flags); + + gpiochip_disable_irq(gc, hwirq); + irq_chip_disable_parent(d); +} + +static void rzg2l_gpio_irq_enable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip); + unsigned int hwirq = irqd_to_hwirq(d); + unsigned long flags; + void __iomem *addr; + u32 port; + u8 bit; + + gpiochip_enable_irq(gc, hwirq); + + port = RZG2L_PIN_ID_TO_PORT(hwirq); + bit = RZG2L_PIN_ID_TO_PIN(hwirq); + + addr = pctrl->base + ISEL(port); + if (bit >= 4) { + bit -= 4; + addr += 4; + } + + spin_lock_irqsave(&pctrl->lock, flags); + writel(readl(addr) | BIT(bit * 8), addr); + spin_unlock_irqrestore(&pctrl->lock, flags); + + irq_chip_enable_parent(d); +} + +static int rzg2l_gpio_irq_set_type(struct irq_data *d, unsigned int type) +{ + return irq_chip_set_type_parent(d, type); +} + +static void rzg2l_gpio_irqc_eoi(struct irq_data *d) +{ + irq_chip_eoi_parent(d); +} + +static void rzg2l_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + + seq_printf(p, dev_name(gc->parent)); +} + +static const struct irq_chip rzg2l_gpio_irqchip = { + .name = "rzg2l-gpio", + .irq_disable = rzg2l_gpio_irq_disable, + .irq_enable = rzg2l_gpio_irq_enable, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_set_type = rzg2l_gpio_irq_set_type, + .irq_eoi = rzg2l_gpio_irqc_eoi, + .irq_print_chip = rzg2l_gpio_irq_print_chip, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + +static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc, + unsigned int child, + unsigned int child_type, + unsigned int *parent, + unsigned int *parent_type) +{ + struct rzg2l_pinctrl *pctrl = gpiochip_get_data(gc); + unsigned long flags; + int gpioint, irq; + + gpioint = rzg2l_gpio_get_gpioint(child); + if (gpioint < 0) + return gpioint; + + spin_lock_irqsave(&pctrl->bitmap_lock, flags); + irq = bitmap_find_free_region(pctrl->tint_slot, RZG2L_TINT_MAX_INTERRUPT, get_order(1)); + spin_unlock_irqrestore(&pctrl->bitmap_lock, flags); + if (irq < 0) + return -ENOSPC; + pctrl->hwirq[irq] = child; + irq += RZG2L_TINT_IRQ_START_INDEX; + + /* All these interrupts are level high in the CPU */ + 
*parent_type = IRQ_TYPE_LEVEL_HIGH; + *parent = RZG2L_PACK_HWIRQ(gpioint, irq); + return 0; +} + +static int rzg2l_gpio_populate_parent_fwspec(struct gpio_chip *chip, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) +{ + struct irq_fwspec *fwspec = &gfwspec->fwspec; + + fwspec->fwnode = chip->irq.parent_domain->fwnode; + fwspec->param_count = 2; + fwspec->param[0] = parent_hwirq; + fwspec->param[1] = parent_type; + + return 0; +} + +static void rzg2l_gpio_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d; + + d = irq_domain_get_irq_data(domain, virq); + if (d) { + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long flags; + unsigned int i; + + for (i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) { + if (pctrl->hwirq[i] == hwirq) { + spin_lock_irqsave(&pctrl->bitmap_lock, flags); + bitmap_release_region(pctrl->tint_slot, i, get_order(1)); + spin_unlock_irqrestore(&pctrl->bitmap_lock, flags); + pctrl->hwirq[i] = 0; + break; + } + } + } + irq_domain_free_irqs_common(domain, virq, nr_irqs); +} + +static void rzg2l_init_irq_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, + unsigned int ngpios) +{ + struct rzg2l_pinctrl *pctrl = gpiochip_get_data(gc); + struct gpio_chip *chip = &pctrl->gpio_chip; + unsigned int offset; + + /* Forbid unused lines to be mapped as IRQs */ + for (offset = 0; offset < chip->ngpio; offset++) { + u32 port, bit; + + port = offset / 8; + bit = offset % 8; + + if (port >= ARRAY_SIZE(rzg2l_gpio_configs) || + bit >= RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[port])) + clear_bit(offset, valid_mask); + } +} + static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) { struct device_node *np = pctrl->dev->of_node; struct gpio_chip *chip = &pctrl->gpio_chip; const char *name = dev_name(pctrl->dev); + struct irq_domain *parent_domain; struct of_phandle_args of_args; + struct device_node *parent_np; + struct gpio_irq_chip *girq; int ret; + parent_np = of_irq_find_parent(np); + if (!parent_np) + return -ENXIO; + + parent_domain = irq_find_host(parent_np); + of_node_put(parent_np); + if (!parent_domain) + return -EPROBE_DEFER; + ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &of_args); if (ret) { dev_err(pctrl->dev, "Unable to parse gpio-ranges\n"); @@ -1138,6 +1361,15 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) chip->base = -1; chip->ngpio = of_args.args[2]; + girq = &chip->irq; + gpio_irq_chip_set_chip(girq, &rzg2l_gpio_irqchip); + girq->fwnode = of_node_to_fwnode(np); + girq->parent_domain = parent_domain; + girq->child_to_parent_hwirq = rzg2l_gpio_child_to_parent_hwirq; + girq->populate_parent_alloc_arg = rzg2l_gpio_populate_parent_fwspec; + girq->child_irq_domain_ops.free = rzg2l_gpio_irq_domain_free; + girq->init_valid_mask = rzg2l_init_irq_valid_mask; + pctrl->gpio_range.id = 0; pctrl->gpio_range.pin_base = 0; pctrl->gpio_range.base = 0; @@ -1253,6 +1485,7 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev) } spin_lock_init(&pctrl->lock); + spin_lock_init(&pctrl->bitmap_lock); platform_set_drvdata(pdev, pctrl); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index a8b383051528..502dcd1c33b7 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -6842,6 +6842,31 @@ static const struct 
backlight_ops ibm_backlight_data = { /* --------------------------------------------------------------------- */ +static int __init tpacpi_evaluate_bcl(struct acpi_device *adev, void *not_used) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + acpi_status status; + int rc; + + status = acpi_evaluate_object(adev->handle, "_BCL", NULL, &buffer); + if (ACPI_FAILURE(status)) + return 0; + + obj = buffer.pointer; + if (!obj || obj->type != ACPI_TYPE_PACKAGE) { + acpi_handle_info(adev->handle, + "Unknown _BCL data, please report this to %s\n", + TPACPI_MAIL); + rc = 0; + } else { + rc = obj->package.count; + } + kfree(obj); + + return rc; +} + /* * Call _BCL method of video device. On some ThinkPads this will * switch the firmware to the ACPI brightness control mode. @@ -6849,37 +6874,13 @@ static const struct backlight_ops ibm_backlight_data = { static int __init tpacpi_query_bcl_levels(acpi_handle handle) { - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; - struct acpi_device *device, *child; - int rc; + struct acpi_device *device; device = acpi_fetch_acpi_dev(handle); if (!device) return 0; - rc = 0; - list_for_each_entry(child, &device->children, node) { - acpi_status status = acpi_evaluate_object(child->handle, "_BCL", - NULL, &buffer); - if (ACPI_FAILURE(status)) { - buffer.length = ACPI_ALLOCATE_BUFFER; - continue; - } - - obj = (union acpi_object *)buffer.pointer; - if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { - pr_err("Unknown _BCL data, please report this to %s\n", - TPACPI_MAIL); - rc = 0; - } else { - rc = obj->package.count; - } - break; - } - - kfree(buffer.pointer); - return rc; + return acpi_dev_for_each_child(device, tpacpi_evaluate_bcl, NULL); } diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c index f5eced0842b3..2ff7717530bf 100644 --- a/drivers/powercap/dtpm_cpu.c +++ b/drivers/powercap/dtpm_cpu.c @@ -53,7 +53,7 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit) for (i = 0; i < pd->nr_perf_states; i++) { - power = pd->table[i].power * MICROWATT_PER_MILLIWATT * nr_cpus; + power = pd->table[i].power * nr_cpus; if (power > power_limit) break; @@ -63,42 +63,26 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit) freq_qos_update_request(&dtpm_cpu->qos_req, freq); - power_limit = pd->table[i - 1].power * - MICROWATT_PER_MILLIWATT * nr_cpus; + power_limit = pd->table[i - 1].power * nr_cpus; return power_limit; } static u64 scale_pd_power_uw(struct cpumask *pd_mask, u64 power) { - unsigned long max = 0, sum_util = 0; + unsigned long max, sum_util = 0; int cpu; - for_each_cpu_and(cpu, pd_mask, cpu_online_mask) { - - /* - * The capacity is the same for all CPUs belonging to - * the same perf domain, so a single call to - * arch_scale_cpu_capacity() is enough. However, we - * need the CPU parameter to be initialized by the - * loop, so the call ends up in this block. - * - * We can initialize 'max' with a cpumask_first() call - * before the loop but the bits computation is not - * worth given the arch_scale_cpu_capacity() just - * returns a value where the resulting assembly code - * will be optimized by the compiler. - */ - max = arch_scale_cpu_capacity(cpu); - sum_util += sched_cpu_util(cpu, max); - } - /* - * In the improbable case where all the CPUs of the perf - * domain are offline, 'max' will be zero and will lead to an - * illegal operation with a zero division. 
+ * The capacity is the same for all CPUs belonging to + * the same perf domain. */ - return max ? (power * ((sum_util << 10) / max)) >> 10 : 0; + max = arch_scale_cpu_capacity(cpumask_first(pd_mask)); + + for_each_cpu_and(cpu, pd_mask, cpu_online_mask) + sum_util += sched_cpu_util(cpu); + + return (power * ((sum_util << 10) / max)) >> 10; } static u64 get_pd_power_uw(struct dtpm *dtpm) diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index a9c99d9e8b42..21d624f9f5fb 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -1109,6 +1109,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &rapl_defaults_core), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &rapl_defaults_core), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server), X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core), diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c index 9d23984d8931..bc6adda58883 100644 --- a/drivers/powercap/intel_rapl_msr.c +++ b/drivers/powercap/intel_rapl_msr.c @@ -140,7 +140,9 @@ static const struct x86_cpu_id pl4_support_ids[] = { { X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY }, { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE, X86_FEATURE_ANY }, { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_L, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_N, X86_FEATURE_ANY }, { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE_P, X86_FEATURE_ANY }, {} }; diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 904de8d61828..60d13a949bc5 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -140,6 +140,16 @@ config PWM_BRCMSTB To compile this driver as a module, choose M here: the module will be called pwm-brcmstb. +config PWM_CLK + tristate "Clock based PWM support" + depends on HAVE_CLK || COMPILE_TEST + help + Generic PWM framework driver for outputs that can be + muxed to clocks. + + To compile this driver as a module, choose M here: the module + will be called pwm-clk.
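The PWM_CLK help text above is terse, so here is the core idea in isolation: one cycle of the underlying clock corresponds to one PWM period, and the duty cycle is forwarded to the clk framework as a numerator/denominator pair. This is an editorial sketch under stated assumptions (a prepared and enabled struct clk *clk, a made-up function name, and period/duty values small enough for the clk API's ratio arguments); the actual driver, pwm-clk.c, is added in full further down in this diff.

#include <linux/clk.h>
#include <linux/math64.h>
#include <linux/time64.h>

/* Sketch only: map a requested period/duty in nanoseconds onto the clk API. */
static int clk_as_pwm_set(struct clk *clk, u64 period_ns, u64 duty_ns)
{
	unsigned long rate;
	int ret;

	if (!period_ns || duty_ns > period_ns)
		return -EINVAL;

	/* One clock cycle per PWM period: rate = NSEC_PER_SEC / period, rounded up. */
	rate = DIV64_U64_ROUND_UP(NSEC_PER_SEC, period_ns);
	ret = clk_set_rate(clk, rate);
	if (ret)
		return ret;

	/* clk_set_duty_cycle() takes the duty cycle as a num/den ratio of the period. */
	return clk_set_duty_cycle(clk, duty_ns, period_ns);
}

Note that the rate and duty updates are two separate clk calls and therefore not atomic, and whether 0% or 100% duty is reachable depends entirely on the underlying clock; the driver's own header below lists both points as limitations.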
+ config PWM_CLPS711X tristate "CLPS711X PWM support" depends on ARCH_CLPS711X || COMPILE_TEST diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 5c08bdb817b4..7bf1a29f02b8 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_PWM_BCM_KONA) += pwm-bcm-kona.o obj-$(CONFIG_PWM_BCM2835) += pwm-bcm2835.o obj-$(CONFIG_PWM_BERLIN) += pwm-berlin.o obj-$(CONFIG_PWM_BRCMSTB) += pwm-brcmstb.o +obj-$(CONFIG_PWM_CLK) += pwm-clk.o obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o obj-$(CONFIG_PWM_CRC) += pwm-crc.o obj-$(CONFIG_PWM_CROS_EC) += pwm-cros-ec.o diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index c7552df32082..0e042410f6b9 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -235,18 +235,8 @@ EXPORT_SYMBOL_GPL(pwm_get_chip_data); static bool pwm_ops_check(const struct pwm_chip *chip) { - const struct pwm_ops *ops = chip->ops; - /* driver supports legacy, non-atomic operation */ - if (ops->config && ops->enable && ops->disable) { - if (IS_ENABLED(CONFIG_PWM_DEBUG)) - dev_warn(chip->dev, - "Driver needs updating to atomic API\n"); - - return true; - } - if (!ops->apply) return false; @@ -548,73 +538,6 @@ static void pwm_apply_state_debug(struct pwm_device *pwm, } } -static int pwm_apply_legacy(struct pwm_chip *chip, struct pwm_device *pwm, - const struct pwm_state *state) -{ - int err; - struct pwm_state initial_state = pwm->state; - - if (state->polarity != pwm->state.polarity) { - if (!chip->ops->set_polarity) - return -EINVAL; - - /* - * Changing the polarity of a running PWM is only allowed when - * the PWM driver implements ->apply(). - */ - if (pwm->state.enabled) { - chip->ops->disable(chip, pwm); - - /* - * Update pwm->state already here in case - * .set_polarity() or another callback depend on that. - */ - pwm->state.enabled = false; - } - - err = chip->ops->set_polarity(chip, pwm, state->polarity); - if (err) - goto rollback; - - pwm->state.polarity = state->polarity; - } - - if (!state->enabled) { - if (pwm->state.enabled) - chip->ops->disable(chip, pwm); - - return 0; - } - - /* - * We cannot skip calling ->config even if state->period == - * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle - * because we might have exited early in the last call to - * pwm_apply_state because of !state->enabled and so the two values in - * pwm->state might not be configured in hardware. 
- */ - err = chip->ops->config(pwm->chip, pwm, - state->duty_cycle, - state->period); - if (err) - goto rollback; - - pwm->state.period = state->period; - pwm->state.duty_cycle = state->duty_cycle; - - if (!pwm->state.enabled) { - err = chip->ops->enable(chip, pwm); - if (err) - goto rollback; - } - - return 0; - -rollback: - pwm->state = initial_state; - return err; -} - /** * pwm_apply_state() - atomically apply a new state to a PWM device * @pwm: PWM device @@ -647,10 +570,7 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state) state->usage_power == pwm->state.usage_power) return 0; - if (chip->ops->apply) - err = chip->ops->apply(chip, pwm, state); - else - err = pwm_apply_legacy(chip, pwm, state); + err = chip->ops->apply(chip, pwm, state); if (err) return err; diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c index 3977a0f9d132..2837b4ce8053 100644 --- a/drivers/pwm/pwm-atmel-tcb.c +++ b/drivers/pwm/pwm-atmel-tcb.c @@ -304,7 +304,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, /* * Find best clk divisor: * the smallest divisor which can fulfill the period_ns requirements. - * If there is a gclk, the first divisor is actuallly the gclk selector + * If there is a gclk, the first divisor is actually the gclk selector */ if (tcbpwmc->gclk) i = 1; diff --git a/drivers/pwm/pwm-clk.c b/drivers/pwm/pwm-clk.c new file mode 100644 index 000000000000..c2a503d684a7 --- /dev/null +++ b/drivers/pwm/pwm-clk.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Clock based PWM controller + * + * Copyright (c) 2021 Nikita Travkin <nikita@trvn.ru> + * + * This is an "adapter" driver that allows PWM consumers to use + * system clocks with duty cycle control as PWM outputs. + * + * Limitations: + * - Due to the fact that exact behavior depends on the underlying + * clock driver, various limitations are possible. + * - Underlying clock may not be able to give 0% or 100% duty cycle + * (constant off or on), exact behavior will depend on the clock. + * - When the PWM is disabled, the clock will be disabled as well, + * line state will depend on the clock. + * - The clk API doesn't expose the necessary calls to implement + * .get_state(). + */ + +#include <linux/kernel.h> +#include <linux/math64.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/pwm.h> + +struct pwm_clk_chip { + struct pwm_chip chip; + struct clk *clk; + bool clk_enabled; +}; + +#define to_pwm_clk_chip(_chip) container_of(_chip, struct pwm_clk_chip, chip) + +static int pwm_clk_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct pwm_clk_chip *pcchip = to_pwm_clk_chip(chip); + int ret; + u32 rate; + u64 period = state->period; + u64 duty_cycle = state->duty_cycle; + + if (!state->enabled) { + if (pwm->state.enabled) { + clk_disable(pcchip->clk); + pcchip->clk_enabled = false; + } + return 0; + } else if (!pwm->state.enabled) { + ret = clk_enable(pcchip->clk); + if (ret) + return ret; + pcchip->clk_enabled = true; + } + + /* + * We have to enable the clk before setting the rate and duty_cycle, + * that however results in a window where the clk is on with a + * (potentially) different setting. Also setting period and duty_cycle + * are two separate calls, so that probably isn't atomic either. 
+ */ + + rate = DIV64_U64_ROUND_UP(NSEC_PER_SEC, period); + ret = clk_set_rate(pcchip->clk, rate); + if (ret) + return ret; + + if (state->polarity == PWM_POLARITY_INVERSED) + duty_cycle = period - duty_cycle; + + return clk_set_duty_cycle(pcchip->clk, duty_cycle, period); +} + +static const struct pwm_ops pwm_clk_ops = { + .apply = pwm_clk_apply, + .owner = THIS_MODULE, +}; + +static int pwm_clk_probe(struct platform_device *pdev) +{ + struct pwm_clk_chip *pcchip; + int ret; + + pcchip = devm_kzalloc(&pdev->dev, sizeof(*pcchip), GFP_KERNEL); + if (!pcchip) + return -ENOMEM; + + pcchip->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pcchip->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(pcchip->clk), + "Failed to get clock\n"); + + pcchip->chip.dev = &pdev->dev; + pcchip->chip.ops = &pwm_clk_ops; + pcchip->chip.npwm = 1; + + ret = clk_prepare(pcchip->clk); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "Failed to prepare clock\n"); + + ret = pwmchip_add(&pcchip->chip); + if (ret < 0) { + clk_unprepare(pcchip->clk); + return dev_err_probe(&pdev->dev, ret, "Failed to add pwm chip\n"); + } + + platform_set_drvdata(pdev, pcchip); + return 0; +} + +static int pwm_clk_remove(struct platform_device *pdev) +{ + struct pwm_clk_chip *pcchip = platform_get_drvdata(pdev); + + pwmchip_remove(&pcchip->chip); + + if (pcchip->clk_enabled) + clk_disable(pcchip->clk); + + clk_unprepare(pcchip->clk); + + return 0; +} + +static const struct of_device_id pwm_clk_dt_ids[] = { + { .compatible = "clk-pwm", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, pwm_clk_dt_ids); + +static struct platform_driver pwm_clk_driver = { + .driver = { + .name = "pwm-clk", + .of_match_table = pwm_clk_dt_ids, + }, + .probe = pwm_clk_probe, + .remove = pwm_clk_remove, +}; +module_platform_driver(pwm_clk_driver); + +MODULE_ALIAS("platform:pwm-clk"); +MODULE_AUTHOR("Nikita Travkin <nikita@trvn.ru>"); +MODULE_DESCRIPTION("Clock based PWM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c index 272e0b5d01b8..763f2e3a146d 100644 --- a/drivers/pwm/pwm-lpc18xx-sct.c +++ b/drivers/pwm/pwm-lpc18xx-sct.c @@ -98,7 +98,7 @@ struct lpc18xx_pwm_chip { unsigned long clk_rate; unsigned int period_ns; unsigned int min_period_ns; - unsigned int max_period_ns; + u64 max_period_ns; unsigned int period_event; unsigned long event_map; struct mutex res_lock; @@ -145,40 +145,48 @@ static void lpc18xx_pwm_set_conflict_res(struct lpc18xx_pwm_chip *lpc18xx_pwm, mutex_unlock(&lpc18xx_pwm->res_lock); } -static void lpc18xx_pwm_config_period(struct pwm_chip *chip, int period_ns) +static void lpc18xx_pwm_config_period(struct pwm_chip *chip, u64 period_ns) { struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip); - u64 val; + u32 val; - val = (u64)period_ns * lpc18xx_pwm->clk_rate; - do_div(val, NSEC_PER_SEC); + /* + * With clk_rate < NSEC_PER_SEC this cannot overflow. + * With period_ns < max_period_ns this also fits into an u32. + * As period_ns >= min_period_ns = DIV_ROUND_UP(NSEC_PER_SEC, lpc18xx_pwm->clk_rate); + * we have val >= 1. 
+ */ + val = mul_u64_u64_div_u64(period_ns, lpc18xx_pwm->clk_rate, NSEC_PER_SEC); lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_MATCH(lpc18xx_pwm->period_event), - (u32)val - 1); + val - 1); lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_MATCHREL(lpc18xx_pwm->period_event), - (u32)val - 1); + val - 1); } static void lpc18xx_pwm_config_duty(struct pwm_chip *chip, - struct pwm_device *pwm, int duty_ns) + struct pwm_device *pwm, u64 duty_ns) { struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip); struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm]; - u64 val; + u32 val; - val = (u64)duty_ns * lpc18xx_pwm->clk_rate; - do_div(val, NSEC_PER_SEC); + /* + * With clk_rate < NSEC_PER_SEC this cannot overflow. + * With duty_ns <= period_ns < max_period_ns this also fits into an u32. + */ + val = mul_u64_u64_div_u64(duty_ns, lpc18xx_pwm->clk_rate, NSEC_PER_SEC); lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_MATCH(lpc18xx_data->duty_event), - (u32)val); + val); lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_MATCHREL(lpc18xx_data->duty_event), - (u32)val); + val); } static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, @@ -359,30 +367,35 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) return PTR_ERR(lpc18xx_pwm->base); lpc18xx_pwm->pwm_clk = devm_clk_get(&pdev->dev, "pwm"); - if (IS_ERR(lpc18xx_pwm->pwm_clk)) { - dev_err(&pdev->dev, "failed to get pwm clock\n"); - return PTR_ERR(lpc18xx_pwm->pwm_clk); - } + if (IS_ERR(lpc18xx_pwm->pwm_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(lpc18xx_pwm->pwm_clk), + "failed to get pwm clock\n"); ret = clk_prepare_enable(lpc18xx_pwm->pwm_clk); - if (ret < 0) { - dev_err(&pdev->dev, "could not prepare or enable pwm clock\n"); - return ret; - } + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "could not prepare or enable pwm clock\n"); lpc18xx_pwm->clk_rate = clk_get_rate(lpc18xx_pwm->pwm_clk); if (!lpc18xx_pwm->clk_rate) { - dev_err(&pdev->dev, "pwm clock has no frequency\n"); - ret = -EINVAL; + ret = dev_err_probe(&pdev->dev, + -EINVAL, "pwm clock has no frequency\n"); + goto disable_pwmclk; + } + + /* + * If clkrate is too fast, the calculations in .apply() might overflow. 
+ */ + if (lpc18xx_pwm->clk_rate > NSEC_PER_SEC) { + ret = dev_err_probe(&pdev->dev, -EINVAL, "pwm clock too fast\n"); goto disable_pwmclk; } mutex_init(&lpc18xx_pwm->res_lock); mutex_init(&lpc18xx_pwm->period_lock); - val = (u64)NSEC_PER_SEC * LPC18XX_PWM_TIMER_MAX; - do_div(val, lpc18xx_pwm->clk_rate); - lpc18xx_pwm->max_period_ns = val; + lpc18xx_pwm->max_period_ns = + mul_u64_u64_div_u64(NSEC_PER_SEC, LPC18XX_PWM_TIMER_MAX, lpc18xx_pwm->clk_rate); lpc18xx_pwm->min_period_ns = DIV_ROUND_UP(NSEC_PER_SEC, lpc18xx_pwm->clk_rate); @@ -423,7 +436,7 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) ret = pwmchip_add(&lpc18xx_pwm->chip); if (ret < 0) { - dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret); + dev_err_probe(&pdev->dev, ret, "pwmchip_add failed\n"); goto disable_pwmclk; } diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index d28c0874c7f2..6901a44dc428 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -323,6 +323,12 @@ static const struct pwm_mediatek_of_data mt8183_pwm_data = { .has_ck_26m_sel = true, }; +static const struct pwm_mediatek_of_data mt8365_pwm_data = { + .num_pwms = 3, + .pwm45_fixup = false, + .has_ck_26m_sel = true, +}; + static const struct pwm_mediatek_of_data mt8516_pwm_data = { .num_pwms = 5, .pwm45_fixup = false, @@ -337,6 +343,7 @@ static const struct of_device_id pwm_mediatek_of_match[] = { { .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data }, { .compatible = "mediatek,mt7629-pwm", .data = &mt7629_pwm_data }, { .compatible = "mediatek,mt8183-pwm", .data = &mt8183_pwm_data }, + { .compatible = "mediatek,mt8365-pwm", .data = &mt8365_pwm_data }, { .compatible = "mediatek,mt8516-pwm", .data = &mt8516_pwm_data }, { }, }; diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c index e6d05a329002..2d4fa5e5fdd4 100644 --- a/drivers/pwm/pwm-sifive.c +++ b/drivers/pwm/pwm-sifive.c @@ -23,7 +23,7 @@ #define PWM_SIFIVE_PWMCFG 0x0 #define PWM_SIFIVE_PWMCOUNT 0x8 #define PWM_SIFIVE_PWMS 0x10 -#define PWM_SIFIVE_PWMCMP0 0x20 +#define PWM_SIFIVE_PWMCMP(i) (0x20 + 4 * (i)) /* PWMCFG fields */ #define PWM_SIFIVE_PWMCFG_SCALE GENMASK(3, 0) @@ -36,14 +36,12 @@ #define PWM_SIFIVE_PWMCFG_GANG BIT(24) #define PWM_SIFIVE_PWMCFG_IP BIT(28) -/* PWM_SIFIVE_SIZE_PWMCMP is used to calculate offset for pwmcmpX registers */ -#define PWM_SIFIVE_SIZE_PWMCMP 4 #define PWM_SIFIVE_CMPWIDTH 16 #define PWM_SIFIVE_DEFAULT_PERIOD 10000000 struct pwm_sifive_ddata { struct pwm_chip chip; - struct mutex lock; /* lock to protect user_count */ + struct mutex lock; /* lock to protect user_count and approx_period */ struct notifier_block notifier; struct clk *clk; void __iomem *regs; @@ -78,6 +76,7 @@ static void pwm_sifive_free(struct pwm_chip *chip, struct pwm_device *pwm) mutex_unlock(&ddata->lock); } +/* Called holding ddata->lock */ static void pwm_sifive_update_clock(struct pwm_sifive_ddata *ddata, unsigned long rate) { @@ -112,8 +111,7 @@ static void pwm_sifive_get_state(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip); u32 duty, val; - duty = readl(ddata->regs + PWM_SIFIVE_PWMCMP0 + - pwm->hwpwm * PWM_SIFIVE_SIZE_PWMCMP); + duty = readl(ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm)); state->enabled = duty > 0; @@ -127,24 +125,6 @@ static void pwm_sifive_get_state(struct pwm_chip *chip, struct pwm_device *pwm, state->polarity = PWM_POLARITY_INVERSED; } -static int pwm_sifive_enable(struct pwm_chip *chip, bool enable) -{ - struct pwm_sifive_ddata *ddata =
pwm_sifive_chip_to_ddata(chip); - int ret; - - if (enable) { - ret = clk_enable(ddata->clk); - if (ret) { - dev_err(ddata->chip.dev, "Enable clk failed\n"); - return ret; - } - } else { - clk_disable(ddata->clk); - } - - return 0; -} - static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { @@ -159,13 +139,6 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm, if (state->polarity != PWM_POLARITY_INVERSED) return -EINVAL; - ret = clk_enable(ddata->clk); - if (ret) { - dev_err(ddata->chip.dev, "Enable clk failed\n"); - return ret; - } - - mutex_lock(&ddata->lock); cur_state = pwm->state; enabled = cur_state.enabled; @@ -184,25 +157,36 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm, /* The hardware cannot generate a 100% duty cycle */ frac = min(frac, (1U << PWM_SIFIVE_CMPWIDTH) - 1); + mutex_lock(&ddata->lock); if (state->period != ddata->approx_period) { if (ddata->user_count != 1) { - ret = -EBUSY; - goto exit; + mutex_unlock(&ddata->lock); + return -EBUSY; } ddata->approx_period = state->period; pwm_sifive_update_clock(ddata, clk_get_rate(ddata->clk)); } + mutex_unlock(&ddata->lock); - writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP0 + - pwm->hwpwm * PWM_SIFIVE_SIZE_PWMCMP); + /* + * If the PWM is enabled the clk is already on. So only enable it + * conditionally to have it on exactly once afterwards independent of + * the PWM state. + */ + if (!enabled) { + ret = clk_enable(ddata->clk); + if (ret) { + dev_err(ddata->chip.dev, "Enable clk failed\n"); + return ret; + } + } - if (state->enabled != enabled) - pwm_sifive_enable(chip, state->enabled); + writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm)); -exit: - clk_disable(ddata->clk); - mutex_unlock(&ddata->lock); - return ret; + if (!state->enabled) + clk_disable(ddata->clk); + + return 0; } static const struct pwm_ops pwm_sifive_ops = { @@ -232,6 +216,8 @@ static int pwm_sifive_probe(struct platform_device *pdev) struct pwm_sifive_ddata *ddata; struct pwm_chip *chip; int ret; + u32 val; + unsigned int enabled_pwms = 0, enabled_clks = 1; ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) @@ -258,6 +244,33 @@ static int pwm_sifive_probe(struct platform_device *pdev) return ret; } + val = readl(ddata->regs + PWM_SIFIVE_PWMCFG); + if (val & PWM_SIFIVE_PWMCFG_EN_ALWAYS) { + unsigned int i; + + for (i = 0; i < chip->npwm; ++i) { + val = readl(ddata->regs + PWM_SIFIVE_PWMCMP(i)); + if (val > 0) + ++enabled_pwms; + } + } + + /* The clk should be on once for each running PWM. 
*/ + if (enabled_pwms) { + while (enabled_clks < enabled_pwms) { + /* This is not expected to fail as the clk is already on */ + ret = clk_enable(ddata->clk); + if (unlikely(ret)) { + dev_err_probe(dev, ret, "Failed to enable clk\n"); + goto disable_clk; + } + ++enabled_clks; + } + } else { + clk_disable(ddata->clk); + enabled_clks = 0; + } + /* Watch for changes to underlying clock frequency */ ddata->notifier.notifier_call = pwm_sifive_clock_notifier; ret = clk_notifier_register(ddata->clk, &ddata->notifier); @@ -280,7 +293,11 @@ unregister_clk: clk_notifier_unregister(ddata->clk, &ddata->notifier); disable_clk: - clk_disable_unprepare(ddata->clk); + while (enabled_clks) { + clk_disable(ddata->clk); + --enabled_clks; + } + clk_unprepare(ddata->clk); return ret; } @@ -288,23 +305,19 @@ disable_clk: static int pwm_sifive_remove(struct platform_device *dev) { struct pwm_sifive_ddata *ddata = platform_get_drvdata(dev); - bool is_enabled = false; struct pwm_device *pwm; int ch; + pwmchip_remove(&ddata->chip); + clk_notifier_unregister(ddata->clk, &ddata->notifier); + for (ch = 0; ch < ddata->chip.npwm; ch++) { pwm = &ddata->chip.pwms[ch]; - if (pwm->state.enabled) { - is_enabled = true; - break; - } + if (pwm->state.enabled) + clk_disable(ddata->clk); } - if (is_enabled) - clk_disable(ddata->clk); - clk_disable_unprepare(ddata->clk); - pwmchip_remove(&ddata->chip); - clk_notifier_unregister(ddata->clk, &ddata->notifier); + clk_unprepare(ddata->clk); return 0; } diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c index ed0b63dd38f1..8fb84b441853 100644 --- a/drivers/pwm/pwm-twl-led.c +++ b/drivers/pwm/pwm-twl-led.c @@ -7,6 +7,22 @@ * * This driver is a complete rewrite of the former pwm-twl6030.c authored by: * Hemanth V <hemanthv@ti.com> + * + * Reference manual for the twl6030 is available at: + * https://www.ti.com/lit/ds/symlink/twl6030.pdf + * + * Limitations: + * - The twl6030 hardware only supports two period lengths (128 clock ticks and + * 64 clock ticks); the driver only uses 128 ticks. + * - The hardware doesn't support ON = 0, so the active part of a period doesn't + * start at its beginning. + * - The hardware could support inverted polarity (with a similar limitation as + * for normal: the last clock tick is always inactive). + * - The hardware emits a constant low output when disabled. + * - A request for .duty_cycle = 0 results in an output wave with one active + * clock tick per period. This should better use the disabled state. + * - The driver only implements setting the relative duty cycle. + * - The driver doesn't implement .get_state(). */ #include <linux/module.h> diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index cbe0f96ca342..23e3e4a35cc9 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -546,6 +546,16 @@ config REGULATOR_MAX1586 regulator via I2C bus. The provided regulator is suitable for PXA27x chips to control VCC_CORE and VCC_USIM voltages. +config REGULATOR_MAX597X + tristate "Maxim 597x power switch and monitor" + depends on I2C + depends on OF + depends on MFD_MAX597X + help + This driver controls a Maxim 5970/5978 switch via I2C bus. + The MAX5970/5978 is a smart switch with no output regulation, but + fault protection and voltage and current monitoring capabilities.
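To make the MAX597X help text above concrete: a hot-swap controller of this kind trips when the voltage across an external shunt resistor exceeds a programmed threshold, so a current limit is converted into a trip voltage via V = I * R plus a safety margin. The sketch below is editorial only, with a hypothetical function name; the driver's real conversion, max597x_set_ocp() in max597x-regulator.c further down in this diff, does the same arithmetic and then scales the result into the chip's 8-bit fast-trip DAC range.

#include <linux/math64.h>

/* Sketch only: shunt resistance (uOhm) and current limit (uA) -> trip voltage (uV). */
static u32 shunt_trip_uv(u32 shunt_uohm, u32 lim_ua)
{
	/* V = I * R; both inputs are in micro-units, so divide by 1e6 once. */
	u64 vtrip = div_u64(mul_u32_u32(shunt_uohm, lim_ua), 1000000);

	/* 20% margin against spurious trips and component tolerance, per the datasheet. */
	return div_u64(vtrip * 120, 100);
}

For example, a 2 mOhm (2000 uOhm) shunt with a 10 A (10000000 uA) limit yields a 20000 uV threshold, padded to 24000 uV.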
+ config REGULATOR_MAX77620 tristate "Maxim 77620/MAX20024 voltage regulator" depends on MFD_MAX77620 || COMPILE_TEST @@ -804,6 +814,14 @@ config REGULATOR_MT6360 2-channel buck with Thermal Shutdown and Overload Protection 6-channel High PSRR and Low Dropout LDO. +config REGULATOR_MT6370 + tristate "MT6370 SubPMIC Regulator" + depends on MFD_MT6370 + help + Say Y here to enable MT6370 regulator support. + This driver supports the control for DisplayBias voltages and one + general purpose LDO which is commonly used to drive the vibrator. + config REGULATOR_MT6380 tristate "MediaTek MT6380 PMIC" depends on MTK_PMIC_WRAP @@ -1047,6 +1065,16 @@ config REGULATOR_RT5033 RT5033 PMIC. The device supports multiple regulators like current source, LDO and Buck. +config REGULATOR_RT5120 + tristate "Richtek RT5120 PMIC Regulators" + depends on MFD_RT5120 + help + This adds support for the voltage regulators in the Richtek RT5120 + PMIC. It integrates a 4-channel buck controller, 1 LDO, and 1 EXTEN + pin to control an external power source. Only BUCK1 is adjustable, + from 600mV to 1395mV in 6.25mV steps. The others are all fixed + voltages set by the external hardware circuit. + config REGULATOR_RT5190A tristate "Richtek RT5190A PMIC" depends on I2C diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 8d3ee8b6d41d..fa49bb6cc544 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_REGULATOR_LTC3589) += ltc3589.o obj-$(CONFIG_REGULATOR_LTC3676) += ltc3676.o obj-$(CONFIG_REGULATOR_MAX14577) += max14577-regulator.o obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o +obj-$(CONFIG_REGULATOR_MAX597X) += max597x-regulator.o obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o obj-$(CONFIG_REGULATOR_MAX77650) += max77650-regulator.o obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o @@ -97,6 +98,7 @@ obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o obj-$(CONFIG_REGULATOR_MT6359) += mt6359-regulator.o obj-$(CONFIG_REGULATOR_MT6360) += mt6360-regulator.o +obj-$(CONFIG_REGULATOR_MT6370) += mt6370-regulator.o obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o obj-$(CONFIG_REGULATOR_MTK_DVFSRC) += mtk-dvfsrc-regulator.o @@ -126,6 +128,7 @@ obj-$(CONFIG_REGULATOR_ROHM) += rohm-regulator.o obj-$(CONFIG_REGULATOR_RT4801) += rt4801-regulator.o obj-$(CONFIG_REGULATOR_RT4831) += rt4831-regulator.o obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o +obj-$(CONFIG_REGULATOR_RT5120) += rt5120-regulator.o obj-$(CONFIG_REGULATOR_RT5190A) += rt5190a-regulator.o obj-$(CONFIG_REGULATOR_RT5759) += rt5759-regulator.o obj-$(CONFIG_REGULATOR_RT6160) += rt6160-regulator.o diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 1e54a833f2cf..7150b1d0159e 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1565,6 +1565,9 @@ static int set_machine_constraints(struct regulator_dev *rdev) rdev->constraints->always_on = true; } + if (rdev->desc->off_on_delay) + rdev->last_off = ktime_get(); + /* If the constraints say the regulator should be on at this point * and we have control then make sure it is enabled.
*/ @@ -1592,8 +1595,6 @@ if (rdev->constraints->always_on) rdev->use_count++; - } else if (rdev->desc->off_on_delay) { - rdev->last_off = ktime_get(); } print_constraints(rdev); @@ -4783,22 +4784,26 @@ int regulator_bulk_get(struct device *dev, int num_consumers, consumers[i].consumer = regulator_get(dev, consumers[i].supply); if (IS_ERR(consumers[i].consumer)) { - ret = PTR_ERR(consumers[i].consumer); + ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer), + "Failed to get supply '%s'", + consumers[i].supply); consumers[i].consumer = NULL; goto err; } + + if (consumers[i].init_load_uA > 0) { + ret = regulator_set_load(consumers[i].consumer, + consumers[i].init_load_uA); + if (ret) { + i++; + goto err; + } + } } return 0; err: - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to get supply '%s': %pe\n", - consumers[i].supply, ERR_PTR(ret)); - else - dev_dbg(dev, "Failed to get supply '%s', deferring\n", - consumers[i].supply); - while (--i >= 0) regulator_put(consumers[i].consumer); diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c index 9113233f41cd..32823a87fd40 100644 --- a/drivers/regulator/devres.c +++ b/drivers/regulator/devres.c @@ -166,6 +166,34 @@ int devm_regulator_bulk_get(struct device *dev, int num_consumers, } EXPORT_SYMBOL_GPL(devm_regulator_bulk_get); +/** + * devm_regulator_bulk_get_const - devm_regulator_bulk_get() w/ const data + * + * @dev: device to supply + * @num_consumers: number of consumers to register + * @in_consumers: const configuration of consumers + * @out_consumers: in_consumers is copied here and this is passed to + * devm_regulator_bulk_get(). + * + * This is a convenience function to allow bulk regulator configuration + * to be stored "static const" in files. + * + * Return: 0 on success, an errno on failure.
+ */ +int devm_regulator_bulk_get_const(struct device *dev, int num_consumers, + const struct regulator_bulk_data *in_consumers, + struct regulator_bulk_data **out_consumers) +{ + *out_consumers = devm_kmemdup(dev, in_consumers, + num_consumers * sizeof(*in_consumers), + GFP_KERNEL); + if (*out_consumers == NULL) + return -ENOMEM; + + return devm_regulator_bulk_get(dev, num_consumers, *out_consumers); +} +EXPORT_SYMBOL_GPL(devm_regulator_bulk_get_const); + static void devm_rdev_release(struct device *dev, void *res) { regulator_unregister(*(struct regulator_dev **)res); diff --git a/drivers/regulator/max597x-regulator.c b/drivers/regulator/max597x-regulator.c new file mode 100644 index 000000000000..03c6027682d8 --- /dev/null +++ b/drivers/regulator/max597x-regulator.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device driver for regulators in MAX5970 and MAX5978 IC + * + * Copyright (c) 2022 9elements GmbH + * + * Author: Patrick Rudolph <patrick.rudolph@9elements.com> + */ + +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/i2c.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> +#include <linux/platform_device.h> + +#include <linux/mfd/max597x.h> + +struct max597x_regulator { + int num_switches, mon_rng, irng, shunt_micro_ohms, lim_uA; + struct regmap *regmap; +}; + +enum max597x_regulator_id { + MAX597X_SW0, + MAX597X_SW1, +}; + +static int max597x_uvp_ovp_check_mode(struct regulator_dev *rdev, int severity) +{ + int ret, reg; + + /* Status1 register contains the soft strap values sampled at POR */ + ret = regmap_read(rdev->regmap, MAX5970_REG_STATUS1, ®); + if (ret) + return ret; + + /* Check soft straps match requested mode */ + if (severity == REGULATOR_SEVERITY_PROT) { + if (STATUS1_PROT(reg) != STATUS1_PROT_SHUTDOWN) + return -EOPNOTSUPP; + + return 0; + } + if (STATUS1_PROT(reg) == STATUS1_PROT_SHUTDOWN) + return -EOPNOTSUPP; + + return 0; +} + +static int max597x_set_vp(struct regulator_dev *rdev, int lim_uV, int severity, + bool enable, bool overvoltage) +{ + int off_h, off_l, reg, ret; + struct max597x_regulator *data = rdev_get_drvdata(rdev); + int channel = rdev_get_id(rdev); + + if (overvoltage) { + if (severity == REGULATOR_SEVERITY_WARN) { + off_h = MAX5970_REG_CH_OV_WARN_H(channel); + off_l = MAX5970_REG_CH_OV_WARN_L(channel); + } else { + off_h = MAX5970_REG_CH_OV_CRIT_H(channel); + off_l = MAX5970_REG_CH_OV_CRIT_L(channel); + } + } else { + if (severity == REGULATOR_SEVERITY_WARN) { + off_h = MAX5970_REG_CH_UV_WARN_H(channel); + off_l = MAX5970_REG_CH_UV_WARN_L(channel); + } else { + off_h = MAX5970_REG_CH_UV_CRIT_H(channel); + off_l = MAX5970_REG_CH_UV_CRIT_L(channel); + } + } + + if (enable) + /* reg = ADC_MASK * (lim_uV / 1000000) / (data->mon_rng / 1000000) */ + reg = ADC_MASK * lim_uV / data->mon_rng; + else + reg = 0; + + ret = regmap_write(rdev->regmap, off_h, MAX5970_VAL2REG_H(reg)); + if (ret) + return ret; + + ret = regmap_write(rdev->regmap, off_l, MAX5970_VAL2REG_L(reg)); + if (ret) + return ret; + + return 0; +} + +static int max597x_set_uvp(struct regulator_dev *rdev, int lim_uV, int severity, + bool enable) +{ + int ret; + + /* + * MAX5970 has enable control as a special value in limit reg. Can't + * set limit but keep feature disabled or enable W/O given limit. 
+ */ + if ((lim_uV && !enable) || (!lim_uV && enable)) + return -EINVAL; + + ret = max597x_uvp_ovp_check_mode(rdev, severity); + if (ret) + return ret; + + return max597x_set_vp(rdev, lim_uV, severity, enable, false); +} + +static int max597x_set_ovp(struct regulator_dev *rdev, int lim_uV, int severity, + bool enable) +{ + int ret; + + /* + * MAX5970 has enable control as a special value in limit reg. Can't + * set limit but keep feature disabled or enable W/O given limit. + */ + if ((lim_uV && !enable) || (!lim_uV && enable)) + return -EINVAL; + + ret = max597x_uvp_ovp_check_mode(rdev, severity); + if (ret) + return ret; + + return max597x_set_vp(rdev, lim_uV, severity, enable, true); +} + +static int max597x_set_ocp(struct regulator_dev *rdev, int lim_uA, + int severity, bool enable) +{ + int ret, val, reg; + unsigned int vthst, vthfst; + + struct max597x_regulator *data = rdev_get_drvdata(rdev); + int rdev_id = rdev_get_id(rdev); + /* + * MAX5970 doesn't have enable control for ocp. + * If limit is specified but enable is not set then hold the value in + * a variable & later use it when ocp needs to be enabled. + */ + if (lim_uA != 0 && lim_uA != data->lim_uA) + data->lim_uA = lim_uA; + + if (severity != REGULATOR_SEVERITY_PROT) + return -EINVAL; + + if (enable) { + + /* Calc Vtrip threshold in uV. */ + vthst = + div_u64(mul_u32_u32(data->shunt_micro_ohms, data->lim_uA), + 1000000); + + /* + * As recommended in the datasheet, add 20% margin to avoid + * spurious events & passive component tolerance. + */ + vthst = div_u64(mul_u32_u32(vthst, 120), 100); + + /* Calc fast Vtrip threshold in uV */ + vthfst = vthst * (MAX5970_FAST2SLOW_RATIO / 100); + + if (vthfst > data->irng) { + dev_err(&rdev->dev, "Current limit out of range\n"); + return -EINVAL; + } + /* Fast trip threshold to be programmed */ + val = div_u64(mul_u32_u32(0xFF, vthfst), data->irng); + } else + /* + * Since there is no option to disable ocp, set limit to max + * value + */ + val = 0xFF; + + reg = MAX5970_REG_DAC_FAST(rdev_id); + ret = regmap_write(rdev->regmap, reg, val); + + return ret; +} + +static int max597x_get_status(struct regulator_dev *rdev) +{ + int val, ret; + + ret = regmap_read(rdev->regmap, MAX5970_REG_STATUS3, &val); + if (ret) + return REGULATOR_FAILED_RETRY; + + if (val & MAX5970_STATUS3_ALERT) + return REGULATOR_STATUS_ERROR; + + ret = regulator_is_enabled_regmap(rdev); + if (ret < 0) + return ret; + + if (ret) + return REGULATOR_STATUS_ON; + + return REGULATOR_STATUS_OFF; +} + +static const struct regulator_ops max597x_switch_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .get_status = max597x_get_status, + .set_over_voltage_protection = max597x_set_ovp, + .set_under_voltage_protection = max597x_set_uvp, + .set_over_current_protection = max597x_set_ocp, +}; + +static int max597x_dt_parse(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + struct max597x_regulator *data = cfg->driver_data; + int ret = 0; + + ret = + of_property_read_u32(np, "shunt-resistor-micro-ohms", + &data->shunt_micro_ohms); + if (ret < 0) + dev_err(cfg->dev, + "property 'shunt-resistor-micro-ohms' not found, err %d\n", + ret); + return ret; + +} + +#define MAX597X_SWITCH(_ID, _ereg, _chan, _supply) { \ + .name = #_ID, \ + .of_match = of_match_ptr(#_ID), \ + .ops = &max597x_switch_ops, \ + .regulators_node = of_match_ptr("regulators"), \ + .type = REGULATOR_VOLTAGE, \ + .id = MAX597X_##_ID, \ + .owner =
THIS_MODULE, \ + .supply_name = _supply, \ + .enable_reg = _ereg, \ + .enable_mask = CHXEN((_chan)), \ + .of_parse_cb = max597x_dt_parse, \ +} + +static const struct regulator_desc regulators[] = { + MAX597X_SWITCH(SW0, MAX5970_REG_CHXEN, 0, "vss1"), + MAX597X_SWITCH(SW1, MAX5970_REG_CHXEN, 1, "vss2"), +}; + +static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg, + unsigned int *val) +{ + int ret; + + ret = regmap_read(map, reg, val); + if (ret) + return ret; + + if (*val) + return regmap_write(map, reg, *val); + + return 0; +} + +static int max597x_irq_handler(int irq, struct regulator_irq_data *rid, + unsigned long *dev_mask) +{ + struct regulator_err_state *stat; + struct max597x_regulator *d = (struct max597x_regulator *)rid->data; + int val, ret, i; + + ret = max597x_regmap_read_clear(d->regmap, MAX5970_REG_FAULT0, &val); + if (ret) + return REGULATOR_FAILED_RETRY; + + *dev_mask = 0; + for (i = 0; i < d->num_switches; i++) { + stat = &rid->states[i]; + stat->notifs = 0; + stat->errors = 0; + } + + for (i = 0; i < d->num_switches; i++) { + stat = &rid->states[i]; + + if (val & UV_STATUS_CRIT(i)) { + *dev_mask |= 1 << i; + stat->notifs |= REGULATOR_EVENT_UNDER_VOLTAGE; + stat->errors |= REGULATOR_ERROR_UNDER_VOLTAGE; + } else if (val & UV_STATUS_WARN(i)) { + *dev_mask |= 1 << i; + stat->notifs |= REGULATOR_EVENT_UNDER_VOLTAGE_WARN; + stat->errors |= REGULATOR_ERROR_UNDER_VOLTAGE_WARN; + } + } + + ret = max597x_regmap_read_clear(d->regmap, MAX5970_REG_FAULT1, &val); + if (ret) + return REGULATOR_FAILED_RETRY; + + for (i = 0; i < d->num_switches; i++) { + stat = &rid->states[i]; + + if (val & OV_STATUS_CRIT(i)) { + *dev_mask |= 1 << i; + stat->notifs |= REGULATOR_EVENT_REGULATION_OUT; + stat->errors |= REGULATOR_ERROR_REGULATION_OUT; + } else if (val & OV_STATUS_WARN(i)) { + *dev_mask |= 1 << i; + stat->notifs |= REGULATOR_EVENT_OVER_VOLTAGE_WARN; + stat->errors |= REGULATOR_ERROR_OVER_VOLTAGE_WARN; + } + } + + ret = max597x_regmap_read_clear(d->regmap, MAX5970_REG_FAULT2, &val); + if (ret) + return REGULATOR_FAILED_RETRY; + + for (i = 0; i < d->num_switches; i++) { + stat = &rid->states[i]; + + if (val & OC_STATUS_WARN(i)) { + *dev_mask |= 1 << i; + stat->notifs |= REGULATOR_EVENT_OVER_CURRENT_WARN; + stat->errors |= REGULATOR_ERROR_OVER_CURRENT_WARN; + } + } + + ret = regmap_read(d->regmap, MAX5970_REG_STATUS0, &val); + if (ret) + return REGULATOR_FAILED_RETRY; + + for (i = 0; i < d->num_switches; i++) { + stat = &rid->states[i]; + + if ((val & MAX5970_CB_IFAULTF(i)) + || (val & MAX5970_CB_IFAULTS(i))) { + *dev_mask |= 1 << i; + stat->notifs |= + REGULATOR_EVENT_OVER_CURRENT | + REGULATOR_EVENT_DISABLE; + stat->errors |= + REGULATOR_ERROR_OVER_CURRENT | REGULATOR_ERROR_FAIL; + + /* Clear the sub-IRQ status */ + regulator_disable_regmap(stat->rdev); + } + } + return 0; +} + +static const struct regmap_config max597x_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = MAX_REGISTERS, +}; + +static int max597x_adc_range(struct regmap *regmap, const int ch, + u32 *irng, u32 *mon_rng) +{ + unsigned int reg; + int ret; + + /* Decode current ADC range */ + ret = regmap_read(regmap, MAX5970_REG_STATUS2, ®); + if (ret) + return ret; + switch (MAX5970_IRNG(reg, ch)) { + case 0: + *irng = 100000; /* 100 mV */ + break; + case 1: + *irng = 50000; /* 50 mV */ + break; + case 2: + *irng = 25000; /* 25 mV */ + break; + default: + return -EINVAL; + } + + /* Decode current voltage monitor range */ + ret = regmap_read(regmap, MAX5970_REG_MON_RANGE, ®); + if (ret) + 
return ret; + + *mon_rng = MAX5970_MON_MAX_RANGE_UV >> MAX5970_MON(reg, ch); + + return 0; +} + +static int max597x_setup_irq(struct device *dev, + int irq, + struct regulator_dev *rdevs[MAX5970_NUM_SWITCHES], + int num_switches, struct max597x_regulator *data) +{ + struct regulator_irq_desc max597x_notif = { + .name = "max597x-irq", + .map_event = max597x_irq_handler, + .data = data, + }; + int errs = REGULATOR_ERROR_UNDER_VOLTAGE | + REGULATOR_ERROR_UNDER_VOLTAGE_WARN | + REGULATOR_ERROR_OVER_VOLTAGE_WARN | + REGULATOR_ERROR_REGULATION_OUT | + REGULATOR_ERROR_OVER_CURRENT | + REGULATOR_ERROR_OVER_CURRENT_WARN | REGULATOR_ERROR_FAIL; + void *irq_helper; + + /* Register notifiers - can fail if IRQ is not given */ + irq_helper = devm_regulator_irq_helper(dev, &max597x_notif, + irq, 0, errs, NULL, + &rdevs[0], num_switches); + if (IS_ERR(irq_helper)) { + if (PTR_ERR(irq_helper) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + dev_warn(dev, "IRQ disabled %pe\n", irq_helper); + } + + return 0; +} + +static int max597x_regulator_probe(struct platform_device *pdev) +{ + + + struct max597x_data *max597x = dev_get_drvdata(pdev->dev.parent); + struct max597x_regulator *data; + + struct regulator_config config = { }; + struct regulator_dev *rdev; + struct regulator_dev *rdevs[MAX5970_NUM_SWITCHES]; + int num_switches = max597x->num_switches; + int ret, i; + + for (i = 0; i < num_switches; i++) { + data = + devm_kzalloc(max597x->dev, sizeof(struct max597x_regulator), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->num_switches = num_switches; + data->regmap = max597x->regmap; + + ret = max597x_adc_range(data->regmap, i, &max597x->irng[i], &max597x->mon_rng[i]); + if (ret < 0) + return ret; + + data->irng = max597x->irng[i]; + data->mon_rng = max597x->mon_rng[i]; + + config.dev = max597x->dev; + config.driver_data = (void *)data; + config.regmap = data->regmap; + rdev = devm_regulator_register(max597x->dev, + ®ulators[i], &config); + if (IS_ERR(rdev)) { + dev_err(max597x->dev, "failed to register regulator %s\n", + regulators[i].name); + return PTR_ERR(rdev); + } + rdevs[i] = rdev; + max597x->shunt_micro_ohms[i] = data->shunt_micro_ohms; + } + + if (max597x->irq) { + ret = + max597x_setup_irq(max597x->dev, max597x->irq, rdevs, num_switches, + data); + if (ret) { + dev_err(max597x->dev, "IRQ setup failed"); + return ret; + } + } + + return ret; +} + +static struct platform_driver max597x_regulator_driver = { + .driver = { + .name = "max597x-regulator", + }, + .probe = max597x_regulator_probe, +}; + +module_platform_driver(max597x_regulator_driver); + + +MODULE_AUTHOR("Patrick Rudolph <patrick.rudolph@9elements.com>"); +MODULE_DESCRIPTION("MAX5970_hot-swap controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/mp5416.c b/drivers/regulator/mp5416.c index 39cebec0edb6..82892d71c2c9 100644 --- a/drivers/regulator/mp5416.c +++ b/drivers/regulator/mp5416.c @@ -6,14 +6,14 @@ // // Author: Saravanan Sekar <sravanhome@gmail.com> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> #include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/driver.h> -#include <linux/i2c.h> #define MP5416_REG_CTL0 0x00 #define MP5416_REG_CTL1 0x01 @@ -174,10 +174,22 @@ static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = { MP5416LDO("ldo4", 4, BIT(1)), }; +static struct regulator_desc 
mp5496_regulators_desc[MP5416_MAX_REGULATORS] = { + MP5416BUCK("buck1", 1, mp5416_I_limits1, MP5416_REG_CTL1, BIT(0), 1), + MP5416BUCK("buck2", 2, mp5416_I_limits2, MP5416_REG_CTL1, BIT(1), 1), + MP5416BUCK("buck3", 3, mp5416_I_limits1, MP5416_REG_CTL1, BIT(2), 1), + MP5416BUCK("buck4", 4, mp5416_I_limits2, MP5416_REG_CTL2, BIT(5), 1), + MP5416LDO("ldo1", 1, BIT(4)), + MP5416LDO("ldo2", 2, BIT(3)), + MP5416LDO("ldo3", 3, BIT(2)), + MP5416LDO("ldo4", 4, BIT(1)), +}; + static int mp5416_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct regulator_config config = { NULL, }; + static const struct regulator_desc *desc; struct regulator_dev *rdev; struct regmap *regmap; int i; @@ -188,12 +200,16 @@ static int mp5416_i2c_probe(struct i2c_client *client) return PTR_ERR(regmap); } + desc = of_device_get_match_data(dev); + if (!desc) + return -ENODEV; + config.dev = dev; config.regmap = regmap; for (i = 0; i < MP5416_MAX_REGULATORS; i++) { rdev = devm_regulator_register(dev, - &mp5416_regulators_desc[i], + &desc[i], &config); if (IS_ERR(rdev)) { dev_err(dev, "Failed to register regulator!\n"); @@ -205,13 +221,15 @@ static int mp5416_i2c_probe(struct i2c_client *client) } static const struct of_device_id mp5416_of_match[] = { - { .compatible = "mps,mp5416" }, + { .compatible = "mps,mp5416", .data = &mp5416_regulators_desc }, + { .compatible = "mps,mp5496", .data = &mp5496_regulators_desc }, {}, }; MODULE_DEVICE_TABLE(of, mp5416_of_match); static const struct i2c_device_id mp5416_id[] = { { "mp5416", }, + { "mp5496", }, { }, }; MODULE_DEVICE_TABLE(i2c, mp5416_id); diff --git a/drivers/regulator/mt6370-regulator.c b/drivers/regulator/mt6370-regulator.c new file mode 100644 index 000000000000..e73f5a46cb9a --- /dev/null +++ b/drivers/regulator/mt6370-regulator.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/bits.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> + +enum { + MT6370_IDX_DSVBOOST = 0, + MT6370_IDX_DSVPOS, + MT6370_IDX_DSVNEG, + MT6370_IDX_VIBLDO, + MT6370_MAX_IDX +}; + +#define MT6370_REG_LDO_CFG 0x180 +#define MT6370_REG_LDO_VOUT 0x181 +#define MT6370_REG_DB_CTRL1 0x1B0 +#define MT6370_REG_DB_CTRL2 0x1B1 +#define MT6370_REG_DB_VBST 0x1B2 +#define MT6370_REG_DB_VPOS 0x1B3 +#define MT6370_REG_DB_VNEG 0x1B4 +#define MT6370_REG_LDO_STAT 0x1DC +#define MT6370_REG_DB_STAT 0x1DF + +#define MT6370_LDOOMS_MASK BIT(7) +#define MT6370_LDOEN_MASK BIT(7) +#define MT6370_LDOVOUT_MASK GENMASK(3, 0) +#define MT6370_DBPERD_MASK (BIT(7) | BIT(4)) +#define MT6370_DBEXTEN_MASK BIT(0) +#define MT6370_DBVPOSEN_MASK BIT(6) +#define MT6370_DBVPOSDISG_MASK BIT(5) +#define MT6370_DBVNEGEN_MASK BIT(3) +#define MT6370_DBVNEGDISG_MASK BIT(2) +#define MT6370_DBALLON_MASK (MT6370_DBVPOSEN_MASK | MT6370_DBVNEGEN_MASK) +#define MT6370_DBSLEW_MASK GENMASK(7, 6) +#define MT6370_DBVOUT_MASK GENMASK(5, 0) +#define MT6370_LDOOC_EVT_MASK BIT(7) +#define MT6370_POSSCP_EVT_MASK BIT(7) +#define MT6370_NEGSCP_EVT_MASK BIT(6) +#define MT6370_BSTOCP_EVT_MASK BIT(5) +#define MT6370_POSOCP_EVT_MASK BIT(4) +#define MT6370_NEGOCP_EVT_MASK BIT(3) + +#define MT6370_LDO_MINUV 1600000 +#define MT6370_LDO_STPUV 200000 +#define MT6370_LDO_N_VOLT 13 +#define MT6370_DBVBOOST_MINUV 4000000 +#define MT6370_DBVBOOST_STPUV 50000 
+#define MT6370_DBVBOOST_N_VOLT 45 +#define MT6370_DBVOUT_MINUV 4000000 +#define MT6370_DBVOUT_STPUV 50000 +#define MT6370_DBVOUT_N_VOLT 41 + +struct mt6370_priv { + struct device *dev; + struct regmap *regmap; + struct regulator_dev *rdev[MT6370_MAX_IDX]; + bool use_external_ctrl; +}; + +static const unsigned int mt6370_vpos_ramp_tbl[] = { 8540, 5840, 4830, 3000 }; +static const unsigned int mt6370_vneg_ramp_tbl[] = { 10090, 6310, 5050, 3150 }; + +static int mt6370_get_error_flags(struct regulator_dev *rdev, + unsigned int *flags) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + unsigned int stat_reg, stat, rpt_flags = 0; + int rid = rdev_get_id(rdev), ret; + + if (rid == MT6370_IDX_VIBLDO) + stat_reg = MT6370_REG_LDO_STAT; + else + stat_reg = MT6370_REG_DB_STAT; + + ret = regmap_read(regmap, stat_reg, &stat); + if (ret) + return ret; + + switch (rid) { + case MT6370_IDX_DSVBOOST: + if (stat & MT6370_BSTOCP_EVT_MASK) + rpt_flags |= REGULATOR_ERROR_OVER_CURRENT; + break; + case MT6370_IDX_DSVPOS: + if (stat & MT6370_POSSCP_EVT_MASK) + rpt_flags |= REGULATOR_ERROR_UNDER_VOLTAGE; + + if (stat & MT6370_POSOCP_EVT_MASK) + rpt_flags |= REGULATOR_ERROR_OVER_CURRENT; + break; + case MT6370_IDX_DSVNEG: + if (stat & MT6370_NEGSCP_EVT_MASK) + rpt_flags |= REGULATOR_ERROR_UNDER_VOLTAGE; + + if (stat & MT6370_NEGOCP_EVT_MASK) + rpt_flags |= REGULATOR_ERROR_OVER_CURRENT; + break; + default: + if (stat & MT6370_LDOOC_EVT_MASK) + rpt_flags |= REGULATOR_ERROR_OVER_CURRENT; + break; + } + + *flags = rpt_flags; + return 0; +} + +static const struct regulator_ops mt6370_dbvboost_ops = { + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .list_voltage = regulator_list_voltage_linear, + .get_bypass = regulator_get_bypass_regmap, + .set_bypass = regulator_set_bypass_regmap, + .get_error_flags = mt6370_get_error_flags, +}; + +static const struct regulator_ops mt6370_dbvout_ops = { + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .list_voltage = regulator_list_voltage_linear, + .is_enabled = regulator_is_enabled_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .set_ramp_delay = regulator_set_ramp_delay_regmap, + .get_error_flags = mt6370_get_error_flags, +}; + +static const struct regulator_ops mt6370_ldo_ops = { + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .list_voltage = regulator_list_voltage_linear, + .is_enabled = regulator_is_enabled_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .get_error_flags = mt6370_get_error_flags, +}; + +static int mt6370_of_parse_cb(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *config) +{ + struct mt6370_priv *priv = config->driver_data; + struct gpio_desc *enable_gpio; + int ret; + + enable_gpio = fwnode_gpiod_get_index(of_fwnode_handle(np), "enable", 0, + GPIOD_OUT_HIGH | + GPIOD_FLAGS_BIT_NONEXCLUSIVE, + desc->name); + if (IS_ERR(enable_gpio)) { + config->ena_gpiod = NULL; + return 0; + } + + /* + * RG control by default + * Only if all are using external pin, change all by external control + */ + if (priv->use_external_ctrl) { + ret = regmap_update_bits(priv->regmap, MT6370_REG_DB_CTRL1, + MT6370_DBEXTEN_MASK, + MT6370_DBEXTEN_MASK); + if 
(ret) + return ret; + } + + config->ena_gpiod = enable_gpio; + priv->use_external_ctrl = true; + return 0; +} + +static const struct regulator_desc mt6370_regulator_descs[] = { + { + .name = "mt6370-dsv-vbst", + .of_match = of_match_ptr("dsvbst"), + .regulators_node = of_match_ptr("regulators"), + .id = MT6370_IDX_DSVBOOST, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .ops = &mt6370_dbvboost_ops, + .min_uV = MT6370_DBVBOOST_MINUV, + .uV_step = MT6370_DBVBOOST_STPUV, + .n_voltages = MT6370_DBVBOOST_N_VOLT, + .vsel_reg = MT6370_REG_DB_VBST, + .vsel_mask = MT6370_DBVOUT_MASK, + .bypass_reg = MT6370_REG_DB_CTRL1, + .bypass_mask = MT6370_DBPERD_MASK, + .bypass_val_on = MT6370_DBPERD_MASK, + }, + { + .name = "mt6370-dsv-vpos", + .of_match = of_match_ptr("dsvpos"), + .regulators_node = of_match_ptr("regulators"), + .id = MT6370_IDX_DSVPOS, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .of_parse_cb = mt6370_of_parse_cb, + .ops = &mt6370_dbvout_ops, + .min_uV = MT6370_DBVOUT_MINUV, + .uV_step = MT6370_DBVOUT_STPUV, + .n_voltages = MT6370_DBVOUT_N_VOLT, + .vsel_reg = MT6370_REG_DB_VPOS, + .vsel_mask = MT6370_DBVOUT_MASK, + .enable_reg = MT6370_REG_DB_CTRL2, + .enable_mask = MT6370_DBVPOSEN_MASK, + .ramp_reg = MT6370_REG_DB_VPOS, + .ramp_mask = MT6370_DBSLEW_MASK, + .ramp_delay_table = mt6370_vpos_ramp_tbl, + .n_ramp_values = ARRAY_SIZE(mt6370_vpos_ramp_tbl), + .active_discharge_reg = MT6370_REG_DB_CTRL2, + .active_discharge_mask = MT6370_DBVPOSDISG_MASK, + .active_discharge_on = MT6370_DBVPOSDISG_MASK, + }, + { + .name = "mt6370-dsv-vneg", + .of_match = of_match_ptr("dsvneg"), + .regulators_node = of_match_ptr("regulators"), + .id = MT6370_IDX_DSVNEG, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .of_parse_cb = mt6370_of_parse_cb, + .ops = &mt6370_dbvout_ops, + .min_uV = MT6370_DBVOUT_MINUV, + .uV_step = MT6370_DBVOUT_STPUV, + .n_voltages = MT6370_DBVOUT_N_VOLT, + .vsel_reg = MT6370_REG_DB_VNEG, + .vsel_mask = MT6370_DBVOUT_MASK, + .enable_reg = MT6370_REG_DB_CTRL2, + .enable_mask = MT6370_DBVNEGEN_MASK, + .ramp_reg = MT6370_REG_DB_VNEG, + .ramp_mask = MT6370_DBSLEW_MASK, + .ramp_delay_table = mt6370_vneg_ramp_tbl, + .n_ramp_values = ARRAY_SIZE(mt6370_vneg_ramp_tbl), + .active_discharge_reg = MT6370_REG_DB_CTRL2, + .active_discharge_mask = MT6370_DBVNEGDISG_MASK, + .active_discharge_on = MT6370_DBVNEGDISG_MASK, + }, + { + .name = "mt6370-vib-ldo", + .of_match = of_match_ptr("vibldo"), + .regulators_node = of_match_ptr("regulators"), + .id = MT6370_IDX_VIBLDO, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .ops = &mt6370_ldo_ops, + .min_uV = MT6370_LDO_MINUV, + .uV_step = MT6370_LDO_STPUV, + .n_voltages = MT6370_LDO_N_VOLT, + .vsel_reg = MT6370_REG_LDO_VOUT, + .vsel_mask = MT6370_LDOVOUT_MASK, + .enable_reg = MT6370_REG_LDO_VOUT, + .enable_mask = MT6370_LDOEN_MASK, + .active_discharge_reg = MT6370_REG_LDO_CFG, + .active_discharge_mask = MT6370_LDOOMS_MASK, + .active_discharge_on = MT6370_LDOOMS_MASK, + } +}; + +static irqreturn_t mt6370_scp_handler(int irq, void *data) +{ + struct regulator_dev *rdev = data; + + regulator_notifier_call_chain(rdev, REGULATOR_EVENT_UNDER_VOLTAGE, + NULL); + return IRQ_HANDLED; +} + +static irqreturn_t mt6370_ocp_handler(int irq, void *data) +{ + struct regulator_dev *rdev = data; + + regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_CURRENT, NULL); + return IRQ_HANDLED; +} + +static int mt6370_regulator_irq_register(struct mt6370_priv *priv) +{ + struct platform_device *pdev = to_platform_device(priv->dev); + static const 
struct {
+		const char *name;
+		int rid;
+		irq_handler_t handler;
+	} mt6370_irqs[] = {
+		{ "db_vpos_scp", MT6370_IDX_DSVPOS, mt6370_scp_handler },
+		{ "db_vneg_scp", MT6370_IDX_DSVNEG, mt6370_scp_handler },
+		{ "db_vbst_ocp", MT6370_IDX_DSVBOOST, mt6370_ocp_handler },
+		{ "db_vpos_ocp", MT6370_IDX_DSVPOS, mt6370_ocp_handler },
+		{ "db_vneg_ocp", MT6370_IDX_DSVNEG, mt6370_ocp_handler },
+		{ "ldo_oc", MT6370_IDX_VIBLDO, mt6370_ocp_handler }
+	};
+	struct regulator_dev *rdev;
+	int i, irq, ret;
+
+	for (i = 0; i < ARRAY_SIZE(mt6370_irqs); i++) {
+		irq = platform_get_irq_byname(pdev, mt6370_irqs[i].name);
+		if (irq < 0)
+			return irq;
+
+		rdev = priv->rdev[mt6370_irqs[i].rid];
+
+		ret = devm_request_threaded_irq(priv->dev, irq, NULL,
+						mt6370_irqs[i].handler, 0,
+						mt6370_irqs[i].name, rdev);
+		if (ret) {
+			dev_err(priv->dev,
+				"Failed to register (%d) interrupt\n", i);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int mt6370_regulator_register(struct mt6370_priv *priv)
+{
+	struct regulator_dev *rdev;
+	struct regulator_config cfg = {};
+	struct device *parent = priv->dev->parent;
+	int i;
+
+	cfg.dev = parent;
+	cfg.driver_data = priv;
+
+	for (i = 0; i < MT6370_MAX_IDX; i++) {
+		rdev = devm_regulator_register(priv->dev,
+					       mt6370_regulator_descs + i,
+					       &cfg);
+		if (IS_ERR(rdev)) {
+			dev_err(priv->dev,
+				"Failed to register (%d) regulator\n", i);
+			return PTR_ERR(rdev);
+		}
+
+		priv->rdev[i] = rdev;
+	}
+
+	return 0;
+}
+
+static int mt6370_regulator_probe(struct platform_device *pdev)
+{
+	struct mt6370_priv *priv;
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = &pdev->dev;
+
+	priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!priv->regmap) {
+		dev_err(&pdev->dev, "Failed to init regmap\n");
+		return -ENODEV;
+	}
+
+	ret = mt6370_regulator_register(priv);
+	if (ret)
+		return ret;
+
+	return mt6370_regulator_irq_register(priv);
+}
+
+static const struct platform_device_id mt6370_devid_table[] = {
+	{ "mt6370-regulator", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(platform, mt6370_devid_table);
+
+static struct platform_driver mt6370_regulator_driver = {
+	.driver = {
+		.name = "mt6370-regulator",
+	},
+	.id_table = mt6370_devid_table,
+	.probe = mt6370_regulator_probe,
+};
+module_platform_driver(mt6370_regulator_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Mediatek MT6370 Regulator Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/mt6380-regulator.c b/drivers/regulator/mt6380-regulator.c
index 2e6b61d3b0cf..43234296df36 100644
--- a/drivers/regulator/mt6380-regulator.c
+++ b/drivers/regulator/mt6380-regulator.c
@@ -319,7 +319,7 @@ static const struct platform_device_id mt6380_platform_ids[] = {
 };
 MODULE_DEVICE_TABLE(platform, mt6380_platform_ids);
 
-static const struct of_device_id mt6380_of_match[] = {
+static const struct of_device_id __maybe_unused mt6380_of_match[] = {
 	{ .compatible = "mediatek,mt6380-regulator", },
 	{ /* sentinel */ },
 };
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index f54d4f176882..e12b681c72e5 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -264,8 +264,12 @@ static int of_get_regulation_constraints(struct device *dev,
 		}
 
 		suspend_np = of_get_child_by_name(np, regulator_states[i]);
-		if (!suspend_np || !suspend_state)
+		if (!suspend_np)
 			continue;
+		if (!suspend_state) {
+			of_node_put(suspend_np);
+			continue;
+		}
 
 		if (!of_property_read_u32(suspend_np, "regulator-mode", &pval)) {
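Editor's note: the of_regulator.c hunk above closes a device_node refcount leak — of_get_child_by_name() takes a reference on the returned node, and the old combined test could continue without dropping it. A minimal sketch of the balancing rule, assuming a kernel context (walk_states and its parameters are illustrative, not an existing API):

#include <linux/of.h>

/*
 * Every of_get_child_by_name() must be paired with of_node_put() on
 * every exit path, including an early continue or return.
 */
static void walk_states(struct device_node *np,
			const char * const *states, int n,
			const bool *wanted)
{
	int i;

	for (i = 0; i < n; i++) {
		struct device_node *child;

		child = of_get_child_by_name(np, states[i]);
		if (!child)
			continue;	/* nothing was taken, nothing to put */
		if (!wanted[i]) {
			of_node_put(child);	/* drop the ref before skipping */
			continue;
		}
		/* ... parse properties of child here ... */
		of_node_put(child);
	}
}

diff --git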
a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index ef6e47d025ca..59024c639141 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -205,6 +205,7 @@ static const struct regulator_ops rpm_mp5496_ops = { .is_enabled = rpm_reg_is_enabled, .list_voltage = regulator_list_voltage_linear_range, + .get_voltage = rpm_reg_get_voltage, .set_voltage = rpm_reg_set_voltage, }; @@ -357,10 +358,10 @@ static const struct regulator_desc pm8941_switch = { static const struct regulator_desc pm8916_pldo = { .linear_ranges = (struct linear_range[]) { - REGULATOR_LINEAR_RANGE(750000, 0, 208, 12500), + REGULATOR_LINEAR_RANGE(1750000, 0, 127, 12500), }, .n_linear_ranges = 1, - .n_voltages = 209, + .n_voltages = 128, .ops = &rpm_smps_ldo_ops, }; @@ -783,6 +784,29 @@ static const struct rpm_regulator_data rpm_pm8841_regulators[] = { {} }; +static const struct rpm_regulator_data rpm_pm8909_regulators[] = { + { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8916_buck_lvo_smps, "vdd_s1" }, + { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8916_buck_hvo_smps, "vdd_s2" }, + { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8916_nldo, "vdd_l1" }, + { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8916_nldo, "vdd_l2_l5" }, + { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8916_nldo, "vdd_l3_l6_l10" }, + { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8916_pldo, "vdd_l4_l7" }, + { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8226_pldo, "vdd_l2_l5" }, + { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8226_pldo, "vdd_l3_l6_l10" }, + { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8226_pldo, "vdd_l4_l7" }, + { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8916_pldo, "vdd_l8_l11_l15_l18" }, + { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8916_pldo, "vdd_l9_l12_l14_l17" }, + { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8916_nldo, "vdd_l3_l6_l10" }, + { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8226_pldo, "vdd_l8_l11_l15_l18" }, + { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8916_pldo, "vdd_l9_l12_l14_l17" }, + { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8916_pldo, "vdd_l13" }, + { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8916_pldo, "vdd_l9_l12_l14_l17" }, + { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8916_pldo, "vdd_l8_l11_l15_l18" }, + { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8916_pldo, "vdd_l9_l12_l14_l17" }, + { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8916_pldo, "vdd_l8_l11_l15_l18" }, + {} +}; + static const struct rpm_regulator_data rpm_pm8916_regulators[] = { { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8916_buck_lvo_smps, "vdd_s1" }, { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8916_buck_lvo_smps, "vdd_s2" }, @@ -1221,6 +1245,7 @@ static const struct rpm_regulator_data rpm_pm2250_regulators[] = { static const struct of_device_id rpm_of_match[] = { { .compatible = "qcom,rpm-mp5496-regulators", .data = &rpm_mp5496_regulators }, { .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators }, + { .compatible = "qcom,rpm-pm8909-regulators", .data = &rpm_pm8909_regulators }, { .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators }, { .compatible = "qcom,rpm-pm8226-regulators", .data = &rpm_pm8226_regulators }, { .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators }, diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c index 02bfce981150..a2d0292a92fd 100644 --- a/drivers/regulator/qcom_spmi-regulator.c +++ b/drivers/regulator/qcom_spmi-regulator.c @@ -164,6 +164,8 @@ enum spmi_regulator_subtype { SPMI_REGULATOR_SUBTYPE_ULT_HF_CTL3 = 0x0f, SPMI_REGULATOR_SUBTYPE_ULT_HF_CTL4 = 0x10, SPMI_REGULATOR_SUBTYPE_HFS430 = 0x0a, + SPMI_REGULATOR_SUBTYPE_HT_P150 = 0x35, + SPMI_REGULATOR_SUBTYPE_HT_P600 = 0x3d, 
}; enum spmi_common_regulator_registers { @@ -544,6 +546,14 @@ static struct spmi_voltage_range hfs430_ranges[] = { SPMI_VOLTAGE_RANGE(0, 320000, 320000, 2040000, 2040000, 8000), }; +static struct spmi_voltage_range ht_p150_ranges[] = { + SPMI_VOLTAGE_RANGE(0, 1616000, 1616000, 3304000, 3304000, 8000), +}; + +static struct spmi_voltage_range ht_p600_ranges[] = { + SPMI_VOLTAGE_RANGE(0, 1704000, 1704000, 1896000, 1896000, 8000), +}; + static DEFINE_SPMI_SET_POINTS(pldo); static DEFINE_SPMI_SET_POINTS(nldo1); static DEFINE_SPMI_SET_POINTS(nldo2); @@ -564,6 +574,8 @@ static DEFINE_SPMI_SET_POINTS(nldo660); static DEFINE_SPMI_SET_POINTS(ht_lvpldo); static DEFINE_SPMI_SET_POINTS(ht_nldo); static DEFINE_SPMI_SET_POINTS(hfs430); +static DEFINE_SPMI_SET_POINTS(ht_p150); +static DEFINE_SPMI_SET_POINTS(ht_p600); static inline int spmi_vreg_read(struct spmi_regulator *vreg, u16 addr, u8 *buf, int len) @@ -1458,6 +1470,8 @@ static const struct regulator_ops spmi_hfs430_ops = { static const struct spmi_regulator_mapping supported_regulators[] = { /* type subtype dig_min dig_max ltype ops setpoints hpm_min */ + SPMI_VREG(LDO, HT_P600, 0, INF, HFS430, hfs430, ht_p600, 10000), + SPMI_VREG(LDO, HT_P150, 0, INF, HFS430, hfs430, ht_p150, 10000), SPMI_VREG(BUCK, GP_CTL, 0, INF, SMPS, smps, smps, 100000), SPMI_VREG(BUCK, HFS430, 0, INF, HFS430, hfs430, hfs430, 10000), SPMI_VREG(LDO, N300, 0, INF, LDO, ldo, nldo1, 10000), @@ -2125,6 +2139,28 @@ static const struct spmi_regulator_data pm8005_regulators[] = { { } }; +static const struct spmi_regulator_data pmp8074_regulators[] = { + { "s1", 0x1400, "vdd_s1"}, + { "s2", 0x1700, "vdd_s2"}, + { "s3", 0x1a00, "vdd_s3"}, + { "s4", 0x1d00, "vdd_s4"}, + { "s5", 0x2000, "vdd_s5"}, + { "l1", 0x4000, "vdd_l1_l2"}, + { "l2", 0x4100, "vdd_l1_l2"}, + { "l3", 0x4200, "vdd_l3_l8"}, + { "l4", 0x4300, "vdd_l4"}, + { "l5", 0x4400, "vdd_l5_l6_l15"}, + { "l6", 0x4500, "vdd_l5_l6_l15"}, + { "l7", 0x4600, "vdd_l7"}, + { "l8", 0x4700, "vdd_l3_l8"}, + { "l9", 0x4800, "vdd_l9"}, + /* l10 is currently unsupported HT_P50 */ + { "l11", 0x4a00, "vdd_l10_l11_l12_l13"}, + { "l12", 0x4b00, "vdd_l10_l11_l12_l13"}, + { "l13", 0x4c00, "vdd_l10_l11_l12_l13"}, + { } +}; + static const struct spmi_regulator_data pms405_regulators[] = { { "s3", 0x1a00, "vdd_s3"}, { } @@ -2142,6 +2178,7 @@ static const struct of_device_id qcom_spmi_regulator_match[] = { { .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators }, { .compatible = "qcom,pm660-regulators", .data = &pm660_regulators }, { .compatible = "qcom,pm660l-regulators", .data = &pm660l_regulators }, + { .compatible = "qcom,pmp8074-regulators", .data = &pmp8074_regulators }, { .compatible = "qcom,pms405-regulators", .data = &pms405_regulators }, { } }; diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c index fa8706a352ce..105f694a67e6 100644 --- a/drivers/regulator/rpi-panel-attiny-regulator.c +++ b/drivers/regulator/rpi-panel-attiny-regulator.c @@ -187,15 +187,11 @@ static int attiny_update_status(struct backlight_device *bl) { struct attiny_lcd *state = bl_get_data(bl); struct regmap *regmap = state->regmap; - int brightness = bl->props.brightness; + int brightness = backlight_get_brightness(bl); int ret, i; mutex_lock(&state->lock); - if (bl->props.power != FB_BLANK_UNBLANK || - bl->props.fb_blank != FB_BLANK_UNBLANK) - brightness = 0; - for (i = 0; i < 10; i++) { ret = regmap_write(regmap, REG_PWM, brightness); if (!ret) diff --git a/drivers/regulator/rt5120-regulator.c 
b/drivers/regulator/rt5120-regulator.c
new file mode 100644
index 000000000000..8173ede09414
--- /dev/null
+++ b/drivers/regulator/rt5120-regulator.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#define RT5120_REG_PGSTAT	0x03
+#define RT5120_REG_CH1VID	0x06
+#define RT5120_REG_CH1SLPVID	0x07
+#define RT5120_REG_ENABLE	0x08
+#define RT5120_REG_MODECTL	0x09
+#define RT5120_REG_UVOVPROT	0x0A
+#define RT5120_REG_SLPCTL	0x0C
+#define RT5120_REG_INTSTAT	0x1E
+#define RT5120_REG_DISCHG	0x1F
+
+#define RT5120_OUTPG_MASK(rid)	BIT(rid + 1)
+#define RT5120_OUTUV_MASK(rid)	BIT(rid + 9)
+#define RT5120_OUTOV_MASK(rid)	BIT(rid + 16)
+#define RT5120_CH1VID_MASK	GENMASK(6, 0)
+#define RT5120_RIDEN_MASK(rid)	BIT(rid + 1)
+#define RT5120_RADEN_MASK(rid)	BIT(rid)
+#define RT5120_FPWM_MASK(rid)	BIT(rid + 1)
+#define RT5120_UVHICCUP_MASK	BIT(1)
+#define RT5120_OVHICCUP_MASK	BIT(0)
+#define RT5120_HOTDIE_MASK	BIT(1)
+
+#define RT5120_BUCK1_MINUV	600000
+#define RT5120_BUCK1_MAXUV	1393750
+#define RT5120_BUCK1_STEPUV	6250
+#define RT5120_BUCK1_NUM_VOLT	0x80
+
+#define RT5120_AUTO_MODE	0
+#define RT5120_FPWM_MODE	1
+
+enum {
+	RT5120_REGULATOR_BUCK1 = 0,
+	RT5120_REGULATOR_BUCK2,
+	RT5120_REGULATOR_BUCK3,
+	RT5120_REGULATOR_BUCK4,
+	RT5120_REGULATOR_LDO,
+	RT5120_REGULATOR_EXTEN,
+	RT5120_MAX_REGULATOR
+};
+
+struct rt5120_priv {
+	struct device *dev;
+	struct regmap *regmap;
+	struct regulator_desc rdesc[RT5120_MAX_REGULATOR];
+};
+
+static int rt5120_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct regmap *regmap = rdev_get_regmap(rdev);
+	int rid = rdev_get_id(rdev);
+	unsigned int mask = RT5120_FPWM_MASK(rid), val;
+
+	switch (mode) {
+	case REGULATOR_MODE_NORMAL:
+		val = 0;
+		break;
+	case REGULATOR_MODE_FAST:
+		val = RT5120_FPWM_MASK(rid);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return regmap_update_bits(regmap, RT5120_REG_MODECTL, mask, val);
+}
+
+static unsigned int rt5120_buck_get_mode(struct regulator_dev *rdev)
+{
+	struct regmap *regmap = rdev_get_regmap(rdev);
+	int ret, rid = rdev_get_id(rdev);
+	unsigned int val;
+
+	ret = regmap_read(regmap, RT5120_REG_MODECTL, &val);
+	if (ret)
+		return REGULATOR_MODE_INVALID;
+
+	if (val & RT5120_FPWM_MASK(rid))
+		return REGULATOR_MODE_FAST;
+
+	return REGULATOR_MODE_NORMAL;
+}
+
+static int rt5120_regulator_get_error_flags(struct regulator_dev *rdev,
+					    unsigned int *flags)
+{
+	struct regmap *regmap = rdev_get_regmap(rdev);
+	unsigned int stat, hd_stat, cur_flags = 0;
+	int rid = rdev_get_id(rdev), ret;
+
+	/*
+	 * Regs 0x03/0x04/0x05 indicate PG/UV/OV;
+	 * use a block read to decrease I/O xfer time.
+	 */
+	ret = regmap_raw_read(regmap, RT5120_REG_PGSTAT, &stat, 3);
+	if (ret)
+		return ret;
+
+	ret = regmap_read(regmap, RT5120_REG_INTSTAT, &hd_stat);
+	if (ret)
+		return ret;
+
+	if (!(stat & RT5120_OUTPG_MASK(rid))) {
+		if (stat & RT5120_OUTUV_MASK(rid))
+			cur_flags |= REGULATOR_ERROR_UNDER_VOLTAGE;
+
+		if (stat & RT5120_OUTOV_MASK(rid))
+			cur_flags |= REGULATOR_ERROR_REGULATION_OUT;
+	}
+
+	if (hd_stat & RT5120_HOTDIE_MASK)
+		cur_flags |= REGULATOR_ERROR_OVER_TEMP;
+
+	*flags = cur_flags;
+	return 0;
+}
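Editor's note: the suspend-voltage helper that follows maps microvolts to a register selector with the usual linear formula, sel = (uV - min) / step. A standalone sketch of that arithmetic (plain user-space C; the constants mirror the RT5120_BUCK1_* defines above, nothing here is a kernel API):

#include <stdio.h>

#define BUCK1_MINUV	600000
#define BUCK1_MAXUV	1393750
#define BUCK1_STEPUV	6250

static int buck1_uv_to_sel(int uv)
{
	if (uv < BUCK1_MINUV || uv > BUCK1_MAXUV)
		return -1;			/* out of range */
	return (uv - BUCK1_MINUV) / BUCK1_STEPUV; /* truncates to step below */
}

int main(void)
{
	printf("sel(600000)  = %d\n", buck1_uv_to_sel(600000));  /* 0 */
	printf("sel(1393750) = %d\n", buck1_uv_to_sel(1393750)); /* 127 */
	return 0;
}

Selector 0x00 is 600000 uV and 0x7f is 1393750 uV, which is why RT5120_BUCK1_NUM_VOLT is 0x80.

+
+static int rt5120_buck1_set_suspend_voltage(struct regulator_dev *rdev, int uV)
+{
+	struct regmap *regmap = rdev_get_regmap(rdev);
+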
int sel; + + if (uV < RT5120_BUCK1_MINUV || uV > RT5120_BUCK1_MAXUV) + return -EINVAL; + + sel = (uV - RT5120_BUCK1_MINUV) / RT5120_BUCK1_STEPUV; + return regmap_write(regmap, RT5120_REG_CH1SLPVID, sel); +} + +static int rt5120_regulator_set_suspend_enable(struct regulator_dev *rdev) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + int rid = rdev_get_id(rdev); + unsigned int mask = RT5120_RIDEN_MASK(rid); + + return regmap_update_bits(regmap, RT5120_REG_SLPCTL, mask, mask); +} + +static int rt5120_regulator_set_suspend_disable(struct regulator_dev *rdev) +{ + struct regmap *regmap = rdev_get_regmap(rdev); + int rid = rdev_get_id(rdev); + unsigned int mask = RT5120_RIDEN_MASK(rid); + + return regmap_update_bits(regmap, RT5120_REG_SLPCTL, mask, 0); +} + +static const struct regulator_ops rt5120_buck1_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .set_mode = rt5120_buck_set_mode, + .get_mode = rt5120_buck_get_mode, + .get_error_flags = rt5120_regulator_get_error_flags, + .set_suspend_voltage = rt5120_buck1_set_suspend_voltage, + .set_suspend_enable = rt5120_regulator_set_suspend_enable, + .set_suspend_disable = rt5120_regulator_set_suspend_disable, +}; + +static const struct regulator_ops rt5120_buck234_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .set_mode = rt5120_buck_set_mode, + .get_mode = rt5120_buck_get_mode, + .get_error_flags = rt5120_regulator_get_error_flags, + .set_suspend_enable = rt5120_regulator_set_suspend_enable, + .set_suspend_disable = rt5120_regulator_set_suspend_disable, +}; + +static const struct regulator_ops rt5120_ldo_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .get_error_flags = rt5120_regulator_get_error_flags, + .set_suspend_enable = rt5120_regulator_set_suspend_enable, + .set_suspend_disable = rt5120_regulator_set_suspend_disable, +}; + +static const struct regulator_ops rt5120_exten_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_suspend_enable = rt5120_regulator_set_suspend_enable, + .set_suspend_disable = rt5120_regulator_set_suspend_disable, +}; + +static unsigned int rt5120_buck_of_map_mode(unsigned int mode) +{ + switch (mode) { + case RT5120_AUTO_MODE: + return REGULATOR_MODE_NORMAL; + case RT5120_FPWM_MODE: + return REGULATOR_MODE_FAST; + default: + return REGULATOR_MODE_INVALID; + } +} + +static void rt5120_fillin_regulator_desc(struct regulator_desc *desc, int rid) +{ + static const char * const name[] = { + "buck1", "buck2", "buck3", "buck4", "ldo", "exten" }; + static const char * const sname[] = { + "vin1", "vin2", "vin3", "vin4", "vinldo", NULL }; + + /* Common regulator property */ + desc->name = name[rid]; + desc->supply_name = sname[rid]; + desc->owner = THIS_MODULE; + desc->type = REGULATOR_VOLTAGE; + desc->id = rid; + desc->enable_reg = RT5120_REG_ENABLE; + desc->enable_mask = RT5120_RIDEN_MASK(rid); + desc->active_discharge_reg = RT5120_REG_DISCHG; + 
desc->active_discharge_mask = RT5120_RADEN_MASK(rid);
+	desc->active_discharge_on = RT5120_RADEN_MASK(rid);
+	/* Default n_voltages to 1 for all */
+	desc->n_voltages = 1;
+
+	/* Only the bucks support mode changes */
+	if (rid >= RT5120_REGULATOR_BUCK1 && rid <= RT5120_REGULATOR_BUCK4)
+		desc->of_map_mode = rt5120_buck_of_map_mode;
+
+	/* RID specific property init */
+	switch (rid) {
+	case RT5120_REGULATOR_BUCK1:
+		/* Only buck1 supports voltage changes over I2C */
+		desc->n_voltages = RT5120_BUCK1_NUM_VOLT;
+		desc->min_uV = RT5120_BUCK1_MINUV;
+		desc->uV_step = RT5120_BUCK1_STEPUV;
+		desc->vsel_reg = RT5120_REG_CH1VID;
+		desc->vsel_mask = RT5120_CH1VID_MASK;
+		desc->ops = &rt5120_buck1_ops;
+		break;
+	case RT5120_REGULATOR_BUCK2 ... RT5120_REGULATOR_BUCK4:
+		desc->ops = &rt5120_buck234_ops;
+		break;
+	case RT5120_REGULATOR_LDO:
+		desc->ops = &rt5120_ldo_ops;
+		break;
+	default:
+		desc->ops = &rt5120_exten_ops;
+	}
+}
+
+static int rt5120_of_parse_cb(struct rt5120_priv *priv, int rid,
+			      struct of_regulator_match *match)
+{
+	struct regulator_desc *desc = priv->rdesc + rid;
+	struct regulator_init_data *init_data = match->init_data;
+
+	if (!init_data || rid == RT5120_REGULATOR_BUCK1)
+		return 0;
+
+	if (init_data->constraints.min_uV != init_data->constraints.max_uV) {
+		dev_err(priv->dev, "Variable voltage for fixed regulator\n");
+		return -EINVAL;
+	}
+
+	desc->fixed_uV = init_data->constraints.min_uV;
+	return 0;
+}
+
+static struct of_regulator_match rt5120_regu_match[RT5120_MAX_REGULATOR] = {
+	[RT5120_REGULATOR_BUCK1] = { .name = "buck1", },
+	[RT5120_REGULATOR_BUCK2] = { .name = "buck2", },
+	[RT5120_REGULATOR_BUCK3] = { .name = "buck3", },
+	[RT5120_REGULATOR_BUCK4] = { .name = "buck4", },
+	[RT5120_REGULATOR_LDO] = { .name = "ldo", },
+	[RT5120_REGULATOR_EXTEN] = { .name = "exten", }
+};
+
+static int rt5120_parse_regulator_dt_data(struct rt5120_priv *priv)
+{
+	struct device *dev = priv->dev->parent;
+	struct device_node *reg_node;
+	int i, ret;
+
+	for (i = 0; i < RT5120_MAX_REGULATOR; i++) {
+		rt5120_fillin_regulator_desc(priv->rdesc + i, i);
+
+		rt5120_regu_match[i].desc = priv->rdesc + i;
+	}
+
+	reg_node = of_get_child_by_name(dev->of_node, "regulators");
+	if (!reg_node) {
+		dev_err(priv->dev, "Couldn't find 'regulators' node\n");
+		return -ENODEV;
+	}
+
+	ret = of_regulator_match(priv->dev, reg_node, rt5120_regu_match,
+				 ARRAY_SIZE(rt5120_regu_match));
+
+	of_node_put(reg_node);
+
+	if (ret < 0) {
+		dev_err(priv->dev,
+			"Error parsing regulator init data (%d)\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < RT5120_MAX_REGULATOR; i++) {
+		ret = rt5120_of_parse_cb(priv, i, rt5120_regu_match + i);
+		if (ret) {
+			dev_err(priv->dev, "Failed in [%d] of_parse_cb\n", i);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int rt5120_device_property_init(struct rt5120_priv *priv)
+{
+	struct device *dev = priv->dev->parent;
+	struct device_node *np = dev->of_node;
+	bool prot_enable;
+	unsigned int prot_enable_val = 0;
+
+	/* Assign UV/OV HW protection behavior */
+	prot_enable = of_property_read_bool(np,
+					    "richtek,enable-undervolt-hiccup");
+	if (prot_enable)
+		prot_enable_val |= RT5120_UVHICCUP_MASK;
+
+	prot_enable = of_property_read_bool(np,
+					    "richtek,enable-overvolt-hiccup");
+	if (prot_enable)
+		prot_enable_val |= RT5120_OVHICCUP_MASK;
+
+	return regmap_update_bits(priv->regmap, RT5120_REG_UVOVPROT,
+				  RT5120_UVHICCUP_MASK | RT5120_OVHICCUP_MASK,
+				  prot_enable_val);
+}
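Editor's note: rt5120_of_parse_cb() above rejects a variable min/max range for rails whose voltage cannot be programmed (n_voltages stays 1). The rule it enforces can be shown in isolation (plain C sketch; the struct and function names here are illustrative, not the kernel's):

#include <stdio.h>

struct constraints {
	int min_uv;
	int max_uv;
};

/* A fixed rail must be constrained to exactly one voltage. */
static int check_fixed(const struct constraints *c, int n_voltages)
{
	if (n_voltages > 1)
		return 0;		/* adjustable: any range is fine */
	if (c->min_uv != c->max_uv)
		return -1;		/* variable range on a fixed rail */
	return 0;
}

int main(void)
{
	struct constraints ok  = { .min_uv = 1800000, .max_uv = 1800000 };
	struct constraints bad = { .min_uv = 1600000, .max_uv = 1800000 };

	printf("ok:  %d\n", check_fixed(&ok, 1));	/* 0: accepted */
	printf("bad: %d\n", check_fixed(&bad, 1));	/* -1: rejected */
	return 0;
}

+
+static int rt5120_regulator_probe(struct platform_device *pdev)
+{
+	struct rt5120_priv *priv;
+	struct regulator_dev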
*rdev; + struct regulator_config config = {}; + int i, ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = &pdev->dev; + + priv->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!priv->regmap) { + dev_err(&pdev->dev, "Failed to init regmap\n"); + return -ENODEV; + } + + ret = rt5120_device_property_init(priv); + if (ret) { + dev_err(&pdev->dev, "Failed to do property init\n"); + return ret; + } + + ret = rt5120_parse_regulator_dt_data(priv); + if (ret) { + dev_err(&pdev->dev, "Failed to parse dt data\n"); + return ret; + } + + config.dev = &pdev->dev; + config.regmap = priv->regmap; + + for (i = 0; i < RT5120_MAX_REGULATOR; i++) { + config.of_node = rt5120_regu_match[i].of_node; + config.init_data = rt5120_regu_match[i].init_data; + + rdev = devm_regulator_register(&pdev->dev, priv->rdesc + i, + &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, + "Failed to register regulator [%d]\n", i); + return PTR_ERR(rdev); + } + } + + return 0; +} + +static const struct platform_device_id rt5120_regulator_dev_table[] = { + { "rt5120-regulator", 0 }, + {} +}; +MODULE_DEVICE_TABLE(platform, rt5120_regulator_dev_table); + +static struct platform_driver rt5120_regulator_driver = { + .driver = { + .name = "rt5120-regulator", + }, + .id_table = rt5120_regulator_dev_table, + .probe = rt5120_regulator_probe, +}; +module_platform_driver(rt5120_regulator_driver); + +MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>"); +MODULE_DESCRIPTION("Richtek RT5120 regulator driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/rt5190a-regulator.c b/drivers/regulator/rt5190a-regulator.c index 155d4afd00b1..4a3397b32582 100644 --- a/drivers/regulator/rt5190a-regulator.c +++ b/drivers/regulator/rt5190a-regulator.c @@ -224,6 +224,9 @@ static int rt5190a_of_parse_cb(struct rt5190a_priv *priv, int rid, bool latchup_enable; unsigned int mask = RT5190A_RID_BITMASK(rid), val; + if (!init_data) + return 0; + switch (rid) { case RT5190A_IDX_BUCK1: case RT5190A_IDX_BUCK4: diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c index 41ae7ac27ff6..b9918f4fd241 100644 --- a/drivers/regulator/scmi-regulator.c +++ b/drivers/regulator/scmi-regulator.c @@ -343,6 +343,7 @@ static int scmi_regulator_probe(struct scmi_device *sdev) * plausible SCMI Voltage Domain number, all belonging to this SCMI * platform instance node (handle->dev->of_node). */ + of_node_get(handle->dev->of_node); np = of_find_node_by_name(handle->dev->of_node, "regulators"); for_each_child_of_node(np, child) { ret = process_scmi_regulator_of_node(sdev, ph, child, rinfo); diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c index bd7b2f287250..afa336be1cc9 100644 --- a/drivers/regulator/ti-abb-regulator.c +++ b/drivers/regulator/ti-abb-regulator.c @@ -309,7 +309,7 @@ out: * * Return: 0 on success or appropriate error value when fails */ -static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel) +static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel) { const struct regulator_desc *desc = rdev->desc; struct ti_abb *abb = rdev_get_drvdata(rdev); @@ -344,7 +344,7 @@ static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel) info = &abb->info[sel]; /* - * When Linux kernel is starting up, we are'nt sure of the + * When Linux kernel is starting up, we aren't sure of the * Bias configuration that bootloader has configured. 
* So, we get to know the actual setting the first time
 * we are asked to transition.
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 93c8d07ee328..48d94649ea82 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -231,6 +231,15 @@ config RESET_STARFIVE_JH7100
 	help
 	  This enables the reset controller driver for the StarFive JH7100 SoC.
 
+config RESET_SUNPLUS
+	bool "Sunplus SoCs Reset Driver" if COMPILE_TEST
+	default ARCH_SUNPLUS
+	help
+	  This enables reset driver support for Sunplus SoCs.
+	  Reset lines can be asserted and deasserted by toggling bits in a
+	  contiguous, exclusive register space. The registers are
+	  HIWORD_MASKED, which means each register holds 16 reset lines.
+
 config RESET_SUNXI
 	bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
 	default ARCH_SUNXI
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index a80a9c4008a7..3ff378f43348 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_RESET_SCMI) += reset-scmi.o
 obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
 obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
 obj-$(CONFIG_RESET_STARFIVE_JH7100) += reset-starfive-jh7100.o
+obj-$(CONFIG_RESET_SUNPLUS) += reset-sunplus.o
 obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
 obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
 obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
diff --git a/drivers/reset/reset-npcm.c b/drivers/reset/reset-npcm.c
index 2ea4d3136e15..24c55efa98e5 100644
--- a/drivers/reset/reset-npcm.c
+++ b/drivers/reset/reset-npcm.c
@@ -17,13 +17,20 @@
 /* NPCM7xx GCR registers */
 #define NPCM_MDLR_OFFSET	0x7C
-#define NPCM_MDLR_USBD0		BIT(9)
-#define NPCM_MDLR_USBD1		BIT(8)
-#define NPCM_MDLR_USBD2_4	BIT(21)
-#define NPCM_MDLR_USBD5_9	BIT(22)
+#define NPCM7XX_MDLR_USBD0	BIT(9)
+#define NPCM7XX_MDLR_USBD1	BIT(8)
+#define NPCM7XX_MDLR_USBD2_4	BIT(21)
+#define NPCM7XX_MDLR_USBD5_9	BIT(22)
+
+/* NPCM8xx MDLR bits */
+#define NPCM8XX_MDLR_USBD0_3	BIT(9)
+#define NPCM8XX_MDLR_USBD4_7	BIT(22)
+#define NPCM8XX_MDLR_USBD8	BIT(24)
+#define NPCM8XX_MDLR_USBD9	BIT(21)
 
 #define NPCM_USB1PHYCTL_OFFSET	0x140
 #define NPCM_USB2PHYCTL_OFFSET	0x144
+#define NPCM_USB3PHYCTL_OFFSET	0x148
 #define NPCM_USBXPHYCTL_RS	BIT(28)
 
 /* NPCM7xx Reset registers */
@@ -49,12 +56,38 @@
 #define NPCM_IPSRST3_USBPHY1	BIT(24)
 #define NPCM_IPSRST3_USBPHY2	BIT(25)
 
+#define NPCM_IPSRST4		0x74
+#define NPCM_IPSRST4_USBPHY3	BIT(25)
+#define NPCM_IPSRST4_USB_HOST2	BIT(31)
+
 #define NPCM_RC_RESETS_PER_REG	32
 #define NPCM_MASK_RESETS	GENMASK(4, 0)
 
+enum {
+	BMC_NPCM7XX = 0,
+	BMC_NPCM8XX,
+};
+
+static const u32 npxm7xx_ipsrst[] = {NPCM_IPSRST1, NPCM_IPSRST2, NPCM_IPSRST3};
+static const u32 npxm8xx_ipsrst[] = {NPCM_IPSRST1, NPCM_IPSRST2, NPCM_IPSRST3,
+	NPCM_IPSRST4};
+
+struct npcm_reset_info {
+	u32 bmc_id;
+	u32 num_ipsrst;
+	const u32 *ipsrst;
+};
+
+static const struct npcm_reset_info npxm7xx_reset_info[] = {
+	{.bmc_id = BMC_NPCM7XX, .num_ipsrst = 3, .ipsrst = npxm7xx_ipsrst}};
+static const struct npcm_reset_info npxm8xx_reset_info[] = {
+	{.bmc_id = BMC_NPCM8XX, .num_ipsrst = 4, .ipsrst = npxm8xx_ipsrst}};
+
 struct npcm_rc_data {
 	struct reset_controller_dev rcdev;
 	struct notifier_block restart_nb;
+	const struct npcm_reset_info *info;
+	struct regmap *gcr_regmap;
 	u32 sw_reset_number;
 	void __iomem *base;
 	spinlock_t lock;
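Editor's note: the RESET_SUNPLUS help text above mentions HIWORD_MASKED registers, and reset-sunplus.c further down relies on that encoding: the upper 16 bits of a 32-bit write select which of the low 16 bits take effect, so one line can be toggled without a read-modify-write. A standalone sketch (plain C; names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* 16 reset lines per register; write mask in the high half-word. */
static uint32_t hiword_write_val(unsigned int line, int assert)
{
	unsigned int shift = line % 16;

	return (1u << (16 + shift)) | ((assert ? 1u : 0u) << shift);
}

int main(void)
{
	/* asserting line 5 writes 0x00200020; deasserting writes 0x00200000 */
	printf("assert:   0x%08x\n", hiword_write_val(5, 1));
	printf("deassert: 0x%08x\n", hiword_write_val(5, 0));
	return 0;
}

@@ -120,14 +153,24 @@ static int npcm_rc_status(struct reset_controller_dev *rcdev,
 static int npcm_reset_xlate(struct reset_controller_dev *rcdev,
			    const struct of_phandle_args *reset_spec)
 {
+	struct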
npcm_rc_data *rc = to_rc_data(rcdev);
 	unsigned int offset, bit;
+	bool offset_found = false;
+	int off_num;
 
 	offset = reset_spec->args[0];
-	if (offset != NPCM_IPSRST1 && offset != NPCM_IPSRST2 &&
-	    offset != NPCM_IPSRST3) {
+	for (off_num = 0; off_num < rc->info->num_ipsrst; off_num++) {
+		if (offset == rc->info->ipsrst[off_num]) {
+			offset_found = true;
+			break;
+		}
+	}
+
+	if (!offset_found) {
 		dev_err(rcdev->dev, "Error reset register (0x%x)\n", offset);
 		return -EINVAL;
 	}
+
 	bit = reset_spec->args[1];
 	if (bit >= NPCM_RC_RESETS_PER_REG) {
 		dev_err(rcdev->dev, "Error reset number (%d)\n", bit);
@@ -138,45 +181,29 @@ static int npcm_reset_xlate(struct reset_controller_dev *rcdev,
 }
 
 static const struct of_device_id npcm_rc_match[] = {
-	{ .compatible = "nuvoton,npcm750-reset",
-	  .data = (void *)"nuvoton,npcm750-gcr" },
+	{ .compatible = "nuvoton,npcm750-reset", .data = &npxm7xx_reset_info},
+	{ .compatible = "nuvoton,npcm845-reset", .data = &npxm8xx_reset_info},
 	{ }
 };
 
-/*
- * The following procedure should be observed in USB PHY, USB device and
- * USB host initialization at BMC boot
- */
-static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc)
+static void npcm_usb_reset_npcm7xx(struct npcm_rc_data *rc)
 {
 	u32 mdlr, iprst1, iprst2, iprst3;
-	struct device *dev = &pdev->dev;
-	struct regmap *gcr_regmap;
 	u32 ipsrst1_bits = 0;
 	u32 ipsrst2_bits = NPCM_IPSRST2_USB_HOST;
 	u32 ipsrst3_bits = 0;
-	const char *gcr_dt;
-
-	gcr_dt = (const char *)
-		of_match_device(dev->driver->of_match_table, dev)->data;
-
-	gcr_regmap = syscon_regmap_lookup_by_compatible(gcr_dt);
-	if (IS_ERR(gcr_regmap)) {
-		dev_err(&pdev->dev, "Failed to find %s\n", gcr_dt);
-		return PTR_ERR(gcr_regmap);
-	}
 
 	/* checking which USB device is enabled */
-	regmap_read(gcr_regmap, NPCM_MDLR_OFFSET, &mdlr);
-	if (!(mdlr & NPCM_MDLR_USBD0))
+	regmap_read(rc->gcr_regmap, NPCM_MDLR_OFFSET, &mdlr);
+	if (!(mdlr & NPCM7XX_MDLR_USBD0))
 		ipsrst3_bits |= NPCM_IPSRST3_USBD0;
-	if (!(mdlr & NPCM_MDLR_USBD1))
+	if (!(mdlr & NPCM7XX_MDLR_USBD1))
 		ipsrst1_bits |= NPCM_IPSRST1_USBD1;
-	if (!(mdlr & NPCM_MDLR_USBD2_4))
+	if (!(mdlr & NPCM7XX_MDLR_USBD2_4))
 		ipsrst1_bits |= (NPCM_IPSRST1_USBD2 |
 				 NPCM_IPSRST1_USBD3 |
 				 NPCM_IPSRST1_USBD4);
-	if (!(mdlr & NPCM_MDLR_USBD0)) {
+	if (!(mdlr & NPCM7XX_MDLR_USBD0)) {
 		ipsrst1_bits |= (NPCM_IPSRST1_USBD5 |
 				 NPCM_IPSRST1_USBD6);
 		ipsrst3_bits |= (NPCM_IPSRST3_USBD7 |
@@ -199,9 +226,9 @@ static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc)
 	writel(iprst3, rc->base + NPCM_IPSRST3);
 
 	/* clear USB PHY RS bit */
-	regmap_update_bits(gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+	regmap_update_bits(rc->gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
 			   NPCM_USBXPHYCTL_RS, 0);
-	regmap_update_bits(gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
+	regmap_update_bits(rc->gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
 			   NPCM_USBXPHYCTL_RS, 0);
 
 	/* deassert reset USB PHY */
@@ -211,19 +238,131 @@ static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc)
 	udelay(50);
 
 	/* set USB PHY RS bit */
-	regmap_update_bits(gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+	regmap_update_bits(rc->gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+			   NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
+	regmap_update_bits(rc->gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
+			   NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
+
+	/* deassert reset USB devices */
+	iprst1 &= ~ipsrst1_bits;
+	iprst2 &= ~ipsrst2_bits;
+	iprst3 &= ~ipsrst3_bits;
+
+	writel(iprst1, rc->base + NPCM_IPSRST1);
+	writel(iprst2, rc->base + NPCM_IPSRST2);
+	writel(iprst3, rc->base + NPCM_IPSRST3);
+}
+
+static
void npcm_usb_reset_npcm8xx(struct npcm_rc_data *rc)
+{
+	u32 mdlr, iprst1, iprst2, iprst3, iprst4;
+	u32 ipsrst1_bits = 0;
+	u32 ipsrst2_bits = NPCM_IPSRST2_USB_HOST;
+	u32 ipsrst3_bits = 0;
+	u32 ipsrst4_bits = NPCM_IPSRST4_USB_HOST2 | NPCM_IPSRST4_USBPHY3;
+
+	/* checking which USB device is enabled */
+	regmap_read(rc->gcr_regmap, NPCM_MDLR_OFFSET, &mdlr);
+	if (!(mdlr & NPCM8XX_MDLR_USBD0_3)) {
+		ipsrst3_bits |= NPCM_IPSRST3_USBD0;
+		ipsrst1_bits |= (NPCM_IPSRST1_USBD1 |
+				 NPCM_IPSRST1_USBD2 |
+				 NPCM_IPSRST1_USBD3);
+	}
+	if (!(mdlr & NPCM8XX_MDLR_USBD4_7)) {
+		ipsrst1_bits |= (NPCM_IPSRST1_USBD4 |
+				 NPCM_IPSRST1_USBD5 |
+				 NPCM_IPSRST1_USBD6);
+		ipsrst3_bits |= NPCM_IPSRST3_USBD7;
+	}
+
+	if (!(mdlr & NPCM8XX_MDLR_USBD8))
+		ipsrst3_bits |= NPCM_IPSRST3_USBD8;
+	if (!(mdlr & NPCM8XX_MDLR_USBD9))
+		ipsrst3_bits |= NPCM_IPSRST3_USBD9;
+
+	/* assert reset USB PHY and USB devices */
+	iprst1 = readl(rc->base + NPCM_IPSRST1);
+	iprst2 = readl(rc->base + NPCM_IPSRST2);
+	iprst3 = readl(rc->base + NPCM_IPSRST3);
+	iprst4 = readl(rc->base + NPCM_IPSRST4);
+
+	iprst1 |= ipsrst1_bits;
+	iprst2 |= ipsrst2_bits;
+	iprst3 |= (ipsrst3_bits | NPCM_IPSRST3_USBPHY1 |
+		   NPCM_IPSRST3_USBPHY2);
+	iprst4 |= ipsrst4_bits;
+
+	writel(iprst1, rc->base + NPCM_IPSRST1);
+	writel(iprst2, rc->base + NPCM_IPSRST2);
+	writel(iprst3, rc->base + NPCM_IPSRST3);
+	writel(iprst4, rc->base + NPCM_IPSRST4);
+
+	/* clear USB PHY RS bit */
+	regmap_update_bits(rc->gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+			   NPCM_USBXPHYCTL_RS, 0);
+	regmap_update_bits(rc->gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
+			   NPCM_USBXPHYCTL_RS, 0);
+	regmap_update_bits(rc->gcr_regmap, NPCM_USB3PHYCTL_OFFSET,
+			   NPCM_USBXPHYCTL_RS, 0);
+
+	/* deassert reset USB PHY */
+	iprst3 &= ~(NPCM_IPSRST3_USBPHY1 | NPCM_IPSRST3_USBPHY2);
+	writel(iprst3, rc->base + NPCM_IPSRST3);
+	iprst4 &= ~NPCM_IPSRST4_USBPHY3;
+	writel(iprst4, rc->base + NPCM_IPSRST4);
+
+	/* set USB PHY RS bit */
+	regmap_update_bits(rc->gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+			   NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
+	regmap_update_bits(rc->gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
 			   NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
-	regmap_update_bits(gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
+	regmap_update_bits(rc->gcr_regmap, NPCM_USB3PHYCTL_OFFSET,
 			   NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
 
 	/* deassert reset USB devices */
 	iprst1 &= ~ipsrst1_bits;
 	iprst2 &= ~ipsrst2_bits;
 	iprst3 &= ~ipsrst3_bits;
+	iprst4 &= ~ipsrst4_bits;
 
 	writel(iprst1, rc->base + NPCM_IPSRST1);
 	writel(iprst2, rc->base + NPCM_IPSRST2);
 	writel(iprst3, rc->base + NPCM_IPSRST3);
+	writel(iprst4, rc->base + NPCM_IPSRST4);
+}
+
+/*
+ * The following procedure should be observed in USB PHY, USB device and
+ * USB host initialization at BMC boot
+ */
+static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc)
+{
+	struct device *dev = &pdev->dev;
+
+	rc->gcr_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "nuvoton,sysgcr");
+	if (IS_ERR(rc->gcr_regmap)) {
+		dev_warn(&pdev->dev, "Failed to find nuvoton,sysgcr property, please update the device tree\n");
+		dev_info(&pdev->dev, "Using nuvoton,npcm750-gcr for Poleg backward compatibility\n");
+		rc->gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+		if (IS_ERR(rc->gcr_regmap)) {
+			dev_err(&pdev->dev, "Failed to find nuvoton,npcm750-gcr\n");
+			return PTR_ERR(rc->gcr_regmap);
+		}
+	}
+
+	rc->info = (const struct npcm_reset_info *)
+		of_match_device(dev->driver->of_match_table, dev)->data;
+	switch (rc->info->bmc_id) {
+	case BMC_NPCM7XX:
+		npcm_usb_reset_npcm7xx(rc);
+
break; + case BMC_NPCM8XX: + npcm_usb_reset_npcm8xx(rc); + break; + default: + return -ENODEV; + } return 0; } diff --git a/drivers/reset/reset-sunplus.c b/drivers/reset/reset-sunplus.c new file mode 100644 index 000000000000..2f23ecaa7b98 --- /dev/null +++ b/drivers/reset/reset-sunplus.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* + * SP7021 reset driver + * + * Copyright (C) Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#include <linux/io.h> +#include <linux/init.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/reset-controller.h> +#include <linux/reboot.h> + +/* HIWORD_MASK_REG BITS */ +#define BITS_PER_HWM_REG 16 + +/* resets HW info: reg_index_shift */ +static const u32 sp_resets[] = { +/* SP7021: mo_reset0 ~ mo_reset9 */ + 0x00, + 0x02, + 0x03, + 0x04, + 0x05, + 0x06, + 0x07, + 0x08, + 0x09, + 0x0a, + 0x0b, + 0x0d, + 0x0e, + 0x0f, + 0x10, + 0x12, + 0x14, + 0x15, + 0x16, + 0x17, + 0x18, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x20, + 0x21, + 0x22, + 0x23, + 0x24, + 0x25, + 0x26, + 0x2a, + 0x2b, + 0x2d, + 0x2e, + 0x30, + 0x31, + 0x32, + 0x33, + 0x3d, + 0x3e, + 0x3f, + 0x42, + 0x44, + 0x4b, + 0x4c, + 0x4d, + 0x4e, + 0x4f, + 0x50, + 0x55, + 0x60, + 0x61, + 0x6a, + 0x6f, + 0x70, + 0x73, + 0x74, + 0x86, + 0x8a, + 0x8b, + 0x8d, + 0x8e, + 0x8f, + 0x90, + 0x92, + 0x93, + 0x94, + 0x95, + 0x96, + 0x97, + 0x98, + 0x99, +}; + +struct sp_reset { + struct reset_controller_dev rcdev; + struct notifier_block notifier; + void __iomem *base; +}; + +static inline struct sp_reset *to_sp_reset(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct sp_reset, rcdev); +} + +static int sp_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + struct sp_reset *reset = to_sp_reset(rcdev); + int index = sp_resets[id] / BITS_PER_HWM_REG; + int shift = sp_resets[id] % BITS_PER_HWM_REG; + u32 val; + + val = (1 << (16 + shift)) | (assert << shift); + writel(val, reset->base + (index * 4)); + + return 0; +} + +static int sp_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return sp_reset_update(rcdev, id, true); +} + +static int sp_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return sp_reset_update(rcdev, id, false); +} + +static int sp_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct sp_reset *reset = to_sp_reset(rcdev); + int index = sp_resets[id] / BITS_PER_HWM_REG; + int shift = sp_resets[id] % BITS_PER_HWM_REG; + u32 reg; + + reg = readl(reset->base + (index * 4)); + + return !!(reg & BIT(shift)); +} + +static const struct reset_control_ops sp_reset_ops = { + .assert = sp_reset_assert, + .deassert = sp_reset_deassert, + .status = sp_reset_status, +}; + +static int sp_restart(struct notifier_block *nb, unsigned long mode, + void *cmd) +{ + struct sp_reset *reset = container_of(nb, struct sp_reset, notifier); + + sp_reset_assert(&reset->rcdev, 0); + sp_reset_deassert(&reset->rcdev, 0); + + return NOTIFY_DONE; +} + +static int sp_reset_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sp_reset *reset; + struct resource *res; + int ret; + + reset = devm_kzalloc(dev, sizeof(*reset), GFP_KERNEL); + if (!reset) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + reset->base = devm_ioremap_resource(dev, res); + if (IS_ERR(reset->base)) + return PTR_ERR(reset->base); + + reset->rcdev.ops = &sp_reset_ops; + 
reset->rcdev.owner = THIS_MODULE;
+	reset->rcdev.of_node = dev->of_node;
+	reset->rcdev.nr_resets = resource_size(res) / 4 * BITS_PER_HWM_REG;
+
+	ret = devm_reset_controller_register(dev, &reset->rcdev);
+	if (ret)
+		return ret;
+
+	reset->notifier.notifier_call = sp_restart;
+	reset->notifier.priority = 192;
+
+	return register_restart_handler(&reset->notifier);
+}
+
+static const struct of_device_id sp_reset_dt_ids[] = {
+	{ .compatible = "sunplus,sp7021-reset" },
+	{ /* sentinel */ },
+};
+
+static struct platform_driver sp_reset_driver = {
+	.probe = sp_reset_probe,
+	.driver = {
+		.name = "sunplus-reset",
+		.of_match_table = sp_reset_dt_ids,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(sp_reset_driver);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index ba6d78789660..4df8bf6505fc 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3145,7 +3145,7 @@ out:
 * BLK_EH_DONE if the request is handled or terminated
 * by the driver.
 */
-enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
+enum blk_eh_timer_return dasd_times_out(struct request *req)
 {
 	struct dasd_block *block = req->q->queuedata;
 	struct dasd_device *device;
@@ -3280,7 +3280,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
 static void dasd_free_queue(struct dasd_block *block)
 {
 	if (block->request_queue) {
-		blk_cleanup_queue(block->request_queue);
+		blk_mq_destroy_queue(block->request_queue);
 		blk_mq_free_tag_set(&block->tag_set);
 		block->request_queue = NULL;
 	}
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index a7a33ebf4bbe..5a83f0a39901 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -41,8 +41,8 @@ int dasd_gendisk_alloc(struct dasd_block *block)
 	if (base->devindex >= DASD_PER_MAJOR)
 		return -EBUSY;
 
-	gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE,
-				&dasd_bio_compl_lkclass);
+	gdp = blk_mq_alloc_disk_for_queue(block->request_queue,
+					  &dasd_bio_compl_lkclass);
 	if (!gdp)
 		return -ENOMEM;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 83b918b84b4a..333a399f754e 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -795,7 +795,7 @@ void dasd_free_device(struct dasd_device *);
 struct dasd_block *dasd_alloc_block(void);
 void dasd_free_block(struct dasd_block *);
 
-enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved);
+enum blk_eh_timer_return dasd_times_out(struct request *req);
 
 void dasd_enable_device(struct dasd_device *);
 void dasd_set_target_state(struct dasd_device *, int);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 8d0d0eaa3059..4d8d1759775a 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -414,7 +414,7 @@ removeseg:
 	kill_dax(dev_info->dax_dev);
 	put_dax(dev_info->dax_dev);
 	del_gendisk(dev_info->gd);
-	blk_cleanup_disk(dev_info->gd);
+	put_disk(dev_info->gd);
 
 	up_write(&dcssblk_devices_sem);
 	if (device_remove_file_self(dev, attr)) {
@@ -712,7 +712,7 @@ out_dax:
 	put_dax(dev_info->dax_dev);
put_dev:
 	list_del(&dev_info->lh);
-	blk_cleanup_disk(dev_info->gd);
+	put_disk(dev_info->gd);
 	list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
 		segment_unload(seg_info->segment_name);
 	}
@@ -722,7 +722,7 @@ put_dev:
dev_list_del:
 	list_del(&dev_info->lh);
release_gd:
-	blk_cleanup_disk(dev_info->gd);
+	put_disk(dev_info->gd);
 	up_write(&dcssblk_devices_sem);
seg_list_del:
 	if (dev_info == NULL)
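Editor's note: the dasd/dcssblk (and, below, scm_blk) hunks replace blk_cleanup_disk() with put_disk(): once del_gendisk() has detached the disk, only the final reference drop remains. A hedged sketch of the resulting teardown order, assuming a blk-mq driver (mydev is a made-up structure, not a real driver):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

struct mydev {				/* hypothetical example device */
	struct gendisk *gd;
	struct blk_mq_tag_set tag_set;
};

static void mydev_remove(struct mydev *dev)
{
	del_gendisk(dev->gd);		/* quiesce I/O, remove from sysfs */
	put_disk(dev->gd);		/* was: blk_cleanup_disk(dev->gd) */
	blk_mq_free_tag_set(&dev->tag_set);
}

@@ -790,7 +790,7 @@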
dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch kill_dax(dev_info->dax_dev); put_dax(dev_info->dax_dev); del_gendisk(dev_info->gd); - blk_cleanup_disk(dev_info->gd); + put_disk(dev_info->gd); /* unload all related segments */ list_for_each_entry(entry, &dev_info->seg_list, lh) diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 2a9c0ddcade5..0c1df1d5f1ac 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -501,7 +501,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) return 0; out_cleanup_disk: - blk_cleanup_disk(bdev->gendisk); + put_disk(bdev->gendisk); out_tag: blk_mq_free_tag_set(&bdev->tag_set); out: @@ -512,7 +512,7 @@ out: void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) { del_gendisk(bdev->gendisk); - blk_cleanup_disk(bdev->gendisk); + put_disk(bdev->gendisk); blk_mq_free_tag_set(&bdev->tag_set); } diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 940a6deab38f..bd99c5492b7d 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -272,7 +272,7 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, q->entries = qsize; } -static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data, bool rsvd) +static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data) { int *active = data; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 9c27bc37e5de..5ba5c18b77b4 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -633,7 +633,7 @@ struct fib_count_data { int krlcnt; }; -static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data, bool reserved) +static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data) { struct fib_count_data *fib_count = data; diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 1d9be771f3ee..610a51538f03 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -127,7 +127,7 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff, int bufflen, struct scsi_sense_hdr *sshdr, int flags) { u8 cdb[MAX_COMMAND_SIZE]; - int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; /* Prepare the command. 
*/ @@ -157,7 +157,7 @@ static int submit_stpg(struct scsi_device *sdev, int group_id, u8 cdb[MAX_COMMAND_SIZE]; unsigned char stpg_data[8]; int stpg_len = 8; - int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; /* Prepare the data buffer */ diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index bd28ec6cfb72..2e21ab447873 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -239,7 +239,7 @@ static int send_trespass_cmd(struct scsi_device *sdev, unsigned char cdb[MAX_COMMAND_SIZE]; int err, res = SCSI_DH_OK, len; struct scsi_sense_hdr sshdr; - u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; if (csdev->flags & CLARIION_SHORT_TRESPASS) { diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index 4a3f7831a2d6..0d2cfa60aa06 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -83,7 +83,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h) unsigned char cmd[6] = { TEST_UNIT_READY }; struct scsi_sense_hdr sshdr; int ret = SCSI_DH_OK, res; - u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; retry: @@ -121,7 +121,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h) struct scsi_device *sdev = h->sdev; int res, rc = SCSI_DH_OK; int retry_cnt = HP_SW_RETRIES; - u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; retry: diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 66652ab409cc..bf8754741f85 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -536,7 +536,7 @@ static void send_mode_select(struct work_struct *work) unsigned char cdb[MAX_COMMAND_SIZE]; struct scsi_sense_hdr sshdr; unsigned int data_size; - u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; spin_lock(&ctlr->ms_lock); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 3d64877bda8d..77a4d9f8aa83 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -1350,8 +1350,7 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do) return wq_work_done; } -static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data, - bool reserved) +static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) { const int tag = scsi_cmd_to_rq(sc)->tag; struct fnic *fnic = data; @@ -1548,8 +1547,7 @@ struct fnic_rport_abort_io_iter_data { int term_cnt; }; -static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data, - bool reserved) +static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) { struct fnic_rport_abort_io_iter_data *iter_data = data; struct fnic *fnic = iter_data->fnic; @@ -2003,8 +2001,7 @@ struct fnic_pending_aborts_iter_data { int ret; }; -static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, - void *data, bool reserved) +static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) { struct 
fnic_pending_aborts_iter_data *iter_data = data;
 	struct fnic *fnic = iter_data->fnic;
@@ -2019,8 +2016,6 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
 	if (sc == iter_data->lr_sc || sc->device != lun_dev)
 		return true;
-	if (reserved)
-		return true;
 
 	io_lock = fnic_io_lock_tag(fnic, abt_tag);
 	spin_lock_irqsave(io_lock, flags);
@@ -2670,8 +2665,7 @@ call_fc_exch_mgr_reset:
 }
 
-static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data,
-				   bool reserved)
+static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
 {
 	struct fnic_pending_aborts_iter_data *iter_data = data;
 	struct fnic *fnic = iter_data->fnic;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 8352f90d997d..315c7ac730e9 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -566,8 +566,7 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(scsi_host_get);
 
-static bool scsi_host_check_in_flight(struct request *rq, void *data,
-				      bool reserved)
+static bool scsi_host_check_in_flight(struct request *rq, void *data)
 {
 	int *count = data;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
@@ -662,7 +661,7 @@ void scsi_flush_work(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL_GPL(scsi_flush_work);
 
-static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
+static bool complete_all_cmds_iter(struct request *rq, void *data)
 {
 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
 	enum scsi_host_status status = *(enum scsi_host_status *)data;
@@ -693,17 +692,16 @@ void scsi_host_complete_all_commands(struct Scsi_Host *shost,
 EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
 
 struct scsi_host_busy_iter_data {
-	bool (*fn)(struct scsi_cmnd *, void *, bool);
+	bool (*fn)(struct scsi_cmnd *, void *);
 	void *priv;
 };
 
-static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
-				     bool reserved)
+static bool __scsi_host_busy_iter_fn(struct request *req, void *priv)
 {
 	struct scsi_host_busy_iter_data *iter_data = priv;
 	struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);
 
-	return iter_data->fn(sc, iter_data->priv, reserved);
+	return iter_data->fn(sc, iter_data->priv);
 }
 
 /**
@@ -716,7 +714,7 @@ static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
 * it has to be provided by the caller
 */
 void scsi_host_busy_iter(struct Scsi_Host *shost,
-			 bool (*fn)(struct scsi_cmnd *, void *, bool),
+			 bool (*fn)(struct scsi_cmnd *, void *),
			 void *priv)
 {
	struct scsi_host_busy_iter_data iter_data = {
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index d8c195b7ca57..59a18769a4fe 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -381,14 +381,12 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
- * @reserved: N/A. Currently not used
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
-static bool mpi3mr_print_scmd(struct request *rq,
-	void *data, bool reserved)
+static bool mpi3mr_print_scmd(struct request *rq, void *data)
 {
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -412,7 +410,6 @@ out:
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
- * @reserved: N/A. Currently not used
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
@@ -420,8 +417,7 @@ out:
 * Return: true always.
*/ -static bool mpi3mr_flush_scmd(struct request *rq, - void *data, bool reserved) +static bool mpi3mr_flush_scmd(struct request *rq, void *data) { struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data; struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); @@ -451,7 +447,6 @@ out: * mpi3mr_count_dev_pending - Count commands pending for a lun * @rq: Block request * @data: SCSI device reference - * @reserved: Unused * * This is an iterator function called for each SCSI command in * a host and if the command is pending in the LLD for the @@ -461,8 +456,7 @@ out: * Return: true always. */ -static bool mpi3mr_count_dev_pending(struct request *rq, - void *data, bool reserved) +static bool mpi3mr_count_dev_pending(struct request *rq, void *data) { struct scsi_device *sdev = (struct scsi_device *)data; struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; @@ -485,7 +479,6 @@ out: * mpi3mr_count_tgt_pending - Count commands pending for target * @rq: Block request * @data: SCSI target reference - * @reserved: Unused * * This is an iterator function called for each SCSI command in * a host and if the command is pending in the LLD for the @@ -495,8 +488,7 @@ out: * Return: true always. */ -static bool mpi3mr_count_tgt_pending(struct request *rq, - void *data, bool reserved) +static bool mpi3mr_count_tgt_pending(struct request *rq, void *data) { struct scsi_target *starget = (struct scsi_target *)data; struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index b519f4b59d30..5e8887fa02c8 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -11386,6 +11386,7 @@ scsih_shutdown(struct pci_dev *pdev) _scsih_ir_shutdown(ioc); _scsih_nvme_shutdown(ioc); mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_stop_watchdog(ioc); ioc->shost_recovery = 1; mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); ioc->shost_recovery = 0; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 49ef864df581..b776cefc7cda 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -139,7 +139,7 @@ static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd) * * Note: this function must be called only for a command that has timed out. * Because the block layer marks a request as complete before it calls - * scsi_times_out(), a .scsi_done() call from the LLD for a command that has + * scsi_timeout(), a .scsi_done() call from the LLD for a command that has * timed out do not have any effect. Hence it is safe to call * scsi_finish_command() from this function. */ @@ -316,7 +316,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) } /** - * scsi_times_out - Timeout function for normal scsi commands. + * scsi_timeout - Timeout function for normal scsi commands. * @req: request that is timing out. * * Notes: @@ -325,7 +325,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) * normal completion function determines that the timer has already * fired, then it mustn't do anything. */ -enum blk_eh_timer_return scsi_times_out(struct request *req) +enum blk_eh_timer_return scsi_timeout(struct request *req) { struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); enum blk_eh_timer_return rtn = BLK_EH_DONE; @@ -1779,7 +1779,7 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q, * scsi_noretry_cmd - determine if command should be failed fast * @scmd: SCSI cmd to examine. 
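With reserved requests filtered out by the block layer, the scsi_times_out() -> scsi_timeout() rename above lets the handler be wired into the blk-mq ops directly; the scsi_lib.c hunk further down deletes the wrapper that used to special-case reserved requests. A hedged sketch of the simplified hook shape, under invented example_* names:

#include <linux/blk-mq.h>

/* The .timeout hook now receives only the request; the old
 * "if (reserved) return BLK_EH_RESET_TIMER;" branch has no
 * equivalent because reserved requests never reach it. */
static enum blk_eh_timer_return example_timeout(struct request *rq)
{
	return BLK_EH_DONE;
}

static const struct blk_mq_ops example_mq_ops = {
	.timeout = example_timeout,
};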
*/ -int scsi_noretry_cmd(struct scsi_cmnd *scmd) +bool scsi_noretry_cmd(struct scsi_cmnd *scmd) { struct request *req = scsi_cmd_to_rq(scmd); @@ -1789,19 +1789,19 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd) case DID_TIME_OUT: goto check_type; case DID_BUS_BUSY: - return req->cmd_flags & REQ_FAILFAST_TRANSPORT; + return !!(req->cmd_flags & REQ_FAILFAST_TRANSPORT); case DID_PARITY: - return req->cmd_flags & REQ_FAILFAST_DEV; + return !!(req->cmd_flags & REQ_FAILFAST_DEV); case DID_ERROR: if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT) - return 0; + return false; fallthrough; case DID_SOFT_ERROR: - return req->cmd_flags & REQ_FAILFAST_DRIVER; + return !!(req->cmd_flags & REQ_FAILFAST_DRIVER); } if (!scsi_status_is_check_condition(scmd->result)) - return 0; + return false; check_type: /* @@ -1809,9 +1809,9 @@ check_type: * the check condition was retryable. */ if (req->cmd_flags & REQ_FAILFAST_DEV || blk_rq_is_passthrough(req)) - return 1; + return true; - return 0; + return false; } /** diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index a480c4d589f5..729e309e6034 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -450,7 +450,7 @@ static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode) goto out_put_request; ret = 0; - if (hdr->iovec_count) { + if (hdr->iovec_count && hdr->dxfer_len) { struct iov_iter i; struct iovec *iov = NULL; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 6ffc9e4258a8..17a617db9ae0 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -163,7 +163,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy) * Requeue this command. It will go before all other commands * that are already in the queue. Schedule requeue work under * lock such that the kblockd_schedule_work() call happens - * before blk_cleanup_queue() finishes. + * before blk_mq_destroy_queue() finishes. */ cmd->result = 0; @@ -209,8 +209,8 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, unsigned char *sense, struct scsi_sense_hdr *sshdr, - int timeout, int retries, u64 flags, req_flags_t rq_flags, - int *resid) + int timeout, int retries, blk_opf_t flags, + req_flags_t rq_flags, int *resid) { struct request *req; struct scsi_cmnd *scmd; @@ -424,9 +424,9 @@ static void scsi_starved_list_run(struct Scsi_Host *shost) * it and the queue. Mitigate by taking a reference to the * queue and never touching the sdev again after we drop the * host lock. Note: if __scsi_remove_device() invokes - * blk_cleanup_queue() before the queue is run from this + * blk_mq_destroy_queue() before the queue is run from this * function then blk_run_queue() will return immediately since - * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING. + * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING. 
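Two type-hygiene themes run through these hunks: request flags move from plain integers (int/u64) to the dedicated blk_opf_t bitwise type, and predicates such as scsi_noretry_cmd() return bool, with !!(...) forcing masked bits down to 0 or 1. A compact sketch of both idioms, assuming only the flag names already shown above:

#include <linux/blk_types.h>
#include <linux/blk-mq.h>

/* blk_opf_t is a __bitwise type, so sparse can warn when these
 * flags are mixed with ordinary integers. */
static const blk_opf_t example_failfast = REQ_FAILFAST_DEV |
					  REQ_FAILFAST_TRANSPORT |
					  REQ_FAILFAST_DRIVER;

static bool example_transport_failfast(const struct request *req)
{
	/* !! turns the masked flag bits into a clean bool. */
	return !!(req->cmd_flags & REQ_FAILFAST_TRANSPORT);
}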
*/ slq = sdev->request_queue; if (!blk_get_queue(slq)) @@ -633,7 +633,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result) */ static unsigned int scsi_rq_err_bytes(const struct request *rq) { - unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; + blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK; unsigned int bytes = 0; struct bio *bio; @@ -1125,12 +1125,12 @@ static void scsi_initialize_rq(struct request *rq) cmd->retries = 0; } -struct request *scsi_alloc_request(struct request_queue *q, - unsigned int op, blk_mq_req_flags_t flags) +struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf, + blk_mq_req_flags_t flags) { struct request *rq; - rq = blk_mq_alloc_request(q, op, flags); + rq = blk_mq_alloc_request(q, opf, flags); if (!IS_ERR(rq)) scsi_initialize_rq(rq); return rq; @@ -1790,14 +1790,6 @@ out_put_budget: return ret; } -static enum blk_eh_timer_return scsi_timeout(struct request *req, - bool reserved) -{ - if (reserved) - return BLK_EH_RESET_TIMER; - return scsi_times_out(req); -} - static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node) { diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 5c4786310a31..429663bd78ec 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -72,7 +72,7 @@ extern void scsi_exit_devinfo(void); /* scsi_error.c */ extern void scmd_eh_abort_handler(struct work_struct *work); -extern enum blk_eh_timer_return scsi_times_out(struct request *req); +extern enum blk_eh_timer_return scsi_timeout(struct request *req); extern int scsi_error_handler(void *host); extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd); extern void scsi_eh_wakeup(struct Scsi_Host *shost); @@ -82,7 +82,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost, struct list_head *done_q); int scsi_eh_get_sense(struct list_head *work_q, struct list_head *done_q); -int scsi_noretry_cmd(struct scsi_cmnd *scmd); +bool scsi_noretry_cmd(struct scsi_cmnd *scmd); void scsi_eh_done(struct scsi_cmnd *scmd); /* scsi_lib.c */ diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 43949798a2e4..aa70d9282161 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1475,7 +1475,7 @@ void __scsi_remove_device(struct scsi_device *sdev) scsi_device_set_state(sdev, SDEV_DEL); mutex_unlock(&sdev->state_mutex); - blk_cleanup_queue(sdev->request_queue); + blk_mq_destroy_queue(sdev->request_queue); cancel_work_sync(&sdev->requeue_work); if (sdev->host->hostt->slave_destroy) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a1a2ac09066f..eb02d939dd44 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2934,15 +2934,15 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) if (sdkp->device->type == TYPE_ZBC) { /* Host-managed */ - blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM); + disk_set_zoned(sdkp->disk, BLK_ZONED_HM); } else { sdkp->zoned = zoned; if (sdkp->zoned == 1) { /* Host-aware */ - blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA); + disk_set_zoned(sdkp->disk, BLK_ZONED_HA); } else { /* Regular disk or drive managed disk */ - blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE); + disk_set_zoned(sdkp->disk, BLK_ZONED_NONE); } } @@ -3440,8 +3440,8 @@ static int sd_probe(struct device *dev) if (!sdkp) goto out; - gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE, - &sd_bio_compl_lkclass); + gd = blk_mq_alloc_disk_for_queue(sdp->request_queue, + &sd_bio_compl_lkclass); if 
(!gd) goto out_free; diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 6acc4f406eb8..bd15624c6322 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -529,7 +529,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd, struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->q->disk); unsigned int zno = blk_rq_zone_no(rq); - enum req_opf op = req_op(rq); + enum req_op op = req_op(rq); unsigned long flags; /* @@ -855,7 +855,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) if (sdkp->zone_info.zone_blocks == zone_blocks && sdkp->zone_info.nr_zones == nr_zones && - disk->queue->nr_zones == nr_zones) + disk->nr_zones == nr_zones) goto unlock; flags = memalloc_noio_save(); @@ -929,7 +929,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) /* * This can happen for a host aware disk with partitions. * The block device zone model was already cleared by - * blk_queue_set_zoned(). Only free the scsi disk zone + * disk_set_zoned(). Only free the scsi disk zone * information and exit early. */ sd_zbc_free_zone_info(sdkp); @@ -950,10 +950,10 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE); if (sdkp->zones_max_open == U32_MAX) - blk_queue_max_open_zones(q, 0); + disk_set_max_open_zones(disk, 0); else - blk_queue_max_open_zones(q, sdkp->zones_max_open); - blk_queue_max_active_zones(q, 0); + disk_set_max_open_zones(disk, sdkp->zones_max_open); + disk_set_max_active_zones(disk, 0); nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks); /* diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 32d3b8274f14..a278b739d0c5 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -624,8 +624,8 @@ static int sr_probe(struct device *dev) if (!cd) goto fail; - disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE, - &sr_bio_compl_lkclass); + disk = blk_mq_alloc_disk_for_queue(sdev->request_queue, + &sr_bio_compl_lkclass); if (!disk) goto fail_free; mutex_init(&cd->lock); diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c index 358df7510186..828d81e02b37 100644 --- a/drivers/sh/intc/chip.c +++ b/drivers/sh/intc/chip.c @@ -72,7 +72,7 @@ static int intc_set_affinity(struct irq_data *data, if (!cpumask_intersects(cpumask, cpu_online_mask)) return -1; - cpumask_copy(irq_data_get_affinity_mask(data), cpumask); + irq_data_update_affinity(data, cpumask); return IRQ_SET_MASK_OK_NOCOPY; } diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index 86ccf5970bc1..e461c071189b 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -9,6 +9,7 @@ source "drivers/soc/atmel/Kconfig" source "drivers/soc/bcm/Kconfig" source "drivers/soc/canaan/Kconfig" source "drivers/soc/fsl/Kconfig" +source "drivers/soc/fujitsu/Kconfig" source "drivers/soc/imx/Kconfig" source "drivers/soc/ixp4xx/Kconfig" source "drivers/soc/litex/Kconfig" diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 919716e0e700..69ba6508cf2c 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_SOC_CANAAN) += canaan/ obj-$(CONFIG_ARCH_DOVE) += dove/ obj-$(CONFIG_MACH_DOVE) += dove/ obj-y += fsl/ +obj-y += fujitsu/ obj-$(CONFIG_ARCH_GEMINI) += gemini/ obj-y += imx/ obj-y += ixp4xx/ diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c index 78f0f1aeca57..92125dd65f33 100644 --- a/drivers/soc/amlogic/meson-mx-socinfo.c 
+++ b/drivers/soc/amlogic/meson-mx-socinfo.c @@ -126,6 +126,7 @@ static int __init meson_mx_socinfo_init(void) np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids); if (np) { analog_top_regmap = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(analog_top_regmap)) return PTR_ERR(analog_top_regmap); diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c index a10a417a87db..e93518763526 100644 --- a/drivers/soc/amlogic/meson-secure-pwrc.c +++ b/drivers/soc/amlogic/meson-secure-pwrc.c @@ -152,8 +152,10 @@ static int meson_secure_pwrc_probe(struct platform_device *pdev) } pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL); - if (!pwrc) + if (!pwrc) { + of_node_put(sm_np); return -ENOMEM; + } pwrc->fw = meson_sm_get(sm_np); of_node_put(sm_np); diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c index 1e0041ec8132..5bcd047768b6 100644 --- a/drivers/soc/bcm/bcm2835-power.c +++ b/drivers/soc/bcm/bcm2835-power.c @@ -126,8 +126,7 @@ #define ASB_AXI_BRDG_ID 0x20 -#define ASB_READ(reg) readl(power->asb + (reg)) -#define ASB_WRITE(reg, val) writel(PM_PASSWORD | (val), power->asb + (reg)) +#define BCM2835_BRDG_ID 0x62726467 struct bcm2835_power_domain { struct generic_pm_domain base; @@ -142,24 +141,41 @@ struct bcm2835_power { void __iomem *base; /* AXI Async bridge registers. */ void __iomem *asb; + /* RPiVid bridge registers. */ + void __iomem *rpivid_asb; struct genpd_onecell_data pd_xlate; struct bcm2835_power_domain domains[BCM2835_POWER_DOMAIN_COUNT]; struct reset_controller_dev reset; }; -static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) +static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable) { + void __iomem *base = power->asb; u64 start; + u32 val; - if (!reg) + switch (reg) { + case 0: return 0; + case ASB_V3D_S_CTRL: + case ASB_V3D_M_CTRL: + if (power->rpivid_asb) + base = power->rpivid_asb; + break; + } start = ktime_get_ns(); /* Enable the module's async AXI bridges. */ - ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP); - while (ASB_READ(reg) & ASB_ACK) { + if (enable) { + val = readl(base + reg) & ~ASB_REQ_STOP; + } else { + val = readl(base + reg) | ASB_REQ_STOP; + } + writel(PM_PASSWORD | val, base + reg); + + while (readl(base + reg) & ASB_ACK) { cpu_relax(); if (ktime_get_ns() - start >= 1000) return -ETIMEDOUT; @@ -168,30 +184,24 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) return 0; } -static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg) +static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) { - u64 start; - - if (!reg) - return 0; - - start = ktime_get_ns(); - - /* Enable the module's async AXI bridges. 
*/ - ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP); - while (!(ASB_READ(reg) & ASB_ACK)) { - cpu_relax(); - if (ktime_get_ns() - start >= 1000) - return -ETIMEDOUT; - } + return bcm2835_asb_control(power, reg, true); +} - return 0; +static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg) +{ + return bcm2835_asb_control(power, reg, false); } static int bcm2835_power_power_off(struct bcm2835_power_domain *pd, u32 pm_reg) { struct bcm2835_power *power = pd->power; + /* We don't run this on BCM2711 */ + if (power->rpivid_asb) + return 0; + /* Enable functional isolation */ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISFUNC); @@ -213,6 +223,10 @@ static int bcm2835_power_power_on(struct bcm2835_power_domain *pd, u32 pm_reg) int inrush; bool powok; + /* We don't run this on BCM2711 */ + if (power->rpivid_asb) + return 0; + /* If it was already powered on by the fw, leave it that way. */ if (PM_READ(pm_reg) & PM_POWUP) return 0; @@ -626,13 +640,23 @@ static int bcm2835_power_probe(struct platform_device *pdev) power->dev = dev; power->base = pm->base; power->asb = pm->asb; + power->rpivid_asb = pm->rpivid_asb; - id = ASB_READ(ASB_AXI_BRDG_ID); - if (id != 0x62726467 /* "BRDG" */) { + id = readl(power->asb + ASB_AXI_BRDG_ID); + if (id != BCM2835_BRDG_ID /* "BRDG" */) { dev_err(dev, "ASB register ID returned 0x%08x\n", id); return -ENODEV; } + if (power->rpivid_asb) { + id = readl(power->rpivid_asb + ASB_AXI_BRDG_ID); + if (id != BCM2835_BRDG_ID /* "BRDG" */) { + dev_err(dev, "RPiVid ASB register ID returned 0x%08x\n", + id); + return -ENODEV; + } + } + power->pd_xlate.domains = devm_kcalloc(dev, ARRAY_SIZE(power_domain_names), sizeof(*power->pd_xlate.domains), diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c index 2c975d79fe8e..1467bbd59690 100644 --- a/drivers/soc/bcm/brcmstb/biuctrl.c +++ b/drivers/soc/bcm/brcmstb/biuctrl.c @@ -340,12 +340,12 @@ static int __init brcmstb_biuctrl_init(void) ret = setup_hifcpubiuctrl_regs(np); if (ret) - return ret; + goto out_put; ret = mcp_write_pairing_set(); if (ret) { pr_err("MCP: Unable to disable write pairing!\n"); - return ret; + goto out_put; } a72_b53_rac_enable_all(np); @@ -353,6 +353,9 @@ static int __init brcmstb_biuctrl_init(void) #ifdef CONFIG_PM_SLEEP register_syscore_ops(&brcmstb_cpu_credit_syscore_ops); #endif - return 0; + ret = 0; +out_put: + of_node_put(np); + return ret; } early_initcall(brcmstb_biuctrl_init); diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c index 70ad0f3dce28..d6b30d521307 100644 --- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c +++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c @@ -721,7 +721,7 @@ static int brcmstb_pm_probe(struct platform_device *pdev) ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs; ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs; /* - * Slightly grosss to use the phy ver to get a memc, + * Slightly gross to use the phy ver to get a memc, * offset but that is the only versioned things so far * we can test for. 
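Several of the SoC fixes in this area share one pattern: a reference taken by of_find_*() must be dropped on every exit path, including the error paths that previously leaked it. A minimal sketch of the fixed shape, with made-up example_* names:

#include <linux/of.h>

static int example_setup(struct device_node *np)
{
	return 0;	/* stand-in for the real register setup */
}

static int __init example_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "vendor,example");
	if (!np)
		return -ENODEV;

	ret = example_setup(np);

	of_node_put(np);	/* balances of_find_compatible_node() */
	return ret;
}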
*/ diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c index 5ed2fc1c53a0..6bf3e6a980ff 100644 --- a/drivers/soc/fsl/guts.c +++ b/drivers/soc/fsl/guts.c @@ -14,21 +14,16 @@ #include <linux/platform_device.h> #include <linux/fsl/guts.h> -struct guts { - struct ccsr_guts __iomem *regs; - bool little_endian; -}; - struct fsl_soc_die_attr { char *die; u32 svr; u32 mask; }; -static struct guts *guts; -static struct soc_device_attribute soc_dev_attr; -static struct soc_device *soc_dev; - +struct fsl_soc_data { + const char *sfp_compat; + u32 uid_offset; +}; /* SoC die attribute definition for QorIQ platform */ static const struct fsl_soc_die_attr fsl_soc_die[] = { @@ -120,88 +115,36 @@ static const struct fsl_soc_die_attr *fsl_soc_die_match( return NULL; } -static u32 fsl_guts_get_svr(void) -{ - u32 svr = 0; - - if (!guts || !guts->regs) - return svr; - - if (guts->little_endian) - svr = ioread32(&guts->regs->svr); - else - svr = ioread32be(&guts->regs->svr); - - return svr; -} - -static int fsl_guts_probe(struct platform_device *pdev) +static u64 fsl_guts_get_soc_uid(const char *compat, unsigned int offset) { - struct device_node *root, *np = pdev->dev.of_node; - struct device *dev = &pdev->dev; - const struct fsl_soc_die_attr *soc_die; - const char *machine; - u32 svr; - - /* Initialize guts */ - guts = devm_kzalloc(dev, sizeof(*guts), GFP_KERNEL); - if (!guts) - return -ENOMEM; - - guts->little_endian = of_property_read_bool(np, "little-endian"); + struct device_node *np; + void __iomem *sfp_base; + u64 uid; - guts->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(guts->regs)) - return PTR_ERR(guts->regs); + np = of_find_compatible_node(NULL, NULL, compat); + if (!np) + return 0; - /* Register soc device */ - root = of_find_node_by_path("/"); - if (of_property_read_string(root, "model", &machine)) - of_property_read_string_index(root, "compatible", 0, &machine); - if (machine) { - soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); - if (!soc_dev_attr.machine) { - of_node_put(root); - return -ENOMEM; - } + sfp_base = of_iomap(np, 0); + if (!sfp_base) { + of_node_put(np); + return 0; } - of_node_put(root); - svr = fsl_guts_get_svr(); - soc_die = fsl_soc_die_match(svr, fsl_soc_die); - if (soc_die) { - soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, - "QorIQ %s", soc_die->die); - } else { - soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "QorIQ"); - } - if (!soc_dev_attr.family) - return -ENOMEM; - soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL, - "svr:0x%08x", svr); - if (!soc_dev_attr.soc_id) - return -ENOMEM; - soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d", - (svr >> 4) & 0xf, svr & 0xf); - if (!soc_dev_attr.revision) - return -ENOMEM; + uid = ioread32(sfp_base + offset); + uid <<= 32; + uid |= ioread32(sfp_base + offset + 4); - soc_dev = soc_device_register(&soc_dev_attr); - if (IS_ERR(soc_dev)) - return PTR_ERR(soc_dev); + iounmap(sfp_base); + of_node_put(np); - pr_info("Machine: %s\n", soc_dev_attr.machine); - pr_info("SoC family: %s\n", soc_dev_attr.family); - pr_info("SoC ID: %s, Revision: %s\n", - soc_dev_attr.soc_id, soc_dev_attr.revision); - return 0; + return uid; } -static int fsl_guts_remove(struct platform_device *dev) -{ - soc_device_unregister(soc_dev); - return 0; -} +static const struct fsl_soc_data ls1028a_data = { + .sfp_compat = "fsl,ls1028a-sfp", + .uid_offset = 0x21c, +}; /* * Table for matching compatible strings, for device tree @@ -231,28 +174,106 @@ static const struct of_device_id fsl_guts_of_match[] 
= { { .compatible = "fsl,ls1012a-dcfg", }, { .compatible = "fsl,ls1046a-dcfg", }, { .compatible = "fsl,lx2160a-dcfg", }, - { .compatible = "fsl,ls1028a-dcfg", }, + { .compatible = "fsl,ls1028a-dcfg", .data = &ls1028a_data}, {} }; -MODULE_DEVICE_TABLE(of, fsl_guts_of_match); - -static struct platform_driver fsl_guts_driver = { - .driver = { - .name = "fsl-guts", - .of_match_table = fsl_guts_of_match, - }, - .probe = fsl_guts_probe, - .remove = fsl_guts_remove, -}; static int __init fsl_guts_init(void) { - return platform_driver_register(&fsl_guts_driver); -} -core_initcall(fsl_guts_init); + struct soc_device_attribute *soc_dev_attr; + static struct soc_device *soc_dev; + const struct fsl_soc_die_attr *soc_die; + const struct fsl_soc_data *soc_data; + const struct of_device_id *match; + struct ccsr_guts __iomem *regs; + const char *machine = NULL; + struct device_node *np; + bool little_endian; + u64 soc_uid = 0; + u32 svr; + int ret; -static void __exit fsl_guts_exit(void) -{ - platform_driver_unregister(&fsl_guts_driver); + np = of_find_matching_node_and_match(NULL, fsl_guts_of_match, &match); + if (!np) + return 0; + soc_data = match->data; + + regs = of_iomap(np, 0); + if (!regs) { + of_node_put(np); + return -ENOMEM; + } + + little_endian = of_property_read_bool(np, "little-endian"); + if (little_endian) + svr = ioread32(®s->svr); + else + svr = ioread32be(®s->svr); + iounmap(regs); + of_node_put(np); + + /* Register soc device */ + soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + if (!soc_dev_attr) + return -ENOMEM; + + if (of_property_read_string(of_root, "model", &machine)) + of_property_read_string_index(of_root, "compatible", 0, &machine); + if (machine) { + soc_dev_attr->machine = kstrdup(machine, GFP_KERNEL); + if (!soc_dev_attr->machine) + goto err_nomem; + } + + soc_die = fsl_soc_die_match(svr, fsl_soc_die); + if (soc_die) { + soc_dev_attr->family = kasprintf(GFP_KERNEL, "QorIQ %s", + soc_die->die); + } else { + soc_dev_attr->family = kasprintf(GFP_KERNEL, "QorIQ"); + } + if (!soc_dev_attr->family) + goto err_nomem; + + soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "svr:0x%08x", svr); + if (!soc_dev_attr->soc_id) + goto err_nomem; + + soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d", + (svr >> 4) & 0xf, svr & 0xf); + if (!soc_dev_attr->revision) + goto err_nomem; + + if (soc_data) + soc_uid = fsl_guts_get_soc_uid(soc_data->sfp_compat, + soc_data->uid_offset); + if (soc_uid) + soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", + soc_uid); + + soc_dev = soc_device_register(soc_dev_attr); + if (IS_ERR(soc_dev)) { + ret = PTR_ERR(soc_dev); + goto err; + } + + pr_info("Machine: %s\n", soc_dev_attr->machine); + pr_info("SoC family: %s\n", soc_dev_attr->family); + pr_info("SoC ID: %s, Revision: %s\n", + soc_dev_attr->soc_id, soc_dev_attr->revision); + + return 0; + +err_nomem: + ret = -ENOMEM; +err: + kfree(soc_dev_attr->machine); + kfree(soc_dev_attr->family); + kfree(soc_dev_attr->soc_id); + kfree(soc_dev_attr->revision); + kfree(soc_dev_attr->serial_number); + kfree(soc_dev_attr); + + return ret; } -module_exit(fsl_guts_exit); +core_initcall(fsl_guts_init); diff --git a/drivers/soc/fujitsu/Kconfig b/drivers/soc/fujitsu/Kconfig new file mode 100644 index 000000000000..987731e80612 --- /dev/null +++ b/drivers/soc/fujitsu/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "fujitsu SoC drivers" + +config A64FX_DIAG + bool "A64FX diag driver" + depends on ARM64 + depends on ACPI + help + Say Y here if you want to enable diag interrupt on 
Fujitsu A64FX. + This driver enables BMC's diagnostic requests and enables + A64FX-specific interrupts. This allows administrators to obtain + kernel dumps via diagnostic requests using ipmitool, etc. + + If unsure, say N. + +endmenu diff --git a/drivers/soc/fujitsu/Makefile b/drivers/soc/fujitsu/Makefile new file mode 100644 index 000000000000..945bc1c14ad0 --- /dev/null +++ b/drivers/soc/fujitsu/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_A64FX_DIAG) += a64fx-diag.o diff --git a/drivers/soc/fujitsu/a64fx-diag.c b/drivers/soc/fujitsu/a64fx-diag.c new file mode 100644 index 000000000000..d87f348427bf --- /dev/null +++ b/drivers/soc/fujitsu/a64fx-diag.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * A64FX diag driver. + * Copyright (c) 2022 Fujitsu Ltd. + */ + +#include <linux/acpi.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#define A64FX_DIAG_IRQ 1 +#define BMC_DIAG_INTERRUPT_ENABLE 0x40 +#define BMC_DIAG_INTERRUPT_STATUS 0x44 +#define BMC_DIAG_INTERRUPT_MASK BIT(31) + +struct a64fx_diag_priv { + void __iomem *mmsc_reg_base; + int irq; + bool has_nmi; +}; + +static irqreturn_t a64fx_diag_handler_nmi(int irq, void *dev_id) +{ + nmi_panic(NULL, "a64fx_diag: interrupt received\n"); + + return IRQ_HANDLED; +} + +static irqreturn_t a64fx_diag_handler_irq(int irq, void *dev_id) +{ + panic("a64fx_diag: interrupt received\n"); + + return IRQ_HANDLED; +} + +static void a64fx_diag_interrupt_clear(struct a64fx_diag_priv *priv) +{ + void __iomem *diag_status_reg_addr; + u32 mmsc; + + diag_status_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_STATUS; + mmsc = readl(diag_status_reg_addr); + if (mmsc & BMC_DIAG_INTERRUPT_MASK) + writel(BMC_DIAG_INTERRUPT_MASK, diag_status_reg_addr); +} + +static void a64fx_diag_interrupt_enable(struct a64fx_diag_priv *priv) +{ + void __iomem *diag_enable_reg_addr; + u32 mmsc; + + diag_enable_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_ENABLE; + mmsc = readl(diag_enable_reg_addr); + if (!(mmsc & BMC_DIAG_INTERRUPT_MASK)) { + mmsc |= BMC_DIAG_INTERRUPT_MASK; + writel(mmsc, diag_enable_reg_addr); + } +} + +static void a64fx_diag_interrupt_disable(struct a64fx_diag_priv *priv) +{ + void __iomem *diag_enable_reg_addr; + u32 mmsc; + + diag_enable_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_ENABLE; + mmsc = readl(diag_enable_reg_addr); + if (mmsc & BMC_DIAG_INTERRUPT_MASK) { + mmsc &= ~BMC_DIAG_INTERRUPT_MASK; + writel(mmsc, diag_enable_reg_addr); + } +} + +static int a64fx_diag_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct a64fx_diag_priv *priv; + unsigned long irq_flags; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + + priv->mmsc_reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->mmsc_reg_base)) + return PTR_ERR(priv->mmsc_reg_base); + + priv->irq = platform_get_irq(pdev, A64FX_DIAG_IRQ); + if (priv->irq < 0) + return priv->irq; + + platform_set_drvdata(pdev, priv); + + irq_flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_AUTOEN | + IRQF_NO_THREAD; + ret = request_nmi(priv->irq, &a64fx_diag_handler_nmi, irq_flags, + "a64fx_diag_nmi", NULL); + if (ret) { + ret = request_irq(priv->irq, &a64fx_diag_handler_irq, + irq_flags, "a64fx_diag_irq", NULL); + if (ret) { + dev_err(dev, "cannot register IRQ %d\n", ret); + return ret; + } + enable_irq(priv->irq); + } else { + enable_nmi(priv->irq); + priv->has_nmi = true; + 
} + + a64fx_diag_interrupt_clear(priv); + a64fx_diag_interrupt_enable(priv); + + return 0; +} + +static int a64fx_diag_remove(struct platform_device *pdev) +{ + struct a64fx_diag_priv *priv = platform_get_drvdata(pdev); + + a64fx_diag_interrupt_disable(priv); + a64fx_diag_interrupt_clear(priv); + + if (priv->has_nmi) + free_nmi(priv->irq, NULL); + else + free_irq(priv->irq, NULL); + + return 0; +} + +static const struct acpi_device_id a64fx_diag_acpi_match[] = { + { "FUJI2007", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, a64fx_diag_acpi_match); + + +static struct platform_driver a64fx_diag_driver = { + .driver = { + .name = "a64fx_diag_driver", + .acpi_match_table = ACPI_PTR(a64fx_diag_acpi_match), + }, + .probe = a64fx_diag_probe, + .remove = a64fx_diag_remove, +}; + +module_platform_driver(a64fx_diag_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Hitomi Hasegawa <hasegawa-hitomi@fujitsu.com>"); +MODULE_DESCRIPTION("A64FX diag driver"); diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index 85aa86e1338a..6383a4edc360 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -328,7 +328,9 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd) if (!IS_ERR(domain->regulator)) { ret = regulator_enable(domain->regulator); if (ret) { - dev_err(domain->dev, "failed to enable regulator\n"); + dev_err(domain->dev, + "failed to enable regulator: %pe\n", + ERR_PTR(ret)); goto out_put_pm; } } @@ -467,7 +469,9 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd) if (!IS_ERR(domain->regulator)) { ret = regulator_disable(domain->regulator); if (ret) { - dev_err(domain->dev, "failed to disable regulator\n"); + dev_err(domain->dev, + "failed to disable regulator: %pe\n", + ERR_PTR(ret)); return ret; } } diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c index 7ebc28709e94..dff7529268e4 100644 --- a/drivers/soc/imx/imx8m-blk-ctrl.c +++ b/drivers/soc/imx/imx8m-blk-ctrl.c @@ -216,7 +216,7 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev) bc->bus_power_dev = genpd_dev_pm_attach_by_name(dev, "bus"); if (IS_ERR(bc->bus_power_dev)) return dev_err_probe(dev, PTR_ERR(bc->bus_power_dev), - "failed to attach power domain\n"); + "failed to attach power domain \"bus\"\n"); for (i = 0; i < bc_data->num_domains; i++) { const struct imx8m_blk_ctrl_domain_data *data = &bc_data->domains[i]; @@ -238,7 +238,8 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev) dev_pm_domain_attach_by_name(dev, data->gpc_name); if (IS_ERR(domain->power_dev)) { dev_err_probe(dev, PTR_ERR(domain->power_dev), - "failed to attach power domain\n"); + "failed to attach power domain \"%s\"\n", + data->gpc_name); ret = PTR_ERR(domain->power_dev); goto cleanup_pds; } @@ -251,7 +252,9 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev) ret = pm_genpd_init(&domain->genpd, NULL, true); if (ret) { - dev_err_probe(dev, ret, "failed to init power domain\n"); + dev_err_probe(dev, ret, + "failed to init power domain \"%s\"\n", + data->gpc_name); dev_pm_domain_detach(domain->power_dev, true); goto cleanup_pds; } diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index fdd8bc08569e..3c3eedea35f7 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -73,4 +73,14 @@ config MTK_MMSYS Say yes here to add support for the MediaTek Multimedia Subsystem (MMSYS). 
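The new a64fx-diag driver above registers its line as an NMI when the irqchip supports it and falls back to a regular interrupt otherwise; because IRQF_NO_AUTOEN is passed, the line must then be enabled explicitly. A hedged sketch of that probe-time pattern — the example_* names are invented:

#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_setup_line(int irq, bool *has_nmi)
{
	const unsigned long flags = IRQF_PERCPU | IRQF_NOBALANCING |
				    IRQF_NO_AUTOEN | IRQF_NO_THREAD;
	int ret;

	ret = request_nmi(irq, example_handler, flags, "example_nmi", NULL);
	if (!ret) {
		*has_nmi = true;
		enable_nmi(irq);	/* IRQF_NO_AUTOEN left it masked */
		return 0;
	}

	/* No NMI delivery on this line; fall back to a normal IRQ. */
	ret = request_irq(irq, example_handler, flags, "example_irq", NULL);
	if (ret)
		return ret;

	*has_nmi = false;
	enable_irq(irq);
	return 0;
}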
+config MTK_SVS + tristate "MediaTek Smart Voltage Scaling(SVS)" + depends on MTK_EFUSE && NVMEM + help + The Smart Voltage Scaling(SVS) engine is a piece of hardware + which has several controllers(banks) for calculating suitable + voltage to different power domains(CPU/GPU/CCI) according to + chip process corner, temperatures and other factors. Then DVFS + driver could apply SVS bank voltage to PMIC/Buck. + endmenu diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile index 90270f8114ed..0e9e703c931a 100644 --- a/drivers/soc/mediatek/Makefile +++ b/drivers/soc/mediatek/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o obj-$(CONFIG_MTK_MMSYS) += mtk-mutex.o +obj-$(CONFIG_MTK_SVS) += mtk-svs.o diff --git a/drivers/soc/mediatek/mt6795-pm-domains.h b/drivers/soc/mediatek/mt6795-pm-domains.h new file mode 100644 index 000000000000..ef07c9dfdd9b --- /dev/null +++ b/drivers/soc/mediatek/mt6795-pm-domains.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __SOC_MEDIATEK_MT6795_PM_DOMAINS_H +#define __SOC_MEDIATEK_MT6795_PM_DOMAINS_H + +#include "mtk-pm-domains.h" +#include <dt-bindings/power/mt6795-power.h> + +/* + * MT6795 power domain support + */ + +static const struct scpsys_domain_data scpsys_domain_data_mt6795[] = { + [MT6795_POWER_DOMAIN_VDEC] = { + .name = "vdec", + .sta_mask = PWR_STATUS_VDEC, + .ctl_offs = SPM_VDE_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + }, + [MT6795_POWER_DOMAIN_VENC] = { + .name = "venc", + .sta_mask = PWR_STATUS_VENC, + .ctl_offs = SPM_VEN_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + }, + [MT6795_POWER_DOMAIN_ISP] = { + .name = "isp", + .sta_mask = PWR_STATUS_ISP, + .ctl_offs = SPM_ISP_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(13, 12), + }, + [MT6795_POWER_DOMAIN_MM] = { + .name = "mm", + .sta_mask = PWR_STATUS_DISP, + .ctl_offs = SPM_DIS_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MM_M0 | + MT8173_TOP_AXI_PROT_EN_MM_M1), + }, + }, + [MT6795_POWER_DOMAIN_MJC] = { + .name = "mjc", + .sta_mask = BIT(20), + .ctl_offs = 0x298, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + }, + [MT6795_POWER_DOMAIN_AUDIO] = { + .name = "audio", + .sta_mask = PWR_STATUS_AUDIO, + .ctl_offs = SPM_AUDIO_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + }, + [MT6795_POWER_DOMAIN_MFG_ASYNC] = { + .name = "mfg_async", + .sta_mask = PWR_STATUS_MFG_ASYNC, + .ctl_offs = SPM_MFG_ASYNC_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = 0, + }, + [MT6795_POWER_DOMAIN_MFG_2D] = { + .name = "mfg_2d", + .sta_mask = PWR_STATUS_MFG_2D, + .ctl_offs = SPM_MFG_2D_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = 
SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(13, 12), + }, + [MT6795_POWER_DOMAIN_MFG] = { + .name = "mfg", + .sta_mask = PWR_STATUS_MFG, + .ctl_offs = SPM_MFG_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(13, 8), + .sram_pdn_ack_bits = GENMASK(21, 16), + .bp_infracfg = { + BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MFG_S | + MT8173_TOP_AXI_PROT_EN_MFG_M0 | + MT8173_TOP_AXI_PROT_EN_MFG_M1 | + MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT), + }, + }, +}; + +static const struct scpsys_soc_data mt6795_scpsys_data = { + .domains_data = scpsys_domain_data_mt6795, + .num_domains = ARRAY_SIZE(scpsys_domain_data_mt6795), +}; + +#endif /* __SOC_MEDIATEK_MT6795_PM_DOMAINS_H */ diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h index 71b8757e552d..99de67fe5de8 100644 --- a/drivers/soc/mediatek/mt8183-pm-domains.h +++ b/drivers/soc/mediatek/mt8183-pm-domains.h @@ -41,6 +41,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = 0, .sram_pdn_ack_bits = 0, + .caps = MTK_SCPD_DOMAIN_SUPPLY, }, [MT8183_POWER_DOMAIN_MFG] = { .name = "mfg", diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h index bf2dd0cdc3a8..108af61854a3 100644 --- a/drivers/soc/mediatek/mt8186-pm-domains.h +++ b/drivers/soc/mediatek/mt8186-pm-domains.h @@ -51,7 +51,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = { MT8186_TOP_AXI_PROT_EN_1_CLR, MT8186_TOP_AXI_PROT_EN_1_STA), }, - .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, }, [MT8186_POWER_DOMAIN_MFG2] = { .name = "mfg2", diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h index 558c4ee4784a..b97b2051920f 100644 --- a/drivers/soc/mediatek/mt8192-pm-domains.h +++ b/drivers/soc/mediatek/mt8192-pm-domains.h @@ -58,6 +58,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_DOMAIN_SUPPLY, }, [MT8192_POWER_DOMAIN_MFG1] = { .name = "mfg1", @@ -85,6 +86,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { MT8192_TOP_AXI_PROT_EN_2_CLR, MT8192_TOP_AXI_PROT_EN_2_STA1), }, + .caps = MTK_SCPD_DOMAIN_SUPPLY, }, [MT8192_POWER_DOMAIN_MFG2] = { .name = "mfg2", diff --git a/drivers/soc/mediatek/mt8195-pm-domains.h b/drivers/soc/mediatek/mt8195-pm-domains.h index 938f4d51f5ae..d7387ea1b9c9 100644 --- a/drivers/soc/mediatek/mt8195-pm-domains.h +++ b/drivers/soc/mediatek/mt8195-pm-domains.h @@ -67,7 +67,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = { .ctl_offs = 0x334, .pwr_sta_offs = 0x174, .pwr_sta2nd_offs = 0x178, - .caps = MTK_SCPD_ACTIVE_WAKEUP, + .caps = MTK_SCPD_ACTIVE_WAKEUP | MTK_SCPD_ALWAYS_ON, }, [MT8195_POWER_DOMAIN_CSI_RX_TOP] = { .name = "csi_rx_top", @@ -162,7 +162,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = { MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR, MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1), }, - .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, }, [MT8195_POWER_DOMAIN_MFG2] = { .name = "mfg2", diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h index 24129a6c25f8..7abaf048d91e 100644 --- 
a/drivers/soc/mediatek/mt8365-mmsys.h +++ b/drivers/soc/mediatek/mt8365-mmsys.h @@ -10,6 +10,9 @@ #define MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN 0xf60 #define MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0xf64 #define MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN 0xf68 +#define MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL 0xfd0 +#define MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN 0xfd8 +#define MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00 0xfdc #define MT8365_RDMA0_SOUT_COLOR0 0x1 #define MT8365_DITHER_MOUT_EN_DSI0 0x1 @@ -18,6 +21,10 @@ #define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0 #define MT8365_DISP_COLOR_SEL_IN_COLOR0 0x0 #define MT8365_OVL0_MOUT_PATH0_SEL BIT(0) +#define MT8365_RDMA1_SOUT_DPI0 0x1 +#define MT8365_DPI0_SEL_IN_RDMA1 0x0 +#define MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK 0x1 +#define MT8365_DPI0_SEL_IN_RDMA1 0x0 static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = { { @@ -55,6 +62,21 @@ static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = { MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 }, + { + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, + MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00, + MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK + }, + { + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, + MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN, + MT8365_DPI0_SEL_IN_RDMA1, MT8365_DPI0_SEL_IN_RDMA1 + }, + { + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, + MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL, + MT8365_RDMA1_SOUT_DPI0, MT8365_RDMA1_SOUT_DPI0 + }, }; #endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */ diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c index 7c65ad3d1f8a..fc13334db1b1 100644 --- a/drivers/soc/mediatek/mtk-devapc.c +++ b/drivers/soc/mediatek/mtk-devapc.c @@ -31,10 +31,7 @@ struct mtk_devapc_vio_dbgs { u32 vio_dbg1; }; -struct mtk_devapc_data { - /* numbers of violation index */ - u32 vio_idx_num; - +struct mtk_devapc_regs_ofs { /* reg offset */ u32 vio_mask_offset; u32 vio_sta_offset; @@ -46,6 +43,12 @@ struct mtk_devapc_data { u32 vio_shift_con_offset; }; +struct mtk_devapc_data { + /* numbers of violation index */ + u32 vio_idx_num; + const struct mtk_devapc_regs_ofs *regs_ofs; +}; + struct mtk_devapc_context { struct device *dev; void __iomem *infra_base; @@ -58,7 +61,7 @@ static void clear_vio_status(struct mtk_devapc_context *ctx) void __iomem *reg; int i; - reg = ctx->infra_base + ctx->data->vio_sta_offset; + reg = ctx->infra_base + ctx->data->regs_ofs->vio_sta_offset; for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++) writel(GENMASK(31, 0), reg + 4 * i); @@ -73,7 +76,7 @@ static void mask_module_irq(struct mtk_devapc_context *ctx, bool mask) u32 val; int i; - reg = ctx->infra_base + ctx->data->vio_mask_offset; + reg = ctx->infra_base + ctx->data->regs_ofs->vio_mask_offset; if (mask) val = GENMASK(31, 0); @@ -116,11 +119,11 @@ static int devapc_sync_vio_dbg(struct mtk_devapc_context *ctx) u32 val; pd_vio_shift_sta_reg = ctx->infra_base + - ctx->data->vio_shift_sta_offset; + ctx->data->regs_ofs->vio_shift_sta_offset; pd_vio_shift_sel_reg = ctx->infra_base + - ctx->data->vio_shift_sel_offset; + ctx->data->regs_ofs->vio_shift_sel_offset; pd_vio_shift_con_reg = ctx->infra_base + - ctx->data->vio_shift_con_offset; + ctx->data->regs_ofs->vio_shift_con_offset; /* Find the minimum shift group which has violation */ val = readl(pd_vio_shift_sta_reg); @@ -161,8 +164,8 @@ static void devapc_extract_vio_dbg(struct mtk_devapc_context 
*ctx) void __iomem *vio_dbg0_reg; void __iomem *vio_dbg1_reg; - vio_dbg0_reg = ctx->infra_base + ctx->data->vio_dbg0_offset; - vio_dbg1_reg = ctx->infra_base + ctx->data->vio_dbg1_offset; + vio_dbg0_reg = ctx->infra_base + ctx->data->regs_ofs->vio_dbg0_offset; + vio_dbg1_reg = ctx->infra_base + ctx->data->regs_ofs->vio_dbg1_offset; vio_dbgs.vio_dbg0 = readl(vio_dbg0_reg); vio_dbgs.vio_dbg1 = readl(vio_dbg1_reg); @@ -200,7 +203,7 @@ static irqreturn_t devapc_violation_irq(int irq_number, void *data) */ static void start_devapc(struct mtk_devapc_context *ctx) { - writel(BIT(31), ctx->infra_base + ctx->data->apc_con_offset); + writel(BIT(31), ctx->infra_base + ctx->data->regs_ofs->apc_con_offset); mask_module_irq(ctx, false); } @@ -212,11 +215,10 @@ static void stop_devapc(struct mtk_devapc_context *ctx) { mask_module_irq(ctx, true); - writel(BIT(2), ctx->infra_base + ctx->data->apc_con_offset); + writel(BIT(2), ctx->infra_base + ctx->data->regs_ofs->apc_con_offset); } -static const struct mtk_devapc_data devapc_mt6779 = { - .vio_idx_num = 511, +static const struct mtk_devapc_regs_ofs devapc_regs_ofs_mt6779 = { .vio_mask_offset = 0x0, .vio_sta_offset = 0x400, .vio_dbg0_offset = 0x900, @@ -227,11 +229,24 @@ static const struct mtk_devapc_data devapc_mt6779 = { .vio_shift_con_offset = 0xF20, }; +static const struct mtk_devapc_data devapc_mt6779 = { + .vio_idx_num = 511, + .regs_ofs = &devapc_regs_ofs_mt6779, +}; + +static const struct mtk_devapc_data devapc_mt8186 = { + .vio_idx_num = 519, + .regs_ofs = &devapc_regs_ofs_mt6779, +}; + static const struct of_device_id mtk_devapc_dt_match[] = { { .compatible = "mediatek,mt6779-devapc", .data = &devapc_mt6779, }, { + .compatible = "mediatek,mt8186-devapc", + .data = &devapc_mt8186, + }, { }, }; MODULE_DEVICE_TABLE(of, mtk_devapc_dt_match); diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c index 981d56967e7a..5ea43de4e410 100644 --- a/drivers/soc/mediatek/mtk-mutex.c +++ b/drivers/soc/mediatek/mtk-mutex.c @@ -7,10 +7,12 @@ #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/soc/mediatek/mtk-mmsys.h> #include <linux/soc/mediatek/mtk-mutex.h> +#include <linux/soc/mediatek/mtk-cmdq.h> #define MT2701_MUTEX0_MOD0 0x2c #define MT2701_MUTEX0_SOF0 0x30 @@ -80,6 +82,15 @@ #define MT8183_MUTEX_MOD_DISP_GAMMA0 16 #define MT8183_MUTEX_MOD_DISP_DITHER0 17 +#define MT8183_MUTEX_MOD_MDP_RDMA0 2 +#define MT8183_MUTEX_MOD_MDP_RSZ0 4 +#define MT8183_MUTEX_MOD_MDP_RSZ1 5 +#define MT8183_MUTEX_MOD_MDP_TDSHP0 6 +#define MT8183_MUTEX_MOD_MDP_WROT0 7 +#define MT8183_MUTEX_MOD_MDP_WDMA 8 +#define MT8183_MUTEX_MOD_MDP_AAL0 23 +#define MT8183_MUTEX_MOD_MDP_CCORR0 24 + #define MT8173_MUTEX_MOD_DISP_OVL0 11 #define MT8173_MUTEX_MOD_DISP_OVL1 12 #define MT8173_MUTEX_MOD_DISP_RDMA0 13 @@ -110,6 +121,20 @@ #define MT8195_MUTEX_MOD_DISP_DP_INTF0 21 #define MT8195_MUTEX_MOD_DISP_PWM0 27 +#define MT8365_MUTEX_MOD_DISP_OVL0 7 +#define MT8365_MUTEX_MOD_DISP_OVL0_2L 8 +#define MT8365_MUTEX_MOD_DISP_RDMA0 9 +#define MT8365_MUTEX_MOD_DISP_RDMA1 10 +#define MT8365_MUTEX_MOD_DISP_WDMA0 11 +#define MT8365_MUTEX_MOD_DISP_COLOR0 12 +#define MT8365_MUTEX_MOD_DISP_CCORR 13 +#define MT8365_MUTEX_MOD_DISP_AAL 14 +#define MT8365_MUTEX_MOD_DISP_GAMMA 15 +#define MT8365_MUTEX_MOD_DISP_DITHER 16 +#define MT8365_MUTEX_MOD_DISP_DSI0 17 +#define MT8365_MUTEX_MOD_DISP_PWM0 20 +#define MT8365_MUTEX_MOD_DISP_DPI0 22 + #define 
MT2712_MUTEX_MOD_DISP_PWM2 10 #define MT2712_MUTEX_MOD_DISP_OVL0 11 #define MT2712_MUTEX_MOD_DISP_OVL1 12 @@ -185,6 +210,7 @@ struct mtk_mutex_data { const unsigned int *mutex_sof; const unsigned int mutex_mod_reg; const unsigned int mutex_sof_reg; + const unsigned int *mutex_table_mod; const bool no_clk; }; @@ -194,6 +220,8 @@ struct mtk_mutex_ctx { void __iomem *regs; struct mtk_mutex mutex[10]; const struct mtk_mutex_data *data; + phys_addr_t addr; + struct cmdq_client_reg cmdq_reg; }; static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = { @@ -272,6 +300,17 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0, }; +static const unsigned int mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { + [MUTEX_MOD_IDX_MDP_RDMA0] = MT8183_MUTEX_MOD_MDP_RDMA0, + [MUTEX_MOD_IDX_MDP_RSZ0] = MT8183_MUTEX_MOD_MDP_RSZ0, + [MUTEX_MOD_IDX_MDP_RSZ1] = MT8183_MUTEX_MOD_MDP_RSZ1, + [MUTEX_MOD_IDX_MDP_TDSHP0] = MT8183_MUTEX_MOD_MDP_TDSHP0, + [MUTEX_MOD_IDX_MDP_WROT0] = MT8183_MUTEX_MOD_MDP_WROT0, + [MUTEX_MOD_IDX_MDP_WDMA] = MT8183_MUTEX_MOD_MDP_WDMA, + [MUTEX_MOD_IDX_MDP_AAL0] = MT8183_MUTEX_MOD_MDP_AAL0, + [MUTEX_MOD_IDX_MDP_CCORR0] = MT8183_MUTEX_MOD_MDP_CCORR0, +}; + static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8186_MUTEX_MOD_DISP_AAL0, [DDP_COMPONENT_CCORR] = MT8186_MUTEX_MOD_DISP_CCORR0, @@ -315,6 +354,22 @@ static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_DP_INTF0] = MT8195_MUTEX_MOD_DISP_DP_INTF0, }; +static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = { + [DDP_COMPONENT_AAL0] = MT8365_MUTEX_MOD_DISP_AAL, + [DDP_COMPONENT_CCORR] = MT8365_MUTEX_MOD_DISP_CCORR, + [DDP_COMPONENT_COLOR0] = MT8365_MUTEX_MOD_DISP_COLOR0, + [DDP_COMPONENT_DITHER0] = MT8365_MUTEX_MOD_DISP_DITHER, + [DDP_COMPONENT_DPI0] = MT8365_MUTEX_MOD_DISP_DPI0, + [DDP_COMPONENT_DSI0] = MT8365_MUTEX_MOD_DISP_DSI0, + [DDP_COMPONENT_GAMMA] = MT8365_MUTEX_MOD_DISP_GAMMA, + [DDP_COMPONENT_OVL0] = MT8365_MUTEX_MOD_DISP_OVL0, + [DDP_COMPONENT_OVL_2L0] = MT8365_MUTEX_MOD_DISP_OVL0_2L, + [DDP_COMPONENT_PWM0] = MT8365_MUTEX_MOD_DISP_PWM0, + [DDP_COMPONENT_RDMA0] = MT8365_MUTEX_MOD_DISP_RDMA0, + [DDP_COMPONENT_RDMA1] = MT8365_MUTEX_MOD_DISP_RDMA1, + [DDP_COMPONENT_WDMA0] = MT8365_MUTEX_MOD_DISP_WDMA0, +}; + static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = { [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0, @@ -399,6 +454,7 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = { .mutex_sof = mt8183_mutex_sof, .mutex_mod_reg = MT8183_MUTEX0_MOD0, .mutex_sof_reg = MT8183_MUTEX0_SOF0, + .mutex_table_mod = mt8183_mutex_table_mod, .no_clk = true, }; @@ -423,6 +479,14 @@ static const struct mtk_mutex_data mt8195_mutex_driver_data = { .mutex_sof_reg = MT8183_MUTEX0_SOF0, }; +static const struct mtk_mutex_data mt8365_mutex_driver_data = { + .mutex_mod = mt8365_mutex_mod, + .mutex_sof = mt8183_mutex_sof, + .mutex_mod_reg = MT8183_MUTEX0_MOD0, + .mutex_sof_reg = MT8183_MUTEX0_SOF0, + .no_clk = true, +}; + struct mtk_mutex *mtk_mutex_get(struct device *dev) { struct mtk_mutex_ctx *mtx = dev_get_drvdata(dev); @@ -572,6 +636,30 @@ void mtk_mutex_enable(struct mtk_mutex *mutex) } EXPORT_SYMBOL_GPL(mtk_mutex_enable); +int mtk_mutex_enable_by_cmdq(struct mtk_mutex *mutex, void *pkt) +{ + struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx, + mutex[mutex->id]); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_pkt 
*cmdq_pkt = (struct cmdq_pkt *)pkt; + + WARN_ON(&mtx->mutex[mutex->id] != mutex); + + if (!mtx->cmdq_reg.size) { + dev_err(mtx->dev, "mediatek,gce-client-reg hasn't been set"); + return -EINVAL; + } + + cmdq_pkt_write(cmdq_pkt, mtx->cmdq_reg.subsys, + mtx->addr + DISP_REG_MUTEX_EN(mutex->id), 1); + return 0; +#else + dev_err(mtx->dev, "Not support for enable MUTEX by CMDQ"); + return -ENODEV; +#endif +} +EXPORT_SYMBOL_GPL(mtk_mutex_enable_by_cmdq); + void mtk_mutex_disable(struct mtk_mutex *mutex) { struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx, @@ -606,12 +694,67 @@ void mtk_mutex_release(struct mtk_mutex *mutex) } EXPORT_SYMBOL_GPL(mtk_mutex_release); +int mtk_mutex_write_mod(struct mtk_mutex *mutex, + enum mtk_mutex_mod_index idx, bool clear) +{ + struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx, + mutex[mutex->id]); + unsigned int reg; + unsigned int offset; + + WARN_ON(&mtx->mutex[mutex->id] != mutex); + + if (idx < MUTEX_MOD_IDX_MDP_RDMA0 || + idx >= MUTEX_MOD_IDX_MAX) { + dev_err(mtx->dev, "Not supported MOD table index : %d", idx); + return -EINVAL; + } + + offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg, + mutex->id); + reg = readl_relaxed(mtx->regs + offset); + + if (clear) + reg &= ~BIT(mtx->data->mutex_table_mod[idx]); + else + reg |= BIT(mtx->data->mutex_table_mod[idx]); + + writel_relaxed(reg, mtx->regs + offset); + + return 0; +} +EXPORT_SYMBOL_GPL(mtk_mutex_write_mod); + +int mtk_mutex_write_sof(struct mtk_mutex *mutex, + enum mtk_mutex_sof_index idx) +{ + struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx, + mutex[mutex->id]); + + WARN_ON(&mtx->mutex[mutex->id] != mutex); + + if (idx < MUTEX_SOF_IDX_SINGLE_MODE || + idx >= MUTEX_SOF_IDX_MAX) { + dev_err(mtx->dev, "Not supported SOF index : %d", idx); + return -EINVAL; + } + + writel_relaxed(idx, mtx->regs + + DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg, mutex->id)); + + return 0; +} +EXPORT_SYMBOL_GPL(mtk_mutex_write_sof); + static int mtk_mutex_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_mutex_ctx *mtx; struct resource *regs; int i; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + int ret; +#endif mtx = devm_kzalloc(dev, sizeof(*mtx), GFP_KERNEL); if (!mtx) @@ -631,12 +774,18 @@ static int mtk_mutex_probe(struct platform_device *pdev) } } - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mtx->regs = devm_ioremap_resource(dev, regs); + mtx->regs = devm_platform_get_and_ioremap_resource(pdev, 0, ®s); if (IS_ERR(mtx->regs)) { dev_err(dev, "Failed to map mutex registers\n"); return PTR_ERR(mtx->regs); } + mtx->addr = regs->start; + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &mtx->cmdq_reg, 0); + if (ret) + dev_dbg(dev, "No mediatek,gce-client-reg!\n"); +#endif platform_set_drvdata(pdev, mtx); @@ -665,6 +814,8 @@ static const struct of_device_id mutex_driver_dt_match[] = { .data = &mt8192_mutex_driver_data}, { .compatible = "mediatek,mt8195-disp-mutex", .data = &mt8195_mutex_driver_data}, + { .compatible = "mediatek,mt8365-disp-mutex", + .data = &mt8365_mutex_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mutex_driver_dt_match); diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index 5ced254b082b..9734f1091c69 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -16,6 +16,7 @@ #include <linux/regulator/consumer.h> #include <linux/soc/mediatek/infracfg.h> +#include "mt6795-pm-domains.h" #include "mt8167-pm-domains.h" 
#include "mt8173-pm-domains.h" #include "mt8183-pm-domains.h" @@ -428,6 +429,9 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret); goto err_put_subsys_clocks; } + + if (MTK_SCPD_CAPS(pd, MTK_SCPD_ALWAYS_ON)) + pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON; } if (scpsys->domains[id]) { @@ -556,6 +560,10 @@ static void scpsys_domain_cleanup(struct scpsys *scpsys) static const struct of_device_id scpsys_of_match[] = { { + .compatible = "mediatek,mt6795-power-controller", + .data = &mt6795_scpsys_data, + }, + { .compatible = "mediatek,mt8167-power-controller", .data = &mt8167_scpsys_data, }, diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h index daa24e890dd4..7d3c0c36316c 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.h +++ b/drivers/soc/mediatek/mtk-pm-domains.h @@ -8,6 +8,8 @@ #define MTK_SCPD_SRAM_ISO BIT(2) #define MTK_SCPD_KEEP_DEFAULT_OFF BIT(3) #define MTK_SCPD_DOMAIN_SUPPLY BIT(4) +/* can't set MTK_SCPD_KEEP_DEFAULT_OFF at the same time */ +#define MTK_SCPD_ALWAYS_ON BIT(5) #define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x)) #define SPM_VDE_PWR_CON 0x0210 diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index bf39a64f3ecc..d8cb0f833645 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -13,6 +13,9 @@ #include <linux/regmap.h> #include <linux/reset.h> +#define PWRAP_POLL_DELAY_US 10 +#define PWRAP_POLL_TIMEOUT_US 10000 + #define PWRAP_MT8135_BRIDGE_IORD_ARB_EN 0x4 #define PWRAP_MT8135_BRIDGE_WACS3_EN 0x10 #define PWRAP_MT8135_BRIDGE_INIT_DONE3 0x14 @@ -1140,12 +1143,9 @@ enum pwrap_type { }; struct pmic_wrapper; -struct pwrap_slv_type { - const u32 *dew_regs; - enum pmic_type type; + +struct pwrap_slv_regops { const struct regmap_config *regmap; - /* Flags indicating the capability for the target slave */ - u32 caps; /* * pwrap operations are highly associated with the PMIC types, * so the pointers added increases flexibility allowing determination @@ -1155,6 +1155,14 @@ struct pwrap_slv_type { int (*pwrap_write)(struct pmic_wrapper *wrp, u32 adr, u32 wdata); }; +struct pwrap_slv_type { + const u32 *dew_regs; + enum pmic_type type; + const struct pwrap_slv_regops *regops; + /* Flags indicating the capability for the target slave */ + u32 caps; +}; + struct pmic_wrapper { struct device *dev; void __iomem *base; @@ -1241,27 +1249,14 @@ static bool pwrap_is_fsm_idle_and_sync_idle(struct pmic_wrapper *wrp) (val & PWRAP_STATE_SYNC_IDLE0); } -static int pwrap_wait_for_state(struct pmic_wrapper *wrp, - bool (*fp)(struct pmic_wrapper *)) -{ - unsigned long timeout; - - timeout = jiffies + usecs_to_jiffies(10000); - - do { - if (time_after(jiffies, timeout)) - return fp(wrp) ? 
0 : -ETIMEDOUT; - if (fp(wrp)) - return 0; - } while (1); -} - static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) { + bool tmp; int ret; u32 val; - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { pwrap_leave_fsm_vldclr(wrp); return ret; @@ -1273,7 +1268,8 @@ static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) val = (adr >> 1) << 16; pwrap_writel(wrp, val, PWRAP_WACS2_CMD); - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr); + ret = readx_poll_timeout(pwrap_is_fsm_vldclr, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) return ret; @@ -1290,11 +1286,14 @@ static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) { + bool tmp; int ret, msb; *rdata = 0; for (msb = 0; msb < 2; msb++) { - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); + if (ret) { pwrap_leave_fsm_vldclr(wrp); return ret; @@ -1303,7 +1302,8 @@ static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) pwrap_writel(wrp, ((msb << 30) | (adr << 16)), PWRAP_WACS2_CMD); - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr); + ret = readx_poll_timeout(pwrap_is_fsm_vldclr, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) return ret; @@ -1318,14 +1318,16 @@ static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) { - return wrp->slave->pwrap_read(wrp, adr, rdata); + return wrp->slave->regops->pwrap_read(wrp, adr, rdata); } static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata) { + bool tmp; int ret; - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { pwrap_leave_fsm_vldclr(wrp); return ret; @@ -1344,10 +1346,12 @@ static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata) static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata) { + bool tmp; int ret, msb, rdata; for (msb = 0; msb < 2; msb++) { - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { pwrap_leave_fsm_vldclr(wrp); return ret; @@ -1373,7 +1377,7 @@ static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata) static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) { - return wrp->slave->pwrap_write(wrp, adr, wdata); + return wrp->slave->regops->pwrap_write(wrp, adr, wdata); } static int pwrap_regmap_read(void *context, u32 adr, u32 *rdata) @@ -1388,6 +1392,7 @@ static int pwrap_regmap_write(void *context, u32 adr, u32 wdata) static int pwrap_reset_spislave(struct pmic_wrapper *wrp) { + bool tmp; int ret, i; pwrap_writel(wrp, 0, PWRAP_HIPRIO_ARB_EN); @@ -1407,7 +1412,8 @@ static int pwrap_reset_spislave(struct pmic_wrapper *wrp) pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS, PWRAP_MAN_CMD); - ret = pwrap_wait_for_state(wrp, pwrap_is_sync_idle); + ret = readx_poll_timeout(pwrap_is_sync_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret); return ret; @@ -1458,14 +1464,15 @@ static int 
pwrap_init_sidly(struct pmic_wrapper *wrp) static int pwrap_init_dual_io(struct pmic_wrapper *wrp) { int ret; + bool tmp; u32 rdata; /* Enable dual IO mode */ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1); /* Check IDLE & INIT_DONE in advance */ - ret = pwrap_wait_for_state(wrp, - pwrap_is_fsm_idle_and_sync_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle_and_sync_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret); return ret; @@ -1570,6 +1577,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp) static int pwrap_init_cipher(struct pmic_wrapper *wrp) { int ret; + bool tmp; u32 rdata = 0; pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST); @@ -1624,14 +1632,16 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp) } /* wait for cipher data ready@AP */ - ret = pwrap_wait_for_state(wrp, pwrap_is_cipher_ready); + ret = readx_poll_timeout(pwrap_is_cipher_ready, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "cipher data ready@AP fail, ret=%d\n", ret); return ret; } /* wait for cipher data ready@PMIC */ - ret = pwrap_wait_for_state(wrp, pwrap_is_pmic_cipher_ready); + ret = readx_poll_timeout(pwrap_is_pmic_cipher_ready, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "timeout waiting for cipher data ready@PMIC\n"); @@ -1640,7 +1650,8 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp) /* wait for cipher mode idle */ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_MODE], 0x1); - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle_and_sync_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "cipher mode idle fail, ret=%d\n", ret); return ret; @@ -1885,99 +1896,82 @@ static const struct regmap_config pwrap_regmap_config32 = { .max_register = 0xffff, }; +static const struct pwrap_slv_regops pwrap_regops16 = { + .pwrap_read = pwrap_read16, + .pwrap_write = pwrap_write16, + .regmap = &pwrap_regmap_config16, +}; + +static const struct pwrap_slv_regops pwrap_regops32 = { + .pwrap_read = pwrap_read32, + .pwrap_write = pwrap_write32, + .regmap = &pwrap_regmap_config32, +}; + static const struct pwrap_slv_type pmic_mt6323 = { .dew_regs = mt6323_regs, .type = PMIC_MT6323, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO | PWRAP_SLV_CAP_SECURITY, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6351 = { .dew_regs = mt6351_regs, .type = PMIC_MT6351, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = 0, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6357 = { .dew_regs = mt6357_regs, .type = PMIC_MT6357, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = 0, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6358 = { .dew_regs = mt6358_regs, .type = PMIC_MT6358, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6359 = { .dew_regs = mt6359_regs, .type = PMIC_MT6359, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = PWRAP_SLV_CAP_DUALIO, 
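/*
 * Note on this refactor: the per-slave read/write callbacks and regmap
 * config now live in the two shared pwrap_slv_regops tables defined
 * above (pwrap_regops16 and pwrap_regops32), so each pwrap_slv_type
 * entry only selects the 16-bit or 32-bit variant through .regops.
 */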
- .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6380 = { .dew_regs = NULL, .type = PMIC_MT6380, - .regmap = &pwrap_regmap_config32, + .regops = &pwrap_regops32, .caps = 0, - .pwrap_read = pwrap_read32, - .pwrap_write = pwrap_write32, }; static const struct pwrap_slv_type pmic_mt6397 = { .dew_regs = mt6397_regs, .type = PMIC_MT6397, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO | PWRAP_SLV_CAP_SECURITY, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct of_device_id of_slave_match_tbl[] = { - { - .compatible = "mediatek,mt6323", - .data = &pmic_mt6323, - }, { - .compatible = "mediatek,mt6351", - .data = &pmic_mt6351, - }, { - .compatible = "mediatek,mt6357", - .data = &pmic_mt6357, - }, { - .compatible = "mediatek,mt6358", - .data = &pmic_mt6358, - }, { - .compatible = "mediatek,mt6359", - .data = &pmic_mt6359, - }, { - /* The MT6380 PMIC only implements a regulator, so we bind it - * directly instead of using a MFD. - */ - .compatible = "mediatek,mt6380-regulator", - .data = &pmic_mt6380, - }, { - .compatible = "mediatek,mt6397", - .data = &pmic_mt6397, - }, { - /* sentinel */ - } + { .compatible = "mediatek,mt6323", .data = &pmic_mt6323 }, + { .compatible = "mediatek,mt6351", .data = &pmic_mt6351 }, + { .compatible = "mediatek,mt6357", .data = &pmic_mt6357 }, + { .compatible = "mediatek,mt6358", .data = &pmic_mt6358 }, + { .compatible = "mediatek,mt6359", .data = &pmic_mt6359 }, + + /* The MT6380 PMIC only implements a regulator, so we bind it + * directly instead of using a MFD. + */ + { .compatible = "mediatek,mt6380-regulator", .data = &pmic_mt6380 }, + { .compatible = "mediatek,mt6397", .data = &pmic_mt6397 }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_slave_match_tbl); @@ -2136,45 +2130,19 @@ static struct pmic_wrapper_type pwrap_mt8186 = { }; static const struct of_device_id of_pwrap_match_tbl[] = { - { - .compatible = "mediatek,mt2701-pwrap", - .data = &pwrap_mt2701, - }, { - .compatible = "mediatek,mt6765-pwrap", - .data = &pwrap_mt6765, - }, { - .compatible = "mediatek,mt6779-pwrap", - .data = &pwrap_mt6779, - }, { - .compatible = "mediatek,mt6797-pwrap", - .data = &pwrap_mt6797, - }, { - .compatible = "mediatek,mt6873-pwrap", - .data = &pwrap_mt6873, - }, { - .compatible = "mediatek,mt7622-pwrap", - .data = &pwrap_mt7622, - }, { - .compatible = "mediatek,mt8135-pwrap", - .data = &pwrap_mt8135, - }, { - .compatible = "mediatek,mt8173-pwrap", - .data = &pwrap_mt8173, - }, { - .compatible = "mediatek,mt8183-pwrap", - .data = &pwrap_mt8183, - }, { - .compatible = "mediatek,mt8186-pwrap", - .data = &pwrap_mt8186, - }, { - .compatible = "mediatek,mt8195-pwrap", - .data = &pwrap_mt8195, - }, { - .compatible = "mediatek,mt8516-pwrap", - .data = &pwrap_mt8516, - }, { - /* sentinel */ - } + { .compatible = "mediatek,mt2701-pwrap", .data = &pwrap_mt2701 }, + { .compatible = "mediatek,mt6765-pwrap", .data = &pwrap_mt6765 }, + { .compatible = "mediatek,mt6779-pwrap", .data = &pwrap_mt6779 }, + { .compatible = "mediatek,mt6797-pwrap", .data = &pwrap_mt6797 }, + { .compatible = "mediatek,mt6873-pwrap", .data = &pwrap_mt6873 }, + { .compatible = "mediatek,mt7622-pwrap", .data = &pwrap_mt7622 }, + { .compatible = "mediatek,mt8135-pwrap", .data = &pwrap_mt8135 }, + { .compatible = "mediatek,mt8173-pwrap", .data = &pwrap_mt8173 }, + { .compatible = "mediatek,mt8183-pwrap", .data = &pwrap_mt8183 }, + { .compatible = 
"mediatek,mt8186-pwrap", .data = &pwrap_mt8186 }, + { .compatible = "mediatek,mt8195-pwrap", .data = &pwrap_mt8195 }, + { .compatible = "mediatek,mt8516-pwrap", .data = &pwrap_mt8516 }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_pwrap_match_tbl); @@ -2185,7 +2153,6 @@ static int pwrap_probe(struct platform_device *pdev) struct pmic_wrapper *wrp; struct device_node *np = pdev->dev.of_node; const struct of_device_id *of_slave_id = NULL; - struct resource *res; if (np->child) of_slave_id = of_match_node(of_slave_match_tbl, np->child); @@ -2205,8 +2172,7 @@ static int pwrap_probe(struct platform_device *pdev) wrp->slave = of_slave_id->data; wrp->dev = &pdev->dev; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrap"); - wrp->base = devm_ioremap_resource(wrp->dev, res); + wrp->base = devm_platform_ioremap_resource_byname(pdev, "pwrap"); if (IS_ERR(wrp->base)) return PTR_ERR(wrp->base); @@ -2220,9 +2186,7 @@ static int pwrap_probe(struct platform_device *pdev) } if (HAS_CAP(wrp->master->caps, PWRAP_CAP_BRIDGE)) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "pwrap-bridge"); - wrp->bridge_base = devm_ioremap_resource(wrp->dev, res); + wrp->bridge_base = devm_platform_ioremap_resource_byname(pdev, "pwrap-bridge"); if (IS_ERR(wrp->bridge_base)) return PTR_ERR(wrp->bridge_base); @@ -2315,13 +2279,18 @@ static int pwrap_probe(struct platform_device *pdev) pwrap_writel(wrp, wrp->master->int1_en_all, PWRAP_INT1_EN); irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto err_out2; + } + ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt, IRQF_TRIGGER_HIGH, "mt-pmic-pwrap", wrp); if (ret) goto err_out2; - wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regmap); + wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regops->regmap); if (IS_ERR(wrp->regmap)) { ret = PTR_ERR(wrp->regmap); goto err_out2; diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c new file mode 100644 index 000000000000..dee8664a12fd --- /dev/null +++ b/drivers/soc/mediatek/mtk-svs.c @@ -0,0 +1,2403 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 MediaTek Inc. 
+ */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/cpuidle.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/nvmem-consumer.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/thermal.h> + +/* svs bank 1-line software id */ +#define SVSB_CPU_LITTLE BIT(0) +#define SVSB_CPU_BIG BIT(1) +#define SVSB_CCI BIT(2) +#define SVSB_GPU BIT(3) + +/* svs bank 2-line type */ +#define SVSB_LOW BIT(8) +#define SVSB_HIGH BIT(9) + +/* svs bank mode support */ +#define SVSB_MODE_ALL_DISABLE 0 +#define SVSB_MODE_INIT01 BIT(1) +#define SVSB_MODE_INIT02 BIT(2) +#define SVSB_MODE_MON BIT(3) + +/* svs bank volt flags */ +#define SVSB_INIT01_PD_REQ BIT(0) +#define SVSB_INIT01_VOLT_IGNORE BIT(1) +#define SVSB_INIT01_VOLT_INC_ONLY BIT(2) +#define SVSB_MON_VOLT_IGNORE BIT(16) +#define SVSB_REMOVE_DVTFIXED_VOLT BIT(24) + +/* svs bank register common configuration */ +#define SVSB_DET_MAX 0xffff +#define SVSB_DET_WINDOW 0xa28 +#define SVSB_DTHI 0x1 +#define SVSB_DTLO 0xfe +#define SVSB_EN_INIT01 0x1 +#define SVSB_EN_INIT02 0x5 +#define SVSB_EN_MON 0x2 +#define SVSB_EN_OFF 0x0 +#define SVSB_INTEN_INIT0x 0x00005f01 +#define SVSB_INTEN_MONVOPEN 0x00ff0000 +#define SVSB_INTSTS_CLEAN 0x00ffffff +#define SVSB_INTSTS_COMPLETE 0x1 +#define SVSB_INTSTS_MONVOP 0x00ff0000 +#define SVSB_RUNCONFIG_DEFAULT 0x80000000 + +/* svs bank related setting */ +#define BITS8 8 +#define MAX_OPP_ENTRIES 16 +#define REG_BYTES 4 +#define SVSB_DC_SIGNED_BIT BIT(15) +#define SVSB_DET_CLK_EN BIT(31) +#define SVSB_TEMP_LOWER_BOUND 0xb2 +#define SVSB_TEMP_UPPER_BOUND 0x64 + +static DEFINE_SPINLOCK(svs_lock); + +#define debug_fops_ro(name) \ + static int svs_##name##_debug_open(struct inode *inode, \ + struct file *filp) \ + { \ + return single_open(filp, svs_##name##_debug_show, \ + inode->i_private); \ + } \ + static const struct file_operations svs_##name##_debug_fops = { \ + .owner = THIS_MODULE, \ + .open = svs_##name##_debug_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +#define debug_fops_rw(name) \ + static int svs_##name##_debug_open(struct inode *inode, \ + struct file *filp) \ + { \ + return single_open(filp, svs_##name##_debug_show, \ + inode->i_private); \ + } \ + static const struct file_operations svs_##name##_debug_fops = { \ + .owner = THIS_MODULE, \ + .open = svs_##name##_debug_open, \ + .read = seq_read, \ + .write = svs_##name##_debug_write, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +#define svs_dentry_data(name) {__stringify(name), &svs_##name##_debug_fops} + +/** + * enum svsb_phase - svs bank phase enumeration + * @SVSB_PHASE_ERROR: svs bank encounters unexpected condition + * @SVSB_PHASE_INIT01: svs bank basic init for data calibration + * @SVSB_PHASE_INIT02: svs bank can provide voltages to opp table + * @SVSB_PHASE_MON: svs bank can provide voltages with thermal effect + * @SVSB_PHASE_MAX: total number of svs bank phase (debug purpose) + * + * Each svs bank has its own independent phase and we 
enable each svs bank by + * running their phase orderly. However, when svs bank encounters unexpected + * condition, it will fire an irq (PHASE_ERROR) to inform svs software. + * + * svs bank general phase-enabled order: + * SVSB_PHASE_INIT01 -> SVSB_PHASE_INIT02 -> SVSB_PHASE_MON + */ +enum svsb_phase { + SVSB_PHASE_ERROR = 0, + SVSB_PHASE_INIT01, + SVSB_PHASE_INIT02, + SVSB_PHASE_MON, + SVSB_PHASE_MAX, +}; + +enum svs_reg_index { + DESCHAR = 0, + TEMPCHAR, + DETCHAR, + AGECHAR, + DCCONFIG, + AGECONFIG, + FREQPCT30, + FREQPCT74, + LIMITVALS, + VBOOT, + DETWINDOW, + CONFIG, + TSCALCS, + RUNCONFIG, + SVSEN, + INIT2VALS, + DCVALUES, + AGEVALUES, + VOP30, + VOP74, + TEMP, + INTSTS, + INTSTSRAW, + INTEN, + CHKINT, + CHKSHIFT, + STATUS, + VDESIGN30, + VDESIGN74, + DVT30, + DVT74, + AGECOUNT, + SMSTATE0, + SMSTATE1, + CTL0, + DESDETSEC, + TEMPAGESEC, + CTRLSPARE0, + CTRLSPARE1, + CTRLSPARE2, + CTRLSPARE3, + CORESEL, + THERMINTST, + INTST, + THSTAGE0ST, + THSTAGE1ST, + THSTAGE2ST, + THAHBST0, + THAHBST1, + SPARE0, + SPARE1, + SPARE2, + SPARE3, + THSLPEVEB, + SVS_REG_MAX, +}; + +static const u32 svs_regs_v2[] = { + [DESCHAR] = 0xc00, + [TEMPCHAR] = 0xc04, + [DETCHAR] = 0xc08, + [AGECHAR] = 0xc0c, + [DCCONFIG] = 0xc10, + [AGECONFIG] = 0xc14, + [FREQPCT30] = 0xc18, + [FREQPCT74] = 0xc1c, + [LIMITVALS] = 0xc20, + [VBOOT] = 0xc24, + [DETWINDOW] = 0xc28, + [CONFIG] = 0xc2c, + [TSCALCS] = 0xc30, + [RUNCONFIG] = 0xc34, + [SVSEN] = 0xc38, + [INIT2VALS] = 0xc3c, + [DCVALUES] = 0xc40, + [AGEVALUES] = 0xc44, + [VOP30] = 0xc48, + [VOP74] = 0xc4c, + [TEMP] = 0xc50, + [INTSTS] = 0xc54, + [INTSTSRAW] = 0xc58, + [INTEN] = 0xc5c, + [CHKINT] = 0xc60, + [CHKSHIFT] = 0xc64, + [STATUS] = 0xc68, + [VDESIGN30] = 0xc6c, + [VDESIGN74] = 0xc70, + [DVT30] = 0xc74, + [DVT74] = 0xc78, + [AGECOUNT] = 0xc7c, + [SMSTATE0] = 0xc80, + [SMSTATE1] = 0xc84, + [CTL0] = 0xc88, + [DESDETSEC] = 0xce0, + [TEMPAGESEC] = 0xce4, + [CTRLSPARE0] = 0xcf0, + [CTRLSPARE1] = 0xcf4, + [CTRLSPARE2] = 0xcf8, + [CTRLSPARE3] = 0xcfc, + [CORESEL] = 0xf00, + [THERMINTST] = 0xf04, + [INTST] = 0xf08, + [THSTAGE0ST] = 0xf0c, + [THSTAGE1ST] = 0xf10, + [THSTAGE2ST] = 0xf14, + [THAHBST0] = 0xf18, + [THAHBST1] = 0xf1c, + [SPARE0] = 0xf20, + [SPARE1] = 0xf24, + [SPARE2] = 0xf28, + [SPARE3] = 0xf2c, + [THSLPEVEB] = 0xf30, +}; + +/** + * struct svs_platform - svs platform control + * @name: svs platform name + * @base: svs platform register base + * @dev: svs platform device + * @main_clk: main clock for svs bank + * @pbank: svs bank pointer needing to be protected by spin_lock section + * @banks: svs banks that svs platform supports + * @rst: svs platform reset control + * @efuse_parsing: svs platform efuse parsing function pointer + * @probe: svs platform probe function pointer + * @irqflags: svs platform irq settings flags + * @efuse_max: total number of svs efuse + * @tefuse_max: total number of thermal efuse + * @regs: svs platform registers map + * @bank_max: total number of svs banks + * @efuse: svs efuse data received from NVMEM framework + * @tefuse: thermal efuse data received from NVMEM framework + */ +struct svs_platform { + char *name; + void __iomem *base; + struct device *dev; + struct clk *main_clk; + struct svs_bank *pbank; + struct svs_bank *banks; + struct reset_control *rst; + bool (*efuse_parsing)(struct svs_platform *svsp); + int (*probe)(struct svs_platform *svsp); + unsigned long irqflags; + size_t efuse_max; + size_t tefuse_max; + const u32 *regs; + u32 bank_max; + u32 *efuse; + u32 *tefuse; +}; + +struct svs_platform_data { + char *name; 
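+
+	/*
+	 * A note on the split above: svs_platform_data carries the static,
+	 * per-SoC subset of struct svs_platform (name, banks, register map,
+	 * efuse parser and probe hook) from which the live svs_platform
+	 * instance is populated at probe time.
+	 */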
+	struct svs_bank *banks;
+	bool (*efuse_parsing)(struct svs_platform *svsp);
+	int (*probe)(struct svs_platform *svsp);
+	unsigned long irqflags;
+	const u32 *regs;
+	u32 bank_max;
+};
+
+/**
+ * struct svs_bank - svs bank representation
+ * @dev: bank device
+ * @opp_dev: device for opp table/buck control
+ * @init_completion: the timeout completion for bank init
+ * @buck: regulator used by opp_dev
+ * @tzd: thermal zone device for getting temperature
+ * @lock: mutex lock to protect voltage update process
+ * @set_freq_pct: function pointer to set bank frequency percent table
+ * @get_volts: function pointer to get bank voltages
+ * @name: bank name
+ * @buck_name: regulator name
+ * @tzone_name: thermal zone name
+ * @phase: bank current phase
+ * @volt_od: bank voltage overdrive
+ * @reg_data: bank register data in different phases for debug purposes
+ * @pm_runtime_enabled_count: bank pm runtime enabled count
+ * @mode_support: bank mode support
+ * @freq_base: reference frequency for bank init
+ * @turn_freq_base: reference frequency for 2-line turn point
+ * @vboot: voltage request for bank init01 only
+ * @opp_dfreq: default opp frequency table
+ * @opp_dvolt: default opp voltage table
+ * @freq_pct: frequency percent table for bank init
+ * @volt: bank voltage table
+ * @volt_step: bank voltage step
+ * @volt_base: bank voltage base
+ * @volt_flags: bank voltage flags
+ * @vmax: bank voltage maximum
+ * @vmin: bank voltage minimum
+ * @age_config: bank age configuration
+ * @age_voffset_in: bank age voltage offset
+ * @dc_config: bank dc configuration
+ * @dc_voffset_in: bank dc voltage offset
+ * @dvt_fixed: bank dvt fixed value
+ * @vco: bank VCO value
+ * @chk_shift: bank chicken shift
+ * @core_sel: bank selection
+ * @opp_count: bank opp count
+ * @int_st: bank interrupt identification
+ * @sw_id: bank software identification
+ * @cpu_id: cpu core id for SVS CPU bank use only
+ * @ctl0: TS-x selection
+ * @temp: bank temperature
+ * @tzone_htemp: thermal zone high temperature threshold
+ * @tzone_htemp_voffset: thermal zone high temperature voltage offset
+ * @tzone_ltemp: thermal zone low temperature threshold
+ * @tzone_ltemp_voffset: thermal zone low temperature voltage offset
+ * @bts: svs efuse data
+ * @mts: svs efuse data
+ * @bdes: svs efuse data
+ * @mdes: svs efuse data
+ * @mtdes: svs efuse data
+ * @dcbdet: svs efuse data
+ * @dcmdet: svs efuse data
+ * @turn_pt: 2-line turn point telling which opp_volt entries are calculated
+ *           by the high or low bank
+ * @type: bank type indicating whether this is a 2-line (high/low) bank or a
+ *        1-line bank
+ *
+ * Each svs bank generates suitable voltages by the general math equation
+ * below and provides these voltages to the opp voltage table.
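+ *
+ * As a worked example of the equation below, assuming typical per-bank
+ * values volt_step = 6250 and volt_base = 500000 (both in uV): a bank
+ * reading volt[i] = 0x30 (48) maps to 48 * 6250 + 500000 = 800000 uV.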
+ * + * opp_volt[i] = (volt[i] * volt_step) + volt_base; + */ +struct svs_bank { + struct device *dev; + struct device *opp_dev; + struct completion init_completion; + struct regulator *buck; + struct thermal_zone_device *tzd; + struct mutex lock; /* lock to protect voltage update process */ + void (*set_freq_pct)(struct svs_platform *svsp); + void (*get_volts)(struct svs_platform *svsp); + char *name; + char *buck_name; + char *tzone_name; + enum svsb_phase phase; + s32 volt_od; + u32 reg_data[SVSB_PHASE_MAX][SVS_REG_MAX]; + u32 pm_runtime_enabled_count; + u32 mode_support; + u32 freq_base; + u32 turn_freq_base; + u32 vboot; + u32 opp_dfreq[MAX_OPP_ENTRIES]; + u32 opp_dvolt[MAX_OPP_ENTRIES]; + u32 freq_pct[MAX_OPP_ENTRIES]; + u32 volt[MAX_OPP_ENTRIES]; + u32 volt_step; + u32 volt_base; + u32 volt_flags; + u32 vmax; + u32 vmin; + u32 age_config; + u32 age_voffset_in; + u32 dc_config; + u32 dc_voffset_in; + u32 dvt_fixed; + u32 vco; + u32 chk_shift; + u32 core_sel; + u32 opp_count; + u32 int_st; + u32 sw_id; + u32 cpu_id; + u32 ctl0; + u32 temp; + u32 tzone_htemp; + u32 tzone_htemp_voffset; + u32 tzone_ltemp; + u32 tzone_ltemp_voffset; + u32 bts; + u32 mts; + u32 bdes; + u32 mdes; + u32 mtdes; + u32 dcbdet; + u32 dcmdet; + u32 turn_pt; + u32 type; +}; + +static u32 percent(u32 numerator, u32 denominator) +{ + /* If not divide 1000, "numerator * 100" will have data overflow. */ + numerator /= 1000; + denominator /= 1000; + + return DIV_ROUND_UP(numerator * 100, denominator); +} + +static u32 svs_readl_relaxed(struct svs_platform *svsp, enum svs_reg_index rg_i) +{ + return readl_relaxed(svsp->base + svsp->regs[rg_i]); +} + +static void svs_writel_relaxed(struct svs_platform *svsp, u32 val, + enum svs_reg_index rg_i) +{ + writel_relaxed(val, svsp->base + svsp->regs[rg_i]); +} + +static void svs_switch_bank(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + svs_writel_relaxed(svsp, svsb->core_sel, CORESEL); +} + +static u32 svs_bank_volt_to_opp_volt(u32 svsb_volt, u32 svsb_volt_step, + u32 svsb_volt_base) +{ + return (svsb_volt * svsb_volt_step) + svsb_volt_base; +} + +static u32 svs_opp_volt_to_bank_volt(u32 opp_u_volt, u32 svsb_volt_step, + u32 svsb_volt_base) +{ + return (opp_u_volt - svsb_volt_base) / svsb_volt_step; +} + +static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb) +{ + struct dev_pm_opp *opp; + u32 i, opp_u_volt; + + for (i = 0; i < svsb->opp_count; i++) { + opp = dev_pm_opp_find_freq_exact(svsb->opp_dev, + svsb->opp_dfreq[i], + true); + if (IS_ERR(opp)) { + dev_err(svsb->dev, "cannot find freq = %u (%ld)\n", + svsb->opp_dfreq[i], PTR_ERR(opp)); + return PTR_ERR(opp); + } + + opp_u_volt = dev_pm_opp_get_voltage(opp); + svsb->volt[i] = svs_opp_volt_to_bank_volt(opp_u_volt, + svsb->volt_step, + svsb->volt_base); + dev_pm_opp_put(opp); + } + + return 0; +} + +static int svs_adjust_pm_opp_volts(struct svs_bank *svsb) +{ + int ret = -EPERM, tzone_temp = 0; + u32 i, svsb_volt, opp_volt, temp_voffset = 0, opp_start, opp_stop; + + mutex_lock(&svsb->lock); + + /* + * 2-line bank updates its corresponding opp volts. + * 1-line bank updates all opp volts. 
+ */ + if (svsb->type == SVSB_HIGH) { + opp_start = 0; + opp_stop = svsb->turn_pt; + } else if (svsb->type == SVSB_LOW) { + opp_start = svsb->turn_pt; + opp_stop = svsb->opp_count; + } else { + opp_start = 0; + opp_stop = svsb->opp_count; + } + + /* Get thermal effect */ + if (svsb->phase == SVSB_PHASE_MON) { + ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp); + if (ret || (svsb->temp > SVSB_TEMP_UPPER_BOUND && + svsb->temp < SVSB_TEMP_LOWER_BOUND)) { + dev_err(svsb->dev, "%s: %d (0x%x), run default volts\n", + svsb->tzone_name, ret, svsb->temp); + svsb->phase = SVSB_PHASE_ERROR; + } + + if (tzone_temp >= svsb->tzone_htemp) + temp_voffset += svsb->tzone_htemp_voffset; + else if (tzone_temp <= svsb->tzone_ltemp) + temp_voffset += svsb->tzone_ltemp_voffset; + + /* 2-line bank update all opp volts when running mon mode */ + if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) { + opp_start = 0; + opp_stop = svsb->opp_count; + } + } + + /* vmin <= svsb_volt (opp_volt) <= default opp voltage */ + for (i = opp_start; i < opp_stop; i++) { + switch (svsb->phase) { + case SVSB_PHASE_ERROR: + opp_volt = svsb->opp_dvolt[i]; + break; + case SVSB_PHASE_INIT01: + /* do nothing */ + goto unlock_mutex; + case SVSB_PHASE_INIT02: + svsb_volt = max(svsb->volt[i], svsb->vmin); + opp_volt = svs_bank_volt_to_opp_volt(svsb_volt, + svsb->volt_step, + svsb->volt_base); + break; + case SVSB_PHASE_MON: + svsb_volt = max(svsb->volt[i] + temp_voffset, svsb->vmin); + opp_volt = svs_bank_volt_to_opp_volt(svsb_volt, + svsb->volt_step, + svsb->volt_base); + break; + default: + dev_err(svsb->dev, "unknown phase: %u\n", svsb->phase); + ret = -EINVAL; + goto unlock_mutex; + } + + opp_volt = min(opp_volt, svsb->opp_dvolt[i]); + ret = dev_pm_opp_adjust_voltage(svsb->opp_dev, + svsb->opp_dfreq[i], + opp_volt, opp_volt, + svsb->opp_dvolt[i]); + if (ret) { + dev_err(svsb->dev, "set %uuV fail: %d\n", + opp_volt, ret); + goto unlock_mutex; + } + } + +unlock_mutex: + mutex_unlock(&svsb->lock); + + return ret; +} + +static int svs_dump_debug_show(struct seq_file *m, void *p) +{ + struct svs_platform *svsp = (struct svs_platform *)m->private; + struct svs_bank *svsb; + unsigned long svs_reg_addr; + u32 idx, i, j, bank_id; + + for (i = 0; i < svsp->efuse_max; i++) + if (svsp->efuse && svsp->efuse[i]) + seq_printf(m, "M_HW_RES%d = 0x%08x\n", + i, svsp->efuse[i]); + + for (i = 0; i < svsp->tefuse_max; i++) + if (svsp->tefuse) + seq_printf(m, "THERMAL_EFUSE%d = 0x%08x\n", + i, svsp->tefuse[i]); + + for (bank_id = 0, idx = 0; idx < svsp->bank_max; idx++, bank_id++) { + svsb = &svsp->banks[idx]; + + for (i = SVSB_PHASE_INIT01; i <= SVSB_PHASE_MON; i++) { + seq_printf(m, "Bank_number = %u\n", bank_id); + + if (i == SVSB_PHASE_INIT01 || i == SVSB_PHASE_INIT02) + seq_printf(m, "mode = init%d\n", i); + else if (i == SVSB_PHASE_MON) + seq_puts(m, "mode = mon\n"); + else + seq_puts(m, "mode = error\n"); + + for (j = DESCHAR; j < SVS_REG_MAX; j++) { + svs_reg_addr = (unsigned long)(svsp->base + + svsp->regs[j]); + seq_printf(m, "0x%08lx = 0x%08x\n", + svs_reg_addr, svsb->reg_data[i][j]); + } + } + } + + return 0; +} + +debug_fops_ro(dump); + +static int svs_enable_debug_show(struct seq_file *m, void *v) +{ + struct svs_bank *svsb = (struct svs_bank *)m->private; + + switch (svsb->phase) { + case SVSB_PHASE_ERROR: + seq_puts(m, "disabled\n"); + break; + case SVSB_PHASE_INIT01: + seq_puts(m, "init1\n"); + break; + case SVSB_PHASE_INIT02: + seq_puts(m, "init2\n"); + break; + case SVSB_PHASE_MON: + seq_puts(m, "mon mode\n"); + break; + default: + 
seq_puts(m, "unknown\n"); + break; + } + + return 0; +} + +static ssize_t svs_enable_debug_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *pos) +{ + struct svs_bank *svsb = file_inode(filp)->i_private; + struct svs_platform *svsp = dev_get_drvdata(svsb->dev); + unsigned long flags; + int enabled, ret; + char *buf = NULL; + + if (count >= PAGE_SIZE) + return -EINVAL; + + buf = (char *)memdup_user_nul(buffer, count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + ret = kstrtoint(buf, 10, &enabled); + if (ret) + return ret; + + if (!enabled) { + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svsb->mode_support = SVSB_MODE_ALL_DISABLE; + svs_switch_bank(svsp); + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); + spin_unlock_irqrestore(&svs_lock, flags); + + svsb->phase = SVSB_PHASE_ERROR; + svs_adjust_pm_opp_volts(svsb); + } + + kfree(buf); + + return count; +} + +debug_fops_rw(enable); + +static int svs_status_debug_show(struct seq_file *m, void *v) +{ + struct svs_bank *svsb = (struct svs_bank *)m->private; + struct dev_pm_opp *opp; + int tzone_temp = 0, ret; + u32 i; + + ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp); + if (ret) + seq_printf(m, "%s: temperature ignore, turn_pt = %u\n", + svsb->name, svsb->turn_pt); + else + seq_printf(m, "%s: temperature = %d, turn_pt = %u\n", + svsb->name, tzone_temp, svsb->turn_pt); + + for (i = 0; i < svsb->opp_count; i++) { + opp = dev_pm_opp_find_freq_exact(svsb->opp_dev, + svsb->opp_dfreq[i], true); + if (IS_ERR(opp)) { + seq_printf(m, "%s: cannot find freq = %u (%ld)\n", + svsb->name, svsb->opp_dfreq[i], + PTR_ERR(opp)); + return PTR_ERR(opp); + } + + seq_printf(m, "opp_freq[%02u]: %u, opp_volt[%02u]: %lu, ", + i, svsb->opp_dfreq[i], i, + dev_pm_opp_get_voltage(opp)); + seq_printf(m, "svsb_volt[%02u]: 0x%x, freq_pct[%02u]: %u\n", + i, svsb->volt[i], i, svsb->freq_pct[i]); + dev_pm_opp_put(opp); + } + + return 0; +} + +debug_fops_ro(status); + +static int svs_create_debug_cmds(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct dentry *svs_dir, *svsb_dir, *file_entry; + const char *d = "/sys/kernel/debug/svs"; + u32 i, idx; + + struct svs_dentry { + const char *name; + const struct file_operations *fops; + }; + + struct svs_dentry svs_entries[] = { + svs_dentry_data(dump), + }; + + struct svs_dentry svsb_entries[] = { + svs_dentry_data(enable), + svs_dentry_data(status), + }; + + svs_dir = debugfs_create_dir("svs", NULL); + if (IS_ERR(svs_dir)) { + dev_err(svsp->dev, "cannot create %s: %ld\n", + d, PTR_ERR(svs_dir)); + return PTR_ERR(svs_dir); + } + + for (i = 0; i < ARRAY_SIZE(svs_entries); i++) { + file_entry = debugfs_create_file(svs_entries[i].name, 0664, + svs_dir, svsp, + svs_entries[i].fops); + if (IS_ERR(file_entry)) { + dev_err(svsp->dev, "cannot create %s/%s: %ld\n", + d, svs_entries[i].name, PTR_ERR(file_entry)); + return PTR_ERR(file_entry); + } + } + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (svsb->mode_support == SVSB_MODE_ALL_DISABLE) + continue; + + svsb_dir = debugfs_create_dir(svsb->name, svs_dir); + if (IS_ERR(svsb_dir)) { + dev_err(svsp->dev, "cannot create %s/%s: %ld\n", + d, svsb->name, PTR_ERR(svsb_dir)); + return PTR_ERR(svsb_dir); + } + + for (i = 0; i < ARRAY_SIZE(svsb_entries); i++) { + file_entry = debugfs_create_file(svsb_entries[i].name, + 0664, svsb_dir, svsb, + svsb_entries[i].fops); + if (IS_ERR(file_entry)) { + dev_err(svsp->dev, "no %s/%s/%s?: %ld\n", + d, svsb->name, 
svsb_entries[i].name, + PTR_ERR(file_entry)); + return PTR_ERR(file_entry); + } + } + } + + return 0; +} + +static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx) +{ + u32 vx; + + if (v0 == v1 || f0 == f1) + return v0; + + /* *100 to have decimal fraction factor */ + vx = (v0 * 100) - ((((v0 - v1) * 100) / (f0 - f1)) * (f0 - fx)); + + return DIV_ROUND_UP(vx, 100); +} + +static void svs_get_bank_volts_v3(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + u32 i, j, *vop, vop74, vop30, turn_pt = svsb->turn_pt; + u32 b_sft, shift_byte = 0, opp_start = 0, opp_stop = 0; + u32 middle_index = (svsb->opp_count / 2); + + if (svsb->phase == SVSB_PHASE_MON && + svsb->volt_flags & SVSB_MON_VOLT_IGNORE) + return; + + vop74 = svs_readl_relaxed(svsp, VOP74); + vop30 = svs_readl_relaxed(svsp, VOP30); + + /* Target is to set svsb->volt[] by algorithm */ + if (turn_pt < middle_index) { + if (svsb->type == SVSB_HIGH) { + /* volt[0] ~ volt[turn_pt - 1] */ + for (i = 0; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? &vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + } else if (svsb->type == SVSB_LOW) { + /* volt[turn_pt] + volt[j] ~ volt[opp_count - 1] */ + j = svsb->opp_count - 7; + svsb->volt[turn_pt] = vop30 & GENMASK(7, 0); + shift_byte++; + for (i = j; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? &vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + + /* volt[turn_pt + 1] ~ volt[j - 1] by interpolate */ + for (i = turn_pt + 1; i < j; i++) + svsb->volt[i] = interpolate(svsb->freq_pct[turn_pt], + svsb->freq_pct[j], + svsb->volt[turn_pt], + svsb->volt[j], + svsb->freq_pct[i]); + } + } else { + if (svsb->type == SVSB_HIGH) { + /* volt[0] + volt[j] ~ volt[turn_pt - 1] */ + j = turn_pt - 7; + svsb->volt[0] = vop30 & GENMASK(7, 0); + shift_byte++; + for (i = j; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? &vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + + /* volt[1] ~ volt[j - 1] by interpolate */ + for (i = 1; i < j; i++) + svsb->volt[i] = interpolate(svsb->freq_pct[0], + svsb->freq_pct[j], + svsb->volt[0], + svsb->volt[j], + svsb->freq_pct[i]); + } else if (svsb->type == SVSB_LOW) { + /* volt[turn_pt] ~ volt[opp_count - 1] */ + for (i = turn_pt; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? 
&vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + } + } + + if (svsb->type == SVSB_HIGH) { + opp_start = 0; + opp_stop = svsb->turn_pt; + } else if (svsb->type == SVSB_LOW) { + opp_start = svsb->turn_pt; + opp_stop = svsb->opp_count; + } + + for (i = opp_start; i < opp_stop; i++) + if (svsb->volt_flags & SVSB_REMOVE_DVTFIXED_VOLT) + svsb->volt[i] -= svsb->dvt_fixed; +} + +static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + u32 i, j, *freq_pct, freq_pct74 = 0, freq_pct30 = 0; + u32 b_sft, shift_byte = 0, turn_pt; + u32 middle_index = (svsb->opp_count / 2); + + for (i = 0; i < svsb->opp_count; i++) { + if (svsb->opp_dfreq[i] <= svsb->turn_freq_base) { + svsb->turn_pt = i; + break; + } + } + + turn_pt = svsb->turn_pt; + + /* Target is to fill out freq_pct74 / freq_pct30 by algorithm */ + if (turn_pt < middle_index) { + if (svsb->type == SVSB_HIGH) { + /* + * If we don't handle this situation, + * SVSB_HIGH's FREQPCT74 / FREQPCT30 would keep "0" + * and this leads SVSB_LOW to work abnormally. + */ + if (turn_pt == 0) + freq_pct30 = svsb->freq_pct[0]; + + /* freq_pct[0] ~ freq_pct[turn_pt - 1] */ + for (i = 0; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? + &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } else if (svsb->type == SVSB_LOW) { + /* + * freq_pct[turn_pt] + + * freq_pct[opp_count - 7] ~ freq_pct[opp_count -1] + */ + freq_pct30 = svsb->freq_pct[turn_pt]; + shift_byte++; + j = svsb->opp_count - 7; + for (i = j; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? + &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } + } else { + if (svsb->type == SVSB_HIGH) { + /* + * freq_pct[0] + + * freq_pct[turn_pt - 7] ~ freq_pct[turn_pt - 1] + */ + freq_pct30 = svsb->freq_pct[0]; + shift_byte++; + j = turn_pt - 7; + for (i = j; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? + &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } else if (svsb->type == SVSB_LOW) { + /* freq_pct[turn_pt] ~ freq_pct[opp_count - 1] */ + for (i = turn_pt; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? 
+ &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } + } + + svs_writel_relaxed(svsp, freq_pct74, FREQPCT74); + svs_writel_relaxed(svsp, freq_pct30, FREQPCT30); +} + +static void svs_get_bank_volts_v2(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + u32 temp, i; + + temp = svs_readl_relaxed(svsp, VOP74); + svsb->volt[14] = (temp >> 24) & GENMASK(7, 0); + svsb->volt[12] = (temp >> 16) & GENMASK(7, 0); + svsb->volt[10] = (temp >> 8) & GENMASK(7, 0); + svsb->volt[8] = (temp & GENMASK(7, 0)); + + temp = svs_readl_relaxed(svsp, VOP30); + svsb->volt[6] = (temp >> 24) & GENMASK(7, 0); + svsb->volt[4] = (temp >> 16) & GENMASK(7, 0); + svsb->volt[2] = (temp >> 8) & GENMASK(7, 0); + svsb->volt[0] = (temp & GENMASK(7, 0)); + + for (i = 0; i <= 12; i += 2) + svsb->volt[i + 1] = interpolate(svsb->freq_pct[i], + svsb->freq_pct[i + 2], + svsb->volt[i], + svsb->volt[i + 2], + svsb->freq_pct[i + 1]); + + svsb->volt[15] = interpolate(svsb->freq_pct[12], + svsb->freq_pct[14], + svsb->volt[12], + svsb->volt[14], + svsb->freq_pct[15]); + + for (i = 0; i < svsb->opp_count; i++) + svsb->volt[i] += svsb->volt_od; +} + +static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + svs_writel_relaxed(svsp, + (svsb->freq_pct[14] << 24) | + (svsb->freq_pct[12] << 16) | + (svsb->freq_pct[10] << 8) | + svsb->freq_pct[8], + FREQPCT74); + + svs_writel_relaxed(svsp, + (svsb->freq_pct[6] << 24) | + (svsb->freq_pct[4] << 16) | + (svsb->freq_pct[2] << 8) | + svsb->freq_pct[0], + FREQPCT30); +} + +static void svs_set_bank_phase(struct svs_platform *svsp, + enum svsb_phase target_phase) +{ + struct svs_bank *svsb = svsp->pbank; + u32 des_char, temp_char, det_char, limit_vals, init2vals, ts_calcs; + + svs_switch_bank(svsp); + + des_char = (svsb->bdes << 8) | svsb->mdes; + svs_writel_relaxed(svsp, des_char, DESCHAR); + + temp_char = (svsb->vco << 16) | (svsb->mtdes << 8) | svsb->dvt_fixed; + svs_writel_relaxed(svsp, temp_char, TEMPCHAR); + + det_char = (svsb->dcbdet << 8) | svsb->dcmdet; + svs_writel_relaxed(svsp, det_char, DETCHAR); + + svs_writel_relaxed(svsp, svsb->dc_config, DCCONFIG); + svs_writel_relaxed(svsp, svsb->age_config, AGECONFIG); + svs_writel_relaxed(svsp, SVSB_RUNCONFIG_DEFAULT, RUNCONFIG); + + svsb->set_freq_pct(svsp); + + limit_vals = (svsb->vmax << 24) | (svsb->vmin << 16) | + (SVSB_DTHI << 8) | SVSB_DTLO; + svs_writel_relaxed(svsp, limit_vals, LIMITVALS); + + svs_writel_relaxed(svsp, SVSB_DET_WINDOW, DETWINDOW); + svs_writel_relaxed(svsp, SVSB_DET_MAX, CONFIG); + svs_writel_relaxed(svsp, svsb->chk_shift, CHKSHIFT); + svs_writel_relaxed(svsp, svsb->ctl0, CTL0); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); + + switch (target_phase) { + case SVSB_PHASE_INIT01: + svs_writel_relaxed(svsp, svsb->vboot, VBOOT); + svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN); + svs_writel_relaxed(svsp, SVSB_EN_INIT01, SVSEN); + break; + case SVSB_PHASE_INIT02: + svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN); + init2vals = (svsb->age_voffset_in << 16) | svsb->dc_voffset_in; + svs_writel_relaxed(svsp, init2vals, INIT2VALS); + svs_writel_relaxed(svsp, SVSB_EN_INIT02, SVSEN); + break; + case SVSB_PHASE_MON: + ts_calcs = (svsb->bts << 12) | svsb->mts; + svs_writel_relaxed(svsp, ts_calcs, TSCALCS); + svs_writel_relaxed(svsp, SVSB_INTEN_MONVOPEN, INTEN); + svs_writel_relaxed(svsp, SVSB_EN_MON, SVSEN); + break; + default: + dev_err(svsb->dev, "requested unknown target phase: %u\n", + target_phase); + break; 
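+
+	/*
+	 * For reference, the SVSEN values written above encode the target
+	 * phase: SVSB_EN_INIT01 (0x1), SVSB_EN_INIT02 (0x5) and SVSB_EN_MON
+	 * (0x2); SVSB_EN_OFF (0x0) is used elsewhere to stop a bank.
+	 */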
+ } +} + +static inline void svs_save_bank_register_data(struct svs_platform *svsp, + enum svsb_phase phase) +{ + struct svs_bank *svsb = svsp->pbank; + enum svs_reg_index rg_i; + + for (rg_i = DESCHAR; rg_i < SVS_REG_MAX; rg_i++) + svsb->reg_data[phase][rg_i] = svs_readl_relaxed(svsp, rg_i); +} + +static inline void svs_error_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + dev_err(svsb->dev, "%s: CORESEL = 0x%08x\n", + __func__, svs_readl_relaxed(svsp, CORESEL)); + dev_err(svsb->dev, "SVSEN = 0x%08x, INTSTS = 0x%08x\n", + svs_readl_relaxed(svsp, SVSEN), + svs_readl_relaxed(svsp, INTSTS)); + dev_err(svsb->dev, "SMSTATE0 = 0x%08x, SMSTATE1 = 0x%08x\n", + svs_readl_relaxed(svsp, SMSTATE0), + svs_readl_relaxed(svsp, SMSTATE1)); + dev_err(svsb->dev, "TEMP = 0x%08x\n", svs_readl_relaxed(svsp, TEMP)); + + svs_save_bank_register_data(svsp, SVSB_PHASE_ERROR); + + svsb->phase = SVSB_PHASE_ERROR; + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); +} + +static inline void svs_init01_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + dev_info(svsb->dev, "%s: VDN74~30:0x%08x~0x%08x, DC:0x%08x\n", + __func__, svs_readl_relaxed(svsp, VDESIGN74), + svs_readl_relaxed(svsp, VDESIGN30), + svs_readl_relaxed(svsp, DCVALUES)); + + svs_save_bank_register_data(svsp, SVSB_PHASE_INIT01); + + svsb->phase = SVSB_PHASE_INIT01; + svsb->dc_voffset_in = ~(svs_readl_relaxed(svsp, DCVALUES) & + GENMASK(15, 0)) + 1; + if (svsb->volt_flags & SVSB_INIT01_VOLT_IGNORE || + (svsb->dc_voffset_in & SVSB_DC_SIGNED_BIT && + svsb->volt_flags & SVSB_INIT01_VOLT_INC_ONLY)) + svsb->dc_voffset_in = 0; + + svsb->age_voffset_in = svs_readl_relaxed(svsp, AGEVALUES) & + GENMASK(15, 0); + + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS); + svsb->core_sel &= ~SVSB_DET_CLK_EN; +} + +static inline void svs_init02_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + dev_info(svsb->dev, "%s: VOP74~30:0x%08x~0x%08x, DC:0x%08x\n", + __func__, svs_readl_relaxed(svsp, VOP74), + svs_readl_relaxed(svsp, VOP30), + svs_readl_relaxed(svsp, DCVALUES)); + + svs_save_bank_register_data(svsp, SVSB_PHASE_INIT02); + + svsb->phase = SVSB_PHASE_INIT02; + svsb->get_volts(svsp); + + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS); +} + +static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + svs_save_bank_register_data(svsp, SVSB_PHASE_MON); + + svsb->phase = SVSB_PHASE_MON; + svsb->get_volts(svsp); + + svsb->temp = svs_readl_relaxed(svsp, TEMP) & GENMASK(7, 0); + svs_writel_relaxed(svsp, SVSB_INTSTS_MONVOP, INTSTS); +} + +static irqreturn_t svs_isr(int irq, void *data) +{ + struct svs_platform *svsp = data; + struct svs_bank *svsb = NULL; + unsigned long flags; + u32 idx, int_sts, svs_en; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + WARN(!svsb, "%s: svsb(%s) is null", __func__, svsb->name); + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + + /* Find out which svs bank fires interrupt */ + if (svsb->int_st & svs_readl_relaxed(svsp, INTST)) { + spin_unlock_irqrestore(&svs_lock, flags); + continue; + } + + svs_switch_bank(svsp); + int_sts = svs_readl_relaxed(svsp, INTSTS); + svs_en = svs_readl_relaxed(svsp, SVSEN); + + if (int_sts == SVSB_INTSTS_COMPLETE && + svs_en == SVSB_EN_INIT01) + 
svs_init01_isr_handler(svsp); + else if (int_sts == SVSB_INTSTS_COMPLETE && + svs_en == SVSB_EN_INIT02) + svs_init02_isr_handler(svsp); + else if (int_sts & SVSB_INTSTS_MONVOP) + svs_mon_mode_isr_handler(svsp); + else + svs_error_isr_handler(svsp); + + spin_unlock_irqrestore(&svs_lock, flags); + break; + } + + svs_adjust_pm_opp_volts(svsb); + + if (svsb->phase == SVSB_PHASE_INIT01 || + svsb->phase == SVSB_PHASE_INIT02) + complete(&svsb->init_completion); + + return IRQ_HANDLED; +} + +static int svs_init01(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + unsigned long flags, time_left; + bool search_done; + int ret = 0, r; + u32 opp_freq, opp_vboot, buck_volt, idx, i; + + /* Keep CPUs' core power on for svs_init01 initialization */ + cpuidle_pause_and_lock(); + + /* Svs bank init01 preparation - power enable */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + ret = regulator_enable(svsb->buck); + if (ret) { + dev_err(svsb->dev, "%s enable fail: %d\n", + svsb->buck_name, ret); + goto svs_init01_resume_cpuidle; + } + + /* Some buck doesn't support mode change. Show fail msg only */ + ret = regulator_set_mode(svsb->buck, REGULATOR_MODE_FAST); + if (ret) + dev_notice(svsb->dev, "set fast mode fail: %d\n", ret); + + if (svsb->volt_flags & SVSB_INIT01_PD_REQ) { + if (!pm_runtime_enabled(svsb->opp_dev)) { + pm_runtime_enable(svsb->opp_dev); + svsb->pm_runtime_enabled_count++; + } + + ret = pm_runtime_get_sync(svsb->opp_dev); + if (ret < 0) { + dev_err(svsb->dev, "mtcmos on fail: %d\n", ret); + goto svs_init01_resume_cpuidle; + } + } + } + + /* + * Svs bank init01 preparation - vboot voltage adjustment + * Sometimes two svs banks use the same buck. Therefore, + * we have to set each svs bank to target voltage(vboot) first. + */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + /* + * Find the fastest freq that can be run at vboot and + * fix to that freq until svs_init01 is done. 
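+		 * (init01 must calibrate at one fixed, known voltage: the
+		 * first OPP whose voltage fits under vboot is pinned to
+		 * vboot, every other OPP is temporarily disabled, and all
+		 * are re-enabled in svs_init01_finish.)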
+ */ + search_done = false; + opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot, + svsb->volt_step, + svsb->volt_base); + + for (i = 0; i < svsb->opp_count; i++) { + opp_freq = svsb->opp_dfreq[i]; + if (!search_done && svsb->opp_dvolt[i] <= opp_vboot) { + ret = dev_pm_opp_adjust_voltage(svsb->opp_dev, + opp_freq, + opp_vboot, + opp_vboot, + opp_vboot); + if (ret) { + dev_err(svsb->dev, + "set opp %uuV vboot fail: %d\n", + opp_vboot, ret); + goto svs_init01_finish; + } + + search_done = true; + } else { + ret = dev_pm_opp_disable(svsb->opp_dev, + svsb->opp_dfreq[i]); + if (ret) { + dev_err(svsb->dev, + "opp %uHz disable fail: %d\n", + svsb->opp_dfreq[i], ret); + goto svs_init01_finish; + } + } + } + } + + /* Svs bank init01 begins */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot, + svsb->volt_step, + svsb->volt_base); + + buck_volt = regulator_get_voltage(svsb->buck); + if (buck_volt != opp_vboot) { + dev_err(svsb->dev, + "buck voltage: %uuV, expected vboot: %uuV\n", + buck_volt, opp_vboot); + ret = -EPERM; + goto svs_init01_finish; + } + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_set_bank_phase(svsp, SVSB_PHASE_INIT01); + spin_unlock_irqrestore(&svs_lock, flags); + + time_left = wait_for_completion_timeout(&svsb->init_completion, + msecs_to_jiffies(5000)); + if (!time_left) { + dev_err(svsb->dev, "init01 completion timeout\n"); + ret = -EBUSY; + goto svs_init01_finish; + } + } + +svs_init01_finish: + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + for (i = 0; i < svsb->opp_count; i++) { + r = dev_pm_opp_enable(svsb->opp_dev, + svsb->opp_dfreq[i]); + if (r) + dev_err(svsb->dev, "opp %uHz enable fail: %d\n", + svsb->opp_dfreq[i], r); + } + + if (svsb->volt_flags & SVSB_INIT01_PD_REQ) { + r = pm_runtime_put_sync(svsb->opp_dev); + if (r) + dev_err(svsb->dev, "mtcmos off fail: %d\n", r); + + if (svsb->pm_runtime_enabled_count > 0) { + pm_runtime_disable(svsb->opp_dev); + svsb->pm_runtime_enabled_count--; + } + } + + r = regulator_set_mode(svsb->buck, REGULATOR_MODE_NORMAL); + if (r) + dev_notice(svsb->dev, "set normal mode fail: %d\n", r); + + r = regulator_disable(svsb->buck); + if (r) + dev_err(svsb->dev, "%s disable fail: %d\n", + svsb->buck_name, r); + } + +svs_init01_resume_cpuidle: + cpuidle_resume_and_unlock(); + + return ret; +} + +static int svs_init02(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + unsigned long flags, time_left; + u32 idx; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT02)) + continue; + + reinit_completion(&svsb->init_completion); + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_set_bank_phase(svsp, SVSB_PHASE_INIT02); + spin_unlock_irqrestore(&svs_lock, flags); + + time_left = wait_for_completion_timeout(&svsb->init_completion, + msecs_to_jiffies(5000)); + if (!time_left) { + dev_err(svsb->dev, "init02 completion timeout\n"); + return -EBUSY; + } + } + + /* + * 2-line high/low bank update its corresponding opp voltages only. + * Therefore, we sync voltages from opp for high/low bank voltages + * consistency. 
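+	 * (svs_sync_bank_volts_from_opp() below reads the adjusted opp
+	 * voltages back into svsb->volt[] so the high and low banks end up
+	 * with one consistent table.)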
+ */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT02)) + continue; + + if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) { + if (svs_sync_bank_volts_from_opp(svsb)) { + dev_err(svsb->dev, "sync volt fail\n"); + return -EPERM; + } + } + } + + return 0; +} + +static void svs_mon_mode(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + unsigned long flags; + u32 idx; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_MON)) + continue; + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_set_bank_phase(svsp, SVSB_PHASE_MON); + spin_unlock_irqrestore(&svs_lock, flags); + } +} + +static int svs_start(struct svs_platform *svsp) +{ + int ret; + + ret = svs_init01(svsp); + if (ret) + return ret; + + ret = svs_init02(svsp); + if (ret) + return ret; + + svs_mon_mode(svsp); + + return 0; +} + +static int svs_suspend(struct device *dev) +{ + struct svs_platform *svsp = dev_get_drvdata(dev); + struct svs_bank *svsb; + unsigned long flags; + int ret; + u32 idx; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + /* This might wait for svs_isr() process */ + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_switch_bank(svsp); + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); + spin_unlock_irqrestore(&svs_lock, flags); + + svsb->phase = SVSB_PHASE_ERROR; + svs_adjust_pm_opp_volts(svsb); + } + + ret = reset_control_assert(svsp->rst); + if (ret) { + dev_err(svsp->dev, "cannot assert reset %d\n", ret); + return ret; + } + + clk_disable_unprepare(svsp->main_clk); + + return 0; +} + +static int svs_resume(struct device *dev) +{ + struct svs_platform *svsp = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(svsp->main_clk); + if (ret) { + dev_err(svsp->dev, "cannot enable main_clk, disable svs\n"); + return ret; + } + + ret = reset_control_deassert(svsp->rst); + if (ret) { + dev_err(svsp->dev, "cannot deassert reset %d\n", ret); + goto out_of_resume; + } + + ret = svs_init02(svsp); + if (ret) + goto out_of_resume; + + svs_mon_mode(svsp); + + return 0; + +out_of_resume: + clk_disable_unprepare(svsp->main_clk); + return ret; +} + +static int svs_bank_resource_setup(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct dev_pm_opp *opp; + unsigned long freq; + int count, ret; + u32 idx, i; + + dev_set_drvdata(svsp->dev, svsp); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + svsb->name = "SVSB_CPU_LITTLE"; + break; + case SVSB_CPU_BIG: + svsb->name = "SVSB_CPU_BIG"; + break; + case SVSB_CCI: + svsb->name = "SVSB_CCI"; + break; + case SVSB_GPU: + if (svsb->type == SVSB_HIGH) + svsb->name = "SVSB_GPU_HIGH"; + else if (svsb->type == SVSB_LOW) + svsb->name = "SVSB_GPU_LOW"; + else + svsb->name = "SVSB_GPU"; + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + return -EINVAL; + } + + svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev), + GFP_KERNEL); + if (!svsb->dev) + return -ENOMEM; + + ret = dev_set_name(svsb->dev, "%s", svsb->name); + if (ret) + return ret; + + dev_set_drvdata(svsb->dev, svsp); + + ret = dev_pm_opp_of_add_table(svsb->opp_dev); + if (ret) { + dev_err(svsb->dev, "add opp table fail: %d\n", ret); + return ret; + } + + mutex_init(&svsb->lock); + init_completion(&svsb->init_completion); + + if (svsb->mode_support 
& SVSB_MODE_INIT01) { + svsb->buck = devm_regulator_get_optional(svsb->opp_dev, + svsb->buck_name); + if (IS_ERR(svsb->buck)) { + dev_err(svsb->dev, "cannot get \"%s-supply\"\n", + svsb->buck_name); + return PTR_ERR(svsb->buck); + } + } + + if (svsb->mode_support & SVSB_MODE_MON) { + svsb->tzd = thermal_zone_get_zone_by_name(svsb->tzone_name); + if (IS_ERR(svsb->tzd)) { + dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n", + svsb->tzone_name); + return PTR_ERR(svsb->tzd); + } + } + + count = dev_pm_opp_get_opp_count(svsb->opp_dev); + if (svsb->opp_count != count) { + dev_err(svsb->dev, + "opp_count not \"%u\" but get \"%d\"?\n", + svsb->opp_count, count); + return count; + } + + for (i = 0, freq = U32_MAX; i < svsb->opp_count; i++, freq--) { + opp = dev_pm_opp_find_freq_floor(svsb->opp_dev, &freq); + if (IS_ERR(opp)) { + dev_err(svsb->dev, "cannot find freq = %ld\n", + PTR_ERR(opp)); + return PTR_ERR(opp); + } + + svsb->opp_dfreq[i] = freq; + svsb->opp_dvolt[i] = dev_pm_opp_get_voltage(opp); + svsb->freq_pct[i] = percent(svsb->opp_dfreq[i], + svsb->freq_base); + dev_pm_opp_put(opp); + } + } + + return 0; +} + +static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct nvmem_cell *cell; + u32 idx, i, vmin, golden_temp; + + for (i = 0; i < svsp->efuse_max; i++) + if (svsp->efuse[i]) + dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n", + i, svsp->efuse[i]); + + if (!svsp->efuse[9]) { + dev_notice(svsp->dev, "svs_efuse[9] = 0x0?\n"); + return false; + } + + /* Svs efuse parsing */ + vmin = (svsp->efuse[19] >> 4) & GENMASK(1, 0); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (vmin == 0x1) + svsb->vmin = 0x1e; + + if (svsb->type == SVSB_LOW) { + svsb->mtdes = svsp->efuse[10] & GENMASK(7, 0); + svsb->bdes = (svsp->efuse[10] >> 16) & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[10] >> 24) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[17]) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[17] >> 8) & GENMASK(7, 0); + } else if (svsb->type == SVSB_HIGH) { + svsb->mtdes = svsp->efuse[9] & GENMASK(7, 0); + svsb->bdes = (svsp->efuse[9] >> 16) & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[9] >> 24) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[17] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[17] >> 24) & GENMASK(7, 0); + } + + svsb->vmax += svsb->dvt_fixed; + } + + /* Thermal efuse parsing */ + cell = nvmem_cell_get(svsp->dev, "t-calibration-data"); + if (IS_ERR_OR_NULL(cell)) { + dev_err(svsp->dev, "no \"t-calibration-data\"? 
%ld\n", + PTR_ERR(cell)); + return false; + } + + svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max); + if (IS_ERR(svsp->tefuse)) { + dev_err(svsp->dev, "cannot read thermal efuse: %ld\n", + PTR_ERR(svsp->tefuse)); + nvmem_cell_put(cell); + return false; + } + + svsp->tefuse_max /= sizeof(u32); + nvmem_cell_put(cell); + + for (i = 0; i < svsp->tefuse_max; i++) + if (svsp->tefuse[i] != 0) + break; + + if (i == svsp->tefuse_max) + golden_temp = 50; /* All thermal efuse data are 0 */ + else + golden_temp = (svsp->tefuse[0] >> 24) & GENMASK(7, 0); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + svsb->mts = 500; + svsb->bts = (((500 * golden_temp + 250460) / 1000) - 25) * 4; + } + + return true; +} + +static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct nvmem_cell *cell; + int format[6], x_roomt[6], o_vtsmcu[5], o_vtsabb, tb_roomt = 0; + int adc_ge_t, adc_oe_t, ge, oe, gain, degc_cali, adc_cali_en_t; + int o_slope, o_slope_sign, ts_id; + u32 idx, i, ft_pgm, mts, temp0, temp1, temp2; + + for (i = 0; i < svsp->efuse_max; i++) + if (svsp->efuse[i]) + dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n", + i, svsp->efuse[i]); + + if (!svsp->efuse[2]) { + dev_notice(svsp->dev, "svs_efuse[2] = 0x0?\n"); + return false; + } + + /* Svs efuse parsing */ + ft_pgm = (svsp->efuse[0] >> 4) & GENMASK(3, 0); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (ft_pgm <= 1) + svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + svsb->bdes = svsp->efuse[16] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[16] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[16] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[16] >> 24) & GENMASK(7, 0); + svsb->mtdes = (svsp->efuse[17] >> 16) & GENMASK(7, 0); + + if (ft_pgm <= 3) + svsb->volt_od += 10; + else + svsb->volt_od += 2; + break; + case SVSB_CPU_BIG: + svsb->bdes = svsp->efuse[18] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[18] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[18] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[18] >> 24) & GENMASK(7, 0); + svsb->mtdes = svsp->efuse[17] & GENMASK(7, 0); + + if (ft_pgm <= 3) + svsb->volt_od += 15; + else + svsb->volt_od += 12; + break; + case SVSB_CCI: + svsb->bdes = svsp->efuse[4] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[4] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[4] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[4] >> 24) & GENMASK(7, 0); + svsb->mtdes = (svsp->efuse[5] >> 16) & GENMASK(7, 0); + + if (ft_pgm <= 3) + svsb->volt_od += 10; + else + svsb->volt_od += 2; + break; + case SVSB_GPU: + svsb->bdes = svsp->efuse[6] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[6] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[6] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[6] >> 24) & GENMASK(7, 0); + svsb->mtdes = svsp->efuse[5] & GENMASK(7, 0); + + if (ft_pgm >= 2) { + svsb->freq_base = 800000000; /* 800MHz */ + svsb->dvt_fixed = 2; + } + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + return false; + } + } + + /* Get thermal efuse by nvmem */ + cell = nvmem_cell_get(svsp->dev, "t-calibration-data"); + if (IS_ERR(cell)) { + dev_err(svsp->dev, "no \"t-calibration-data\"? 
%ld\n", + PTR_ERR(cell)); + goto remove_mt8183_svsb_mon_mode; + } + + svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max); + if (IS_ERR(svsp->tefuse)) { + dev_err(svsp->dev, "cannot read thermal efuse: %ld\n", + PTR_ERR(svsp->tefuse)); + nvmem_cell_put(cell); + goto remove_mt8183_svsb_mon_mode; + } + + svsp->tefuse_max /= sizeof(u32); + nvmem_cell_put(cell); + + /* Thermal efuse parsing */ + adc_ge_t = (svsp->tefuse[1] >> 22) & GENMASK(9, 0); + adc_oe_t = (svsp->tefuse[1] >> 12) & GENMASK(9, 0); + + o_vtsmcu[0] = (svsp->tefuse[0] >> 17) & GENMASK(8, 0); + o_vtsmcu[1] = (svsp->tefuse[0] >> 8) & GENMASK(8, 0); + o_vtsmcu[2] = svsp->tefuse[1] & GENMASK(8, 0); + o_vtsmcu[3] = (svsp->tefuse[2] >> 23) & GENMASK(8, 0); + o_vtsmcu[4] = (svsp->tefuse[2] >> 5) & GENMASK(8, 0); + o_vtsabb = (svsp->tefuse[2] >> 14) & GENMASK(8, 0); + + degc_cali = (svsp->tefuse[0] >> 1) & GENMASK(5, 0); + adc_cali_en_t = svsp->tefuse[0] & BIT(0); + o_slope_sign = (svsp->tefuse[0] >> 7) & BIT(0); + + ts_id = (svsp->tefuse[1] >> 9) & BIT(0); + o_slope = (svsp->tefuse[0] >> 26) & GENMASK(5, 0); + + if (adc_cali_en_t == 1) { + if (!ts_id) + o_slope = 0; + + if (adc_ge_t < 265 || adc_ge_t > 758 || + adc_oe_t < 265 || adc_oe_t > 758 || + o_vtsmcu[0] < -8 || o_vtsmcu[0] > 484 || + o_vtsmcu[1] < -8 || o_vtsmcu[1] > 484 || + o_vtsmcu[2] < -8 || o_vtsmcu[2] > 484 || + o_vtsmcu[3] < -8 || o_vtsmcu[3] > 484 || + o_vtsmcu[4] < -8 || o_vtsmcu[4] > 484 || + o_vtsabb < -8 || o_vtsabb > 484 || + degc_cali < 1 || degc_cali > 63) { + dev_err(svsp->dev, "bad thermal efuse, no mon mode\n"); + goto remove_mt8183_svsb_mon_mode; + } + } else { + dev_err(svsp->dev, "no thermal efuse, no mon mode\n"); + goto remove_mt8183_svsb_mon_mode; + } + + ge = ((adc_ge_t - 512) * 10000) / 4096; + oe = (adc_oe_t - 512); + gain = (10000 + ge); + + format[0] = (o_vtsmcu[0] + 3350 - oe); + format[1] = (o_vtsmcu[1] + 3350 - oe); + format[2] = (o_vtsmcu[2] + 3350 - oe); + format[3] = (o_vtsmcu[3] + 3350 - oe); + format[4] = (o_vtsmcu[4] + 3350 - oe); + format[5] = (o_vtsabb + 3350 - oe); + + for (i = 0; i < 6; i++) + x_roomt[i] = (((format[i] * 10000) / 4096) * 10000) / gain; + + temp0 = (10000 * 100000 / gain) * 15 / 18; + + if (!o_slope_sign) + mts = (temp0 * 10) / (1534 + o_slope * 10); + else + mts = (temp0 * 10) / (1534 - o_slope * 10); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + svsb->mts = mts; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + tb_roomt = x_roomt[3]; + break; + case SVSB_CPU_BIG: + tb_roomt = x_roomt[4]; + break; + case SVSB_CCI: + tb_roomt = x_roomt[3]; + break; + case SVSB_GPU: + tb_roomt = x_roomt[1]; + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + goto remove_mt8183_svsb_mon_mode; + } + + temp0 = (degc_cali * 10 / 2); + temp1 = ((10000 * 100000 / 4096 / gain) * + oe + tb_roomt * 10) * 15 / 18; + + if (!o_slope_sign) + temp2 = temp1 * 100 / (1534 + o_slope * 10); + else + temp2 = temp1 * 100 / (1534 - o_slope * 10); + + svsb->bts = (temp0 + temp2 - 250) * 4 / 10; + } + + return true; + +remove_mt8183_svsb_mon_mode: + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + svsb->mode_support &= ~SVSB_MODE_MON; + } + + return true; +} + +static bool svs_is_efuse_data_correct(struct svs_platform *svsp) +{ + struct nvmem_cell *cell; + + /* Get svs efuse by nvmem */ + cell = nvmem_cell_get(svsp->dev, "svs-calibration-data"); + if (IS_ERR(cell)) { + dev_err(svsp->dev, "no \"svs-calibration-data\"? 
%ld\n", + PTR_ERR(cell)); + return false; + } + + svsp->efuse = nvmem_cell_read(cell, &svsp->efuse_max); + if (IS_ERR(svsp->efuse)) { + dev_err(svsp->dev, "cannot read svs efuse: %ld\n", + PTR_ERR(svsp->efuse)); + nvmem_cell_put(cell); + return false; + } + + svsp->efuse_max /= sizeof(u32); + nvmem_cell_put(cell); + + return svsp->efuse_parsing(svsp); +} + +static struct device *svs_get_subsys_device(struct svs_platform *svsp, + const char *node_name) +{ + struct platform_device *pdev; + struct device_node *np; + + np = of_find_node_by_name(NULL, node_name); + if (!np) { + dev_err(svsp->dev, "cannot find %s node\n", node_name); + return ERR_PTR(-ENODEV); + } + + pdev = of_find_device_by_node(np); + if (!pdev) { + of_node_put(np); + dev_err(svsp->dev, "cannot find pdev by %s\n", node_name); + return ERR_PTR(-ENXIO); + } + + of_node_put(np); + + return &pdev->dev; +} + +static struct device *svs_add_device_link(struct svs_platform *svsp, + const char *node_name) +{ + struct device *dev; + struct device_link *sup_link; + + if (!node_name) { + dev_err(svsp->dev, "node name cannot be null\n"); + return ERR_PTR(-EINVAL); + } + + dev = svs_get_subsys_device(svsp, node_name); + if (IS_ERR(dev)) + return dev; + + sup_link = device_link_add(svsp->dev, dev, + DL_FLAG_AUTOREMOVE_CONSUMER); + if (!sup_link) { + dev_err(svsp->dev, "sup_link is NULL\n"); + return ERR_PTR(-EINVAL); + } + + if (sup_link->supplier->links.status != DL_DEV_DRIVER_BOUND) + return ERR_PTR(-EPROBE_DEFER); + + return dev; +} + +static int svs_mt8192_platform_probe(struct svs_platform *svsp) +{ + struct device *dev; + struct svs_bank *svsb; + u32 idx; + + svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst"); + if (IS_ERR(svsp->rst)) + return dev_err_probe(svsp->dev, PTR_ERR(svsp->rst), + "cannot get svs reset control\n"); + + dev = svs_add_device_link(svsp, "lvts"); + if (IS_ERR(dev)) + return dev_err_probe(svsp->dev, PTR_ERR(dev), + "failed to get lvts device\n"); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (svsb->type == SVSB_HIGH) + svsb->opp_dev = svs_add_device_link(svsp, "mali"); + else if (svsb->type == SVSB_LOW) + svsb->opp_dev = svs_get_subsys_device(svsp, "mali"); + + if (IS_ERR(svsb->opp_dev)) + return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), + "failed to get OPP device for bank %d\n", + idx); + } + + return 0; +} + +static int svs_mt8183_platform_probe(struct svs_platform *svsp) +{ + struct device *dev; + struct svs_bank *svsb; + u32 idx; + + dev = svs_add_device_link(svsp, "thermal"); + if (IS_ERR(dev)) + return dev_err_probe(svsp->dev, PTR_ERR(dev), + "failed to get thermal device\n"); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + case SVSB_CPU_BIG: + svsb->opp_dev = get_cpu_device(svsb->cpu_id); + break; + case SVSB_CCI: + svsb->opp_dev = svs_add_device_link(svsp, "cci"); + break; + case SVSB_GPU: + svsb->opp_dev = svs_add_device_link(svsp, "gpu"); + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + return -EINVAL; + } + + if (IS_ERR(svsb->opp_dev)) + return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), + "failed to get OPP device for bank %d\n", + idx); + } + + return 0; +} + +static struct svs_bank svs_mt8192_banks[] = { + { + .sw_id = SVSB_GPU, + .type = SVSB_LOW, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT, + .mode_support = SVSB_MODE_INIT02, + .opp_count = 
MAX_OPP_ENTRIES, + .freq_base = 688000000, + .turn_freq_base = 688000000, + .volt_step = 6250, + .volt_base = 400000, + .vmax = 0x60, + .vmin = 0x1a, + .age_config = 0x555555, + .dc_config = 0x1, + .dvt_fixed = 0x1, + .vco = 0x18, + .chk_shift = 0x87, + .core_sel = 0x0fff0100, + .int_st = BIT(0), + .ctl0 = 0x00540003, + }, + { + .sw_id = SVSB_GPU, + .type = SVSB_HIGH, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .tzone_name = "gpu1", + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | + SVSB_MON_VOLT_IGNORE, + .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 902000000, + .turn_freq_base = 688000000, + .volt_step = 6250, + .volt_base = 400000, + .vmax = 0x60, + .vmin = 0x1a, + .age_config = 0x555555, + .dc_config = 0x1, + .dvt_fixed = 0x6, + .vco = 0x18, + .chk_shift = 0x87, + .core_sel = 0x0fff0101, + .int_st = BIT(1), + .ctl0 = 0x00540003, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 7, + }, +}; + +static struct svs_bank svs_mt8183_banks[] = { + { + .sw_id = SVSB_CPU_LITTLE, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .cpu_id = 0, + .buck_name = "proc", + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 1989000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x64, + .vmin = 0x18, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x7, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0000, + .int_st = BIT(0), + .ctl0 = 0x00010001, + }, + { + .sw_id = SVSB_CPU_BIG, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .cpu_id = 4, + .buck_name = "proc", + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 1989000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x58, + .vmin = 0x10, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x7, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0001, + .int_st = BIT(1), + .ctl0 = 0x00000001, + }, + { + .sw_id = SVSB_CCI, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .buck_name = "proc", + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 1196000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x64, + .vmin = 0x18, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x7, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0002, + .int_st = BIT(2), + .ctl0 = 0x00100003, + }, + { + .sw_id = SVSB_GPU, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .buck_name = "mali", + .tzone_name = "tzts2", + .volt_flags = SVSB_INIT01_PD_REQ | + SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 | + SVSB_MODE_MON, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 900000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x40, + .vmin = 0x14, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x3, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0003, + .int_st = BIT(3), + .ctl0 = 0x00050001, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 3, + }, +}; + 
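For reference, svs_bank_resource_setup() above walks each bank's OPP table from the highest frequency down and records every rate as a percentage of the bank's freq_base. The percent() helper is not part of this hunk, so the standalone sketch below is only a hypothetical userspace model that assumes round-up semantics; all frequencies except the 902 MHz mt8192 base are invented for illustration.

#include <stdio.h>
#include <stdint.h>

/* Assumed round-up percent(); with this, freq_base itself maps to 100% */
static uint32_t percent(uint64_t numerator, uint32_t denominator)
{
	return (uint32_t)((numerator * 100 + denominator - 1) / denominator);
}

int main(void)
{
	const uint32_t freq_base = 902000000;	/* mt8192 GPU "high" bank */
	/* Hypothetical descending OPP frequencies, as the driver walks them */
	const uint32_t opp_dfreq[] = { 902000000, 762000000, 688000000, 578000000 };

	for (size_t i = 0; i < sizeof(opp_dfreq) / sizeof(opp_dfreq[0]); i++)
		printf("opp_dfreq[%zu] = %u Hz -> freq_pct = %u%%\n",
		       i, opp_dfreq[i], percent(opp_dfreq[i], freq_base));
	return 0;
}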
+static const struct svs_platform_data svs_mt8192_platform_data = { + .name = "mt8192-svs", + .banks = svs_mt8192_banks, + .efuse_parsing = svs_mt8192_efuse_parsing, + .probe = svs_mt8192_platform_probe, + .irqflags = IRQF_TRIGGER_HIGH, + .regs = svs_regs_v2, + .bank_max = ARRAY_SIZE(svs_mt8192_banks), +}; + +static const struct svs_platform_data svs_mt8183_platform_data = { + .name = "mt8183-svs", + .banks = svs_mt8183_banks, + .efuse_parsing = svs_mt8183_efuse_parsing, + .probe = svs_mt8183_platform_probe, + .irqflags = IRQF_TRIGGER_LOW, + .regs = svs_regs_v2, + .bank_max = ARRAY_SIZE(svs_mt8183_banks), +}; + +static const struct of_device_id svs_of_match[] = { + { + .compatible = "mediatek,mt8192-svs", + .data = &svs_mt8192_platform_data, + }, { + .compatible = "mediatek,mt8183-svs", + .data = &svs_mt8183_platform_data, + }, { + /* Sentinel */ + }, +}; + +static struct svs_platform *svs_platform_probe(struct platform_device *pdev) +{ + struct svs_platform *svsp; + const struct svs_platform_data *svsp_data; + int ret; + + svsp_data = of_device_get_match_data(&pdev->dev); + if (!svsp_data) { + dev_err(&pdev->dev, "no svs platform data?\n"); + return ERR_PTR(-EPERM); + } + + svsp = devm_kzalloc(&pdev->dev, sizeof(*svsp), GFP_KERNEL); + if (!svsp) + return ERR_PTR(-ENOMEM); + + svsp->dev = &pdev->dev; + svsp->name = svsp_data->name; + svsp->banks = svsp_data->banks; + svsp->efuse_parsing = svsp_data->efuse_parsing; + svsp->probe = svsp_data->probe; + svsp->irqflags = svsp_data->irqflags; + svsp->regs = svsp_data->regs; + svsp->bank_max = svsp_data->bank_max; + + ret = svsp->probe(svsp); + if (ret) + return ERR_PTR(ret); + + return svsp; +} + +static int svs_probe(struct platform_device *pdev) +{ + struct svs_platform *svsp; + unsigned int svsp_irq; + int ret; + + svsp = svs_platform_probe(pdev); + if (IS_ERR(svsp)) + return PTR_ERR(svsp); + + if (!svs_is_efuse_data_correct(svsp)) { + dev_notice(svsp->dev, "efuse data isn't correct\n"); + ret = -EPERM; + goto svs_probe_free_resource; + } + + ret = svs_bank_resource_setup(svsp); + if (ret) { + dev_err(svsp->dev, "svs bank resource setup fail: %d\n", ret); + goto svs_probe_free_resource; + } + + svsp_irq = irq_of_parse_and_map(svsp->dev->of_node, 0); + ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr, + svsp->irqflags | IRQF_ONESHOT, + svsp->name, svsp); + if (ret) { + dev_err(svsp->dev, "register irq(%d) failed: %d\n", + svsp_irq, ret); + goto svs_probe_free_resource; + } + + svsp->main_clk = devm_clk_get(svsp->dev, "main"); + if (IS_ERR(svsp->main_clk)) { + dev_err(svsp->dev, "failed to get clock: %ld\n", + PTR_ERR(svsp->main_clk)); + ret = PTR_ERR(svsp->main_clk); + goto svs_probe_free_resource; + } + + ret = clk_prepare_enable(svsp->main_clk); + if (ret) { + dev_err(svsp->dev, "cannot enable main clk: %d\n", ret); + goto svs_probe_free_resource; + } + + svsp->base = of_iomap(svsp->dev->of_node, 0); + if (IS_ERR_OR_NULL(svsp->base)) { + dev_err(svsp->dev, "cannot find svs register base\n"); + ret = -EINVAL; + goto svs_probe_clk_disable; + } + + ret = svs_start(svsp); + if (ret) { + dev_err(svsp->dev, "svs start fail: %d\n", ret); + goto svs_probe_iounmap; + } + + ret = svs_create_debug_cmds(svsp); + if (ret) { + dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret); + goto svs_probe_iounmap; + } + + return 0; + +svs_probe_iounmap: + iounmap(svsp->base); + +svs_probe_clk_disable: + clk_disable_unprepare(svsp->main_clk); + +svs_probe_free_resource: + if (!IS_ERR_OR_NULL(svsp->efuse)) + kfree(svsp->efuse); + if 
(!IS_ERR_OR_NULL(svsp->tefuse)) + kfree(svsp->tefuse); + + return ret; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(svs_pm_ops, svs_suspend, svs_resume); + +static struct platform_driver svs_driver = { + .probe = svs_probe, + .driver = { + .name = "mtk-svs", + .pm = &svs_pm_ops, + .of_match_table = of_match_ptr(svs_of_match), + }, +}; + +module_platform_driver(svs_driver); + +MODULE_AUTHOR("Roger Lu <roger.lu@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek SVS driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index e718b8735444..e0d7a5459562 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -129,7 +129,10 @@ config QCOM_RPMHPD config QCOM_RPMPD tristate "Qualcomm RPM Power domain driver" + depends on PM depends on QCOM_SMD_RPM + select PM_GENERIC_DOMAINS + select PM_GENERIC_DOMAINS_OF help QCOM RPM Power domain driver to support power-domains with performance states. The driver communicates a performance state @@ -228,4 +231,19 @@ config QCOM_APR application processor and QDSP6. APR is used by audio driver to configure QDSP6 ASM, ADM and AFE modules. + +config QCOM_ICC_BWMON + tristate "QCOM Interconnect Bandwidth Monitor driver" + depends on ARCH_QCOM || COMPILE_TEST + select PM_OPP + help + Sets up a driver that monitors bandwidth on various interconnects and, + based on the measurements, votes for interconnect bandwidth, adjusting + interconnect speed to the current demand. + The current implementation supports BWMON v4, used for example on + SDM845 to measure bandwidth between the CPU (gladiator_noc) and the + Last Level Cache (memnoc). Using this BWMON makes it possible to remove + some of the fixed bandwidth votes from cpufreq (CPU nodes) and thus + achieve high memory throughput even at lower CPU frequencies. + endmenu diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 70d5de69fd7b..d66604aff2b0 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -28,3 +28,4 @@ obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o +obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c index 3caabd873322..b4046f393575 100644 --- a/drivers/soc/qcom/apr.c +++ b/drivers/soc/qcom/apr.c @@ -377,17 +377,14 @@ static int apr_device_probe(struct device *dev) static void apr_device_remove(struct device *dev) { struct apr_device *adev = to_apr_device(dev); - struct apr_driver *adrv; + struct apr_driver *adrv = to_apr_driver(dev->driver); struct packet_router *apr = dev_get_drvdata(adev->dev.parent); - if (dev->driver) { - adrv = to_apr_driver(dev->driver); - if (adrv->remove) - adrv->remove(adev); - spin_lock(&apr->svcs_lock); - idr_remove(&apr->svcs_idr, adev->svc.id); - spin_unlock(&apr->svcs_lock); - } + if (adrv->remove) + adrv->remove(adev); + spin_lock(&apr->svcs_lock); + idr_remove(&apr->svcs_idr, adev->svc.id); + spin_unlock(&apr->svcs_lock); } static int apr_uevent(struct device *dev, struct kobj_uevent_env *env) diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c index dd872017f345..629a7188b576 100644 --- a/drivers/soc/qcom/cmd-db.c +++ b/drivers/soc/qcom/cmd-db.c @@ -141,13 +141,17 @@ static int cmd_db_get_header(const char *id, const struct entry_header **eh, const struct rsc_hdr *rsc_hdr; const struct entry_header *ent; int ret, i, j; - u8 query[8]; + u8 query[sizeof(ent->id)] __nonstring; ret = cmd_db_ready(); if (ret) return ret; - /* Pad out query
string to same length as in DB */ + /* + * Pad out query string to same length as in DB. NOTE: the output + * query string is not necessarily '\0' terminated if it bumps up + * against the max size. That's OK and expected. + */ strncpy(query, id, sizeof(query)); for (i = 0; i < MAX_SLV_ID; i++) { diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c new file mode 100644 index 000000000000..7f8aca533cd3 --- /dev/null +++ b/drivers/soc/qcom/icc-bwmon.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2021-2022 Linaro Ltd + * Author: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>, based on + * previous work of Thara Gopinath and msm-4.9 downstream sources. + */ +#include <linux/interconnect.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/sizes.h> + +/* + * The BWMON samples data throughput within the 'sample_ms' window. Its three + * configurable thresholds (Low, Medium and High) divide the measured + * bandwidth into four windows (called zones): + * + * Zone 0: byte count < THRES_LO + * Zone 1: THRES_LO < byte count < THRES_MED + * Zone 2: THRES_MED < byte count < THRES_HIGH + * Zone 3: THRES_HIGH < byte count + * + * Zones 0 and 2 are not used by this driver. + */ + +/* Internal sampling clock frequency */ +#define HW_TIMER_HZ 19200000 + +#define BWMON_GLOBAL_IRQ_STATUS 0x0 +#define BWMON_GLOBAL_IRQ_CLEAR 0x8 +#define BWMON_GLOBAL_IRQ_ENABLE 0xc +#define BWMON_GLOBAL_IRQ_ENABLE_ENABLE BIT(0) + +#define BWMON_IRQ_STATUS 0x100 +#define BWMON_IRQ_STATUS_ZONE_SHIFT 4 +#define BWMON_IRQ_CLEAR 0x108 +#define BWMON_IRQ_ENABLE 0x10c +#define BWMON_IRQ_ENABLE_ZONE1_SHIFT 5 +#define BWMON_IRQ_ENABLE_ZONE2_SHIFT 6 +#define BWMON_IRQ_ENABLE_ZONE3_SHIFT 7 +#define BWMON_IRQ_ENABLE_MASK (BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT) | \ + BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT)) + +#define BWMON_ENABLE 0x2a0 +#define BWMON_ENABLE_ENABLE BIT(0) + +#define BWMON_CLEAR 0x2a4 +#define BWMON_CLEAR_CLEAR BIT(0) + +#define BWMON_SAMPLE_WINDOW 0x2a8 +#define BWMON_THRESHOLD_HIGH 0x2ac +#define BWMON_THRESHOLD_MED 0x2b0 +#define BWMON_THRESHOLD_LOW 0x2b4 + +#define BWMON_ZONE_ACTIONS 0x2b8 +/* + * Actions to perform on some zone 'z' when the current zone hits its + * threshold: + * Increment counter of zone 'z' + */ +#define BWMON_ZONE_ACTIONS_INCREMENT(z) (0x2 << ((z) * 2)) +/* Clear counter of zone 'z' */ +#define BWMON_ZONE_ACTIONS_CLEAR(z) (0x1 << ((z) * 2)) + +/* Zone 0 threshold hit: Clear zone count */ +#define BWMON_ZONE_ACTIONS_ZONE0 (BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* Zone 1 threshold hit: Increment zone count & clear lower zones */ +#define BWMON_ZONE_ACTIONS_ZONE1 (BWMON_ZONE_ACTIONS_INCREMENT(1) | \ + BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* Zone 2 threshold hit: Increment zone count & clear lower zones */ +#define BWMON_ZONE_ACTIONS_ZONE2 (BWMON_ZONE_ACTIONS_INCREMENT(2) | \ + BWMON_ZONE_ACTIONS_CLEAR(1) | \ + BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* Zone 3 threshold hit: Increment zone count & clear lower zones */ +#define BWMON_ZONE_ACTIONS_ZONE3 (BWMON_ZONE_ACTIONS_INCREMENT(3) | \ + BWMON_ZONE_ACTIONS_CLEAR(2) | \ + BWMON_ZONE_ACTIONS_CLEAR(1) | \ + BWMON_ZONE_ACTIONS_CLEAR(0)) +/* Value for BWMON_ZONE_ACTIONS */ +#define BWMON_ZONE_ACTIONS_DEFAULT (BWMON_ZONE_ACTIONS_ZONE0 | \ + BWMON_ZONE_ACTIONS_ZONE1 << 8 | \ + BWMON_ZONE_ACTIONS_ZONE2 << 16 | \
+ BWMON_ZONE_ACTIONS_ZONE3 << 24) + +/* + * There is no clear documentation/explanation of the BWMON_THRESHOLD_COUNT + * register. Based on observations, it appears to be the number of times a + * threshold has to be reached before an interrupt is triggered in a given + * zone. + * + * 0xff is the maximum value and is used to effectively ignore zones 0 and 2. + */ +#define BWMON_THRESHOLD_COUNT 0x2bc +#define BWMON_THRESHOLD_COUNT_ZONE1_SHIFT 8 +#define BWMON_THRESHOLD_COUNT_ZONE2_SHIFT 16 +#define BWMON_THRESHOLD_COUNT_ZONE3_SHIFT 24 +#define BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT 0xff +#define BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT 0xff + +/* BWMONv4 count registers use a count unit of 64 kB */ +#define BWMON_COUNT_UNIT_KB 64 +#define BWMON_ZONE_COUNT 0x2d8 +#define BWMON_ZONE_MAX(zone) (0x2e0 + 4 * (zone)) + +struct icc_bwmon_data { + unsigned int sample_ms; + unsigned int default_highbw_kbps; + unsigned int default_medbw_kbps; + unsigned int default_lowbw_kbps; + u8 zone1_thres_count; + u8 zone3_thres_count; +}; + +struct icc_bwmon { + struct device *dev; + void __iomem *base; + int irq; + + unsigned int default_lowbw_kbps; + unsigned int sample_ms; + unsigned int max_bw_kbps; + unsigned int min_bw_kbps; + unsigned int target_kbps; + unsigned int current_kbps; +}; + +static void bwmon_clear_counters(struct icc_bwmon *bwmon) +{ + /* + * Clear counters. The order and barriers are + * important. Quoting downstream Qualcomm msm-4.9 tree: + * + * The counter clear and IRQ clear bits are not in the same 4KB + * region. So, we need to make sure the counter clear is completed + * before we try to clear the IRQ or do any other counter operations. + */ + writel(BWMON_CLEAR_CLEAR, bwmon->base + BWMON_CLEAR); +} + +static void bwmon_clear_irq(struct icc_bwmon *bwmon) +{ + /* + * Clear zone and global interrupts. The order and barriers are + * important. Quoting downstream Qualcomm msm-4.9 tree: + * + * Synchronize the local interrupt clear in mon_irq_clear() + * with the global interrupt clear here. Otherwise, the CPU + * may reorder the two writes and clear the global interrupt + * before the local interrupt, causing the global interrupt + * to be retriggered by the local interrupt still being high. + * + * Similarly, because the global registers are in a different + * region than the local registers, we need to ensure any register + * writes to enable the monitor after this call are ordered with the + * clearing here so that local writes don't happen before the + * interrupt is cleared. + */ + writel(BWMON_IRQ_ENABLE_MASK, bwmon->base + BWMON_IRQ_CLEAR); + writel(BIT(0), bwmon->base + BWMON_GLOBAL_IRQ_CLEAR); +} + +static void bwmon_disable(struct icc_bwmon *bwmon) +{ + /* Disable interrupts. Strict ordering, see bwmon_clear_irq(). */ + writel(0x0, bwmon->base + BWMON_GLOBAL_IRQ_ENABLE); + writel(0x0, bwmon->base + BWMON_IRQ_ENABLE); + + /* + * Disable bwmon. Must happen before bwmon_clear_irq() to avoid spurious + * IRQ.
+ */ + writel(0x0, bwmon->base + BWMON_ENABLE); +} + +static void bwmon_enable(struct icc_bwmon *bwmon, unsigned int irq_enable) +{ + /* Enable interrupts */ + writel(BWMON_GLOBAL_IRQ_ENABLE_ENABLE, + bwmon->base + BWMON_GLOBAL_IRQ_ENABLE); + writel(irq_enable, bwmon->base + BWMON_IRQ_ENABLE); + + /* Enable bwmon */ + writel(BWMON_ENABLE_ENABLE, bwmon->base + BWMON_ENABLE); +} + +static unsigned int bwmon_kbps_to_count(unsigned int kbps) +{ + return kbps / BWMON_COUNT_UNIT_KB; +} + +static void bwmon_set_threshold(struct icc_bwmon *bwmon, unsigned int reg, + unsigned int kbps) +{ + unsigned int thres; + + thres = mult_frac(bwmon_kbps_to_count(kbps), bwmon->sample_ms, + MSEC_PER_SEC); + writel_relaxed(thres, bwmon->base + reg); +} + +static void bwmon_start(struct icc_bwmon *bwmon, + const struct icc_bwmon_data *data) +{ + unsigned int thres_count; + int window; + + bwmon_clear_counters(bwmon); + + window = mult_frac(bwmon->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC); + /* Maximum sampling window: 0xfffff */ + writel_relaxed(window, bwmon->base + BWMON_SAMPLE_WINDOW); + + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH, + data->default_highbw_kbps); + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED, + data->default_medbw_kbps); + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_LOW, + data->default_lowbw_kbps); + + thres_count = data->zone3_thres_count << BWMON_THRESHOLD_COUNT_ZONE3_SHIFT | + BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT << BWMON_THRESHOLD_COUNT_ZONE2_SHIFT | + data->zone1_thres_count << BWMON_THRESHOLD_COUNT_ZONE1_SHIFT | + BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT; + writel_relaxed(thres_count, bwmon->base + BWMON_THRESHOLD_COUNT); + writel_relaxed(BWMON_ZONE_ACTIONS_DEFAULT, + bwmon->base + BWMON_ZONE_ACTIONS); + /* Write barriers in bwmon_clear_irq() */ + + bwmon_clear_irq(bwmon); + bwmon_enable(bwmon, BWMON_IRQ_ENABLE_MASK); +} + +static irqreturn_t bwmon_intr(int irq, void *dev_id) +{ + struct icc_bwmon *bwmon = dev_id; + unsigned int status, max; + int zone; + + status = readl(bwmon->base + BWMON_IRQ_STATUS); + status &= BWMON_IRQ_ENABLE_MASK; + if (!status) { + /* + * Only zone 1 and zone 3 interrupts are enabled, but the zone 2 + * threshold could still be hit and trigger an interrupt even + * though it is not enabled. + * Such a spurious interrupt may or may not carry a useful max + * count, so the robust solution would be to always check all + * BWMON_ZONE_MAX() registers and use the highest value. That + * case is currently ignored. + */ + return IRQ_NONE; + } + + bwmon_disable(bwmon); + + zone = get_bitmask_order(status >> BWMON_IRQ_STATUS_ZONE_SHIFT) - 1; + /* + * The zone max bytes count register returns count units within the + * sampling window. The downstream kernel for BWMONv4 (called BWMON + * type 2 downstream) always increments the max bytes count by one.
+ */ + max = readl(bwmon->base + BWMON_ZONE_MAX(zone)) + 1; + max *= BWMON_COUNT_UNIT_KB; + bwmon->target_kbps = mult_frac(max, MSEC_PER_SEC, bwmon->sample_ms); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t bwmon_intr_thread(int irq, void *dev_id) +{ + struct icc_bwmon *bwmon = dev_id; + unsigned int irq_enable = 0; + struct dev_pm_opp *opp, *target_opp; + unsigned int bw_kbps, up_kbps, down_kbps; + + bw_kbps = bwmon->target_kbps; + + target_opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_kbps, 0); + if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE) + target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0); + + bwmon->target_kbps = bw_kbps; + + bw_kbps--; + opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0); + if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE) + down_kbps = bwmon->target_kbps; + else + down_kbps = bw_kbps; + + up_kbps = bwmon->target_kbps + 1; + + if (bwmon->target_kbps >= bwmon->max_bw_kbps) + irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT); + else if (bwmon->target_kbps <= bwmon->min_bw_kbps) + irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT); + else + irq_enable = BWMON_IRQ_ENABLE_MASK; + + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH, up_kbps); + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED, down_kbps); + /* Write barriers in bwmon_clear_counters() */ + bwmon_clear_counters(bwmon); + bwmon_clear_irq(bwmon); + bwmon_enable(bwmon, irq_enable); + + if (bwmon->target_kbps == bwmon->current_kbps) + goto out; + + dev_pm_opp_set_opp(bwmon->dev, target_opp); + bwmon->current_kbps = bwmon->target_kbps; + +out: + dev_pm_opp_put(target_opp); + if (!IS_ERR(opp)) + dev_pm_opp_put(opp); + + return IRQ_HANDLED; +} + +static int bwmon_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dev_pm_opp *opp; + struct icc_bwmon *bwmon; + const struct icc_bwmon_data *data; + int ret; + + bwmon = devm_kzalloc(dev, sizeof(*bwmon), GFP_KERNEL); + if (!bwmon) + return -ENOMEM; + + data = of_device_get_match_data(dev); + + bwmon->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(bwmon->base)) { + dev_err(dev, "failed to map bwmon registers\n"); + return PTR_ERR(bwmon->base); + } + + bwmon->irq = platform_get_irq(pdev, 0); + if (bwmon->irq < 0) + return bwmon->irq; + + ret = devm_pm_opp_of_add_table(dev); + if (ret) + return dev_err_probe(dev, ret, "failed to add OPP table\n"); + + bwmon->max_bw_kbps = UINT_MAX; + opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0); + if (IS_ERR(opp)) + return dev_err_probe(dev, ret, "failed to find max peak bandwidth\n"); + + bwmon->min_bw_kbps = 0; + opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0); + if (IS_ERR(opp)) + return dev_err_probe(dev, ret, "failed to find min peak bandwidth\n"); + + bwmon->sample_ms = data->sample_ms; + bwmon->default_lowbw_kbps = data->default_lowbw_kbps; + bwmon->dev = dev; + + bwmon_disable(bwmon); + ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr, + bwmon_intr_thread, + IRQF_ONESHOT, dev_name(dev), bwmon); + if (ret) + return dev_err_probe(dev, ret, "failed to request IRQ\n"); + + platform_set_drvdata(pdev, bwmon); + bwmon_start(bwmon, data); + + return 0; +} + +static int bwmon_remove(struct platform_device *pdev) +{ + struct icc_bwmon *bwmon = platform_get_drvdata(pdev); + + bwmon_disable(bwmon); + + return 0; +} + +/* BWMON v4 */ +static const struct icc_bwmon_data msm8998_bwmon_data = { + .sample_ms = 4, + .default_highbw_kbps = 4800 * 1024, /* 4.8 GBps */ + .default_medbw_kbps = 512 * 1024, /* 512 MBps */ + .default_lowbw_kbps = 0, + 
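+ /* + * Presumably (BWMON_THRESHOLD_COUNT is undocumented): requiring 16 zone 1 + * hits but only a single zone 3 hit before an interrupt fires makes the + * monitor quick to raise the bandwidth vote and slow to lower it. + */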
.zone1_thres_count = 16, + .zone3_thres_count = 1, +}; + +static const struct of_device_id bwmon_of_match[] = { + { .compatible = "qcom,msm8998-bwmon", .data = &msm8998_bwmon_data }, + {} +}; +MODULE_DEVICE_TABLE(of, bwmon_of_match); + +static struct platform_driver bwmon_driver = { + .probe = bwmon_probe, + .remove = bwmon_remove, + .driver = { + .name = "qcom-bwmon", + .of_match_table = bwmon_of_match, + }, +}; +module_platform_driver(bwmon_driver); + +MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>"); +MODULE_DESCRIPTION("QCOM BWMON driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index 4b143cf7b4ce..38d7296315a2 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -382,7 +382,7 @@ static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; * llcc_slice_getd - get llcc slice descriptor * @uid: usecase_id for the client * - * A pointer to llcc slice descriptor will be returned on success and + * A pointer to llcc slice descriptor will be returned on success * and error pointer is returned on failure */ struct llcc_slice_desc *llcc_slice_getd(u32 uid) diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index 366db493579b..3f11554df2f3 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -108,6 +108,8 @@ EXPORT_SYMBOL_GPL(qcom_mdt_get_size); * qcom_mdt_read_metadata() - read header and metadata from mdt or mbn * @fw: firmware of mdt header or mbn * @data_len: length of the read metadata blob + * @fw_name: name of the firmware, for construction of segment file names + * @dev: device handle to associate resources with * * The mechanism that performs the authentication of the loading firmware * expects an ELF header directly followed by the segment of hashes, with no @@ -192,7 +194,7 @@ EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata); * qcom_mdt_pas_init() - initialize PAS region for firmware loading * @dev: device handle to associate resources with * @fw: firmware object for the mdt file - * @firmware: name of the firmware, for construction of segment file names + * @fw_name: name of the firmware, for construction of segment file names * @pas_id: PAS identifier * @mem_phys: physical address of allocated memory region * @ctx: PAS metadata context, to be released by caller diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c index 97fd24c178f8..c92d26b73e6f 100644 --- a/drivers/soc/qcom/ocmem.c +++ b/drivers/soc/qcom/ocmem.c @@ -194,14 +194,17 @@ struct ocmem *of_get_ocmem(struct device *dev) devnode = of_parse_phandle(dev->of_node, "sram", 0); if (!devnode || !devnode->parent) { dev_err(dev, "Cannot look up sram phandle\n"); + of_node_put(devnode); return ERR_PTR(-ENODEV); } pdev = of_find_device_by_node(devnode->parent); if (!pdev) { dev_err(dev, "Cannot find device node %s\n", devnode->name); + of_node_put(devnode); return ERR_PTR(-EPROBE_DEFER); } + of_node_put(devnode); ocmem = platform_get_drvdata(pdev); if (!ocmem) { diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index a59bb34e5eba..18c856056475 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -399,8 +399,10 @@ static int qmp_cooling_devices_register(struct qmp *qmp) continue; ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++], child); - if (ret) + if (ret) { + of_node_put(child); goto unroll; + } } if (!count) diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c index 
05fff8691ee3..092f6ab09acf 100644 --- a/drivers/soc/qcom/rpmhpd.c +++ b/drivers/soc/qcom/rpmhpd.c @@ -23,8 +23,8 @@ /** * struct rpmhpd - top level RPMh power domain resource data structure * @dev: rpmh power domain controller device - * @pd: generic_pm_domain corrresponding to the power domain - * @parent: generic_pm_domain corrresponding to the parent's power domain + * @pd: generic_pm_domain corresponding to the power domain + * @parent: generic_pm_domain corresponding to the parent's power domain * @peer: A peer power domain in case Active only Voting is * supported * @active_only: True if it represents an Active only peer diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c index 3b5b91621532..5803038c744e 100644 --- a/drivers/soc/qcom/rpmpd.c +++ b/drivers/soc/qcom/rpmpd.c @@ -453,6 +453,7 @@ static const struct rpmpd_desc qcm2290_desc = { static const struct of_device_id rpmpd_match_table[] = { { .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc }, { .compatible = "qcom,msm8226-rpmpd", .data = &msm8226_desc }, + { .compatible = "qcom,msm8909-rpmpd", .data = &msm8916_desc }, { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc }, { .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc }, { .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc }, diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index 30dda1af63c8..413f9f4ae9cd 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -234,6 +234,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = { { .compatible = "qcom,rpm-apq8084" }, { .compatible = "qcom,rpm-ipq6018" }, { .compatible = "qcom,rpm-msm8226" }, + { .compatible = "qcom,rpm-msm8909" }, { .compatible = "qcom,rpm-msm8916" }, { .compatible = "qcom,rpm-msm8936" }, { .compatible = "qcom,rpm-msm8953" }, diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index 59dbf4b61e6c..d9c28a8a7cbf 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -119,6 +119,9 @@ struct smp2p_entry { * @out: pointer to the outbound smem item * @smem_items: ids of the two smem items * @valid_entries: already scanned inbound entries + * @ssr_ack_enabled: SMP2P_FEATURE_SSR_ACK feature is supported and was enabled + * @ssr_ack: current cached state of the local ack bit + * @negotiation_done: whether negotiating finished * @local_pid: processor id of the inbound edge * @remote_pid: processor id of the outbound edge * @ipc_regmap: regmap for the outbound ipc diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index cee579a267a6..4554fb8655d3 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -328,10 +328,12 @@ static const struct soc_id soc_id[] = { { 455, "QRB5165" }, { 457, "SM8450" }, { 459, "SM7225" }, - { 460, "SA8540P" }, + { 460, "SA8295P" }, + { 461, "SA8540P" }, { 480, "SM8450" }, { 482, "SM8450" }, { 487, "SC7280" }, + { 495, "SC7180P" }, }; static const char *socinfo_machine(struct device *dev, unsigned int id) diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c index f831420b7fd4..484b42b7454e 100644 --- a/drivers/soc/qcom/spm.c +++ b/drivers/soc/qcom/spm.c @@ -74,6 +74,18 @@ static const u16 spm_reg_offset_v3_0[SPM_REG_NR] = { [SPM_REG_SEQ_ENTRY] = 0x400, }; +/* SPM register data for 8909 */ +static const struct spm_reg_data spm_reg_8909_cpu = { + .reg_offset = spm_reg_offset_v3_0, + .spm_cfg = 0x1, + .spm_dly = 0x3C102800, + .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90, + 0x5B, 0x60, 0x03, 0x60, 0x76, 
0x76, 0x0B, 0x94, 0x5B, 0x80, + 0x10, 0x26, 0x30, 0x0F }, + .start_index[PM_SLEEP_MODE_STBY] = 0, + .start_index[PM_SLEEP_MODE_SPC] = 5, +}; + /* SPM register data for 8916 */ static const struct spm_reg_data spm_reg_8916_cpu = { .reg_offset = spm_reg_offset_v3_0, @@ -195,6 +207,8 @@ static const struct of_device_id spm_match_table[] = { .data = &spm_reg_660_silver_l2 }, { .compatible = "qcom,msm8226-saw2-v2.1-cpu", .data = &spm_reg_8226_cpu }, + { .compatible = "qcom,msm8909-saw2-v3.0-cpu", + .data = &spm_reg_8909_cpu }, { .compatible = "qcom,msm8916-saw2-v3.0-cpu", .data = &spm_reg_8916_cpu }, { .compatible = "qcom,msm8974-saw2-v2.1-cpu", diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c index fdfc857df334..04f1bc322ae7 100644 --- a/drivers/soc/renesas/r8a779a0-sysc.c +++ b/drivers/soc/renesas/r8a779a0-sysc.c @@ -57,11 +57,11 @@ static struct rcar_gen4_sysc_area r8a779a0_areas[] __initdata = { { "a2cv6", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR }, { "a2cn2", R8A779A0_PD_A2CN2, R8A779A0_PD_A3IR }, { "a2imp23", R8A779A0_PD_A2IMP23, R8A779A0_PD_A3IR }, - { "a2dp1", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR }, - { "a2cv2", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR }, - { "a2cv3", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR }, - { "a2cv5", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR }, - { "a2cv7", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR }, + { "a2dp1", R8A779A0_PD_A2DP1, R8A779A0_PD_A3IR }, + { "a2cv2", R8A779A0_PD_A2CV2, R8A779A0_PD_A3IR }, + { "a2cv3", R8A779A0_PD_A2CV3, R8A779A0_PD_A3IR }, + { "a2cv5", R8A779A0_PD_A2CV5, R8A779A0_PD_A3IR }, + { "a2cv7", R8A779A0_PD_A2CV7, R8A779A0_PD_A3IR }, { "a2cn1", R8A779A0_PD_A2CN1, R8A779A0_PD_A3IR }, { "a1cnn0", R8A779A0_PD_A1CNN0, R8A779A0_PD_A2CN0 }, { "a1cnn2", R8A779A0_PD_A1CNN2, R8A779A0_PD_A2CN2 }, diff --git a/drivers/soc/renesas/rcar-gen4-sysc.h b/drivers/soc/renesas/rcar-gen4-sysc.h index fe2d98254754..388cfa8f8f9f 100644 --- a/drivers/soc/renesas/rcar-gen4-sysc.h +++ b/drivers/soc/renesas/rcar-gen4-sysc.h @@ -25,8 +25,8 @@ struct rcar_gen4_sysc_area { const char *name; u8 pdr; /* PDRn */ - int parent; /* -1 if none */ - unsigned int flags; /* See PD_* */ + s8 parent; /* -1 if none */ + u8 flags; /* See PD_* */ }; /* diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h index 8d861c1cfdf7..266c599a0a9b 100644 --- a/drivers/soc/renesas/rcar-sysc.h +++ b/drivers/soc/renesas/rcar-sysc.h @@ -31,8 +31,8 @@ struct rcar_sysc_area { u16 chan_offs; /* Offset of PWRSR register for this area */ u8 chan_bit; /* Bit in PWR* (except for PWRUP in PWRSR) */ u8 isr_bit; /* Bit in SYSCI*R */ - int parent; /* -1 if none */ - unsigned int flags; /* See PD_* */ + s8 parent; /* -1 if none */ + u8 flags; /* See PD_* */ }; diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig index 1fef0e711056..8aecbc9b1976 100644 --- a/drivers/soc/sunxi/Kconfig +++ b/drivers/soc/sunxi/Kconfig @@ -6,6 +6,7 @@ config SUNXI_MBUS bool default ARCH_SUNXI + depends on ARM || ARM64 help Say y to enable the fixups needed to support the Allwinner MBUS DMA quirks. 
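The two Renesas SYSC hunks above narrow the parent index and flags from int and unsigned int to s8 and u8: the -1 "no parent" sentinel still fits in s8 and the small PD_* flag set fits in u8. A standalone sketch (field names shortened; the exact savings depend on the ABI's alignment and padding rules) of what the narrowing buys per area entry:

#include <stdio.h>
#include <stdint.h>

struct area_old {	/* before: int parent, unsigned int flags */
	const char *name;
	uint8_t pdr;
	int parent;
	unsigned int flags;
};

struct area_new {	/* after: s8 parent, u8 flags */
	const char *name;
	uint8_t pdr;
	int8_t parent;	/* -1 "if none" is still representable */
	uint8_t flags;
};

int main(void)
{
	printf("per-area entry: %zu bytes -> %zu bytes\n",
	       sizeof(struct area_old), sizeof(struct area_new));
	return 0;
}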
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c index 0e4ba0f89533..6882c86b3ce5 100644 --- a/drivers/soc/ti/pruss.c +++ b/drivers/soc/ti/pruss.c @@ -338,6 +338,7 @@ static const struct of_device_id pruss_of_match[] = { { .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, }, { .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, }, { .compatible = "ti,am642-icssg", .data = &am65x_j721e_pruss_data, }, + { .compatible = "ti,am625-pruss", .data = &am65x_j721e_pruss_data, }, {}, }; MODULE_DEVICE_TABLE(of, pruss_of_match); diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 0076d467ff6b..343c58ed5896 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -688,7 +688,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) &m3_ipc->sd_fw_name); if (ret) { dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n"); - }; + } /* * Wait for firmware loading completion in a thread so we diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c index 5dcb7665fe22..2de082765bef 100644 --- a/drivers/soc/xilinx/xlnx_event_manager.c +++ b/drivers/soc/xilinx/xlnx_event_manager.c @@ -647,8 +647,7 @@ static int xlnx_event_manager_probe(struct platform_device *pdev) cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting", xlnx_event_cpuhp_start, xlnx_event_cpuhp_down); - ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num, - 0, NULL); + ret = zynqmp_pm_register_sgi(sgi_num, 0); if (ret) { dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret); xlnx_event_cleanup_sgi(pdev); @@ -681,7 +680,7 @@ static int xlnx_event_manager_remove(struct platform_device *pdev) kfree(eve_data); } - ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, 0, 1, NULL); + ret = zynqmp_pm_register_sgi(0, 1); if (ret) dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret); diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c index 669d7573320b..00f7b490a95d 100644 --- a/drivers/soundwire/slave.c +++ b/drivers/soundwire/slave.c @@ -127,6 +127,71 @@ static bool find_slave(struct sdw_bus *bus, return true; } +struct sdw_acpi_child_walk_data { + struct sdw_bus *bus; + struct acpi_device *adev; + struct sdw_slave_id id; + bool ignore_unique_id; +}; + +static int sdw_acpi_check_duplicate(struct acpi_device *adev, void *data) +{ + struct sdw_acpi_child_walk_data *cwd = data; + struct sdw_bus *bus = cwd->bus; + struct sdw_slave_id id; + + if (adev == cwd->adev) + return 0; + + if (!find_slave(bus, adev, &id)) + return 0; + + if (cwd->id.sdw_version != id.sdw_version || cwd->id.mfg_id != id.mfg_id || + cwd->id.part_id != id.part_id || cwd->id.class_id != id.class_id) + return 0; + + if (cwd->id.unique_id != id.unique_id) { + dev_dbg(bus->dev, + "Valid unique IDs 0x%x 0x%x for Slave mfg_id 0x%04x, part_id 0x%04x\n", + cwd->id.unique_id, id.unique_id, cwd->id.mfg_id, + cwd->id.part_id); + cwd->ignore_unique_id = false; + return 0; + } + + dev_err(bus->dev, + "Invalid unique IDs 0x%x 0x%x for Slave mfg_id 0x%04x, part_id 0x%04x\n", + cwd->id.unique_id, id.unique_id, cwd->id.mfg_id, cwd->id.part_id); + return -ENODEV; +} + +static int sdw_acpi_find_one(struct acpi_device *adev, void *data) +{ + struct sdw_bus *bus = data; + struct sdw_acpi_child_walk_data cwd = { + .bus = bus, + .adev = adev, + .ignore_unique_id = true, + }; + int ret; + + if (!find_slave(bus, adev, &cwd.id)) + return 0; + + /* Brute-force O(N^2) search for 
duplicates. */ + ret = acpi_dev_for_each_child(ACPI_COMPANION(bus->dev), + sdw_acpi_check_duplicate, &cwd); + if (ret) + return ret; + + if (cwd.ignore_unique_id) + cwd.id.unique_id = SDW_IGNORED_UNIQUE_ID; + + /* Ignore errors and continue. */ + sdw_slave_add(bus, &cwd.id, acpi_fwnode_handle(adev)); + return 0; +} + /* * sdw_acpi_find_slaves() - Find Slave devices in Master ACPI node * @bus: SDW bus instance @@ -135,8 +200,7 @@ static bool find_slave(struct sdw_bus *bus, */ int sdw_acpi_find_slaves(struct sdw_bus *bus) { - struct acpi_device *adev, *parent; - struct acpi_device *adev2, *parent2; + struct acpi_device *parent; parent = ACPI_COMPANION(bus->dev); if (!parent) { @@ -144,54 +208,7 @@ int sdw_acpi_find_slaves(struct sdw_bus *bus) return -ENODEV; } - list_for_each_entry(adev, &parent->children, node) { - struct sdw_slave_id id; - struct sdw_slave_id id2; - bool ignore_unique_id = true; - - if (!find_slave(bus, adev, &id)) - continue; - - /* brute-force O(N^2) search for duplicates */ - parent2 = parent; - list_for_each_entry(adev2, &parent2->children, node) { - - if (adev == adev2) - continue; - - if (!find_slave(bus, adev2, &id2)) - continue; - - if (id.sdw_version != id2.sdw_version || - id.mfg_id != id2.mfg_id || - id.part_id != id2.part_id || - id.class_id != id2.class_id) - continue; - - if (id.unique_id != id2.unique_id) { - dev_dbg(bus->dev, - "Valid unique IDs 0x%x 0x%x for Slave mfg_id 0x%04x, part_id 0x%04x\n", - id.unique_id, id2.unique_id, id.mfg_id, id.part_id); - ignore_unique_id = false; - } else { - dev_err(bus->dev, - "Invalid unique IDs 0x%x 0x%x for Slave mfg_id 0x%04x, part_id 0x%04x\n", - id.unique_id, id2.unique_id, id.mfg_id, id.part_id); - return -ENODEV; - } - } - - if (ignore_unique_id) - id.unique_id = SDW_IGNORED_UNIQUE_ID; - - /* - * don't error check for sdw_slave_add as we want to continue - * adding Slaves - */ - sdw_slave_add(bus, &id, acpi_fwnode_handle(adev)); - } - - return 0; + return acpi_dev_for_each_child(parent, sdw_acpi_find_one, bus); } #endif diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 3b1044ebc400..e32f6a2058ae 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -183,7 +183,7 @@ config SPI_BCM63XX config SPI_BCM63XX_HSSPI tristate "Broadcom BCM63XX HS SPI controller driver" - depends on BCM63XX || BMIPS_GENERIC || ARCH_BCM_63XX || COMPILE_TEST + depends on BCM63XX || BMIPS_GENERIC || ARCH_BCMBCA || COMPILE_TEST help This enables support for the High Speed SPI controller present on newer Broadcom BCM63XX SoCs. @@ -371,6 +371,13 @@ config SPI_FSL_QUADSPI This controller does not support generic SPI messages. It only supports the high-level SPI memory interface. +config SPI_GXP + tristate "GXP SPI driver" + depends on ARCH_HPE || COMPILE_TEST + help + This enables support for the driver for GXP bus attached SPI + controllers. + config SPI_HISI_KUNPENG tristate "HiSilicon SPI Controller for Kunpeng SoCs" depends on (ARM64 && ACPI) || COMPILE_TEST @@ -575,6 +582,15 @@ config SPI_MESON_SPIFC This enables master mode support for the SPIFC (SPI flash controller) available in Amlogic Meson SoCs. +config SPI_MICROCHIP_CORE + tristate "Microchip FPGA SPI controllers" + depends on SPI_MASTER + help + This enables the SPI driver for Microchip FPGA SPI controllers. + Say Y or M here if you want to use the "hard" controllers on + PolarFire SoC. + If built as a module, it will be called spi-microchip-core. 
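Looking back at the soundwire rework above: the open-coded walk over parent->children becomes a callback plus a context struct handed to acpi_dev_for_each_child(), whose contract is that a nonzero return from the callback stops the walk and is propagated to the caller. A minimal userspace model of that contract, with invented list, callback, and error values:

#include <stdio.h>

struct node { int id; struct node *next; };

/* Same shape as acpi_dev_for_each_child(): stop on nonzero return */
static int for_each_child(struct node *head,
			  int (*fn)(struct node *, void *), void *data)
{
	for (struct node *n = head; n; n = n->next) {
		int ret = fn(n, data);
		if (ret)
			return ret;
	}
	return 0;
}

struct walk_data { int needle; int seen; };

static int check_duplicate(struct node *n, void *data)
{
	struct walk_data *wd = data;

	if (n->id == wd->needle && wd->seen++)
		return -1;	/* duplicate: abort the walk, propagate the error */
	return 0;
}

int main(void)
{
	struct node c = { .id = 2 }, b = { .id = 2, .next = &c }, a = { .id = 1, .next = &b };
	struct walk_data wd = { .needle = 2 };

	printf("walk returned %d\n", for_each_child(&a, check_duplicate, &wd));
	return 0;
}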
+ config SPI_MT65XX tristate "MediaTek SPI controller" depends on ARCH_MEDIATEK || COMPILE_TEST diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 0f44eb6083a5..15d2f3835e45 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o obj-$(CONFIG_SPI_GPIO) += spi-gpio.o +obj-$(CONFIG_SPI_GXP) += spi-gxp.o obj-$(CONFIG_SPI_HISI_KUNPENG) += spi-hisi-kunpeng.o obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o @@ -71,6 +72,7 @@ obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o obj-$(CONFIG_SPI_MESON_SPICC) += spi-meson-spicc.o obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o +obj-$(CONFIG_SPI_MICROCHIP_CORE) += spi-microchip-core.o obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 480c0c8c18e4..976a217e356d 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -21,6 +21,7 @@ #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/spi/spi-mem.h> /* QSPI register offsets */ @@ -285,7 +286,7 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem, /* special case not supported by hardware */ if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth && - op->dummy.nbytes == 0) + op->dummy.nbytes == 0) return false; return true; @@ -417,9 +418,13 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) if (op->addr.val + op->data.nbytes > aq->mmap_size) return -ENOTSUPP; + err = pm_runtime_resume_and_get(&aq->pdev->dev); + if (err < 0) + return err; + err = atmel_qspi_set_cfg(aq, op, &offset); if (err) - return err; + goto pm_runtime_put; /* Skip to the final steps if there is no data */ if (op->data.nbytes) { @@ -441,7 +446,7 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) /* Poll INSTRuction End status */ sr = atmel_qspi_read(aq, QSPI_SR); if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) - return err; + goto pm_runtime_put; /* Wait for INSTRuction End interrupt */ reinit_completion(&aq->cmd_completion); @@ -452,6 +457,9 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) err = -ETIMEDOUT; atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR); +pm_runtime_put: + pm_runtime_mark_last_busy(&aq->pdev->dev); + pm_runtime_put_autosuspend(&aq->pdev->dev); return err; } @@ -472,6 +480,7 @@ static int atmel_qspi_setup(struct spi_device *spi) struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); unsigned long src_rate; u32 scbr; + int ret; if (ctrl->busy) return -EBUSY; @@ -488,9 +497,16 @@ static int atmel_qspi_setup(struct spi_device *spi) if (scbr > 0) scbr--; + ret = pm_runtime_resume_and_get(ctrl->dev.parent); + if (ret < 0) + return ret; + aq->scr = QSPI_SCR_SCBR(scbr); atmel_qspi_write(aq->scr, aq, QSPI_SCR); + pm_runtime_mark_last_busy(ctrl->dev.parent); + pm_runtime_put_autosuspend(ctrl->dev.parent); + return 0; } @@ -621,11 +637,24 @@ static int atmel_qspi_probe(struct platform_device *pdev) if (err) goto disable_qspick; + pm_runtime_set_autosuspend_delay(&pdev->dev, 500); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + 
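+ /* + * The clocks are already running at this point, so mark the device + * active before enabling runtime PM to keep the PM core's view in sync + * with the hardware; the noresume reference taken below then prevents a + * runtime suspend in the middle of probe. + */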
pm_runtime_enable(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + atmel_qspi_init(aq); err = spi_register_controller(ctrl); - if (err) + if (err) { + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); goto disable_qspick; + } + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); return 0; @@ -641,9 +670,18 @@ static int atmel_qspi_remove(struct platform_device *pdev) { struct spi_controller *ctrl = platform_get_drvdata(pdev); struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); + int ret; + + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) + return ret; spi_unregister_controller(ctrl); atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR); + + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + clk_disable_unprepare(aq->qspick); clk_disable_unprepare(aq->pclk); return 0; @@ -653,10 +691,19 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev) { struct spi_controller *ctrl = dev_get_drvdata(dev); struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR); - clk_disable_unprepare(aq->qspick); - clk_disable_unprepare(aq->pclk); + + pm_runtime_mark_last_busy(dev); + pm_runtime_force_suspend(dev); + + clk_unprepare(aq->qspick); + clk_unprepare(aq->pclk); return 0; } @@ -665,19 +712,54 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) { struct spi_controller *ctrl = dev_get_drvdata(dev); struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); + int ret; - clk_prepare_enable(aq->pclk); - clk_prepare_enable(aq->qspick); + clk_prepare(aq->pclk); + clk_prepare(aq->qspick); + + ret = pm_runtime_force_resume(dev); + if (ret < 0) + return ret; atmel_qspi_init(aq); atmel_qspi_write(aq->scr, aq, QSPI_SCR); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return 0; +} + +static int __maybe_unused atmel_qspi_runtime_suspend(struct device *dev) +{ + struct spi_controller *ctrl = dev_get_drvdata(dev); + struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); + + clk_disable(aq->qspick); + clk_disable(aq->pclk); + return 0; } -static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend, - atmel_qspi_resume); +static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev) +{ + struct spi_controller *ctrl = dev_get_drvdata(dev); + struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); + int ret; + + ret = clk_enable(aq->pclk); + if (ret) + return ret; + + return clk_enable(aq->qspick); +} + +static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(atmel_qspi_suspend, atmel_qspi_resume) + SET_RUNTIME_PM_OPS(atmel_qspi_runtime_suspend, + atmel_qspi_runtime_resume, NULL) +}; static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {}; @@ -704,7 +786,7 @@ static struct platform_driver atmel_qspi_driver = { .driver = { .name = "atmel_qspi", .of_match_table = atmel_qspi_dt_ids, - .pm = &atmel_qspi_pm_ops, + .pm = pm_ptr(&atmel_qspi_pm_ops), }, .probe = atmel_qspi_probe, .remove = atmel_qspi_remove, diff --git a/drivers/spi/spi-altera-dfl.c b/drivers/spi/spi-altera-dfl.c index ca40923258af..596e181ae136 100644 --- a/drivers/spi/spi-altera-dfl.c +++ b/drivers/spi/spi-altera-dfl.c @@ -128,9 +128,9 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev) struct spi_master *master; struct altera_spi *hw; void 
__iomem *base; - int err = -ENODEV; + int err; - master = spi_alloc_master(dev, sizeof(struct altera_spi)); + master = devm_spi_alloc_master(dev, sizeof(struct altera_spi)); if (!master) return -ENOMEM; @@ -159,10 +159,9 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev) altera_spi_init_master(master); err = devm_spi_register_master(dev, master); - if (err) { - dev_err(dev, "%s failed to register spi master %d\n", __func__, err); - goto exit; - } + if (err) + return dev_err_probe(dev, err, "%s failed to register spi master\n", + __func__); if (dfl_dev->revision == FME_FEATURE_REV_MAX10_SPI_N5010) strscpy(board_info.modalias, "m10-n5010", SPI_NAME_SIZE); @@ -179,9 +178,6 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev) } return 0; -exit: - spi_master_put(master); - return err; } static const struct dfl_device_id dfl_spi_altera_ids[] = { diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c index efdcbe6c4c26..08df4f8d0531 100644 --- a/drivers/spi/spi-amd.c +++ b/drivers/spi/spi-amd.c @@ -40,14 +40,23 @@ #define AMD_SPI_XFER_TX 1 #define AMD_SPI_XFER_RX 2 +/** + * enum amd_spi_versions - SPI controller versions + * @AMD_SPI_V1: AMDI0061 hardware version + * @AMD_SPI_V2: AMDI0062 hardware version + */ enum amd_spi_versions { - AMD_SPI_V1 = 1, /* AMDI0061 */ - AMD_SPI_V2, /* AMDI0062 */ + AMD_SPI_V1 = 1, + AMD_SPI_V2, }; +/** + * struct amd_spi - SPI driver instance + * @io_remap_addr: Start address of the SPI controller registers + * @version: SPI controller hardware version + */ struct amd_spi { void __iomem *io_remap_addr; - unsigned long io_base_addr; enum amd_spi_versions version; }; @@ -281,22 +290,19 @@ static int amd_spi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct spi_master *master; struct amd_spi *amd_spi; - int err = 0; + int err; /* Allocate storage for spi_master and driver private data */ - master = spi_alloc_master(dev, sizeof(struct amd_spi)); - if (!master) { - dev_err(dev, "Error allocating SPI master\n"); - return -ENOMEM; - } + master = devm_spi_alloc_master(dev, sizeof(struct amd_spi)); + if (!master) + return dev_err_probe(dev, -ENOMEM, "Error allocating SPI master\n"); amd_spi = spi_master_get_devdata(master); amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(amd_spi->io_remap_addr)) { - err = PTR_ERR(amd_spi->io_remap_addr); - dev_err(dev, "error %d ioremap of SPI registers failed\n", err); - goto err_free_master; - } + if (IS_ERR(amd_spi->io_remap_addr)) + return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr), + "ioremap of SPI registers failed\n"); + dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr); amd_spi->version = (enum amd_spi_versions) device_get_match_data(dev); @@ -313,17 +319,10 @@ static int amd_spi_probe(struct platform_device *pdev) /* Register the controller with SPI framework */ err = devm_spi_register_master(dev, master); - if (err) { - dev_err(dev, "error %d registering SPI controller\n", err); - goto err_free_master; - } + if (err) + return dev_err_probe(dev, err, "error registering SPI controller\n"); return 0; - -err_free_master: - spi_master_put(master); - - return err; } #ifdef CONFIG_ACPI diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index d8cc4b270644..9df9fc40b783 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -497,7 +497,7 @@ static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi) while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) { val = *(u32 
*)a3700_spi->tx_buf; - spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val); + spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, cpu_to_le32(val)); a3700_spi->buf_len -= 4; a3700_spi->tx_buf += 4; } @@ -519,7 +519,7 @@ static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi) while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) { val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG); if (a3700_spi->buf_len >= 4) { - + val = le32_to_cpu(val); memcpy(a3700_spi->rx_buf, &val, 4); a3700_spi->buf_len -= 4; diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 9e300a932699..c4f22d50dba5 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -1631,7 +1631,6 @@ static int atmel_spi_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM static int atmel_spi_runtime_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); @@ -1653,7 +1652,6 @@ static int atmel_spi_runtime_resume(struct device *dev) return clk_prepare_enable(as->clk); } -#ifdef CONFIG_PM_SLEEP static int atmel_spi_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); @@ -1693,17 +1691,12 @@ static int atmel_spi_resume(struct device *dev) /* Start the queue running */ return spi_master_resume(master); } -#endif static const struct dev_pm_ops atmel_spi_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume) - SET_RUNTIME_PM_OPS(atmel_spi_runtime_suspend, - atmel_spi_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume) + RUNTIME_PM_OPS(atmel_spi_runtime_suspend, + atmel_spi_runtime_resume, NULL) }; -#define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops) -#else -#define ATMEL_SPI_PM_OPS NULL -#endif static const struct of_device_id atmel_spi_dt_ids[] = { { .compatible = "atmel,at91rm9200-spi" }, @@ -1715,7 +1708,7 @@ MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids); static struct platform_driver atmel_spi_driver = { .driver = { .name = "atmel_spi", - .pm = ATMEL_SPI_PM_OPS, + .pm = pm_ptr(&atmel_spi_pm_ops), .of_match_table = atmel_spi_dt_ids, }, .probe = atmel_spi_probe, diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 0933948d7df3..747e03228c48 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -372,6 +372,10 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) struct bcm2835_spi *bs = dev_id; u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); + /* Bail out early if interrupts are not enabled */ + if (!(cs & BCM2835_SPI_CS_INTR)) + return IRQ_NONE; + /* * An interrupt is signaled either if DONE is set (TX FIFO empty) * or if RXR is set (RX FIFO >= ¾ full). 
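The IRQF_SHARED change in the next hunk pairs with the early IRQ_NONE bail-out above: a handler registered on a shared line may be invoked for another device's interrupt, so it must cheaply check whether any of the events it enabled is actually pending and decline otherwise. A minimal sketch of that pattern follows; my_dev, MY_STATUS and enabled_mask are illustrative placeholders, not bcm2835 driver symbols.

#include <linux/interrupt.h>
#include <linux/io.h>

#define MY_STATUS 0x00 /* hypothetical status register offset */

struct my_dev {
	void __iomem *regs;
	u32 enabled_mask; /* interrupt sources this driver enabled */
};

static irqreturn_t my_shared_isr(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;
	u32 status = readl(dev->regs + MY_STATUS);

	/*
	 * Nothing we enabled is pending, so the interrupt belongs to
	 * whatever else shares the line: tell the core it wasn't ours.
	 */
	if (!(status & dev->enabled_mask))
		return IRQ_NONE;

	/* ... service and acknowledge the pending events here ... */
	return IRQ_HANDLED;
}

Registration then passes IRQF_SHARED and a unique dev_id cookie, exactly as the devm_request_irq() change in the hunk below does.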
@@ -1369,8 +1373,8 @@ static int bcm2835_spi_probe(struct platform_device *pdev) bcm2835_wr(bs, BCM2835_SPI_CS, BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); - err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0, - dev_name(&pdev->dev), bs); + err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, + IRQF_SHARED, dev_name(&pdev->dev), bs); if (err) { dev_err(&pdev->dev, "could not request IRQ: %d\n", err); goto out_dma_release; diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c index ecea471ff42c..f87d97ccd2d6 100644 --- a/drivers/spi/spi-dw-core.c +++ b/drivers/spi/spi-dw-core.c @@ -307,8 +307,9 @@ static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi) if (spi->mode & SPI_LOOP) cr0 |= DW_HSSI_CTRLR0_SRL; - if (dws->caps & DW_SPI_CAP_KEEMBAY_MST) - cr0 |= DW_HSSI_CTRLR0_KEEMBAY_MST; + /* CTRLR0[31] MST */ + if (dw_spi_ver_is_ge(dws, HSSI, 102A)) + cr0 |= DW_HSSI_CTRLR0_MST; } return cr0; @@ -942,7 +943,9 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) if (dws->dma_ops && dws->dma_ops->dma_init) { ret = dws->dma_ops->dma_init(dev, dws); - if (ret) { + if (ret == -EPROBE_DEFER) { + goto err_free_irq; + } else if (ret) { dev_warn(dev, "DMA init failed\n"); } else { master->can_dma = dws->dma_ops->can_dma; @@ -963,6 +966,7 @@ err_dma_exit: if (dws->dma_ops && dws->dma_ops->dma_exit) dws->dma_ops->dma_exit(dws); dw_spi_enable_chip(dws, 0); +err_free_irq: free_irq(dws->irq, master); err_free_master: spi_controller_put(master); diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c index 63e5260100ec..1322b8cce5b7 100644 --- a/drivers/spi/spi-dw-dma.c +++ b/drivers/spi/spi-dw-dma.c @@ -139,15 +139,20 @@ err_exit: static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws) { - dws->rxchan = dma_request_slave_channel(dev, "rx"); - if (!dws->rxchan) - return -ENODEV; + int ret; - dws->txchan = dma_request_slave_channel(dev, "tx"); - if (!dws->txchan) { - dma_release_channel(dws->rxchan); + dws->rxchan = dma_request_chan(dev, "rx"); + if (IS_ERR(dws->rxchan)) { + ret = PTR_ERR(dws->rxchan); dws->rxchan = NULL; - return -ENODEV; + goto err_exit; + } + + dws->txchan = dma_request_chan(dev, "tx"); + if (IS_ERR(dws->txchan)) { + ret = PTR_ERR(dws->txchan); + dws->txchan = NULL; + goto free_rxchan; } dws->master->dma_rx = dws->rxchan; @@ -160,6 +165,12 @@ static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws) dw_spi_dma_sg_burst_init(dws); return 0; + +free_rxchan: + dma_release_channel(dws->rxchan); + dws->rxchan = NULL; +err_exit: + return ret; } static void dw_spi_dma_exit(struct dw_spi *dws) diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index 5101c4c6017b..26c40ea6dd12 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -214,11 +214,10 @@ static int dw_spi_hssi_init(struct platform_device *pdev, return 0; } -static int dw_spi_keembay_init(struct platform_device *pdev, - struct dw_spi_mmio *dwsmmio) +static int dw_spi_intel_init(struct platform_device *pdev, + struct dw_spi_mmio *dwsmmio) { dwsmmio->dws.ip = DW_HSSI_ID; - dwsmmio->dws.caps = DW_SPI_CAP_KEEMBAY_MST; return 0; } @@ -349,7 +348,8 @@ static const struct of_device_id dw_spi_mmio_of_match[] = { { .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init}, { .compatible = "renesas,rzn1-spi", .data = dw_spi_pssi_init}, { .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init}, - { .compatible = "intel,keembay-ssi", .data = dw_spi_keembay_init}, + { 
.compatible = "intel,keembay-ssi", .data = dw_spi_intel_init}, + { .compatible = "intel,thunderbay-ssi", .data = dw_spi_intel_init}, { .compatible = "microchip,sparx5-spi", dw_spi_mscc_sparx5_init}, { .compatible = "canaan,k210-spi", dw_spi_canaan_k210_init}, { /* end of table */} diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index d5ee5130601e..9e8eb2b52d5c 100644 --- a/drivers/spi/spi-dw.h +++ b/drivers/spi/spi-dw.h @@ -23,7 +23,7 @@ ((_dws)->ip == DW_ ## _ip ## _ID) #define __dw_spi_ver_cmp(_dws, _ip, _ver, _op) \ - (dw_spi_ip_is(_dws, _ip) && (_dws)->ver _op DW_ ## _ip ## _ver) + (dw_spi_ip_is(_dws, _ip) && (_dws)->ver _op DW_ ## _ip ## _ ## _ver) #define dw_spi_ver_is(_dws, _ip, _ver) __dw_spi_ver_cmp(_dws, _ip, _ver, ==) @@ -31,8 +31,7 @@ /* DW SPI controller capabilities */ #define DW_SPI_CAP_CS_OVERRIDE BIT(0) -#define DW_SPI_CAP_KEEMBAY_MST BIT(1) -#define DW_SPI_CAP_DFS32 BIT(2) +#define DW_SPI_CAP_DFS32 BIT(1) /* Register offsets (Generic for both DWC APB SSI and DWC SSI IP-cores) */ #define DW_SPI_CTRLR0 0x00 @@ -94,13 +93,7 @@ #define DW_HSSI_CTRLR0_SCPOL BIT(9) #define DW_HSSI_CTRLR0_TMOD_MASK GENMASK(11, 10) #define DW_HSSI_CTRLR0_SRL BIT(13) - -/* - * For Keem Bay, CTRLR0[31] is used to select controller mode. - * 0: SSI is slave - * 1: SSI is master - */ -#define DW_HSSI_CTRLR0_KEEMBAY_MST BIT(31) +#define DW_HSSI_CTRLR0_MST BIT(31) /* Bit fields in CTRLR1 */ #define DW_SPI_NDF_MASK GENMASK(15, 0) diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c index 72ab066ce552..cf1e4f9ebd72 100644 --- a/drivers/spi/spi-fsi.c +++ b/drivers/spi/spi-fsi.c @@ -24,8 +24,7 @@ #define FSI2SPI_IRQ 0x20 #define SPI_FSI_BASE 0x70000 -#define SPI_FSI_INIT_TIMEOUT_MS 1000 -#define SPI_FSI_STATUS_TIMEOUT_MS 100 +#define SPI_FSI_TIMEOUT_MS 1000 #define SPI_FSI_MAX_RX_SIZE 8 #define SPI_FSI_MAX_TX_SIZE 40 @@ -299,6 +298,7 @@ static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq) static int fsi_spi_transfer_data(struct fsi_spi *ctx, struct spi_transfer *transfer) { + int loops; int rc = 0; unsigned long end; u64 status = 0ULL; @@ -317,9 +317,10 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, if (rc) return rc; - end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS); + loops = 0; + end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS); do { - if (time_after(jiffies, end)) + if (loops++ && time_after(jiffies, end)) return -ETIMEDOUT; rc = fsi_spi_status(ctx, &status, "TX"); @@ -335,9 +336,10 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, u8 *rx = transfer->rx_buf; while (transfer->len > recv) { - end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS); + loops = 0; + end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS); do { - if (time_after(jiffies, end)) + if (loops++ && time_after(jiffies, end)) return -ETIMEDOUT; rc = fsi_spi_status(ctx, &status, "RX"); @@ -359,6 +361,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, static int fsi_spi_transfer_init(struct fsi_spi *ctx) { + int loops = 0; int rc; bool reset = false; unsigned long end; @@ -369,9 +372,9 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx) SPI_FSI_CLOCK_CFG_SCK_NO_DEL | FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19); - end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS); + end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS); do { - if (time_after(jiffies, end)) + if (loops++ && time_after(jiffies, end)) return -ETIMEDOUT; rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status); diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c new file mode 100644 index 
000000000000..9ea355f7d64f --- /dev/null +++ b/drivers/spi/spi-gxp.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (C) 2022 Hewlett-Packard Development Company, L.P. */ + +#include <linux/iopoll.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> + +#define GXP_SPI0_MAX_CHIPSELECT 2 +#define GXP_SPI_SLEEP_TIME 1 +#define GXP_SPI_TIMEOUT (130 * 1000000 / GXP_SPI_SLEEP_TIME) + +#define MANUAL_MODE 0 +#define DIRECT_MODE 1 +#define SPILDAT_LEN 256 + +#define OFFSET_SPIMCFG 0x0 +#define OFFSET_SPIMCTRL 0x4 +#define OFFSET_SPICMD 0x5 +#define OFFSET_SPIDCNT 0x6 +#define OFFSET_SPIADDR 0x8 +#define OFFSET_SPIINTSTS 0xc + +#define SPIMCTRL_START 0x01 +#define SPIMCTRL_BUSY 0x02 +#define SPIMCTRL_DIR 0x08 + +struct gxp_spi; + +struct gxp_spi_chip { + struct gxp_spi *spifi; + u32 cs; +}; + +struct gxp_spi_data { + u32 max_cs; + u32 mode_bits; +}; + +struct gxp_spi { + const struct gxp_spi_data *data; + void __iomem *reg_base; + void __iomem *dat_base; + void __iomem *dir_base; + struct device *dev; + struct gxp_spi_chip chips[GXP_SPI0_MAX_CHIPSELECT]; +}; + +static void gxp_spi_set_mode(struct gxp_spi *spifi, int mode) +{ + u8 value; + void __iomem *reg_base = spifi->reg_base; + + value = readb(reg_base + OFFSET_SPIMCTRL); + + if (mode == MANUAL_MODE) { + writeb(0x55, reg_base + OFFSET_SPICMD); + writeb(0xaa, reg_base + OFFSET_SPICMD); + value &= ~0x30; + } else { + value |= 0x30; + } + writeb(value, reg_base + OFFSET_SPIMCTRL); +} + +static int gxp_spi_read_reg(struct gxp_spi_chip *chip, const struct spi_mem_op *op) +{ + int ret; + struct gxp_spi *spifi = chip->spifi; + void __iomem *reg_base = spifi->reg_base; + u32 value; + + value = readl(reg_base + OFFSET_SPIMCFG); + value &= ~(1 << 24); + value |= (chip->cs << 24); + value &= ~(0x07 << 16); + value &= ~(0x1f << 19); + writel(value, reg_base + OFFSET_SPIMCFG); + + writel(0, reg_base + OFFSET_SPIADDR); + + writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD); + + writew(op->data.nbytes, reg_base + OFFSET_SPIDCNT); + + value = readb(reg_base + OFFSET_SPIMCTRL); + value &= ~SPIMCTRL_DIR; + value |= SPIMCTRL_START; + + writeb(value, reg_base + OFFSET_SPIMCTRL); + + ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value, + !(value & SPIMCTRL_BUSY), + GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT); + if (ret) { + dev_warn(spifi->dev, "read reg busy time out\n"); + return ret; + } + + memcpy_fromio(op->data.buf.in, spifi->dat_base, op->data.nbytes); + return ret; +} + +static int gxp_spi_write_reg(struct gxp_spi_chip *chip, const struct spi_mem_op *op) +{ + int ret; + struct gxp_spi *spifi = chip->spifi; + void __iomem *reg_base = spifi->reg_base; + u32 value; + + value = readl(reg_base + OFFSET_SPIMCFG); + value &= ~(1 << 24); + value |= (chip->cs << 24); + value &= ~(0x07 << 16); + value &= ~(0x1f << 19); + writel(value, reg_base + OFFSET_SPIMCFG); + + writel(0, reg_base + OFFSET_SPIADDR); + + writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD); + + memcpy_toio(spifi->dat_base, op->data.buf.in, op->data.nbytes); + + writew(op->data.nbytes, reg_base + OFFSET_SPIDCNT); + + value = readb(reg_base + OFFSET_SPIMCTRL); + value |= SPIMCTRL_DIR; + value |= SPIMCTRL_START; + + writeb(value, reg_base + OFFSET_SPIMCTRL); + + ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value, + !(value & SPIMCTRL_BUSY), + GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT); + if (ret) + dev_warn(spifi->dev, "write reg busy time out\n"); + + return ret; +} + +static
ssize_t gxp_spi_read(struct gxp_spi_chip *chip, const struct spi_mem_op *op) +{ + struct gxp_spi *spifi = chip->spifi; + u32 offset = op->addr.val; + + if (chip->cs == 0) + offset += 0x4000000; + + memcpy_fromio(op->data.buf.in, spifi->dir_base + offset, op->data.nbytes); + + return 0; +} + +static ssize_t gxp_spi_write(struct gxp_spi_chip *chip, const struct spi_mem_op *op) +{ + struct gxp_spi *spifi = chip->spifi; + void __iomem *reg_base = spifi->reg_base; + u32 write_len; + u32 value; + int ret; + + write_len = op->data.nbytes; + if (write_len > SPILDAT_LEN) + write_len = SPILDAT_LEN; + + value = readl(reg_base + OFFSET_SPIMCFG); + value &= ~(1 << 24); + value |= (chip->cs << 24); + value &= ~(0x07 << 16); + value |= (op->addr.nbytes << 16); + value &= ~(0x1f << 19); + writel(value, reg_base + OFFSET_SPIMCFG); + + writel(op->addr.val, reg_base + OFFSET_SPIADDR); + + writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD); + + writew(write_len, reg_base + OFFSET_SPIDCNT); + + memcpy_toio(spifi->dat_base, op->data.buf.in, write_len); + + value = readb(reg_base + OFFSET_SPIMCTRL); + value |= SPIMCTRL_DIR; + value |= SPIMCTRL_START; + + writeb(value, reg_base + OFFSET_SPIMCTRL); + + ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value, + !(value & SPIMCTRL_BUSY), + GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT); + if (ret) { + dev_warn(spifi->dev, "write busy time out\n"); + return ret; + } + + return write_len; +} + +static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct gxp_spi *spifi = spi_controller_get_devdata(mem->spi->master); + struct gxp_spi_chip *chip = &spifi->chips[mem->spi->chip_select]; + int ret; + + if (op->data.dir == SPI_MEM_DATA_IN) { + if (!op->addr.nbytes) + ret = gxp_spi_read_reg(chip, op); + else + ret = gxp_spi_read(chip, op); + } else { + if (!op->addr.nbytes) + ret = gxp_spi_write_reg(chip, op); + else + ret = gxp_spi_write(chip, op); + } + + return ret; +} + +static int gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + int ret; + + ret = do_gxp_exec_mem_op(mem, op); + if (ret) + dev_err(&mem->spi->dev, "operation failed: %d", ret); + + return ret; +} + +static const struct spi_controller_mem_ops gxp_spi_mem_ops = { + .exec_op = gxp_exec_mem_op, +}; + +static int gxp_spi_setup(struct spi_device *spi) +{ + struct gxp_spi *spifi = spi_controller_get_devdata(spi->master); + unsigned int cs = spi->chip_select; + struct gxp_spi_chip *chip = &spifi->chips[cs]; + + chip->spifi = spifi; + chip->cs = cs; + + gxp_spi_set_mode(spifi, MANUAL_MODE); + + return 0; +} + +static int gxp_spifi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct gxp_spi_data *data; + struct spi_controller *ctlr; + struct gxp_spi *spifi; + struct resource *res; + int ret; + + data = of_device_get_match_data(&pdev->dev); + + ctlr = devm_spi_alloc_master(dev, sizeof(*spifi)); + if (!ctlr) + return -ENOMEM; + + spifi = spi_controller_get_devdata(ctlr); + + platform_set_drvdata(pdev, spifi); + spifi->data = data; + spifi->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + spifi->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(spifi->reg_base)) + return PTR_ERR(spifi->reg_base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + spifi->dat_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(spifi->dat_base)) + return PTR_ERR(spifi->dat_base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + spifi->dir_base = devm_ioremap_resource(&pdev->dev, res); + if 
(IS_ERR(spifi->dir_base)) + return PTR_ERR(spifi->dir_base); + + ctlr->mode_bits = data->mode_bits; + ctlr->bus_num = pdev->id; + ctlr->mem_ops = &gxp_spi_mem_ops; + ctlr->setup = gxp_spi_setup; + ctlr->num_chipselect = data->max_cs; + ctlr->dev.of_node = dev->of_node; + + ret = devm_spi_register_controller(dev, ctlr); + if (ret) { + return dev_err_probe(&pdev->dev, ret, + "failed to register spi controller\n"); + } + + return 0; +} + +static const struct gxp_spi_data gxp_spifi_data = { + .max_cs = 2, + .mode_bits = 0, +}; + +static const struct of_device_id gxp_spifi_match[] = { + {.compatible = "hpe,gxp-spifi", .data = &gxp_spifi_data }, + { /* null */ } +}; +MODULE_DEVICE_TABLE(of, gxp_spifi_match); + +static struct platform_driver gxp_spifi_driver = { + .probe = gxp_spifi_probe, + .driver = { + .name = "gxp-spifi", + .of_match_table = gxp_spifi_match, + }, +}; +module_platform_driver(gxp_spifi_driver); + +MODULE_DESCRIPTION("HPE GXP SPI Flash Interface driver"); +MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c index f6eec7a869b6..f0d532ea40e8 100644 --- a/drivers/spi/spi-intel-pci.c +++ b/drivers/spi/spi-intel-pci.c @@ -74,6 +74,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c index 50f42983b950..66063687ae27 100644 --- a/drivers/spi/spi-intel.c +++ b/drivers/spi/spi-intel.c @@ -1236,8 +1236,8 @@ static int intel_spi_populate_chip(struct intel_spi *ispi) return -ENOMEM; pdata->nr_parts = 1; - pdata->parts = devm_kcalloc(ispi->dev, sizeof(*pdata->parts), - pdata->nr_parts, GFP_KERNEL); + pdata->parts = devm_kcalloc(ispi->dev, pdata->nr_parts, + sizeof(*pdata->parts), GFP_KERNEL); if (!pdata->parts) return -ENOMEM; diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c new file mode 100644 index 000000000000..ce4385330b19 --- /dev/null +++ b/drivers/spi/spi-microchip-core.c @@ -0,0 +1,617 @@ +// SPDX-License-Identifier: (GPL-2.0) +/* + * Microchip CoreSPI SPI controller driver + * + * Copyright (c) 2018-2022 Microchip Technology Inc. 
and its subsidiaries + * + * Author: Daire McNamara <daire.mcnamara@microchip.com> + * Author: Conor Dooley <conor.dooley@microchip.com> + * + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> + +#define MAX_LEN (0xffff) +#define MAX_CS (8) +#define DEFAULT_FRAMESIZE (8) +#define FIFO_DEPTH (32) +#define CLK_GEN_MODE1_MAX (255) +#define CLK_GEN_MODE0_MAX (15) +#define CLK_GEN_MIN (0) +#define MODE_X_MASK_SHIFT (24) + +#define CONTROL_ENABLE BIT(0) +#define CONTROL_MASTER BIT(1) +#define CONTROL_RX_DATA_INT BIT(4) +#define CONTROL_TX_DATA_INT BIT(5) +#define CONTROL_RX_OVER_INT BIT(6) +#define CONTROL_TX_UNDER_INT BIT(7) +#define CONTROL_SPO BIT(24) +#define CONTROL_SPH BIT(25) +#define CONTROL_SPS BIT(26) +#define CONTROL_FRAMEURUN BIT(27) +#define CONTROL_CLKMODE BIT(28) +#define CONTROL_BIGFIFO BIT(29) +#define CONTROL_OENOFF BIT(30) +#define CONTROL_RESET BIT(31) + +#define CONTROL_MODE_MASK GENMASK(3, 2) +#define MOTOROLA_MODE (0) +#define CONTROL_FRAMECNT_MASK GENMASK(23, 8) +#define CONTROL_FRAMECNT_SHIFT (8) + +#define STATUS_ACTIVE BIT(14) +#define STATUS_SSEL BIT(13) +#define STATUS_FRAMESTART BIT(12) +#define STATUS_TXFIFO_EMPTY_NEXT_READ BIT(11) +#define STATUS_TXFIFO_EMPTY BIT(10) +#define STATUS_TXFIFO_FULL_NEXT_WRITE BIT(9) +#define STATUS_TXFIFO_FULL BIT(8) +#define STATUS_RXFIFO_EMPTY_NEXT_READ BIT(7) +#define STATUS_RXFIFO_EMPTY BIT(6) +#define STATUS_RXFIFO_FULL_NEXT_WRITE BIT(5) +#define STATUS_RXFIFO_FULL BIT(4) +#define STATUS_TX_UNDERRUN BIT(3) +#define STATUS_RX_OVERFLOW BIT(2) +#define STATUS_RXDAT_RXED BIT(1) +#define STATUS_TXDAT_SENT BIT(0) + +#define INT_TXDONE BIT(0) +#define INT_RXRDY BIT(1) +#define INT_RX_CHANNEL_OVERFLOW BIT(2) +#define INT_TX_CHANNEL_UNDERRUN BIT(3) + +#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \ + CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT) + +#define REG_CONTROL (0x00) +#define REG_FRAME_SIZE (0x04) +#define REG_STATUS (0x08) +#define REG_INT_CLEAR (0x0c) +#define REG_RX_DATA (0x10) +#define REG_TX_DATA (0x14) +#define REG_CLK_GEN (0x18) +#define REG_SLAVE_SELECT (0x1c) +#define SSEL_MASK GENMASK(7, 0) +#define SSEL_DIRECT BIT(8) +#define SSELOUT_SHIFT 9 +#define SSELOUT BIT(SSELOUT_SHIFT) +#define REG_MIS (0x20) +#define REG_RIS (0x24) +#define REG_CONTROL2 (0x28) +#define REG_COMMAND (0x2c) +#define REG_PKTSIZE (0x30) +#define REG_CMD_SIZE (0x34) +#define REG_HWSTATUS (0x38) +#define REG_STAT8 (0x3c) +#define REG_CTRL2 (0x48) +#define REG_FRAMESUP (0x50) + +struct mchp_corespi { + void __iomem *regs; + struct clk *clk; + const u8 *tx_buf; + u8 *rx_buf; + u32 clk_gen; /* divider for spi output clock generated by the controller */ + u32 clk_mode; + int irq; + int tx_len; + int rx_len; + int pending; +}; + +static inline u32 mchp_corespi_read(struct mchp_corespi *spi, unsigned int reg) +{ + return readl(spi->regs + reg); +} + +static inline void mchp_corespi_write(struct mchp_corespi *spi, unsigned int reg, u32 val) +{ + writel(val, spi->regs + reg); +} + +static inline void mchp_corespi_enable(struct mchp_corespi *spi) +{ + u32 control = mchp_corespi_read(spi, REG_CONTROL); + + control |= CONTROL_ENABLE; + + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_disable(struct mchp_corespi *spi) +{ + u32 control = mchp_corespi_read(spi, REG_CONTROL); + + control &= 
~CONTROL_ENABLE; + + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi) +{ + u8 data; + int fifo_max, i = 0; + + fifo_max = min(spi->rx_len, FIFO_DEPTH); + + while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) { + data = mchp_corespi_read(spi, REG_RX_DATA); + + if (spi->rx_buf) + *spi->rx_buf++ = data; + i++; + } + spi->rx_len -= i; + spi->pending -= i; +} + +static void mchp_corespi_enable_ints(struct mchp_corespi *spi) +{ + u32 control, mask = INT_ENABLE_MASK; + + mchp_corespi_disable(spi); + + control = mchp_corespi_read(spi, REG_CONTROL); + + control |= mask; + mchp_corespi_write(spi, REG_CONTROL, control); + + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static void mchp_corespi_disable_ints(struct mchp_corespi *spi) +{ + u32 control, mask = INT_ENABLE_MASK; + + mchp_corespi_disable(spi); + + control = mchp_corespi_read(spi, REG_CONTROL); + control &= ~mask; + mchp_corespi_write(spi, REG_CONTROL, control); + + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len) +{ + u32 control; + u16 lenpart; + + /* + * Disable the SPI controller. Writes to transfer length have + * no effect when the controller is enabled. + */ + mchp_corespi_disable(spi); + + /* + * The lower 16 bits of the frame count are stored in the control reg + * for legacy reasons, but the upper 16 written to a different register: + * FRAMESUP. While both the upper and lower bits can be *READ* from the + * FRAMESUP register, writing to the lower 16 bits is a NOP + */ + lenpart = len & 0xffff; + + control = mchp_corespi_read(spi, REG_CONTROL); + control &= ~CONTROL_FRAMECNT_MASK; + control |= lenpart << CONTROL_FRAMECNT_SHIFT; + mchp_corespi_write(spi, REG_CONTROL, control); + + lenpart = len & 0xffff0000; + mchp_corespi_write(spi, REG_FRAMESUP, lenpart); + + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi) +{ + u8 byte; + int fifo_max, i = 0; + + fifo_max = min(spi->tx_len, FIFO_DEPTH); + mchp_corespi_set_xfer_size(spi, fifo_max); + + while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) { + byte = spi->tx_buf ? *spi->tx_buf++ : 0xaa; + mchp_corespi_write(spi, REG_TX_DATA, byte); + i++; + } + + spi->tx_len -= i; + spi->pending += i; +} + +static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt) +{ + u32 control; + + /* + * Disable the SPI controller. Writes to the frame size have + * no effect when the controller is enabled. 
+ */ + mchp_corespi_disable(spi); + + mchp_corespi_write(spi, REG_FRAME_SIZE, bt); + + control = mchp_corespi_read(spi, REG_CONTROL); + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static void mchp_corespi_set_cs(struct spi_device *spi, bool disable) +{ + u32 reg; + struct mchp_corespi *corespi = spi_master_get_devdata(spi->master); + + reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT); + reg &= ~BIT(spi->chip_select); + reg |= !disable << spi->chip_select; + + mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg); +} + +static int mchp_corespi_setup(struct spi_device *spi) +{ + struct mchp_corespi *corespi = spi_master_get_devdata(spi->master); + u32 reg; + + /* + * Active high slaves need to be specifically set to their inactive + * states during probe by adding them to the "control group" & thus + * driving their select line low. + */ + if (spi->mode & SPI_CS_HIGH) { + reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT); + reg |= BIT(spi->chip_select); + mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg); + } + return 0; +} + +static void mchp_corespi_init(struct spi_master *master, struct mchp_corespi *spi) +{ + unsigned long clk_hz; + u32 control = mchp_corespi_read(spi, REG_CONTROL); + + control |= CONTROL_MASTER; + + control &= ~CONTROL_MODE_MASK; + control |= MOTOROLA_MODE; + + mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE); + + /* max. possible spi clock rate is the apb clock rate */ + clk_hz = clk_get_rate(spi->clk); + master->max_speed_hz = clk_hz; + + /* + * The controller must be configured so that it doesn't remove Chip + * Select until the entire message has been transferred, even if at + * some points TX FIFO becomes empty. + * + * BIGFIFO mode is also enabled, which sets the fifo depth to 32 frames + * for the 8 bit transfers that this driver uses. + */ + control = mchp_corespi_read(spi, REG_CONTROL); + control |= CONTROL_SPS | CONTROL_BIGFIFO; + + mchp_corespi_write(spi, REG_CONTROL, control); + + mchp_corespi_enable_ints(spi); + + /* + * It is required to enable direct mode, otherwise control over the chip + * select is relinquished to the hardware. SSELOUT is enabled too so we + * can deal with active high slaves. + */ + mchp_corespi_write(spi, REG_SLAVE_SELECT, SSELOUT | SSEL_DIRECT); + + control = mchp_corespi_read(spi, REG_CONTROL); + + control &= ~CONTROL_RESET; + control |= CONTROL_ENABLE; + + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi) +{ + u32 control; + + mchp_corespi_disable(spi); + + control = mchp_corespi_read(spi, REG_CONTROL); + if (spi->clk_mode) + control |= CONTROL_CLKMODE; + else + control &= ~CONTROL_CLKMODE; + + mchp_corespi_write(spi, REG_CLK_GEN, spi->clk_gen); + mchp_corespi_write(spi, REG_CONTROL, control); + mchp_corespi_write(spi, REG_CONTROL, control | CONTROL_ENABLE); +} + +static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int mode) +{ + u32 control, mode_val; + + switch (mode & SPI_MODE_X_MASK) { + case SPI_MODE_0: + mode_val = 0; + break; + case SPI_MODE_1: + mode_val = CONTROL_SPH; + break; + case SPI_MODE_2: + mode_val = CONTROL_SPO; + break; + case SPI_MODE_3: + mode_val = CONTROL_SPH | CONTROL_SPO; + break; + } + + /* + * Disable the SPI controller. Writes to the frame size have + * no effect when the controller is enabled. 
+ */ + mchp_corespi_disable(spi); + + control = mchp_corespi_read(spi, REG_CONTROL); + control &= ~(SPI_MODE_X_MASK << MODE_X_MASK_SHIFT); + control |= mode_val; + + mchp_corespi_write(spi, REG_CONTROL, control); + + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct mchp_corespi *spi = spi_master_get_devdata(master); + u32 intfield = mchp_corespi_read(spi, REG_MIS) & 0xf; + bool finalise = false; + + /* Interrupt line may be shared and not for us at all */ + if (intfield == 0) + return IRQ_NONE; + + if (intfield & INT_TXDONE) { + mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE); + + if (spi->rx_len) + mchp_corespi_read_fifo(spi); + + if (spi->tx_len) + mchp_corespi_write_fifo(spi); + + if (!spi->rx_len) + finalise = true; + } + + if (intfield & INT_RXRDY) + mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY); + + if (intfield & INT_RX_CHANNEL_OVERFLOW) { + mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW); + finalise = true; + dev_err(&master->dev, + "%s: RX OVERFLOW: rxlen: %d, txlen: %d\n", __func__, + spi->rx_len, spi->tx_len); + } + + if (intfield & INT_TX_CHANNEL_UNDERRUN) { + mchp_corespi_write(spi, REG_INT_CLEAR, INT_TX_CHANNEL_UNDERRUN); + finalise = true; + dev_err(&master->dev, + "%s: TX UNDERFLOW: rxlen: %d, txlen: %d\n", __func__, + spi->rx_len, spi->tx_len); + } + + if (finalise) + spi_finalize_current_transfer(master); + + return IRQ_HANDLED; +} + +static int mchp_corespi_calculate_clkgen(struct mchp_corespi *spi, + unsigned long target_hz) +{ + unsigned long clk_hz, spi_hz, clk_gen; + + clk_hz = clk_get_rate(spi->clk); + if (!clk_hz) + return -EINVAL; + spi_hz = min(target_hz, clk_hz); + + /* + * There are two possible clock modes for the controller generated + * clock's division ratio: + * CLK_MODE = 0: 1 / (2^(CLK_GEN + 1)) where CLK_GEN = 0 to 15. + * CLK_MODE = 1: 1 / (2 * CLK_GEN + 1) where CLK_GEN = 0 to 255. + * First try mode 1, fall back to 0 and if we have tried both modes and + * we /still/ can't get a good setting, we then throw the toys out of + * the pram and give up + * clk_gen is the register name for the clock divider on MPFS. + */ + clk_gen = DIV_ROUND_UP(clk_hz, 2 * spi_hz) - 1; + if (clk_gen > CLK_GEN_MODE1_MAX || clk_gen <= CLK_GEN_MIN) { + clk_gen = DIV_ROUND_UP(clk_hz, spi_hz); + clk_gen = fls(clk_gen) - 1; + + if (clk_gen > CLK_GEN_MODE0_MAX) + return -EINVAL; + + spi->clk_mode = 0; + } else { + spi->clk_mode = 1; + } + + spi->clk_gen = clk_gen; + return 0; +} + +static int mchp_corespi_transfer_one(struct spi_master *master, + struct spi_device *spi_dev, + struct spi_transfer *xfer) +{ + struct mchp_corespi *spi = spi_master_get_devdata(master); + int ret; + + ret = mchp_corespi_calculate_clkgen(spi, (unsigned long)xfer->speed_hz); + if (ret) { + dev_err(&master->dev, "failed to set clk_gen for target %u Hz\n", xfer->speed_hz); + return ret; + } + + mchp_corespi_set_clk_gen(spi); + + spi->tx_buf = xfer->tx_buf; + spi->rx_buf = xfer->rx_buf; + spi->tx_len = xfer->len; + spi->rx_len = xfer->len; + spi->pending = 0; + + mchp_corespi_set_xfer_size(spi, (spi->tx_len > FIFO_DEPTH) + ? 
FIFO_DEPTH : spi->tx_len); + + if (spi->tx_len) + mchp_corespi_write_fifo(spi); + return 1; +} + +static int mchp_corespi_prepare_message(struct spi_master *master, + struct spi_message *msg) +{ + struct spi_device *spi_dev = msg->spi; + struct mchp_corespi *spi = spi_master_get_devdata(master); + + mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE); + mchp_corespi_set_mode(spi, spi_dev->mode); + + return 0; +} + +static int mchp_corespi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct mchp_corespi *spi; + struct resource *res; + u32 num_cs; + int ret = 0; + + master = devm_spi_alloc_master(&pdev->dev, sizeof(*spi)); + if (!master) + return dev_err_probe(&pdev->dev, -ENOMEM, + "unable to allocate master for SPI controller\n"); + + platform_set_drvdata(pdev, master); + + if (of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs)) + num_cs = MAX_CS; + + master->num_chipselect = num_cs; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->setup = mchp_corespi_setup; + master->bits_per_word_mask = SPI_BPW_MASK(8); + master->transfer_one = mchp_corespi_transfer_one; + master->prepare_message = mchp_corespi_prepare_message; + master->set_cs = mchp_corespi_set_cs; + master->dev.of_node = pdev->dev.of_node; + + spi = spi_master_get_devdata(master); + + spi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(spi->regs)) + return PTR_ERR(spi->regs); + + spi->irq = platform_get_irq(pdev, 0); + if (spi->irq <= 0) + return dev_err_probe(&pdev->dev, -ENXIO, + "invalid IRQ %d for SPI controller\n", + spi->irq); + + ret = devm_request_irq(&pdev->dev, spi->irq, mchp_corespi_interrupt, + IRQF_SHARED, dev_name(&pdev->dev), master); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "could not request irq: %d\n", ret); + + spi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(spi->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(spi->clk), + "could not get clk: %d\n", ret); + + ret = clk_prepare_enable(spi->clk); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "failed to enable clock\n"); + + mchp_corespi_init(master, spi); + + ret = devm_spi_register_master(&pdev->dev, master); + if (ret) { + mchp_corespi_disable(spi); + clk_disable_unprepare(spi->clk); + return dev_err_probe(&pdev->dev, ret, + "unable to register master for SPI controller\n"); + } + + dev_info(&pdev->dev, "Registered SPI controller %d\n", master->bus_num); + + return 0; +} + +static int mchp_corespi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct mchp_corespi *spi = spi_master_get_devdata(master); + + mchp_corespi_disable_ints(spi); + clk_disable_unprepare(spi->clk); + mchp_corespi_disable(spi); + + return 0; +} + +#define MICROCHIP_SPI_PM_OPS (NULL) + +/* + * Platform driver data structure + */ + +#if defined(CONFIG_OF) +static const struct of_device_id mchp_corespi_dt_ids[] = { + { .compatible = "microchip,mpfs-spi" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mchp_corespi_dt_ids); +#endif + +static struct platform_driver mchp_corespi_driver = { + .probe = mchp_corespi_probe, + .driver = { + .name = "microchip-corespi", + .pm = MICROCHIP_SPI_PM_OPS, + .of_match_table = of_match_ptr(mchp_corespi_dt_ids), + }, + .remove = mchp_corespi_remove, +}; +module_platform_driver(mchp_corespi_driver); +MODULE_DESCRIPTION("Microchip coreSPI SPI controller driver"); +MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>"); +MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>"); 
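+/*
+ * Illustrative note, not part of the submitted driver: a worked example of
+ * the divider selection in mchp_corespi_calculate_clkgen() above, assuming
+ * clk_hz = 150 MHz and a 10 MHz target rate.
+ *
+ *   Mode 1 is tried first:
+ *     clk_gen = DIV_ROUND_UP(150000000, 2 * 10000000) - 1 = 7
+ *   which lies within (CLK_GEN_MIN, CLK_GEN_MODE1_MAX], so clk_mode = 1
+ *   is kept and 7 is written to the clk_gen register field.
+ *
+ * Only when that range check fails - a target so slow that clk_gen would
+ * exceed 255, or one at or above half the input clock that drives clk_gen
+ * down to CLK_GEN_MIN - does the code fall back to mode 0, where
+ * clk_gen = fls(DIV_ROUND_UP(clk_hz, spi_hz)) - 1 picks a power-of-two
+ * divider and anything past CLK_GEN_MODE0_MAX returns -EINVAL.
+ */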
+MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c index 7654736c2c0e..609311231e64 100644 --- a/drivers/spi/spi-mpc52xx-psc.c +++ b/drivers/spi/spi-mpc52xx-psc.c @@ -37,12 +37,6 @@ struct mpc52xx_psc_spi { struct mpc52xx_psc_fifo __iomem *fifo; unsigned int irq; u8 bits_per_word; - u8 busy; - - struct work_struct work; - - struct list_head queue; - spinlock_t lock; struct completion done; }; @@ -198,69 +192,53 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, return 0; } -static void mpc52xx_psc_spi_work(struct work_struct *work) +int mpc52xx_psc_spi_transfer_one_message(struct spi_controller *ctlr, + struct spi_message *m) { - struct mpc52xx_psc_spi *mps = - container_of(work, struct mpc52xx_psc_spi, work); - - spin_lock_irq(&mps->lock); - mps->busy = 1; - while (!list_empty(&mps->queue)) { - struct spi_message *m; - struct spi_device *spi; - struct spi_transfer *t = NULL; - unsigned cs_change; - int status; - - m = container_of(mps->queue.next, struct spi_message, queue); - list_del_init(&m->queue); - spin_unlock_irq(&mps->lock); - - spi = m->spi; - cs_change = 1; - status = 0; - list_for_each_entry (t, &m->transfers, transfer_list) { - if (t->bits_per_word || t->speed_hz) { - status = mpc52xx_psc_spi_transfer_setup(spi, t); - if (status < 0) - break; - } - - if (cs_change) - mpc52xx_psc_spi_activate_cs(spi); - cs_change = t->cs_change; - - status = mpc52xx_psc_spi_transfer_rxtx(spi, t); - if (status) + struct spi_device *spi; + struct spi_transfer *t = NULL; + unsigned cs_change; + int status; + + spi = m->spi; + cs_change = 1; + status = 0; + list_for_each_entry (t, &m->transfers, transfer_list) { + if (t->bits_per_word || t->speed_hz) { + status = mpc52xx_psc_spi_transfer_setup(spi, t); + if (status < 0) break; - m->actual_length += t->len; + } - spi_transfer_delay_exec(t); + if (cs_change) + mpc52xx_psc_spi_activate_cs(spi); + cs_change = t->cs_change; - if (cs_change) - mpc52xx_psc_spi_deactivate_cs(spi); - } + status = mpc52xx_psc_spi_transfer_rxtx(spi, t); + if (status) + break; + m->actual_length += t->len; - m->status = status; - if (m->complete) - m->complete(m->context); + spi_transfer_delay_exec(t); - if (status || !cs_change) + if (cs_change) mpc52xx_psc_spi_deactivate_cs(spi); + } - mpc52xx_psc_spi_transfer_setup(spi, NULL); + m->status = status; + if (status || !cs_change) + mpc52xx_psc_spi_deactivate_cs(spi); - spin_lock_irq(&mps->lock); - } - mps->busy = 0; - spin_unlock_irq(&mps->lock); + mpc52xx_psc_spi_transfer_setup(spi, NULL); + + spi_finalize_current_message(ctlr); + + return 0; } static int mpc52xx_psc_spi_setup(struct spi_device *spi) { - struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); struct mpc52xx_psc_spi_cs *cs = spi->controller_state; - unsigned long flags; if (spi->bits_per_word%8) return -EINVAL; @@ -275,28 +253,6 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi) cs->bits_per_word = spi->bits_per_word; cs->speed_hz = spi->max_speed_hz; - spin_lock_irqsave(&mps->lock, flags); - if (!mps->busy) - mpc52xx_psc_spi_deactivate_cs(spi); - spin_unlock_irqrestore(&mps->lock, flags); - - return 0; -} - -static int mpc52xx_psc_spi_transfer(struct spi_device *spi, - struct spi_message *m) -{ - struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); - unsigned long flags; - - m->actual_length = 0; - m->status = -EINPROGRESS; - - spin_lock_irqsave(&mps->lock, flags); - list_add_tail(&m->queue, &mps->queue); - schedule_work(&mps->work); - 
spin_unlock_irqrestore(&mps->lock, flags); - return 0; } @@ -391,7 +347,7 @@ static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, master->num_chipselect = pdata->max_chipselect; } master->setup = mpc52xx_psc_spi_setup; - master->transfer = mpc52xx_psc_spi_transfer; + master->transfer_one_message = mpc52xx_psc_spi_transfer_one_message; master->cleanup = mpc52xx_psc_spi_cleanup; master->dev.of_node = dev->of_node; @@ -415,10 +371,7 @@ static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, goto free_irq; } - spin_lock_init(&mps->lock); init_completion(&mps->done); - INIT_WORK(&mps->work, mpc52xx_psc_spi_work); - INIT_LIST_HEAD(&mps->queue); ret = spi_register_master(master); if (ret < 0) @@ -470,7 +423,6 @@ static int mpc52xx_psc_spi_of_remove(struct platform_device *op) struct spi_master *master = spi_master_get(platform_get_drvdata(op)); struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master); - flush_work(&mps->work); spi_unregister_master(master); free_irq(mps->irq, mps); if (mps->psc) diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c index ba67dbed9fb8..49f6424e35af 100644 --- a/drivers/spi/spi-npcm-fiu.c +++ b/drivers/spi/spi-npcm-fiu.c @@ -36,6 +36,7 @@ #define NPCM_FIU_UMA_DR1 0x34 #define NPCM_FIU_UMA_DR2 0x38 #define NPCM_FIU_UMA_DR3 0x3C +#define NPCM_FIU_CFG 0x78 #define NPCM_FIU_MAX_REG_LIMIT 0x80 /* FIU Direct Read Configuration Register */ @@ -151,6 +152,9 @@ #define NPCM_FIU_UMA_DR3_RB13 GENMASK(15, 8) #define NPCM_FIU_UMA_DR3_RB12 GENMASK(7, 0) +/* FIU Configuration Register */ +#define NPCM_FIU_CFG_FIU_FIX BIT(31) + /* FIU Read Mode */ enum { DRD_SINGLE_WIRE_MODE = 0, @@ -187,6 +191,7 @@ enum { FIU0 = 0, FIU3, FIUX, + FIU1, }; struct npcm_fiu_info { @@ -214,6 +219,21 @@ static const struct fiu_data npcm7xx_fiu_data = { .fiu_max = 3, }; +static const struct npcm_fiu_info npxm8xx_fiu_info[] = { + {.name = "FIU0", .fiu_id = FIU0, + .max_map_size = MAP_SIZE_128MB, .max_cs = 2}, + {.name = "FIU3", .fiu_id = FIU3, + .max_map_size = MAP_SIZE_128MB, .max_cs = 4}, + {.name = "FIUX", .fiu_id = FIUX, + .max_map_size = MAP_SIZE_16MB, .max_cs = 2}, + {.name = "FIU1", .fiu_id = FIU1, + .max_map_size = MAP_SIZE_16MB, .max_cs = 4} }; + +static const struct fiu_data npxm8xx_fiu_data = { + .npcm_fiu_data_info = npxm8xx_fiu_info, + .fiu_max = 4, +}; + struct npcm_fiu_spi; struct npcm_fiu_chip { @@ -252,8 +272,7 @@ static void npcm_fiu_set_drd(struct npcm_fiu_spi *fiu, fiu->drd_op.addr.buswidth = op->addr.buswidth; regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG, NPCM_FIU_DRD_CFG_DBW, - ((op->dummy.nbytes * ilog2(op->addr.buswidth)) / BITS_PER_BYTE) - << NPCM_FIU_DRD_DBW_SHIFT); + op->dummy.nbytes << NPCM_FIU_DRD_DBW_SHIFT); fiu->drd_op.dummy.nbytes = op->dummy.nbytes; regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG, NPCM_FIU_DRD_CFG_RDCMD, op->cmd.opcode); @@ -625,6 +644,10 @@ static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc) regmap_update_bits(gcr_regmap, NPCM7XX_INTCR3_OFFSET, NPCM7XX_INTCR3_FIU_FIX, NPCM7XX_INTCR3_FIU_FIX); + } else { + regmap_update_bits(fiu->regmap, NPCM_FIU_CFG, + NPCM_FIU_CFG_FIU_FIX, + NPCM_FIU_CFG_FIU_FIX); } if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN) { @@ -665,6 +688,7 @@ static const struct spi_controller_mem_ops npcm_fiu_mem_ops = { static const struct of_device_id npcm_fiu_dt_ids[] = { { .compatible = "nuvoton,npcm750-fiu", .data = &npcm7xx_fiu_data }, + { .compatible = "nuvoton,npcm845-fiu", .data = &npxm8xx_fiu_data }, { /* sentinel */ } }; diff --git a/drivers/spi/spi-pxa2xx.c 
b/drivers/spi/spi-pxa2xx.c index edb42d08857d..838d12e65144 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1404,6 +1404,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { { PCI_VDEVICE(INTEL, 0x7aab), LPSS_CNL_SSP }, { PCI_VDEVICE(INTEL, 0x7af9), LPSS_CNL_SSP }, { PCI_VDEVICE(INTEL, 0x7afb), LPSS_CNL_SSP }, + /* MTL-P */ + { PCI_VDEVICE(INTEL, 0x7e27), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x7e30), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x7e46), LPSS_CNL_SSP }, /* CNL-LP */ { PCI_VDEVICE(INTEL, 0x9daa), LPSS_CNL_SSP }, { PCI_VDEVICE(INTEL, 0x9dab), LPSS_CNL_SSP }, diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index c26440e9058d..7f346866614a 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -18,7 +18,7 @@ #include <linux/platform_data/spi-s3c64xx.h> -#define MAX_SPI_PORTS 6 +#define MAX_SPI_PORTS 12 #define S3C64XX_SPI_QUIRK_POLL (1 << 0) #define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1) #define AUTOSUSPEND_TIMEOUT 2000 @@ -59,6 +59,7 @@ #define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17) #define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17) #define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17) +#define S3C64XX_SPI_MODE_SELF_LOOPBACK (1<<3) #define S3C64XX_SPI_MODE_RXDMA_ON (1<<2) #define S3C64XX_SPI_MODE_TXDMA_ON (1<<1) #define S3C64XX_SPI_MODE_4BURST (1<<0) @@ -130,11 +131,13 @@ struct s3c64xx_spi_dma_data { * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register. * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS regiter. * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS regiter. + * @clk_div: Internal clock divider * @quirks: Bitmask of known quirks * @high_speed: True, if the controller supports HIGH_SPEED_EN bit. * @clk_from_cmu: True, if the controller does not include a clock mux and * prescaler unit. 
* @clk_ioclk: True if clock is present on this device + * @has_loopback: True if loopback mode can be supported * * The Samsung s3c64xx SPI controller are used on various Samsung SoC's but * differ in some aspects such as the size of the fifo and spi bus clock @@ -146,9 +149,11 @@ struct s3c64xx_spi_port_config { int rx_lvl_offset; int tx_st_done; int quirks; + int clk_div; bool high_speed; bool clk_from_cmu; bool clk_ioclk; + bool has_loopback; }; /** @@ -350,19 +355,59 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) if (is_polling(sdd)) return 0; + /* Requests DMA channels */ + sdd->rx_dma.ch = dma_request_chan(&sdd->pdev->dev, "rx"); + if (IS_ERR(sdd->rx_dma.ch)) { + dev_err(&sdd->pdev->dev, "Failed to get RX DMA channel\n"); + sdd->rx_dma.ch = NULL; + return 0; + } + + sdd->tx_dma.ch = dma_request_chan(&sdd->pdev->dev, "tx"); + if (IS_ERR(sdd->tx_dma.ch)) { + dev_err(&sdd->pdev->dev, "Failed to get TX DMA channel\n"); + dma_release_channel(sdd->rx_dma.ch); + sdd->tx_dma.ch = NULL; + sdd->rx_dma.ch = NULL; + return 0; + } + spi->dma_rx = sdd->rx_dma.ch; spi->dma_tx = sdd->tx_dma.ch; return 0; } +static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi) +{ + struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); + + if (is_polling(sdd)) + return 0; + + /* Releases DMA channels if they are allocated */ + if (sdd->rx_dma.ch && sdd->tx_dma.ch) { + dma_release_channel(sdd->rx_dma.ch); + dma_release_channel(sdd->tx_dma.ch); + sdd->rx_dma.ch = 0; + sdd->tx_dma.ch = 0; + } + + return 0; +} + static bool s3c64xx_spi_can_dma(struct spi_master *master, struct spi_device *spi, struct spi_transfer *xfer) { struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); - return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1; + if (sdd->rx_dma.ch && sdd->tx_dma.ch) { + return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1; + } else { + return false; + } + } static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, @@ -577,6 +622,7 @@ static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) void __iomem *regs = sdd->regs; int ret; u32 val; + int div = sdd->port_conf->clk_div; /* Disable Clock */ if (!sdd->port_conf->clk_from_cmu) { @@ -619,19 +665,21 @@ static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) break; } + if ((sdd->cur_mode & SPI_LOOP) && sdd->port_conf->has_loopback) + val |= S3C64XX_SPI_MODE_SELF_LOOPBACK; + writel(val, regs + S3C64XX_SPI_MODE_CFG); if (sdd->port_conf->clk_from_cmu) { - /* The src_clk clock is divided internally by 2 */ - ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); + ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * div); if (ret) return ret; - sdd->cur_speed = clk_get_rate(sdd->src_clk) / 2; + sdd->cur_speed = clk_get_rate(sdd->src_clk) / div; } else { /* Configure Clock */ val = readl(regs + S3C64XX_SPI_CLK_CFG); val &= ~S3C64XX_SPI_PSR_MASK; - val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) + val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / div - 1) & S3C64XX_SPI_PSR_MASK); writel(val, regs + S3C64XX_SPI_CLK_CFG); @@ -697,7 +745,7 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, sdd->rx_dma.ch && sdd->tx_dma.ch) { use_dma = 1; - } else if (is_polling(sdd) && xfer->len > fifo_len) { + } else if (xfer->len > fifo_len) { tx_buf = xfer->tx_buf; rx_buf = xfer->rx_buf; origin_len = xfer->len; @@ -825,6 +873,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi) struct s3c64xx_spi_csinfo *cs = spi->controller_data; struct s3c64xx_spi_driver_data *sdd; int 
err; + int div; sdd = spi_master_get_devdata(spi->master); if (spi->dev.of_node) { @@ -843,22 +892,24 @@ static int s3c64xx_spi_setup(struct spi_device *spi) pm_runtime_get_sync(&sdd->pdev->dev); + div = sdd->port_conf->clk_div; + /* Check if we can provide the requested rate */ if (!sdd->port_conf->clk_from_cmu) { u32 psr, speed; /* Max possible */ - speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); + speed = clk_get_rate(sdd->src_clk) / div / (0 + 1); if (spi->max_speed_hz > speed) spi->max_speed_hz = speed; - psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; + psr = clk_get_rate(sdd->src_clk) / div / spi->max_speed_hz - 1; psr &= S3C64XX_SPI_PSR_MASK; if (psr == S3C64XX_SPI_PSR_MASK) psr--; - speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); + speed = clk_get_rate(sdd->src_clk) / div / (psr + 1); if (spi->max_speed_hz < speed) { if (psr+1 < S3C64XX_SPI_PSR_MASK) { psr++; @@ -868,7 +919,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi) } } - speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); + speed = clk_get_rate(sdd->src_clk) / div / (psr + 1); if (spi->max_speed_hz >= speed) { spi->max_speed_hz = speed; } else { @@ -1098,6 +1149,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) master->setup = s3c64xx_spi_setup; master->cleanup = s3c64xx_spi_cleanup; master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer; + master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer; master->prepare_message = s3c64xx_spi_prepare_message; master->transfer_one = s3c64xx_spi_transfer_one; master->num_chipselect = sci->num_cs; @@ -1107,6 +1159,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) SPI_BPW_MASK(8); /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + if (sdd->port_conf->has_loopback) + master->mode_bits |= SPI_LOOP; master->auto_runtime_pm = true; if (!is_polling(sdd)) master->can_dma = s3c64xx_spi_can_dma; @@ -1167,22 +1221,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) } } - if (!is_polling(sdd)) { - /* Acquire DMA channels */ - sdd->rx_dma.ch = dma_request_chan(&pdev->dev, "rx"); - if (IS_ERR(sdd->rx_dma.ch)) { - dev_err(&pdev->dev, "Failed to get RX DMA channel\n"); - ret = PTR_ERR(sdd->rx_dma.ch); - goto err_disable_io_clk; - } - sdd->tx_dma.ch = dma_request_chan(&pdev->dev, "tx"); - if (IS_ERR(sdd->tx_dma.ch)) { - dev_err(&pdev->dev, "Failed to get TX DMA channel\n"); - ret = PTR_ERR(sdd->tx_dma.ch); - goto err_release_rx_dma; - } - } - pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_active(&pdev->dev); @@ -1228,12 +1266,6 @@ err_pm_put: pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); - if (!is_polling(sdd)) - dma_release_channel(sdd->tx_dma.ch); -err_release_rx_dma: - if (!is_polling(sdd)) - dma_release_channel(sdd->rx_dma.ch); -err_disable_io_clk: clk_disable_unprepare(sdd->ioclk); err_disable_src_clk: clk_disable_unprepare(sdd->src_clk); @@ -1369,6 +1401,7 @@ static const struct s3c64xx_spi_port_config s3c2443_spi_port_config = { .fifo_lvl_mask = { 0x7f }, .rx_lvl_offset = 13, .tx_st_done = 21, + .clk_div = 2, .high_speed = true, }; @@ -1376,12 +1409,14 @@ static const struct s3c64xx_spi_port_config s3c6410_spi_port_config = { .fifo_lvl_mask = { 0x7f, 0x7F }, .rx_lvl_offset = 13, .tx_st_done = 21, + .clk_div = 2, }; static const struct s3c64xx_spi_port_config s5pv210_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F }, .rx_lvl_offset = 15, 
.tx_st_done = 25, + .clk_div = 2, .high_speed = true, }; @@ -1389,6 +1424,7 @@ static const struct s3c64xx_spi_port_config exynos4_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F }, .rx_lvl_offset = 15, .tx_st_done = 25, + .clk_div = 2, .high_speed = true, .clk_from_cmu = true, .quirks = S3C64XX_SPI_QUIRK_CS_AUTO, @@ -1398,6 +1434,7 @@ static const struct s3c64xx_spi_port_config exynos7_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff}, .rx_lvl_offset = 15, .tx_st_done = 25, + .clk_div = 2, .high_speed = true, .clk_from_cmu = true, .quirks = S3C64XX_SPI_QUIRK_CS_AUTO, @@ -1407,16 +1444,31 @@ static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff}, .rx_lvl_offset = 15, .tx_st_done = 25, + .clk_div = 2, + .high_speed = true, + .clk_from_cmu = true, + .clk_ioclk = true, + .quirks = S3C64XX_SPI_QUIRK_CS_AUTO, +}; + +static const struct s3c64xx_spi_port_config exynosautov9_spi_port_config = { + .fifo_lvl_mask = { 0x1ff, 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f}, + .rx_lvl_offset = 15, + .tx_st_done = 25, + .clk_div = 4, .high_speed = true, .clk_from_cmu = true, .clk_ioclk = true, + .has_loopback = true, .quirks = S3C64XX_SPI_QUIRK_CS_AUTO, }; -static struct s3c64xx_spi_port_config fsd_spi_port_config = { +static const struct s3c64xx_spi_port_config fsd_spi_port_config = { .fifo_lvl_mask = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f}, .rx_lvl_offset = 15, .tx_st_done = 25, + .clk_div = 2, .high_speed = true, .clk_from_cmu = true, .clk_ioclk = false, @@ -1453,6 +1505,9 @@ static const struct of_device_id s3c64xx_spi_dt_match[] = { { .compatible = "samsung,exynos5433-spi", .data = (void *)&exynos5433_spi_port_config, }, + { .compatible = "samsung,exynosautov9-spi", + .data = (void *)&exynosautov9_spi_port_config, + }, { .compatible = "tesla,fsd-spi", .data = (void *)&fsd_spi_port_config, }, diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c index 45f304935332..3e72fad99adf 100644 --- a/drivers/spi/spi-sh.c +++ b/drivers/spi/spi-sh.c @@ -73,11 +73,8 @@ struct spi_sh_data { void __iomem *addr; int irq; struct spi_master *master; - struct list_head queue; - struct work_struct ws; unsigned long cr1; wait_queue_head_t wait; - spinlock_t lock; int width; }; @@ -271,47 +268,39 @@ static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg, return 0; } -static void spi_sh_work(struct work_struct *work) +static int spi_sh_transfer_one_message(struct spi_controller *ctlr, + struct spi_message *mesg) { - struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws); - struct spi_message *mesg; + struct spi_sh_data *ss = spi_controller_get_devdata(ctlr); struct spi_transfer *t; - unsigned long flags; int ret; pr_debug("%s: enter\n", __func__); - spin_lock_irqsave(&ss->lock, flags); - while (!list_empty(&ss->queue)) { - mesg = list_entry(ss->queue.next, struct spi_message, queue); - list_del_init(&mesg->queue); - - spin_unlock_irqrestore(&ss->lock, flags); - list_for_each_entry(t, &mesg->transfers, transfer_list) { - pr_debug("tx_buf = %p, rx_buf = %p\n", - t->tx_buf, t->rx_buf); - pr_debug("len = %d, delay.value = %d\n", - t->len, t->delay.value); - - if (t->tx_buf) { - ret = spi_sh_send(ss, mesg, t); - if (ret < 0) - goto error; - } - if (t->rx_buf) { - ret = spi_sh_receive(ss, mesg, t); - if (ret < 0) - goto error; - } - mesg->actual_length += t->len; - } - spin_lock_irqsave(&ss->lock, flags); + spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1); - mesg->status 
= 0; - if (mesg->complete) - mesg->complete(mesg->context); + list_for_each_entry(t, &mesg->transfers, transfer_list) { + pr_debug("tx_buf = %p, rx_buf = %p\n", + t->tx_buf, t->rx_buf); + pr_debug("len = %d, delay.value = %d\n", + t->len, t->delay.value); + + if (t->tx_buf) { + ret = spi_sh_send(ss, mesg, t); + if (ret < 0) + goto error; + } + if (t->rx_buf) { + ret = spi_sh_receive(ss, mesg, t); + if (ret < 0) + goto error; + } + mesg->actual_length += t->len; } + mesg->status = 0; + spi_finalize_current_message(ctlr); + clear_fifo(ss); spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1); udelay(100); @@ -321,12 +310,11 @@ static void spi_sh_work(struct work_struct *work) clear_fifo(ss); - spin_unlock_irqrestore(&ss->lock, flags); - - return; + return 0; error: mesg->status = ret; + spi_finalize_current_message(ctlr); if (mesg->complete) mesg->complete(mesg->context); @@ -334,6 +322,7 @@ static void spi_sh_work(struct work_struct *work) SPI_SH_CR1); clear_fifo(ss); + return ret; } static int spi_sh_setup(struct spi_device *spi) @@ -355,29 +344,6 @@ static int spi_sh_setup(struct spi_device *spi) return 0; } -static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg) -{ - struct spi_sh_data *ss = spi_master_get_devdata(spi->master); - unsigned long flags; - - pr_debug("%s: enter\n", __func__); - pr_debug("\tmode = %02x\n", spi->mode); - - spin_lock_irqsave(&ss->lock, flags); - - mesg->actual_length = 0; - mesg->status = -EINPROGRESS; - - spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1); - - list_add_tail(&mesg->queue, &ss->queue); - schedule_work(&ss->ws); - - spin_unlock_irqrestore(&ss->lock, flags); - - return 0; -} - static void spi_sh_cleanup(struct spi_device *spi) { struct spi_sh_data *ss = spi_master_get_devdata(spi->master); @@ -416,7 +382,6 @@ static int spi_sh_remove(struct platform_device *pdev) struct spi_sh_data *ss = platform_get_drvdata(pdev); spi_unregister_master(ss->master); - flush_work(&ss->ws); free_irq(ss->irq, ss); return 0; @@ -467,9 +432,6 @@ static int spi_sh_probe(struct platform_device *pdev) dev_err(&pdev->dev, "ioremap error.\n"); return -ENOMEM; } - INIT_LIST_HEAD(&ss->queue); - spin_lock_init(&ss->lock); - INIT_WORK(&ss->ws, spi_sh_work); init_waitqueue_head(&ss->wait); ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss); @@ -481,7 +443,7 @@ static int spi_sh_probe(struct platform_device *pdev) master->num_chipselect = 2; master->bus_num = pdev->id; master->setup = spi_sh_setup; - master->transfer = spi_sh_transfer; + master->transfer_one_message = spi_sh_transfer_one_message; master->cleanup = spi_sh_cleanup; ret = spi_register_master(master); diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c index f7c1e20432e0..e29e85cee88a 100644 --- a/drivers/spi/spi-sifive.c +++ b/drivers/spi/spi-sifive.c @@ -427,6 +427,44 @@ static int sifive_spi_remove(struct platform_device *pdev) return 0; } +static int sifive_spi_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct sifive_spi *spi = spi_master_get_devdata(master); + int ret; + + ret = spi_master_suspend(master); + if (ret) + return ret; + + /* Disable all the interrupts just in case */ + sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0); + + clk_disable_unprepare(spi->clk); + + return ret; +} + +static int sifive_spi_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct sifive_spi *spi = spi_master_get_devdata(master); + int ret; + + ret = clk_prepare_enable(spi->clk); + if (ret) + return ret; + ret = 
spi_master_resume(master); + if (ret) + clk_disable_unprepare(spi->clk); + + return ret; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(sifive_spi_pm_ops, + sifive_spi_suspend, sifive_spi_resume); + + static const struct of_device_id sifive_spi_of_match[] = { { .compatible = "sifive,spi0", }, {} @@ -438,6 +476,7 @@ static struct platform_driver sifive_spi_driver = { .remove = sifive_spi_remove, .driver = { .name = SIFIVE_SPI_DRIVER_NAME, + .pm = &sifive_spi_pm_ops, .of_match_table = sifive_spi_of_match, }, }; diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index c0239e405c39..f3fe92300639 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -299,8 +299,7 @@ static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi) STM32_BUSY_TIMEOUT_US); } -static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi, - const struct spi_mem_op *op) +static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi) { u32 cr, sr; int err = 0; @@ -331,8 +330,7 @@ out: return err; } -static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi, - const struct spi_mem_op *op) +static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi) { u32 cr; @@ -349,7 +347,7 @@ static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi, return 0; } -static int stm32_qspi_get_mode(struct stm32_qspi *qspi, u8 buswidth) +static int stm32_qspi_get_mode(u8 buswidth) { if (buswidth == 4) return CCR_BUSWIDTH_4; @@ -382,11 +380,11 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op) ccr = qspi->fmode; ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode); ccr |= FIELD_PREP(CCR_IMODE_MASK, - stm32_qspi_get_mode(qspi, op->cmd.buswidth)); + stm32_qspi_get_mode(op->cmd.buswidth)); if (op->addr.nbytes) { ccr |= FIELD_PREP(CCR_ADMODE_MASK, - stm32_qspi_get_mode(qspi, op->addr.buswidth)); + stm32_qspi_get_mode(op->addr.buswidth)); ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1); } @@ -396,7 +394,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op) if (op->data.nbytes) { ccr |= FIELD_PREP(CCR_DMODE_MASK, - stm32_qspi_get_mode(qspi, op->data.buswidth)); + stm32_qspi_get_mode(op->data.buswidth)); } writel_relaxed(ccr, qspi->io_base + QSPI_CCR); @@ -405,7 +403,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op) writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR); if (qspi->fmode == CCR_FMODE_APM) - err_poll_status = stm32_qspi_wait_poll_status(qspi, op); + err_poll_status = stm32_qspi_wait_poll_status(qspi); err = stm32_qspi_tx(qspi, op); @@ -420,7 +418,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op) goto abort; /* wait end of tx in indirect mode */ - err = stm32_qspi_wait_cmd(qspi, op); + err = stm32_qspi_wait_cmd(qspi); if (err) goto abort; diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c index ea706d9629cb..47cbe73137c2 100644 --- a/drivers/spi/spi-synquacer.c +++ b/drivers/spi/spi-synquacer.c @@ -783,6 +783,7 @@ static int __maybe_unused synquacer_spi_resume(struct device *dev) ret = synquacer_spi_enable(master); if (ret) { + clk_disable_unprepare(sspi->clk); dev_err(dev, "failed to enable spi (%d)\n", ret); return ret; } diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 38360434d6e9..148043d0c2b8 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1136,7 +1136,7 @@ exit_free_master: static int tegra_slink_remove(struct platform_device *pdev) { - struct spi_master *master = 
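The sifive PM ops added above and the synquacer resume fix follow the same ordering rule: quiesce the controller queue before gating the clock on suspend, and on resume undo the clock if the controller fails to come back. A generic sketch of that pattern, assuming a driver-private struct holding only a clk (foo_* names are illustrative):

#include <linux/clk.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>

struct foo_spi { struct clk *clk; };

static int foo_spi_suspend(struct device *dev)
{
        struct spi_controller *ctlr = dev_get_drvdata(dev);
        struct foo_spi *fs = spi_controller_get_devdata(ctlr);
        int ret;

        /* Stop the queue first; only then is it safe to gate the clock. */
        ret = spi_controller_suspend(ctlr);
        if (ret)
                return ret;

        clk_disable_unprepare(fs->clk);
        return 0;
}

static int foo_spi_resume(struct device *dev)
{
        struct spi_controller *ctlr = dev_get_drvdata(dev);
        struct foo_spi *fs = spi_controller_get_devdata(ctlr);
        int ret;

        ret = clk_prepare_enable(fs->clk);
        if (ret)
                return ret;

        ret = spi_controller_resume(ctlr);
        if (ret)        /* unwind the clock, as the synquacer fix above does */
                clk_disable_unprepare(fs->clk);
        return ret;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);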
platform_get_drvdata(pdev); + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct tegra_slink_data *tspi = spi_master_get_devdata(master); spi_unregister_master(master); @@ -1151,6 +1151,7 @@ static int tegra_slink_remove(struct platform_device *pdev) if (tspi->rx_dma_chan) tegra_slink_deinit_dma_param(tspi, true); + spi_master_put(master); return 0; } diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index 66f647f32876..c89592b21ffc 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -37,6 +37,16 @@ #define QSPI_RX_EN BIT(12) #define QSPI_CS_SW_VAL BIT(20) #define QSPI_CS_SW_HW BIT(21) + +#define QSPI_CS_POL_INACTIVE(n) (1 << (22 + (n))) +#define QSPI_CS_POL_INACTIVE_MASK (0xF << 22) +#define QSPI_CS_SEL_0 (0 << 26) +#define QSPI_CS_SEL_1 (1 << 26) +#define QSPI_CS_SEL_2 (2 << 26) +#define QSPI_CS_SEL_3 (3 << 26) +#define QSPI_CS_SEL_MASK (3 << 26) +#define QSPI_CS_SEL(x) (((x) & 0x3) << 26) + #define QSPI_CONTROL_MODE_0 (0 << 28) #define QSPI_CONTROL_MODE_3 (3 << 28) #define QSPI_CONTROL_MODE_MASK (3 << 28) @@ -154,6 +164,7 @@ struct tegra_qspi_soc_data { bool has_dma; bool cmb_xfer_capable; + unsigned int cs_count; }; struct tegra_qspi_client_data { @@ -812,6 +823,7 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_tran tegra_qspi_mask_clear_irq(tqspi); command1 = tqspi->def_command1_reg; + command1 |= QSPI_CS_SEL(spi->chip_select); command1 |= QSPI_BIT_LENGTH(bits_per_word - 1); command1 &= ~QSPI_CONTROL_MODE_MASK; @@ -941,10 +953,11 @@ static int tegra_qspi_setup(struct spi_device *spi) /* keep default cs state to inactive */ val = tqspi->def_command1_reg; + val |= QSPI_CS_SEL(spi->chip_select); if (spi->mode & SPI_CS_HIGH) - val &= ~QSPI_CS_SW_VAL; + val &= ~QSPI_CS_POL_INACTIVE(spi->chip_select); else - val |= QSPI_CS_SW_VAL; + val |= QSPI_CS_POL_INACTIVE(spi->chip_select); tqspi->def_command1_reg = val; tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1); @@ -1425,16 +1438,25 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data) static struct tegra_qspi_soc_data tegra210_qspi_soc_data = { .has_dma = true, .cmb_xfer_capable = false, + .cs_count = 1, }; static struct tegra_qspi_soc_data tegra186_qspi_soc_data = { .has_dma = true, .cmb_xfer_capable = true, + .cs_count = 1, }; static struct tegra_qspi_soc_data tegra234_qspi_soc_data = { .has_dma = false, .cmb_xfer_capable = true, + .cs_count = 1, +}; + +static struct tegra_qspi_soc_data tegra241_qspi_soc_data = { + .has_dma = false, + .cmb_xfer_capable = true, + .cs_count = 4, }; static const struct of_device_id tegra_qspi_of_match[] = { @@ -1450,6 +1472,9 @@ static const struct of_device_id tegra_qspi_of_match[] = { }, { .compatible = "nvidia,tegra234-qspi", .data = &tegra234_qspi_soc_data, + }, { + .compatible = "nvidia,tegra241-qspi", + .data = &tegra241_qspi_soc_data, }, {} }; @@ -1467,6 +1492,9 @@ static const struct acpi_device_id tegra_qspi_acpi_match[] = { }, { .id = "NVDA1413", .driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data, + }, { + .id = "NVDA1513", + .driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data, }, {} }; @@ -1506,6 +1534,7 @@ static int tegra_qspi_probe(struct platform_device *pdev) spin_lock_init(&tqspi->lock); tqspi->soc_data = device_get_match_data(&pdev->dev); + master->num_chipselect = tqspi->soc_data->cs_count; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); tqspi->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(tqspi->base)) diff --git 
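The tegra20-slink remove() fix above addresses a use-after-free class of bug: spi_unregister_master() drops a reference, after which the devdata may already be freed. Taking an extra reference first keeps it valid for the remaining teardown. A sketch with an assumed foo_data private struct:

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct foo_data { int irq; };

static int foo_remove(struct platform_device *pdev)
{
        struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
        struct foo_data *fd = spi_master_get_devdata(master);

        spi_unregister_master(master);  /* may drop the last reference otherwise */

        free_irq(fd->irq, fd);          /* devdata is still valid here */

        spi_master_put(master);         /* release the reference taken above */
        return 0;
}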
a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index b5b65d882d7a..60086869bcae 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -57,7 +57,6 @@ struct ti_qspi { void *rx_bb_addr; struct dma_chan *rx_chan; - u32 spi_max_frequency; u32 cmd; u32 dc; @@ -140,37 +139,19 @@ static inline void ti_qspi_write(struct ti_qspi *qspi, static int ti_qspi_setup(struct spi_device *spi) { struct ti_qspi *qspi = spi_master_get_devdata(spi->master); - struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg; - int clk_div = 0, ret; - u32 clk_ctrl_reg, clk_rate, clk_mask; + int ret; if (spi->master->busy) { dev_dbg(qspi->dev, "master busy doing other transfers\n"); return -EBUSY; } - if (!qspi->spi_max_frequency) { + if (!qspi->master->max_speed_hz) { dev_err(qspi->dev, "spi max frequency not defined\n"); return -EINVAL; } - clk_rate = clk_get_rate(qspi->fclk); - - clk_div = DIV_ROUND_UP(clk_rate, qspi->spi_max_frequency) - 1; - - if (clk_div < 0) { - dev_dbg(qspi->dev, "clock divider < 0, using /1 divider\n"); - return -EINVAL; - } - - if (clk_div > QSPI_CLK_DIV_MAX) { - dev_dbg(qspi->dev, "clock divider >%d , using /%d divider\n", - QSPI_CLK_DIV_MAX, QSPI_CLK_DIV_MAX + 1); - return -EINVAL; - } - - dev_dbg(qspi->dev, "hz: %d, clock divider %d\n", - qspi->spi_max_frequency, clk_div); + spi->max_speed_hz = min(spi->max_speed_hz, qspi->master->max_speed_hz); ret = pm_runtime_resume_and_get(qspi->dev); if (ret < 0) { @@ -178,18 +159,6 @@ static int ti_qspi_setup(struct spi_device *spi) return ret; } - clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG); - - clk_ctrl_reg &= ~QSPI_CLK_EN; - - /* disable SCLK */ - ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG); - - /* enable SCLK */ - clk_mask = QSPI_CLK_EN | clk_div; - ti_qspi_write(qspi, clk_mask, QSPI_SPI_CLOCK_CNTRL_REG); - ctx_reg->clkctrl = clk_mask; - pm_runtime_mark_last_busy(qspi->dev); ret = pm_runtime_put_autosuspend(qspi->dev); if (ret < 0) { @@ -200,6 +169,37 @@ static int ti_qspi_setup(struct spi_device *spi) return 0; } +static void ti_qspi_setup_clk(struct ti_qspi *qspi, u32 speed_hz) +{ + struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg; + int clk_div; + u32 clk_ctrl_reg, clk_rate, clk_ctrl_new; + + clk_rate = clk_get_rate(qspi->fclk); + clk_div = DIV_ROUND_UP(clk_rate, speed_hz) - 1; + clk_div = clamp(clk_div, 0, QSPI_CLK_DIV_MAX); + dev_dbg(qspi->dev, "hz: %d, clock divider %d\n", speed_hz, clk_div); + + pm_runtime_resume_and_get(qspi->dev); + + clk_ctrl_new = QSPI_CLK_EN | clk_div; + if (ctx_reg->clkctrl != clk_ctrl_new) { + clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG); + + clk_ctrl_reg &= ~QSPI_CLK_EN; + + /* disable SCLK */ + ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG); + + /* enable SCLK */ + ti_qspi_write(qspi, clk_ctrl_new, QSPI_SPI_CLOCK_CNTRL_REG); + ctx_reg->clkctrl = clk_ctrl_new; + } + + pm_runtime_mark_last_busy(qspi->dev); + pm_runtime_put_autosuspend(qspi->dev); +} + static void ti_qspi_restore_ctx(struct ti_qspi *qspi) { struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg; @@ -623,8 +623,10 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, mutex_lock(&qspi->list_lock); - if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select) + if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select) { + ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz); ti_qspi_enable_memory_map(mem->spi); + } ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth, op->addr.nbytes, op->dummy.nbytes); @@ -701,6 +703,7 @@ static int 
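The new ti_qspi_setup_clk() above replaces two error returns with a clamp: the divider is rounded up so SCLK never exceeds the requested rate, then forced into the register field's range, and the cached clkctrl value avoids rewriting the register when the speed has not changed. The divider arithmetic in isolation (FOO_CLK_DIV_MAX is an assumed stand-in for the driver's QSPI_CLK_DIV_MAX):

#include <linux/clk.h>
#include <linux/math.h>
#include <linux/minmax.h>

#define FOO_CLK_DIV_MAX 0xffff  /* assumed width of the divider field */

static int foo_clk_div(struct clk *fclk, u32 speed_hz)
{
        int clk_div = DIV_ROUND_UP(clk_get_rate(fclk), speed_hz) - 1;

        /* Never faster than requested, never outside the field. */
        return clamp(clk_div, 0, FOO_CLK_DIV_MAX);
}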
ti_qspi_start_transfer_one(struct spi_master *master, wlen = t->bits_per_word >> 3; transfer_len_words = min(t->len / wlen, frame_len_words); + ti_qspi_setup_clk(qspi, t->speed_hz); ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen); if (ret) { dev_dbg(qspi->dev, "transfer message failed\n"); @@ -851,7 +854,7 @@ static int ti_qspi_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); if (!of_property_read_u32(np, "spi-max-frequency", &max_freq)) - qspi->spi_max_frequency = max_freq; + master->max_speed_hz = max_freq; dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index dfaa1d79a78b..cbb60198a7f0 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -455,35 +455,10 @@ static void pch_spi_reset(struct spi_master *master) static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) { - - struct spi_transfer *transfer; struct pch_spi_data *data = spi_master_get_devdata(pspi->master); int retval; unsigned long flags; - spin_lock_irqsave(&data->lock, flags); - /* validate Tx/Rx buffers and Transfer length */ - list_for_each_entry(transfer, &pmsg->transfers, transfer_list) { - if (!transfer->tx_buf && !transfer->rx_buf) { - dev_err(&pspi->dev, - "%s Tx and Rx buffer NULL\n", __func__); - retval = -EINVAL; - goto err_return_spinlock; - } - - if (!transfer->len) { - dev_err(&pspi->dev, "%s Transfer length invalid\n", - __func__); - retval = -EINVAL; - goto err_return_spinlock; - } - - dev_dbg(&pspi->dev, - "%s Tx/Rx buffer valid. Transfer length valid\n", - __func__); - } - spin_unlock_irqrestore(&data->lock, flags); - /* We won't process any messages if we have been asked to terminate */ if (data->status == STATUS_EXITING) { dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__); @@ -518,10 +493,6 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) err_out: dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); return retval; -err_return_spinlock: - dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); - spin_unlock_irqrestore(&data->lock, flags); - return retval; } static inline void pch_spi_select_chip(struct pch_spi_data *data, @@ -1365,6 +1336,7 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev) master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); master->max_speed_hz = PCH_MAX_BAUDRATE; + master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; data->board_dat = board_dat; data->plat_dev = plat_dev; diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 2b5afae8ff7f..c760aac070e5 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -134,6 +134,8 @@ #define GQSPI_DMA_UNALIGN 0x3 #define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */ +#define GQSPI_MAX_NUM_CS 2 /* Maximum number of chip selects */ + #define SPI_AUTOSUSPEND_TIMEOUT 3000 enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA}; @@ -363,8 +365,13 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high) genfifoentry |= GQSPI_GENFIFO_MODE_SPI; if (!is_high) { - xqspi->genfifobus = GQSPI_GENFIFO_BUS_LOWER; - xqspi->genfifocs = GQSPI_GENFIFO_CS_LOWER; + if (!qspi->chip_select) { + xqspi->genfifobus = GQSPI_GENFIFO_BUS_LOWER; + xqspi->genfifocs = GQSPI_GENFIFO_CS_LOWER; + } else { + xqspi->genfifobus = GQSPI_GENFIFO_BUS_UPPER; + xqspi->genfifocs = GQSPI_GENFIFO_CS_UPPER; + } 
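Setting SPI_CONTROLLER_MUST_RX/MUST_TX, as the topcliff-pch hunk above does, is what makes deleting the driver's own buffer validation safe: during spi_map_msg() the core substitutes its dummy_rx/dummy_tx scratch buffers for absent ones, so the driver always sees valid pointers. A sketch of the probe-side line in context (foo_* illustrative):

#include <linux/spi/spi.h>

static int foo_setup_controller(struct spi_controller *ctlr)
{
        /*
         * Ask the core to substitute dummy_rx/dummy_tx for missing
         * buffers; per-transfer NULL checks in the driver become
         * unnecessary.
         */
        ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
        return 0;
}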
genfifoentry |= xqspi->genfifobus; genfifoentry |= xqspi->genfifocs; genfifoentry |= GQSPI_GENFIFO_CS_SETUP; @@ -1099,6 +1106,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) struct zynqmp_qspi *xqspi; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; + u32 num_cs; ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi)); if (!ctlr) @@ -1176,8 +1184,19 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) if (ret) goto clk_dis_all; + ret = of_property_read_u32(np, "num-cs", &num_cs); + if (ret < 0) { + ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS; + } else if (num_cs > GQSPI_MAX_NUM_CS) { + ret = -EINVAL; + dev_err(&pdev->dev, "only %d chip selects are available\n", + GQSPI_MAX_NUM_CS); + goto clk_dis_all; + } else { + ctlr->num_chipselect = num_cs; + } + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); - ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS; ctlr->mem_ops = &zynqmp_qspi_mem_ops; ctlr->setup = zynqmp_qspi_setup_op; ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ea09d1b42bf6..1c14d682ffed 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -33,6 +33,7 @@ #include <linux/idr.h> #include <linux/platform_data/x86/apple.h> #include <linux/ptp_clock_kernel.h> +#include <linux/percpu.h> #define CREATE_TRACE_POINTS #include <trace/events/spi.h> @@ -49,6 +50,7 @@ static void spidev_release(struct device *dev) spi_controller_put(spi->controller); kfree(spi->driver_override); + free_percpu(spi->pcpu_statistics); kfree(spi); } @@ -93,6 +95,47 @@ static ssize_t driver_override_show(struct device *dev, } static DEVICE_ATTR_RW(driver_override); +static struct spi_statistics *spi_alloc_pcpu_stats(struct device *dev) +{ + struct spi_statistics __percpu *pcpu_stats; + + if (dev) + pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics); + else + pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL); + + if (pcpu_stats) { + int cpu; + + for_each_possible_cpu(cpu) { + struct spi_statistics *stat; + + stat = per_cpu_ptr(pcpu_stats, cpu); + u64_stats_init(&stat->syncp); + } + } + return pcpu_stats; +} + +#define spi_pcpu_stats_totalize(ret, in, field) \ +do { \ + int i; \ + ret = 0; \ + for_each_possible_cpu(i) { \ + const struct spi_statistics *pcpu_stats; \ + u64 inc; \ + unsigned int start; \ + pcpu_stats = per_cpu_ptr(in, i); \ + do { \ + start = u64_stats_fetch_begin_irq( \ + &pcpu_stats->syncp); \ + inc = u64_stats_read(&pcpu_stats->field); \ + } while (u64_stats_fetch_retry_irq( \ + &pcpu_stats->syncp, start)); \ + ret += inc; \ + } \ +} while (0) + #define SPI_STATISTICS_ATTRS(field, file) \ static ssize_t spi_controller_##field##_show(struct device *dev, \ struct device_attribute *attr, \ @@ -100,7 +143,7 @@ static ssize_t spi_controller_##field##_show(struct device *dev, \ { \ struct spi_controller *ctlr = container_of(dev, \ struct spi_controller, dev); \ - return spi_statistics_##field##_show(&ctlr->statistics, buf); \ + return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \ } \ static struct device_attribute dev_attr_spi_controller_##field = { \ .attr = { .name = file, .mode = 0444 }, \ @@ -111,47 +154,46 @@ static ssize_t spi_device_##field##_show(struct device *dev, \ char *buf) \ { \ struct spi_device *spi = to_spi_device(dev); \ - return spi_statistics_##field##_show(&spi->statistics, buf); \ + return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \ } \ static struct device_attribute dev_attr_spi_device_##field = { \ .attr = { .name = file, .mode = 
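spi_pcpu_stats_totalize() above is the read side of a classic per-cpu counter scheme: each CPU's snapshot is taken under a u64_stats retry loop, then summed. The same pattern as a plain function for a single hypothetical counter (the driver generates this per field via the macro):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct foo_stats {
        u64_stats_t             bytes;
        struct u64_stats_sync   syncp;
};

static u64 foo_total_bytes(struct foo_stats __percpu *pcpu)
{
        u64 total = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                const struct foo_stats *s = per_cpu_ptr(pcpu, cpu);
                unsigned int start;
                u64 val;

                do {
                        start = u64_stats_fetch_begin_irq(&s->syncp);
                        val = u64_stats_read(&s->bytes);
                } while (u64_stats_fetch_retry_irq(&s->syncp, start));

                total += val;
        }
        return total;
}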
0444 }, \ .show = spi_device_##field##_show, \ } -#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ +#define SPI_STATISTICS_SHOW_NAME(name, file, field) \ static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ char *buf) \ { \ - unsigned long flags; \ ssize_t len; \ - spin_lock_irqsave(&stat->lock, flags); \ - len = sysfs_emit(buf, format_string "\n", stat->field); \ - spin_unlock_irqrestore(&stat->lock, flags); \ + u64 val; \ + spi_pcpu_stats_totalize(val, stat, field); \ + len = sysfs_emit(buf, "%llu\n", val); \ return len; \ } \ SPI_STATISTICS_ATTRS(name, file) -#define SPI_STATISTICS_SHOW(field, format_string) \ +#define SPI_STATISTICS_SHOW(field) \ SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ - field, format_string) + field) -SPI_STATISTICS_SHOW(messages, "%lu"); -SPI_STATISTICS_SHOW(transfers, "%lu"); -SPI_STATISTICS_SHOW(errors, "%lu"); -SPI_STATISTICS_SHOW(timedout, "%lu"); +SPI_STATISTICS_SHOW(messages); +SPI_STATISTICS_SHOW(transfers); +SPI_STATISTICS_SHOW(errors); +SPI_STATISTICS_SHOW(timedout); -SPI_STATISTICS_SHOW(spi_sync, "%lu"); -SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); -SPI_STATISTICS_SHOW(spi_async, "%lu"); +SPI_STATISTICS_SHOW(spi_sync); +SPI_STATISTICS_SHOW(spi_sync_immediate); +SPI_STATISTICS_SHOW(spi_async); -SPI_STATISTICS_SHOW(bytes, "%llu"); -SPI_STATISTICS_SHOW(bytes_rx, "%llu"); -SPI_STATISTICS_SHOW(bytes_tx, "%llu"); +SPI_STATISTICS_SHOW(bytes); +SPI_STATISTICS_SHOW(bytes_rx); +SPI_STATISTICS_SHOW(bytes_tx); #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ "transfer_bytes_histo_" number, \ - transfer_bytes_histo[index], "%lu") + transfer_bytes_histo[index]) SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); @@ -170,7 +212,7 @@ SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); -SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu"); +SPI_STATISTICS_SHOW(transfers_split_maxsize); static struct attribute *spi_dev_attrs[] = { &dev_attr_modalias.attr, @@ -267,30 +309,33 @@ static const struct attribute_group *spi_master_groups[] = { NULL, }; -static void spi_statistics_add_transfer_stats(struct spi_statistics *stats, +static void spi_statistics_add_transfer_stats(struct spi_statistics *pcpu_stats, struct spi_transfer *xfer, struct spi_controller *ctlr) { - unsigned long flags; int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; + struct spi_statistics *stats; if (l2len < 0) l2len = 0; - spin_lock_irqsave(&stats->lock, flags); + get_cpu(); + stats = this_cpu_ptr(pcpu_stats); + u64_stats_update_begin(&stats->syncp); - stats->transfers++; - stats->transfer_bytes_histo[l2len]++; + u64_stats_inc(&stats->transfers); + u64_stats_inc(&stats->transfer_bytes_histo[l2len]); - stats->bytes += xfer->len; + u64_stats_add(&stats->bytes, xfer->len); if ((xfer->tx_buf) && (xfer->tx_buf != ctlr->dummy_tx)) - stats->bytes_tx += xfer->len; + u64_stats_add(&stats->bytes_tx, xfer->len); if ((xfer->rx_buf) && (xfer->rx_buf != ctlr->dummy_rx)) - stats->bytes_rx += xfer->len; + u64_stats_add(&stats->bytes_rx, xfer->len); - spin_unlock_irqrestore(&stats->lock, flags); + u64_stats_update_end(&stats->syncp); + put_cpu(); } /* @@ -519,14 +564,19 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr) return NULL; } + spi->pcpu_statistics = 
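spi_statistics_add_transfer_stats() above is the matching writer side: pin the current CPU, enter a u64_stats write section, bump the counters, and leave. A condensed sketch reusing the foo_stats type from the previous sketch; get_cpu_ptr()/put_cpu_ptr() is equivalent to the get_cpu()/this_cpu_ptr()/put_cpu() sequence used in the hunk:

static void foo_stats_add(struct foo_stats __percpu *pcpu, u64 n)
{
        struct foo_stats *s = get_cpu_ptr(pcpu);  /* disables preemption */

        u64_stats_update_begin(&s->syncp);
        u64_stats_add(&s->bytes, n);
        u64_stats_update_end(&s->syncp);

        put_cpu_ptr(pcpu);
}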
spi_alloc_pcpu_stats(NULL); + if (!spi->pcpu_statistics) { + kfree(spi); + spi_controller_put(ctlr); + return NULL; + } + spi->master = spi->controller = ctlr; spi->dev.parent = &ctlr->dev; spi->dev.bus = &spi_bus_type; spi->dev.release = spidev_release; spi->mode = ctlr->buswidth_override_bits; - spin_lock_init(&spi->statistics.lock); - device_initialize(&spi->dev); return spi; } @@ -1225,8 +1275,8 @@ static int spi_transfer_wait(struct spi_controller *ctlr, struct spi_message *msg, struct spi_transfer *xfer) { - struct spi_statistics *statm = &ctlr->statistics; - struct spi_statistics *stats = &msg->spi->statistics; + struct spi_statistics *statm = ctlr->pcpu_statistics; + struct spi_statistics *stats = msg->spi->pcpu_statistics; u32 speed_hz = xfer->speed_hz; unsigned long long ms; @@ -1304,7 +1354,7 @@ int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer) /* Nothing to do here */ break; case SPI_DELAY_UNIT_SCK: - /* clock cycles need to be obtained from spi_transfer */ + /* Clock cycles need to be obtained from spi_transfer */ if (!xfer) return -EINVAL; /* @@ -1353,7 +1403,7 @@ static void _spi_transfer_cs_change_delay(struct spi_message *msg, u32 unit = xfer->cs_change_delay.unit; int ret; - /* return early on "fast" mode - for everything but USECS */ + /* Return early on "fast" mode - for everything but USECS */ if (!delay) { if (unit == SPI_DELAY_UNIT_USECS) _spi_transfer_delay_ns(default_delay_ns); @@ -1382,8 +1432,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr, struct spi_transfer *xfer; bool keep_cs = false; int ret = 0; - struct spi_statistics *statm = &ctlr->statistics; - struct spi_statistics *stats = &msg->spi->statistics; + struct spi_statistics *statm = ctlr->pcpu_statistics; + struct spi_statistics *stats = msg->spi->pcpu_statistics; spi_set_cs(msg->spi, true, false); @@ -1499,6 +1549,103 @@ static void spi_idle_runtime_pm(struct spi_controller *ctlr) } } +static int __spi_pump_transfer_message(struct spi_controller *ctlr, + struct spi_message *msg, bool was_busy) +{ + struct spi_transfer *xfer; + int ret; + + if (!was_busy && ctlr->auto_runtime_pm) { + ret = pm_runtime_get_sync(ctlr->dev.parent); + if (ret < 0) { + pm_runtime_put_noidle(ctlr->dev.parent); + dev_err(&ctlr->dev, "Failed to power device: %d\n", + ret); + return ret; + } + } + + if (!was_busy) + trace_spi_controller_busy(ctlr); + + if (!was_busy && ctlr->prepare_transfer_hardware) { + ret = ctlr->prepare_transfer_hardware(ctlr); + if (ret) { + dev_err(&ctlr->dev, + "failed to prepare transfer hardware: %d\n", + ret); + + if (ctlr->auto_runtime_pm) + pm_runtime_put(ctlr->dev.parent); + + msg->status = ret; + spi_finalize_current_message(ctlr); + + return ret; + } + } + + trace_spi_message_start(msg); + + if (ctlr->prepare_message) { + ret = ctlr->prepare_message(ctlr, msg); + if (ret) { + dev_err(&ctlr->dev, "failed to prepare message: %d\n", + ret); + msg->status = ret; + spi_finalize_current_message(ctlr); + return ret; + } + msg->prepared = true; + } + + ret = spi_map_msg(ctlr, msg); + if (ret) { + msg->status = ret; + spi_finalize_current_message(ctlr); + return ret; + } + + if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + xfer->ptp_sts_word_pre = 0; + ptp_read_system_prets(xfer->ptp_sts); + } + } + + /* + * Drivers implementation of transfer_one_message() must arrange for + * spi_finalize_current_message() to get called. Most drivers will do + * this in the calling context, but some don't. 
For those cases, a + * completion is used to guarantee that this function does not return + * until spi_finalize_current_message() is done accessing + * ctlr->cur_msg. + * Use of the following two flags enable to opportunistically skip the + * use of the completion since its use involves expensive spin locks. + * In case of a race with the context that calls + * spi_finalize_current_message() the completion will always be used, + * due to strict ordering of these flags using barriers. + */ + WRITE_ONCE(ctlr->cur_msg_incomplete, true); + WRITE_ONCE(ctlr->cur_msg_need_completion, false); + reinit_completion(&ctlr->cur_msg_completion); + smp_wmb(); /* Make these available to spi_finalize_current_message() */ + + ret = ctlr->transfer_one_message(ctlr, msg); + if (ret) { + dev_err(&ctlr->dev, + "failed to transfer one message from queue\n"); + return ret; + } + + WRITE_ONCE(ctlr->cur_msg_need_completion, true); + smp_mb(); /* See spi_finalize_current_message()... */ + if (READ_ONCE(ctlr->cur_msg_incomplete)) + wait_for_completion(&ctlr->cur_msg_completion); + + return 0; +} + /** * __spi_pump_messages - function which processes spi message queue * @ctlr: controller to process queue for @@ -1514,34 +1661,25 @@ static void spi_idle_runtime_pm(struct spi_controller *ctlr) */ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) { - struct spi_transfer *xfer; struct spi_message *msg; bool was_busy = false; unsigned long flags; int ret; + /* Take the IO mutex */ + mutex_lock(&ctlr->io_mutex); + /* Lock queue */ spin_lock_irqsave(&ctlr->queue_lock, flags); /* Make sure we are not already running a message */ - if (ctlr->cur_msg) { - spin_unlock_irqrestore(&ctlr->queue_lock, flags); - return; - } - - /* If another context is idling the device then defer */ - if (ctlr->idling) { - kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); - spin_unlock_irqrestore(&ctlr->queue_lock, flags); - return; - } + if (ctlr->cur_msg) + goto out_unlock; /* Check if the queue is idle */ if (list_empty(&ctlr->queue) || !ctlr->running) { - if (!ctlr->busy) { - spin_unlock_irqrestore(&ctlr->queue_lock, flags); - return; - } + if (!ctlr->busy) + goto out_unlock; /* Defer any non-atomic teardown to the thread */ if (!in_kthread) { @@ -1549,17 +1687,16 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) !ctlr->unprepare_transfer_hardware) { spi_idle_runtime_pm(ctlr); ctlr->busy = false; + ctlr->queue_empty = true; trace_spi_controller_idle(ctlr); } else { kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); } - spin_unlock_irqrestore(&ctlr->queue_lock, flags); - return; + goto out_unlock; } ctlr->busy = false; - ctlr->idling = true; spin_unlock_irqrestore(&ctlr->queue_lock, flags); kfree(ctlr->dummy_rx); @@ -1574,9 +1711,8 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) trace_spi_controller_idle(ctlr); spin_lock_irqsave(&ctlr->queue_lock, flags); - ctlr->idling = false; - spin_unlock_irqrestore(&ctlr->queue_lock, flags); - return; + ctlr->queue_empty = true; + goto out_unlock; } /* Extract head of queue */ @@ -1590,81 +1726,23 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) ctlr->busy = true; spin_unlock_irqrestore(&ctlr->queue_lock, flags); - mutex_lock(&ctlr->io_mutex); - - if (!was_busy && ctlr->auto_runtime_pm) { - ret = pm_runtime_resume_and_get(ctlr->dev.parent); - if (ret < 0) { - dev_err(&ctlr->dev, "Failed to power device: %d\n", - ret); - mutex_unlock(&ctlr->io_mutex); - return; - } - } - - 
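The cur_msg_incomplete/cur_msg_need_completion pair introduced above lets the submitting context skip the completion entirely when the driver finalizes the message synchronously, falling back to a real wait only when a race is possible. The protocol in isolation (foo_* names are illustrative; the barriers pair across the two sides):

#include <linux/completion.h>
#include <linux/compiler.h>

struct foo_ctx {
        bool                    incomplete;
        bool                    need_completion;
        struct completion       done;
};

static void foo_submit_side(struct foo_ctx *c)
{
        WRITE_ONCE(c->incomplete, true);
        WRITE_ONCE(c->need_completion, false);
        reinit_completion(&c->done);
        smp_wmb();      /* publish the flags before starting the work */

        /* ... hand the message to the driver here ... */

        WRITE_ONCE(c->need_completion, true);
        smp_mb();       /* pairs with smp_mb() in foo_finish() */
        if (READ_ONCE(c->incomplete))
                wait_for_completion(&c->done);
}

static void foo_finish(struct foo_ctx *c)
{
        WRITE_ONCE(c->incomplete, false);
        smp_mb();       /* pairs with smp_mb() in foo_submit_side() */
        if (READ_ONCE(c->need_completion))
                complete(&c->done);
}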
if (!was_busy) - trace_spi_controller_busy(ctlr); - - if (!was_busy && ctlr->prepare_transfer_hardware) { - ret = ctlr->prepare_transfer_hardware(ctlr); - if (ret) { - dev_err(&ctlr->dev, - "failed to prepare transfer hardware: %d\n", - ret); - - if (ctlr->auto_runtime_pm) - pm_runtime_put(ctlr->dev.parent); - - msg->status = ret; - spi_finalize_current_message(ctlr); - - mutex_unlock(&ctlr->io_mutex); - return; - } - } - - trace_spi_message_start(msg); - - if (ctlr->prepare_message) { - ret = ctlr->prepare_message(ctlr, msg); - if (ret) { - dev_err(&ctlr->dev, "failed to prepare message: %d\n", - ret); - msg->status = ret; - spi_finalize_current_message(ctlr); - goto out; - } - ctlr->cur_msg_prepared = true; - } - - ret = spi_map_msg(ctlr, msg); - if (ret) { - msg->status = ret; - spi_finalize_current_message(ctlr); - goto out; - } - - if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { - list_for_each_entry(xfer, &msg->transfers, transfer_list) { - xfer->ptp_sts_word_pre = 0; - ptp_read_system_prets(xfer->ptp_sts); - } - } + ret = __spi_pump_transfer_message(ctlr, msg, was_busy); + if (!ret) + kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); - ret = ctlr->transfer_one_message(ctlr, msg); - if (ret) { - dev_err(&ctlr->dev, - "failed to transfer one message from queue: %d\n", - ret); - goto out; - } + ctlr->cur_msg = NULL; + ctlr->fallback = false; -out: mutex_unlock(&ctlr->io_mutex); /* Prod the scheduler in case transfer_one() was busy waiting */ if (!ret) cond_resched(); + return; + +out_unlock: + spin_unlock_irqrestore(&ctlr->queue_lock, flags); + mutex_unlock(&ctlr->io_mutex); } /** @@ -1789,6 +1867,7 @@ static int spi_init_queue(struct spi_controller *ctlr) { ctlr->running = false; ctlr->busy = false; + ctlr->queue_empty = true; ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev)); if (IS_ERR(ctlr->kworker)) { @@ -1826,7 +1905,7 @@ struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr) struct spi_message *next; unsigned long flags; - /* get a pointer to the next message, if any */ + /* Get a pointer to the next message, if any */ spin_lock_irqsave(&ctlr->queue_lock, flags); next = list_first_entry_or_null(&ctlr->queue, struct spi_message, queue); @@ -1847,12 +1926,9 @@ void spi_finalize_current_message(struct spi_controller *ctlr) { struct spi_transfer *xfer; struct spi_message *mesg; - unsigned long flags; int ret; - spin_lock_irqsave(&ctlr->queue_lock, flags); mesg = ctlr->cur_msg; - spin_unlock_irqrestore(&ctlr->queue_lock, flags); if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { list_for_each_entry(xfer, &mesg->transfers, transfer_list) { @@ -1876,7 +1952,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr) */ spi_res_release(ctlr, mesg); - if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { + if (mesg->prepared && ctlr->unprepare_message) { ret = ctlr->unprepare_message(ctlr, mesg); if (ret) { dev_err(&ctlr->dev, "failed to unprepare message: %d\n", @@ -1884,12 +1960,12 @@ void spi_finalize_current_message(struct spi_controller *ctlr) } } - spin_lock_irqsave(&ctlr->queue_lock, flags); - ctlr->cur_msg = NULL; - ctlr->cur_msg_prepared = false; - ctlr->fallback = false; - kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); - spin_unlock_irqrestore(&ctlr->queue_lock, flags); + mesg->prepared = false; + + WRITE_ONCE(ctlr->cur_msg_incomplete, false); + smp_mb(); /* See __spi_pump_transfer_message()... 
*/ + if (READ_ONCE(ctlr->cur_msg_need_completion)) + complete(&ctlr->cur_msg_completion); trace_spi_message_done(mesg); @@ -1992,6 +2068,7 @@ static int __spi_queued_transfer(struct spi_device *spi, msg->status = -EINPROGRESS; list_add_tail(&msg->queue, &ctlr->queue); + ctlr->queue_empty = false; if (!ctlr->busy && need_pump) kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); @@ -2376,9 +2453,6 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) if (lookup->index != -1 && lookup->n++ != lookup->index) return 1; - if (lookup->index == -1 && !ctlr) - return -ENODEV; - status = acpi_get_handle(NULL, sb->resource_source.string_ptr, &parent_handle); @@ -2398,7 +2472,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) ctlr = acpi_spi_find_controller_by_adev(adev); if (!ctlr) - return -ENODEV; + return -EPROBE_DEFER; lookup->ctlr = ctlr; } @@ -2481,8 +2555,8 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, acpi_dev_free_resource_list(&resource_list); if (ret < 0) - /* found SPI in _CRS but it points to another controller */ - return ERR_PTR(-ENODEV); + /* Found SPI in _CRS but it points to another controller */ + return ERR_PTR(ret); if (!lookup.max_speed_hz && ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && @@ -2937,7 +3011,7 @@ int spi_register_controller(struct spi_controller *ctlr) return status; if (ctlr->bus_num >= 0) { - /* devices with a fixed bus num must check-in with the num */ + /* Devices with a fixed bus num must check-in with the num */ mutex_lock(&board_lock); id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, ctlr->bus_num + 1, GFP_KERNEL); @@ -2946,7 +3020,7 @@ int spi_register_controller(struct spi_controller *ctlr) return id == -ENOSPC ? -EBUSY : id; ctlr->bus_num = id; } else if (ctlr->dev.of_node) { - /* allocate dynamic bus number using Linux idr */ + /* Allocate dynamic bus number using Linux idr */ id = of_alias_get_id(ctlr->dev.of_node, "spi"); if (id >= 0) { ctlr->bus_num = id; @@ -2975,6 +3049,7 @@ int spi_register_controller(struct spi_controller *ctlr) } ctlr->bus_lock_flag = 0; init_completion(&ctlr->xfer_completion); + init_completion(&ctlr->cur_msg_completion); if (!ctlr->max_dma_len) ctlr->max_dma_len = INT_MAX; @@ -3004,7 +3079,7 @@ int spi_register_controller(struct spi_controller *ctlr) goto free_bus_id; } - /* setting last_cs to -1 means no chip selected */ + /* Setting last_cs to -1 means no chip selected */ ctlr->last_cs = -1; status = device_add(&ctlr->dev); @@ -3028,8 +3103,13 @@ int spi_register_controller(struct spi_controller *ctlr) goto free_bus_id; } } - /* add statistics */ - spin_lock_init(&ctlr->statistics.lock); + /* Add statistics */ + ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev); + if (!ctlr->pcpu_statistics) { + dev_err(dev, "Error allocating per-cpu statistics\n"); + status = -ENOMEM; + goto destroy_queue; + } mutex_lock(&board_lock); list_add_tail(&ctlr->list, &spi_controller_list); @@ -3042,6 +3122,8 @@ int spi_register_controller(struct spi_controller *ctlr) acpi_register_spi_devices(ctlr); return status; +destroy_queue: + spi_destroy_queue(ctlr); free_bus_id: mutex_lock(&board_lock); idr_remove(&spi_master_idr, ctlr->bus_num); @@ -3050,9 +3132,9 @@ free_bus_id: } EXPORT_SYMBOL_GPL(spi_register_controller); -static void devm_spi_unregister(void *ctlr) +static void devm_spi_unregister(struct device *dev, void *res) { - spi_unregister_controller(ctlr); + spi_unregister_controller(*(struct spi_controller **)res); } /** @@ -3071,13 +3153,22 @@ 
static void devm_spi_unregister(void *ctlr) int devm_spi_register_controller(struct device *dev, struct spi_controller *ctlr) { + struct spi_controller **ptr; int ret; + ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return -ENOMEM; + ret = spi_register_controller(ctlr); - if (ret) - return ret; + if (!ret) { + *ptr = ctlr; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } - return devm_add_action_or_reset(dev, devm_spi_unregister, ctlr); + return ret; } EXPORT_SYMBOL_GPL(devm_spi_register_controller); @@ -3124,7 +3215,7 @@ void spi_unregister_controller(struct spi_controller *ctlr) device_del(&ctlr->dev); - /* free bus id */ + /* Free bus id */ mutex_lock(&board_lock); if (found == ctlr) idr_remove(&spi_master_idr, id); @@ -3183,14 +3274,14 @@ static void __spi_replace_transfers_release(struct spi_controller *ctlr, struct spi_replaced_transfers *rxfer = res; size_t i; - /* call extra callback if requested */ + /* Call extra callback if requested */ if (rxfer->release) rxfer->release(ctlr, msg, res); - /* insert replaced transfers back into the message */ + /* Insert replaced transfers back into the message */ list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); - /* remove the formerly inserted entries */ + /* Remove the formerly inserted entries */ for (i = 0; i < rxfer->inserted; i++) list_del(&rxfer->inserted_transfers[i].transfer_list); } @@ -3223,7 +3314,7 @@ static struct spi_replaced_transfers *spi_replace_transfers( struct spi_transfer *xfer; size_t i; - /* allocate the structure using spi_res */ + /* Allocate the structure using spi_res */ rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, struct_size(rxfer, inserted_transfers, insert) + extradatasize, @@ -3231,15 +3322,15 @@ static struct spi_replaced_transfers *spi_replace_transfers( if (!rxfer) return ERR_PTR(-ENOMEM); - /* the release code to invoke before running the generic release */ + /* The release code to invoke before running the generic release */ rxfer->release = release; - /* assign extradata */ + /* Assign extradata */ if (extradatasize) rxfer->extradata = &rxfer->inserted_transfers[insert]; - /* init the replaced_transfers list */ + /* Init the replaced_transfers list */ INIT_LIST_HEAD(&rxfer->replaced_transfers); /* @@ -3248,7 +3339,7 @@ static struct spi_replaced_transfers *spi_replace_transfers( */ rxfer->replaced_after = xfer_first->transfer_list.prev; - /* remove the requested number of transfers */ + /* Remove the requested number of transfers */ for (i = 0; i < remove; i++) { /* * If the entry after replaced_after it is msg->transfers @@ -3258,14 +3349,14 @@ static struct spi_replaced_transfers *spi_replace_transfers( if (rxfer->replaced_after->next == &msg->transfers) { dev_err(&msg->spi->dev, "requested to remove more spi_transfers than are available\n"); - /* insert replaced transfers back into the message */ + /* Insert replaced transfers back into the message */ list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); - /* free the spi_replace_transfer structure */ + /* Free the spi_replace_transfer structure... */ spi_res_free(rxfer); - /* and return with an error */ + /* ...and return with an error */ return ERR_PTR(-EINVAL); } @@ -3282,26 +3373,26 @@ static struct spi_replaced_transfers *spi_replace_transfers( * based on the first transfer to get removed. 
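devm_spi_register_controller() above moves back to the raw devres pattern: allocate a devres-tracked slot, attempt the registration, then either record the object for automatic unwind or free the slot. Generically (foo_* illustrative, registration stubbed):

#include <linux/device.h>
#include <linux/slab.h>

struct foo { int id; };

static int foo_register(struct foo *obj) { return 0; }  /* stub */
static void foo_unregister(struct foo *obj) { }         /* stub */

static void foo_devres_release(struct device *dev, void *res)
{
        foo_unregister(*(struct foo **)res);
}

static int devm_foo_register(struct device *dev, struct foo *obj)
{
        struct foo **ptr;
        int ret;

        ptr = devres_alloc(foo_devres_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = foo_register(obj);
        if (ret) {
                devres_free(ptr);
                return ret;
        }

        *ptr = obj;
        devres_add(dev, ptr);   /* runs foo_devres_release() on device teardown */
        return 0;
}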
*/ for (i = 0; i < insert; i++) { - /* we need to run in reverse order */ + /* We need to run in reverse order */ xfer = &rxfer->inserted_transfers[insert - 1 - i]; - /* copy all spi_transfer data */ + /* Copy all spi_transfer data */ memcpy(xfer, xfer_first, sizeof(*xfer)); - /* add to list */ + /* Add to list */ list_add(&xfer->transfer_list, rxfer->replaced_after); - /* clear cs_change and delay for all but the last */ + /* Clear cs_change and delay for all but the last */ if (i) { xfer->cs_change = false; xfer->delay.value = 0; } } - /* set up inserted */ + /* Set up inserted... */ rxfer->inserted = insert; - /* and register it with spi_res/spi_message */ + /* ...and register it with spi_res/spi_message */ spi_res_add(msg, rxfer); return rxfer; @@ -3318,10 +3409,10 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, size_t offset; size_t count, i; - /* calculate how many we have to replace */ + /* Calculate how many we have to replace */ count = DIV_ROUND_UP(xfer->len, maxsize); - /* create replacement */ + /* Create replacement */ srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); if (IS_ERR(srt)) return PTR_ERR(srt); @@ -3344,9 +3435,9 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, */ xfers[0].len = min_t(size_t, maxsize, xfer[0].len); - /* all the others need rx_buf/tx_buf also set */ + /* All the others need rx_buf/tx_buf also set */ for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { - /* update rx_buf, tx_buf and dma */ + /* Update rx_buf, tx_buf and dma */ if (xfers[i].rx_buf) xfers[i].rx_buf += offset; if (xfers[i].rx_dma) @@ -3356,7 +3447,7 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, if (xfers[i].tx_dma) xfers[i].tx_dma += offset; - /* update length */ + /* Update length */ xfers[i].len = min(maxsize, xfers[i].len - offset); } @@ -3366,10 +3457,10 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, */ *xferp = &xfers[count - 1]; - /* increment statistics counters */ - SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, + /* Increment statistics counters */ + SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, transfers_split_maxsize); - SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, + SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics, transfers_split_maxsize); return 0; @@ -3628,7 +3719,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) return ret; list_for_each_entry(xfer, &message->transfers, transfer_list) { - /* don't change cs_change on the last entry in the list */ + /* Don't change cs_change on the last entry in the list */ if (list_is_last(&xfer->transfer_list, &message->transfers)) break; xfer->cs_change = 1; @@ -3721,7 +3812,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) !(spi->mode & SPI_TX_QUAD)) return -EINVAL; } - /* check transfer rx_nbits */ + /* Check transfer rx_nbits */ if (xfer->rx_buf) { if (spi->mode & SPI_NO_RX) return -EINVAL; @@ -3760,8 +3851,8 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message) message->spi = spi; - SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); - SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); + SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async); + SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async); trace_spi_message_submit(message); @@ -3880,6 +3971,39 @@ static int spi_async_locked(struct spi_device *spi, struct spi_message *message) } +static void 
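The __spi_split_transfer_maxsize() hunks above scatter the buffer arithmetic across several context lines; consolidated, the rule is: chunk 0 keeps the original buffers, every later chunk advances the rx/tx pointers by the accumulated offset, and the final chunk takes the remainder. A sketch, assuming xfers[] holds count copies of the original transfer, as spi_replace_transfers() produces:

#include <linux/minmax.h>
#include <linux/spi/spi.h>

static void foo_split_bufs(struct spi_transfer *xfers, size_t count,
                           size_t maxsize, size_t total_len)
{
        size_t i, offset;

        xfers[0].len = min_t(size_t, maxsize, total_len);

        for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
                if (xfers[i].rx_buf)
                        xfers[i].rx_buf += offset;
                if (xfers[i].tx_buf)
                        xfers[i].tx_buf += offset;
                xfers[i].len = min(maxsize, total_len - offset);
        }
}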
__spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg) +{ + bool was_busy; + int ret; + + mutex_lock(&ctlr->io_mutex); + + was_busy = ctlr->busy; + + ctlr->cur_msg = msg; + ret = __spi_pump_transfer_message(ctlr, msg, was_busy); + if (ret) + goto out; + + ctlr->cur_msg = NULL; + ctlr->fallback = false; + + if (!was_busy) { + kfree(ctlr->dummy_rx); + ctlr->dummy_rx = NULL; + kfree(ctlr->dummy_tx); + ctlr->dummy_tx = NULL; + if (ctlr->unprepare_transfer_hardware && + ctlr->unprepare_transfer_hardware(ctlr)) + dev_err(&ctlr->dev, + "failed to unprepare transfer hardware\n"); + spi_idle_runtime_pm(ctlr); + } + +out: + mutex_unlock(&ctlr->io_mutex); +} + /*-------------------------------------------------------------------------*/ /* @@ -3898,51 +4022,51 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message) DECLARE_COMPLETION_ONSTACK(done); int status; struct spi_controller *ctlr = spi->controller; - unsigned long flags; status = __spi_validate(spi, message); if (status != 0) return status; - message->complete = spi_complete; - message->context = &done; message->spi = spi; - SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync); - SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); + SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync); + SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync); /* - * If we're not using the legacy transfer method then we will - * try to transfer in the calling context so special case. - * This code would be less tricky if we could remove the - * support for driver implemented message queues. + * Checking queue_empty here only guarantees async/sync message + * ordering when coming from the same context. It does not need to + * guard against reentrancy from a different context. The io_mutex + * will catch those cases. */ - if (ctlr->transfer == spi_queued_transfer) { - spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); + if (READ_ONCE(ctlr->queue_empty)) { + message->actual_length = 0; + message->status = -EINPROGRESS; trace_spi_message_submit(message); - status = __spi_queued_transfer(spi, message, false); + SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate); + SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate); - spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); - } else { - status = spi_async_locked(spi, message); + __spi_transfer_message_noqueue(ctlr, message); + + return message->status; } + /* + * There are messages in the async queue that could have originated + * from the same context, so we need to preserve ordering. + * Therefore we send the message to the async queue and wait until they
+ */ + message->complete = spi_complete; + message->context = &done; + status = spi_async_locked(spi, message); if (status == 0) { - /* Push out the messages in the calling context if we can */ - if (ctlr->transfer == spi_queued_transfer) { - SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, - spi_sync_immediate); - SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, - spi_sync_immediate); - __spi_pump_messages(ctlr, false); - } - wait_for_completion(&done); status = message->status; } message->context = NULL; + return status; } @@ -4026,7 +4150,7 @@ int spi_bus_lock(struct spi_controller *ctlr) ctlr->bus_lock_flag = 1; spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); - /* mutex remains locked until spi_bus_unlock is called */ + /* Mutex remains locked until spi_bus_unlock() is called */ return 0; } @@ -4055,7 +4179,7 @@ int spi_bus_unlock(struct spi_controller *ctlr) } EXPORT_SYMBOL_GPL(spi_bus_unlock); -/* portable code must never pass more than 32 bytes */ +/* Portable code must never pass more than 32 bytes */ #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) static u8 *buf; @@ -4121,7 +4245,7 @@ int spi_write_then_read(struct spi_device *spi, x[0].tx_buf = local_buf; x[1].rx_buf = local_buf + n_tx; - /* do the i/o */ + /* Do the i/o */ status = spi_sync(spi, &message); if (status == 0) memcpy(rxbuf, x[1].rx_buf, n_rx); @@ -4138,7 +4262,7 @@ EXPORT_SYMBOL_GPL(spi_write_then_read); /*-------------------------------------------------------------------------*/ #if IS_ENABLED(CONFIG_OF_DYNAMIC) -/* must call put_device() when done with returned spi_device device */ +/* Must call put_device() when done with returned spi_device device */ static struct spi_device *of_find_spi_device_by_node(struct device_node *node) { struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node); @@ -4146,7 +4270,7 @@ static struct spi_device *of_find_spi_device_by_node(struct device_node *node) return dev ? to_spi_device(dev) : NULL; } -/* the spi controllers are not using spi_bus, so we find it with another way */ +/* The spi controllers are not using spi_bus, so we find it with another way */ static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) { struct device *dev; @@ -4157,7 +4281,7 @@ static struct spi_controller *of_find_spi_controller_by_node(struct device_node if (!dev) return NULL; - /* reference got in class_find_device */ + /* Reference got in class_find_device */ return container_of(dev, struct spi_controller, dev); } @@ -4172,7 +4296,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action, case OF_RECONFIG_CHANGE_ADD: ctlr = of_find_spi_controller_by_node(rd->dn->parent); if (ctlr == NULL) - return NOTIFY_OK; /* not for us */ + return NOTIFY_OK; /* Not for us */ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { put_device(&ctlr->dev); @@ -4191,19 +4315,19 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action, break; case OF_RECONFIG_CHANGE_REMOVE: - /* already depopulated? */ + /* Already depopulated? */ if (!of_node_check_flag(rd->dn, OF_POPULATED)) return NOTIFY_OK; - /* find our device by node */ + /* Find our device by node */ spi = of_find_spi_device_by_node(rd->dn); if (spi == NULL) - return NOTIFY_OK; /* no? not meant for us */ + return NOTIFY_OK; /* No? 
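The reworked __spi_sync() above therefore has two paths: with an idle queue the message never bounces through the kthread, and only when async traffic is pending does it queue behind it and wait. A condensed model of that decision; the foo_* stubs stand for the static helpers in spi.c, while queue_empty is the real flag added by this series:

#include <linux/spi/spi.h>

static void foo_transfer_noqueue(struct spi_controller *ctlr,
                                 struct spi_message *msg)
{
        /* would run __spi_pump_transfer_message() under the io_mutex */
        msg->status = 0;
}

static int foo_queue_and_wait(struct spi_device *spi, struct spi_message *msg)
{
        /* would spi_async() the message and wait_for_completion() */
        return 0;
}

static int foo_sync(struct spi_device *spi, struct spi_message *msg)
{
        struct spi_controller *ctlr = spi->controller;

        if (READ_ONCE(ctlr->queue_empty)) {
                /* Same-context ordering is trivially satisfied: go direct. */
                msg->actual_length = 0;
                msg->status = -EINPROGRESS;
                foo_transfer_noqueue(ctlr, msg);
                return msg->status;
        }

        /* Pending async messages may be ours: queue behind them and wait. */
        return foo_queue_and_wait(spi, msg);
}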
not meant for us */ - /* unregister takes one ref away */ + /* Unregister takes one ref away */ spi_unregister_device(spi); - /* and put the reference of the find */ + /* And put the reference of the find */ put_device(&spi->dev); break; } diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c index 7e47db82de07..bf527b366ab3 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c +++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c @@ -1284,7 +1284,7 @@ static int gmin_get_config_var(struct device *maindev, const struct dmi_system_id *id; struct device *dev = maindev; char var8[CFG_VAR_NAME_MAX]; - struct efivar_entry *ev; + efi_status_t status; int i, ret; /* For sensors, try first to use the _DSM table */ @@ -1326,24 +1326,11 @@ static int gmin_get_config_var(struct device *maindev, for (i = 0; i < sizeof(var8) && var8[i]; i++) var16[i] = var8[i]; - /* Not sure this API usage is kosher; efivar_entry_get()'s - * implementation simply uses VariableName and VendorGuid from - * the struct and ignores the rest, but it seems like there - * ought to be an "official" efivar_entry registered - * somewhere? - */ - ev = kzalloc(sizeof(*ev), GFP_KERNEL); - if (!ev) - return -ENOMEM; - memcpy(&ev->var.VariableName, var16, sizeof(var16)); - ev->var.VendorGuid = GMIN_CFG_VAR_EFI_GUID; - ev->var.DataSize = *out_len; - - ret = efivar_entry_get(ev, &ev->var.Attributes, - &ev->var.DataSize, ev->var.Data); - if (ret == 0) { - memcpy(out, ev->var.Data, ev->var.DataSize); - *out_len = ev->var.DataSize; + status = EFI_UNSUPPORTED; + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) + status = efi.get_variable(var16, &GMIN_CFG_VAR_EFI_GUID, NULL, + (unsigned long *)out_len, out); + if (status == EFI_SUCCESS) { dev_info(maindev, "found EFI entry for '%s'\n", var8); } else if (is_gmin) { dev_info(maindev, "Failed to find EFI gmin variable %s\n", var8); @@ -1351,8 +1338,6 @@ static int gmin_get_config_var(struct device *maindev, dev_info(maindev, "Failed to find EFI variable %s\n", var8); } - kfree(ev); - return ret; } diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 1ed9381751e6..30712a12b151 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -343,7 +343,7 @@ static void iblock_bio_done(struct bio *bio) } static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, - unsigned int opf) + blk_opf_t opf) { struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); struct bio *bio; @@ -723,7 +723,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, struct bio_list list; struct scatterlist *sg; u32 sg_num = sgl_nents; - unsigned int opf; + blk_opf_t opf; unsigned bio_cnt; int i, rc; struct sg_mapping_iter prot_miter; diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index b8151d95a806..b76293cc989c 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -21,6 +21,7 @@ #include <linux/pm_qos.h> #include <linux/slab.h> #include <linux/thermal.h> +#include <linux/units.h> #include <trace/events/thermal.h> @@ -59,6 +60,7 @@ struct time_in_idle { * @cdev: thermal_cooling_device pointer to keep track of the * registered cooling device. * @policy: cpufreq policy. 
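The atomisp change above drops the efivar_entry round-trip in favour of calling the EFI runtime service directly, guarded by efi_rt_services_supported(). The essential shape (name and GUID handling elided; foo_* illustrative):

#include <linux/efi.h>

static efi_status_t foo_read_efi_var(efi_char16_t *name, efi_guid_t *guid,
                                     void *buf, unsigned long *size)
{
        if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
                return EFI_UNSUPPORTED;

        /* The attributes are not needed here, so pass NULL for them. */
        return efi.get_variable(name, guid, NULL, size, buf);
}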
+ * @cooling_ops: cpufreq callbacks to thermal cooling device ops * @idle_time: idle time stats * @qos_req: PM QoS constraint to apply * @@ -71,6 +73,7 @@ struct cpufreq_cooling_device { unsigned int max_level; struct em_perf_domain *em; struct cpufreq_policy *policy; + struct thermal_cooling_device_ops cooling_ops; #ifndef CONFIG_SMP struct time_in_idle *idle_time; #endif @@ -101,6 +104,7 @@ static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev, static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev, u32 freq) { + unsigned long power_mw; int i; for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) { @@ -108,16 +112,23 @@ static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev, break; } - return cpufreq_cdev->em->table[i + 1].power; + power_mw = cpufreq_cdev->em->table[i + 1].power; + power_mw /= MICROWATT_PER_MILLIWATT; + + return power_mw; } static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev, u32 power) { + unsigned long em_power_mw; int i; for (i = cpufreq_cdev->max_level; i > 0; i--) { - if (power >= cpufreq_cdev->em->table[i].power) + /* Convert EM power to milli-Watts to make safe comparison */ + em_power_mw = cpufreq_cdev->em->table[i].power; + em_power_mw /= MICROWATT_PER_MILLIWATT; + if (power >= em_power_mw) break; } @@ -137,11 +148,9 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev, static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu, int cpu_idx) { - unsigned long max = arch_scale_cpu_capacity(cpu); - unsigned long util; + unsigned long util = sched_cpu_util(cpu); - util = sched_cpu_util(cpu, max); - return (util * 100) / max; + return (util * 100) / arch_scale_cpu_capacity(cpu); } #else /* !CONFIG_SMP */ static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu, @@ -204,7 +213,7 @@ static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev, * complex code may be needed if experiments show that it's not * accurate enough. * - * Return: 0 on success, -E* if getting the static power failed. + * Return: 0 on success, this function doesn't fail. */ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, u32 *power) @@ -214,16 +223,9 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, u32 total_load = 0; struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; struct cpufreq_policy *policy = cpufreq_cdev->policy; - u32 *load_cpu = NULL; freq = cpufreq_quick_get(policy->cpu); - if (trace_thermal_power_cpu_get_power_enabled()) { - u32 ncpus = cpumask_weight(policy->related_cpus); - - load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL); - } - for_each_cpu(cpu, policy->related_cpus) { u32 load; @@ -233,22 +235,13 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, load = 0; total_load += load; - if (load_cpu) - load_cpu[i] = load; - - i++; } cpufreq_cdev->last_load = total_load; *power = get_dynamic_power(cpufreq_cdev, freq); - if (load_cpu) { - trace_thermal_power_cpu_get_power(policy->related_cpus, freq, - load_cpu, i, *power); - - kfree(load_cpu); - } + trace_thermal_power_cpu_get_power_simple(policy->cpu, *power); return 0; } @@ -263,9 +256,8 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, * milliwatts assuming 100% load. Store the calculated power in * @power.
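The divisions by MICROWATT_PER_MILLIWATT above exist because the Energy Model reports power in microwatts while the thermal framework still works in milliwatts; converting at the point of comparison keeps the EM table itself at full precision. In isolation:

#include <linux/units.h>

static inline unsigned long foo_em_power_mw(unsigned long em_power_uw)
{
        return em_power_uw / MICROWATT_PER_MILLIWATT;  /* 1000 uW per mW */
}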
* - * Return: 0 on success, -EINVAL if the cooling device state could not - * be converted into a frequency or other -E* if there was an error - * when calculating the static power. + * Return: 0 on success, -EINVAL if the cooling device state is bigger + * than maximum allowed. */ static int cpufreq_state2power(struct thermal_cooling_device *cdev, unsigned long state, u32 *power) @@ -295,15 +287,11 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev, * Calculate a cooling device state for the cpus described by @cdev * that would allow them to consume at most @power mW and store it in * @state. Note that this calculation depends on external factors - * such as the cpu load or the current static power. Calling this - * function with the same power as input can yield different cooling - * device states depending on those external factors. - * - * Return: 0 on success, -ENODEV if no cpus are online or -EINVAL if - * the calculated frequency could not be converted to a valid state. - * The latter should not happen unless the frequencies available to - * cpufreq have changed since the initialization of the cpu cooling - * device. + * such as the CPUs load. Calling this function with the same power + * as input can yield different cooling device states depending on those + * external factors. + * + * Return: 0 on success, this function doesn't fail. */ static int cpufreq_power2state(struct thermal_cooling_device *cdev, u32 power, unsigned long *state) @@ -415,7 +403,7 @@ static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev, * Callback for the thermal cooling device to return the cpufreq * max cooling state. * - * Return: 0 on success, an error code otherwise. + * Return: 0 on success, this function doesn't fail. */ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) @@ -434,7 +422,7 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev, * Callback for the thermal cooling device to return the cpufreq * current cooling state. * - * Return: 0 on success, an error code otherwise. + * Return: 0 on success, this function doesn't fail. */ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) @@ -485,14 +473,6 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, return ret; } -/* Bind cpufreq callbacks to thermal cooling device ops */ - -static struct thermal_cooling_device_ops cpufreq_cooling_ops = { - .get_max_state = cpufreq_get_max_state, - .get_cur_state = cpufreq_get_cur_state, - .set_cur_state = cpufreq_set_cur_state, -}; - /** * __cpufreq_cooling_register - helper function to create cpufreq cooling device * @np: a valid struct device_node to the cooling device device tree node @@ -501,7 +481,7 @@ static struct thermal_cooling_device_ops cpufreq_cooling_ops = { * @em: Energy Model of the cpufreq policy * * This interface function registers the cpufreq cooling device with the name - * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq + * "cpufreq-%s". This API can support multiple instances of cpufreq * cooling devices. It also gives the opportunity to link the cooling device * with a device tree node, in order to bind it via the thermal DT code. 
* @@ -554,7 +534,10 @@ __cpufreq_cooling_register(struct device_node *np, /* max_level is an index, not a counter */ cpufreq_cdev->max_level = i - 1; - cooling_ops = &cpufreq_cooling_ops; + cooling_ops = &cpufreq_cdev->cooling_ops; + cooling_ops->get_max_state = cpufreq_get_max_state; + cooling_ops->get_cur_state = cpufreq_get_cur_state; + cooling_ops->set_cur_state = cpufreq_set_cur_state; #ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR if (em_is_sane(cpufreq_cdev, em)) { @@ -609,8 +592,8 @@ free_cdev: * @policy: cpufreq policy * * This interface function registers the cpufreq cooling device with the name - * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq - * cooling devices. + * "cpufreq-%s". This API can support multiple instances of cpufreq cooling + * devices. * * Return: a valid struct thermal_cooling_device pointer on success, * on failure, it returns a corresponding ERR_PTR(). @@ -627,17 +610,14 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register); * @policy: cpufreq policy * * This interface function registers the cpufreq cooling device with the name - * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq - * cooling devices. Using this API, the cpufreq cooling device will be - * linked to the device tree node provided. + * "cpufreq-%s". This API can support multiple instances of cpufreq cooling + * devices. Using this API, the cpufreq cooling device will be linked to the + * device tree node provided. * * Using this function, the cooling device will implement the power - * extensions by using a simple cpu power model. The cpus must have + * extensions by using the Energy Model (if present). The cpus must have * registered their OPPs using the OPP library. * - * It also takes into account, if property present in policy CPU node, the - * static power consumed by the cpu. - * * Return: a valid struct thermal_cooling_device pointer on success, * and NULL on failure. */ @@ -673,7 +653,7 @@ EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register); * cpufreq_cooling_unregister - function to remove cpufreq cooling device. * @cdev: thermal cooling device pointer. * - * This interface function unregisters the "thermal-cpufreq-%x" cooling device. + * This interface function unregisters the "cpufreq-%x" cooling device. 
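Moving the callbacks from the shared static cpufreq_cooling_ops into each cpufreq_cooling_device mirrors what the devfreq cooling hunks further below do, and is what lets the power-allocator callbacks be attached only to instances that actually have a sane energy model. The shape of the pattern, with illustrative names:

#include <linux/thermal.h>

struct example_cooling {
        struct thermal_cooling_device_ops cooling_ops;  /* per instance */
        unsigned long max_level;
};

static int example_get_max_state(struct thermal_cooling_device *cdev,
                                 unsigned long *state)
{
        struct example_cooling *ec = cdev->devdata;

        *state = ec->max_level;
        return 0;
}

static void example_init_ops(struct example_cooling *ec, bool has_em)
{
        struct thermal_cooling_device_ops *ops = &ec->cooling_ops;

        ops->get_max_state = example_get_max_state;
        /* get_cur_state()/set_cur_state() are wired up the same way */
        if (has_em) {
                /* only power-aware instances would also get
                 * state2power()/power2state() here */
        }
}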
*/ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) { diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c index 21d4d6e6409a..121cf853e545 100644 --- a/drivers/thermal/db8500_thermal.c +++ b/drivers/thermal/db8500_thermal.c @@ -53,7 +53,6 @@ static const unsigned long db8500_thermal_points[] = { struct db8500_thermal_zone { struct thermal_zone_device *tz; - enum thermal_trend trend; unsigned long interpolated_temp; unsigned int cur_index; }; @@ -73,24 +72,12 @@ static int db8500_thermal_get_temp(void *data, int *temp) return 0; } -/* Callback to get temperature changing trend */ -static int db8500_thermal_get_trend(void *data, int trip, enum thermal_trend *trend) -{ - struct db8500_thermal_zone *th = data; - - *trend = th->trend; - - return 0; -} - static struct thermal_zone_of_device_ops thdev_ops = { .get_temp = db8500_thermal_get_temp, - .get_trend = db8500_thermal_get_trend, }; static void db8500_thermal_update_config(struct db8500_thermal_zone *th, unsigned int idx, - enum thermal_trend trend, unsigned long next_low, unsigned long next_high) { @@ -98,7 +85,6 @@ static void db8500_thermal_update_config(struct db8500_thermal_zone *th, th->cur_index = idx; th->interpolated_temp = (next_low + next_high)/2; - th->trend = trend; /* * The PRCMU accept absolute temperatures in celsius so divide @@ -127,8 +113,7 @@ static irqreturn_t prcmu_low_irq_handler(int irq, void *irq_data) } idx -= 1; - db8500_thermal_update_config(th, idx, THERMAL_TREND_DROPPING, - next_low, next_high); + db8500_thermal_update_config(th, idx, next_low, next_high); dev_dbg(&th->tz->device, "PRCMU set max %ld, min %ld\n", next_high, next_low); @@ -149,8 +134,7 @@ static irqreturn_t prcmu_high_irq_handler(int irq, void *irq_data) next_low = db8500_thermal_points[idx]; idx += 1; - db8500_thermal_update_config(th, idx, THERMAL_TREND_RAISING, - next_low, next_high); + db8500_thermal_update_config(th, idx, next_low, next_high); dev_dbg(&th->tz->device, "PRCMU set max %ld, min %ld\n", next_high, next_low); @@ -174,10 +158,8 @@ static int db8500_thermal_probe(struct platform_device *pdev) return -ENOMEM; low_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW"); - if (low_irq < 0) { - dev_err(dev, "Get IRQ_HOTMON_LOW failed\n"); + if (low_irq < 0) return low_irq; - } ret = devm_request_threaded_irq(dev, low_irq, NULL, prcmu_low_irq_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT, @@ -188,10 +170,8 @@ static int db8500_thermal_probe(struct platform_device *pdev) } high_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH"); - if (high_irq < 0) { - dev_err(dev, "Get IRQ_HOTMON_HIGH failed\n"); + if (high_irq < 0) return high_irq; - } ret = devm_request_threaded_irq(dev, high_irq, NULL, prcmu_high_irq_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT, @@ -210,8 +190,7 @@ static int db8500_thermal_probe(struct platform_device *pdev) dev_info(dev, "thermal zone sensor registered\n"); /* Start measuring at the lowest point */ - db8500_thermal_update_config(th, 0, THERMAL_TREND_STABLE, - PRCMU_DEFAULT_LOW_TEMP, + db8500_thermal_update_config(th, 0, PRCMU_DEFAULT_LOW_TEMP, db8500_thermal_points[0]); platform_set_drvdata(pdev, th); @@ -232,8 +211,7 @@ static int db8500_thermal_resume(struct platform_device *pdev) struct db8500_thermal_zone *th = platform_get_drvdata(pdev); /* Resume and start measuring at the lowest point */ - db8500_thermal_update_config(th, 0, THERMAL_TREND_STABLE, - PRCMU_DEFAULT_LOW_TEMP, + db8500_thermal_update_config(th, 0, PRCMU_DEFAULT_LOW_TEMP, db8500_thermal_points[0]); return 0; 
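Dropping db8500's get_trend() callback (and the tsens and tegra equivalents later in this series) works because the core falls back to deriving the trend from two consecutive zone readings when no callback is provided. Simplified, the fallback inside the core's get_tz_trend() amounts to the following; the real helper also honors a still-present get_trend() op and emulated temperatures:

#include <linux/thermal.h>

static enum thermal_trend example_fallback_trend(struct thermal_zone_device *tz)
{
        if (tz->temperature > tz->last_temperature)
                return THERMAL_TREND_RAISING;
        if (tz->temperature < tz->last_temperature)
                return THERMAL_TREND_DROPPING;
        return THERMAL_TREND_STABLE;
}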
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index 8c76f9655e57..24b474925cd6 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -28,6 +28,7 @@ * struct devfreq_cooling_device - Devfreq cooling device * devfreq_cooling_device registered. * @cdev: Pointer to associated thermal cooling device. + * @cooling_ops: devfreq callbacks to thermal cooling device ops * @devfreq: Pointer to associated devfreq device. * @cooling_state: Current cooling state. * @freq_table: Pointer to a table with the frequencies sorted in descending @@ -48,6 +49,7 @@ */ struct devfreq_cooling_device { struct thermal_cooling_device *cdev; + struct thermal_cooling_device_ops cooling_ops; struct devfreq *devfreq; unsigned long cooling_state; u32 *freq_table; @@ -200,7 +202,11 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd res = dfc->power_ops->get_real_power(df, power, freq, voltage); if (!res) { state = dfc->capped_state; + + /* Convert EM power into milli-Watts first */ dfc->res_util = dfc->em_pd->table[state].power; + dfc->res_util /= MICROWATT_PER_MILLIWATT; + dfc->res_util *= SCALE_ERROR_MITIGATION; if (*power > 1) @@ -218,8 +224,10 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd _normalize_load(&status); - /* Scale power for utilization */ + /* Convert EM power into milli-Watts first */ *power = dfc->em_pd->table[perf_idx].power; + *power /= MICROWATT_PER_MILLIWATT; + /* Scale power for utilization */ *power *= status.busy_time; *power >>= 10; } @@ -244,6 +252,7 @@ static int devfreq_cooling_state2power(struct thermal_cooling_device *cdev, perf_idx = dfc->max_state - state; *power = dfc->em_pd->table[perf_idx].power; + *power /= MICROWATT_PER_MILLIWATT; return 0; } @@ -254,7 +263,7 @@ static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev, struct devfreq_cooling_device *dfc = cdev->devdata; struct devfreq *df = dfc->devfreq; struct devfreq_dev_status status; - unsigned long freq; + unsigned long freq, em_power_mw; s32 est_power; int i; @@ -279,9 +288,13 @@ static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev, * Find the first cooling state that is within the power * budget. The EM power table is sorted ascending. */ - for (i = dfc->max_state; i > 0; i--) - if (est_power >= dfc->em_pd->table[i].power) + for (i = dfc->max_state; i > 0; i--) { + /* Convert EM power to milli-Watts to make safe comparison */ + em_power_mw = dfc->em_pd->table[i].power; + em_power_mw /= MICROWATT_PER_MILLIWATT; + if (est_power >= em_power_mw) break; + } *state = dfc->max_state - i; dfc->capped_state = *state; @@ -290,12 +303,6 @@ static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev, return 0; } -static struct thermal_cooling_device_ops devfreq_cooling_ops = { - .get_max_state = devfreq_cooling_get_max_state, - .get_cur_state = devfreq_cooling_get_cur_state, - .set_cur_state = devfreq_cooling_set_cur_state, -}; - /** * devfreq_cooling_gen_tables() - Generate frequency table. * @dfc: Pointer to devfreq cooling device. 
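In the devfreq hunks above the same micro-Watt conversion appears several times, and in the get_requested_power() path the result is additionally scaled by the device's busy time. A sketch of that scaling, assuming _normalize_load() leaves busy_time in the range 0..1024, which the shift by 10 implies:

#include <linux/units.h>

static unsigned long example_scaled_power_mw(unsigned long em_power_uw,
                                             unsigned long busy_time)
{
        unsigned long power_mw = em_power_uw / MICROWATT_PER_MILLIWATT;

        /* busy_time assumed normalized so that 1024 == fully busy */
        return (power_mw * busy_time) >> 10;
}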
@@ -363,18 +370,18 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, char *name; int err, num_opps; - ops = kmemdup(&devfreq_cooling_ops, sizeof(*ops), GFP_KERNEL); - if (!ops) - return ERR_PTR(-ENOMEM); dfc = kzalloc(sizeof(*dfc), GFP_KERNEL); - if (!dfc) { - err = -ENOMEM; - goto free_ops; - } + if (!dfc) + return ERR_PTR(-ENOMEM); dfc->devfreq = df; + ops = &dfc->cooling_ops; + ops->get_max_state = devfreq_cooling_get_max_state; + ops->get_cur_state = devfreq_cooling_get_cur_state; + ops->set_cur_state = devfreq_cooling_set_cur_state; + em = em_pd_get(dev); if (em && !em_is_artificial(em)) { dfc->em_pd = em; @@ -437,8 +444,6 @@ free_table: kfree(dfc->freq_table); free_dfc: kfree(dfc); -free_ops: - kfree(ops); return ERR_PTR(err); } @@ -520,13 +525,11 @@ EXPORT_SYMBOL_GPL(devfreq_cooling_em_register); void devfreq_cooling_unregister(struct thermal_cooling_device *cdev) { struct devfreq_cooling_device *dfc; - const struct thermal_cooling_device_ops *ops; struct device *dev; if (IS_ERR_OR_NULL(cdev)) return; - ops = cdev->ops; dfc = cdev->devdata; dev = dfc->devfreq->dev.parent; @@ -537,6 +540,5 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *cdev) kfree(dfc->freq_table); kfree(dfc); - kfree(ops); } EXPORT_SYMBOL_GPL(devfreq_cooling_unregister); diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c index 1e5abf4822be..6a2abcfc648f 100644 --- a/drivers/thermal/gov_fair_share.c +++ b/drivers/thermal/gov_fair_share.c @@ -25,10 +25,10 @@ static int get_trip_level(struct thermal_zone_device *tz) int trip_temp; enum thermal_trip_type trip_type; - if (tz->trips == 0 || !tz->ops->get_trip_temp) + if (tz->num_trips == 0 || !tz->ops->get_trip_temp) return 0; - for (count = 0; count < tz->trips; count++) { + for (count = 0; count < tz->num_trips; count++) { tz->ops->get_trip_temp(tz, count, &trip_temp); if (tz->temperature < trip_temp) break; @@ -53,7 +53,7 @@ static long get_target_state(struct thermal_zone_device *tz, cdev->ops->get_max_state(cdev, &max_state); - return (long)(percentage * level * max_state) / (100 * tz->trips); + return (long)(percentage * level * max_state) / (100 * tz->num_trips); } /** diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index 13e375751d22..1d5052470967 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -527,7 +527,7 @@ static void get_governor_trips(struct thermal_zone_device *tz, last_active = INVALID_TRIP; last_passive = INVALID_TRIP; - for (i = 0; i < tz->trips; i++) { + for (i = 0; i < tz->num_trips; i++) { enum thermal_trip_type type; int ret; @@ -668,7 +668,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz) get_governor_trips(tz, params); - if (tz->trips > 0) { + if (tz->num_trips > 0) { ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature, &control_temp); diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c index 12acb12aac50..9729b46d0258 100644 --- a/drivers/thermal/gov_step_wise.c +++ b/drivers/thermal/gov_step_wise.c @@ -11,6 +11,7 @@ */ #include <linux/thermal.h> +#include <linux/minmax.h> #include <trace/events/thermal.h> #include "thermal_core.h" @@ -52,10 +53,7 @@ static unsigned long get_target_state(struct thermal_instance *instance, if (!instance->initialized) { if (throttle) { - next_target = (cur_state + 1) >= instance->upper ? - instance->upper : - ((cur_state + 1) < instance->lower ? 
- instance->lower : (cur_state + 1)); + next_target = clamp((cur_state + 1), instance->lower, instance->upper); } else { next_target = THERMAL_NO_TARGET; } @@ -66,35 +64,19 @@ static unsigned long get_target_state(struct thermal_instance *instance, switch (trend) { case THERMAL_TREND_RAISING: if (throttle) { - next_target = cur_state < instance->upper ? - (cur_state + 1) : instance->upper; - if (next_target < instance->lower) - next_target = instance->lower; + next_target = clamp((cur_state + 1), instance->lower, instance->upper); } break; - case THERMAL_TREND_RAISE_FULL: - if (throttle) - next_target = instance->upper; - break; case THERMAL_TREND_DROPPING: if (cur_state <= instance->lower) { if (!throttle) next_target = THERMAL_NO_TARGET; } else { if (!throttle) { - next_target = cur_state - 1; - if (next_target > instance->upper) - next_target = instance->upper; + next_target = clamp((cur_state - 1), instance->lower, instance->upper); } } break; - case THERMAL_TREND_DROP_FULL: - if (cur_state == instance->lower) { - if (!throttle) - next_target = THERMAL_NO_TARGET; - } else - next_target = instance->lower; - break; default: break; } diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c index c1fa2b29b153..dabf11a687a1 100644 --- a/drivers/thermal/intel/intel_pch_thermal.c +++ b/drivers/thermal/intel/intel_pch_thermal.c @@ -207,14 +207,6 @@ static int pch_wpt_suspend(struct pch_thermal_device *ptd) return 0; } - /* Do not check temperature if it is not a S0ix capable platform */ -#ifdef CONFIG_ACPI - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) - return 0; -#else - return 0; -#endif - /* Do not check temperature if it is not s2idle */ if (pm_suspend_via_firmware()) return 0; diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c index 4d8edc61a78b..a0e234fce71a 100644 --- a/drivers/thermal/intel/x86_pkg_temp_thermal.c +++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c @@ -105,7 +105,7 @@ static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu) } /* -* tj-max is is interesting because threshold is set relative to this +* tj-max is interesting because threshold is set relative to this * temperature. 
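The step-wise rewrites above depend on clamp() from the newly included <linux/minmax.h>; it is exactly the removed ternary chains, with the added guarantee that all three arguments are checked to have the same type:

#include <linux/minmax.h>

static unsigned long example_next_state(unsigned long cur_state,
                                        unsigned long lower,
                                        unsigned long upper)
{
        /*
         * Equivalent to the removed open-coded form:
         *   cur_state + 1 >= upper ? upper :
         *     (cur_state + 1 < lower ? lower : cur_state + 1)
         */
        return clamp(cur_state + 1, lower, upper);
}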
*/ static int get_tj_max(int cpu, u32 *tj_max) diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c index 64e323158952..115a44eb4fbf 100644 --- a/drivers/thermal/k3_j72xx_bandgap.c +++ b/drivers/thermal/k3_j72xx_bandgap.c @@ -151,8 +151,6 @@ static int prep_lookup_table(struct err_values *err_vals, int *ref_table) /* 300 milli celsius steps */ while (i--) derived_table[i] = derived_table[i + 1] - 300; - /* case 0 */ - derived_table[i] = derived_table[i + 1] - 300; } /* @@ -433,7 +431,7 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev) GFP_KERNEL); if (!derived_table) { ret = -ENOMEM; - goto err_alloc; + goto err_free_ref_table; } /* Workaround not needed if bit30/bit31 is set even for J721e */ @@ -483,7 +481,7 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev) if (IS_ERR(ti_thermal)) { dev_err(bgp->dev, "thermal zone device is NULL\n"); ret = PTR_ERR(ti_thermal); - goto err_alloc; + goto err_free_ref_table; } } @@ -514,6 +512,9 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev) return 0; +err_free_ref_table: + kfree(ref_table); + err_alloc: pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -529,11 +530,11 @@ static int k3_j72xx_bandgap_remove(struct platform_device *pdev) return 0; } -const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j721e_data = { +static const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j721e_data = { .has_errata_i2128 = 1, }; -const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j7200_data = { +static const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j7200_data = { .has_errata_i2128 = 0, }; diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c index d9c9c975f931..073943cbcc2b 100644 --- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c +++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c @@ -20,6 +20,8 @@ #include <linux/thermal.h> #include <asm-generic/unaligned.h> +#include "../thermal_hwmon.h" + /* * Thermal monitoring block consists of 8 (ADC_TM5_NUM_CHANNELS) channels. Each * channel is programmed to use one of ADC channels for voltage comparison. 
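The k3_j72xx_bandgap fixes above are the usual probe() unwind discipline: a second label is added so that later failures free ref_table instead of leaking it past err_alloc. A skeleton of that shape, with hypothetical names for everything not shown in the hunks (the driver itself keeps two fall-through labels):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#define EXAMPLE_TABLE_LEN 1024  /* arbitrary */

static int example_setup(struct platform_device *pdev) { return 0; } /* stand-in */

static int example_probe(struct platform_device *pdev)
{
        int *ref_table;
        int ret;

        ref_table = kcalloc(EXAMPLE_TABLE_LEN, sizeof(*ref_table), GFP_KERNEL);
        if (!ref_table)
                return -ENOMEM;

        ret = example_setup(pdev);      /* hypothetical later init step */
        if (ret)
                goto err_free_ref_table;

        kfree(ref_table);       /* temporary table, freed on success too */
        return 0;

err_free_ref_table:
        kfree(ref_table);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return ret;
}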
@@ -687,6 +689,9 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm) return PTR_ERR(tzd); } adc_tm->channels[i].tzd = tzd; + if (devm_thermal_add_hwmon_sysfs(tzd)) + dev_warn(adc_tm->dev, + "Failed to add hwmon sysfs attributes\n"); } return 0; diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index 7419e196dbb0..770f82cc9bca 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -16,6 +16,7 @@ #include <linux/thermal.h> #include "../thermal_core.h" +#include "../thermal_hwmon.h" #define QPNP_TM_REG_DIG_MAJOR 0x01 #define QPNP_TM_REG_TYPE 0x04 @@ -458,6 +459,10 @@ static int qpnp_tm_probe(struct platform_device *pdev) return ret; } + if (devm_thermal_add_hwmon_sysfs(chip->tz_dev)) + dev_warn(&pdev->dev, + "Failed to add hwmon sysfs attributes\n"); + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qpnp_tm_isr, IRQF_ONESHOT, node->name, chip); if (ret < 0) diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index 7963ee33bf75..e49f58e83513 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -933,17 +933,6 @@ static int tsens_get_temp(void *data, int *temp) return priv->ops->get_temp(s, temp); } -static int tsens_get_trend(void *data, int trip, enum thermal_trend *trend) -{ - struct tsens_sensor *s = data; - struct tsens_priv *priv = s->priv; - - if (priv->ops->get_trend) - return priv->ops->get_trend(s, trend); - - return -ENOTSUPP; -} - static int __maybe_unused tsens_suspend(struct device *dev) { struct tsens_priv *priv = dev_get_drvdata(dev); @@ -1004,7 +993,6 @@ MODULE_DEVICE_TABLE(of, tsens_table); static const struct thermal_zone_of_device_ops tsens_of_ops = { .get_temp = tsens_get_temp, - .get_trend = tsens_get_trend, .set_trips = tsens_set_trips, }; diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h index 1471a2c00f15..ba05c8233356 100644 --- a/drivers/thermal/qcom/tsens.h +++ b/drivers/thermal/qcom/tsens.h @@ -65,7 +65,6 @@ struct tsens_sensor { * @disable: Function to disable the tsens device * @suspend: Function to suspend the tsens device * @resume: Function to resume the tsens device - * @get_trend: Function to get the thermal/temp trend */ struct tsens_ops { /* mandatory callbacks */ @@ -77,7 +76,6 @@ struct tsens_ops { void (*disable)(struct tsens_priv *priv); int (*suspend)(struct tsens_priv *priv); int (*resume)(struct tsens_priv *priv); - int (*get_trend)(struct tsens_sensor *s, enum thermal_trend *trend); }; #define REG_FIELD_FOR_EACH_SENSOR11(_name, _offset, _startbit, _stopbit) \ diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 43eb25b167bc..cda7c52f2319 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -399,6 +399,10 @@ static const struct of_device_id rcar_gen3_thermal_dt_ids[] = { .compatible = "renesas,r8a779a0-thermal", .data = &rcar_gen3_ths_tj_1, }, + { + .compatible = "renesas,r8a779f0-thermal", + .data = &rcar_gen3_ths_tj_1, + }, {}, }; MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids); @@ -507,7 +511,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev) zone = devm_thermal_zone_of_sensor_register(dev, i, tsc, &rcar_gen3_tz_of_ops); if (IS_ERR(zone)) { - dev_err(dev, "Can't register thermal zone\n"); + dev_err(dev, "Sensor %u: Can't register thermal zone\n", i); ret = PTR_ERR(zone); goto error_unregister; } @@ -529,7 +533,7 @@ static int 
rcar_gen3_thermal_probe(struct platform_device *pdev) if (ret < 0) goto error_unregister; - dev_info(dev, "TSC%u: Loaded %d trip points\n", i, ret); + dev_info(dev, "Sensor %u: Loaded %d trip points\n", i, ret); } if (!priv->num_tscs) { diff --git a/drivers/thermal/rzg2l_thermal.c b/drivers/thermal/rzg2l_thermal.c index be07e04c6926..51ae80eda6af 100644 --- a/drivers/thermal/rzg2l_thermal.c +++ b/drivers/thermal/rzg2l_thermal.c @@ -47,7 +47,7 @@ #define TS_CODE_AVE_SCALE(x) ((x) * 1000000) #define MCELSIUS(temp) ((temp) * MILLIDEGREE_PER_DEGREE) -#define TS_CODE_CAP_TIMES 8 /* Capture times */ +#define TS_CODE_CAP_TIMES 8 /* Total number of ADC data samples */ #define RZG2L_THERMAL_GRAN 500 /* milli Celsius */ #define RZG2L_TSU_SS_TIMEOUT_US 1000 @@ -80,7 +80,8 @@ static int rzg2l_thermal_get_temp(void *devdata, int *temp) int val, i; for (i = 0; i < TS_CODE_CAP_TIMES ; i++) { - /* TSU repeats measurement at 20 microseconds intervals and + /* + * TSU repeats measurement at 20 microseconds intervals and * automatically updates the results of measurement. As per * the HW manual for measuring temperature we need to read 8 * values consecutively and then take the average. @@ -92,16 +93,18 @@ static int rzg2l_thermal_get_temp(void *devdata, int *temp) ts_code_ave = result / TS_CODE_CAP_TIMES; - /* Calculate actual sensor value by applying curvature correction formula + /* + * Calculate actual sensor value by applying curvature correction formula * dsensor = ts_code_ave / (1 + ts_code_ave * 0.000013). Here we are doing * integer calculation by scaling all the values by 1000000. */ dsensor = TS_CODE_AVE_SCALE(ts_code_ave) / (TS_CODE_AVE_SCALE(1) + (ts_code_ave * CURVATURE_CORRECTION_CONST)); - /* The temperature Tj is calculated by the formula + /* + * The temperature Tj is calculated by the formula * Tj = (dsensor − calib1) * 165/ (calib0 − calib1) − 40 - * where calib0 and calib1 are the caliberation values. + * where calib0 and calib1 are the calibration values. */ val = ((dsensor - priv->calib1) * (MCELSIUS(165) / (priv->calib0 - priv->calib1))) - MCELSIUS(40); @@ -122,7 +125,8 @@ static int rzg2l_thermal_init(struct rzg2l_thermal_priv *priv) rzg2l_thermal_write(priv, TSU_SM, TSU_SM_NORMAL_MODE); rzg2l_thermal_write(priv, TSU_ST, 0); - /* Before setting the START bit, TSU should be in normal operating + /* + * Before setting the START bit, TSU should be in normal operating * mode. As per the HW manual, it will take 60 µs to place the TSU * into normal operating mode. */ @@ -217,7 +221,7 @@ static int rzg2l_thermal_probe(struct platform_device *pdev) if (ret) goto err; - dev_dbg(dev, "TSU probed with %s caliberation values", + dev_dbg(dev, "TSU probed with %s calibration values", rzg2l_thermal_read(priv, OTPTSUTRIM_REG(0)) ? "hw" : "sw"); return 0; diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c index d9cd23cbb671..212c87e63a66 100644 --- a/drivers/thermal/sun8i_thermal.c +++ b/drivers/thermal/sun8i_thermal.c @@ -237,7 +237,7 @@ static int sun50i_h6_ths_calibrate(struct ths_device *tmdev, * The calibration data on the H6 is the ambient temperature and * sensor values that are filled during the factory test stage. * - * The unit of stored FT temperature is 0.1 degreee celusis. + * The unit of stored FT temperature is 0.1 degree celsius. * * We need to calculate a delta between measured and calculated * register values and this will become a calibration offset.
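The rzg2l conversion above compresses two fixed-point steps into driver code. Written out as a single helper (a sketch with hypothetical names, assuming a 12-bit sensor code so the u32 multiply cannot overflow, and CURVATURE_CORRECTION_CONST taken as 13, i.e. 0.000013 scaled by 1000000):

static int example_rzg2l_temp_mc(u32 ts_code_ave, int calib0, int calib1)
{
        u32 dsensor;

        /* dsensor = ts_code_ave / (1 + ts_code_ave * 0.000013),
         * carried out in integers by scaling both sides by 1000000 */
        dsensor = (ts_code_ave * 1000000) /
                  (1000000 + ts_code_ave * 13);

        /* Tj = (dsensor - calib1) * 165 / (calib0 - calib1) - 40,
         * returned in milli-Celsius; note the driver divides
         * MCELSIUS(165) by the calibration delta first */
        return ((int)dsensor - calib1) * (165000 / (calib0 - calib1)) - 40000;
}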
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c index 210325f92559..825eab526619 100644 --- a/drivers/thermal/tegra/soctherm.c +++ b/drivers/thermal/tegra/soctherm.c @@ -633,37 +633,6 @@ static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp) return 0; } -static int tegra_thermctl_get_trend(void *data, int trip, - enum thermal_trend *trend) -{ - struct tegra_thermctl_zone *zone = data; - struct thermal_zone_device *tz = zone->tz; - int trip_temp, temp, last_temp, ret; - - if (!tz) - return -EINVAL; - - ret = tz->ops->get_trip_temp(zone->tz, trip, &trip_temp); - if (ret) - return ret; - - temp = READ_ONCE(tz->temperature); - last_temp = READ_ONCE(tz->last_temperature); - - if (temp > trip_temp) { - if (temp >= last_temp) - *trend = THERMAL_TREND_RAISING; - else - *trend = THERMAL_TREND_STABLE; - } else if (temp < trip_temp) { - *trend = THERMAL_TREND_DROPPING; - } else { - *trend = THERMAL_TREND_STABLE; - } - - return 0; -} - static void thermal_irq_enable(struct tegra_thermctl_zone *zn) { u32 r; @@ -716,7 +685,6 @@ static int tegra_thermctl_set_trips(void *data, int lo, int hi) static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = { .get_temp = tegra_thermctl_get_temp, .set_trip_temp = tegra_thermctl_set_trip_temp, - .get_trend = tegra_thermctl_get_trend, .set_trips = tegra_thermctl_set_trips, }; diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c index 9b6b693cbcf8..05886684f429 100644 --- a/drivers/thermal/tegra/tegra30-tsensor.c +++ b/drivers/thermal/tegra/tegra30-tsensor.c @@ -316,7 +316,7 @@ static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd, *hot_trip = 85000; *crit_trip = 90000; - for (i = 0; i < tzd->trips; i++) { + for (i = 0; i < tzd->num_trips; i++) { enum thermal_trip_type type; int trip_temp; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index cdc0552e8c42..6a5d0ae5d7a4 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -340,12 +340,8 @@ void thermal_zone_device_critical(struct thermal_zone_device *tz) EXPORT_SYMBOL(thermal_zone_device_critical); static void handle_critical_trips(struct thermal_zone_device *tz, - int trip, enum thermal_trip_type trip_type) + int trip, int trip_temp, enum thermal_trip_type trip_type) { - int trip_temp; - - tz->ops->get_trip_temp(tz, trip, &trip_temp); - /* If we have not crossed the trip_temp, we do not care. 
*/ if (trip_temp <= 0 || tz->temperature < trip_temp) return; @@ -384,7 +380,7 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip) } if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT) - handle_critical_trips(tz, trip, type); + handle_critical_trips(tz, trip, trip_temp, type); else handle_non_critical_trips(tz, trip); /* @@ -505,7 +501,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz, tz->notify_event = event; - for (count = 0; count < tz->trips; count++) + for (count = 0; count < tz->num_trips; count++) handle_thermal_trip(tz, count); } EXPORT_SYMBOL_GPL(thermal_zone_device_update); @@ -630,7 +626,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, unsigned long max_state; int result, ret; - if (trip >= tz->trips || trip < 0) + if (trip >= tz->num_trips || trip < 0) return -EINVAL; list_for_each_entry(pos1, &thermal_tz_list, node) { @@ -667,7 +663,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, dev->target = THERMAL_NO_TARGET; dev->weight = weight; - result = ida_simple_get(&tz->ida, 0, 0, GFP_KERNEL); + result = ida_alloc(&tz->ida, GFP_KERNEL); if (result < 0) goto free_mem; @@ -721,7 +717,7 @@ remove_trip_file: remove_symbol_link: sysfs_remove_link(&tz->device.kobj, dev->name); release_ida: - ida_simple_remove(&tz->ida, dev->id); + ida_free(&tz->ida, dev->id); free_mem: kfree(dev); return result; @@ -768,7 +764,7 @@ unbind: device_remove_file(&tz->device, &pos->weight_attr); device_remove_file(&tz->device, &pos->attr); sysfs_remove_link(&tz->device.kobj, pos->name); - ida_simple_remove(&tz->ida, pos->id); + ida_free(&tz->ida, pos->id); kfree(pos); return 0; } @@ -811,7 +807,7 @@ static void __bind(struct thermal_zone_device *tz, int mask, { int i, ret; - for (i = 0; i < tz->trips; i++) { + for (i = 0; i < tz->num_trips; i++) { if (mask & (1 << i)) { unsigned long upper, lower; @@ -901,7 +897,7 @@ __thermal_cooling_device_register(struct device_node *np, if (!cdev) return ERR_PTR(-ENOMEM); - ret = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL); + ret = ida_alloc(&thermal_cdev_ida, GFP_KERNEL); if (ret < 0) goto out_kfree_cdev; cdev->id = ret; @@ -952,7 +948,7 @@ out_kfree_type: put_device(&cdev->device); cdev = NULL; out_ida_remove: - ida_simple_remove(&thermal_cdev_ida, id); + ida_free(&thermal_cdev_ida, id); out_kfree_cdev: kfree(cdev); return ERR_PTR(ret); @@ -1057,7 +1053,7 @@ static void __unbind(struct thermal_zone_device *tz, int mask, { int i; - for (i = 0; i < tz->trips; i++) + for (i = 0; i < tz->num_trips; i++) if (mask & (1 << i)) thermal_zone_unbind_cooling_device(tz, i, cdev); } @@ -1111,7 +1107,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) mutex_unlock(&thermal_list_lock); - ida_simple_remove(&thermal_cdev_ida, cdev->id); + ida_free(&thermal_cdev_ida, cdev->id); device_del(&cdev->device); thermal_cooling_device_destroy_sysfs(cdev); kfree(cdev->type); @@ -1159,10 +1155,18 @@ exit: mutex_unlock(&thermal_list_lock); } +static void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms) +{ + *delay_jiffies = msecs_to_jiffies(delay_ms); + if (delay_ms > 1000) + *delay_jiffies = round_jiffies(*delay_jiffies); +} + /** - * thermal_zone_device_register() - register a new thermal zone device + * thermal_zone_device_register_with_trips() - register a new thermal zone device * @type: the thermal zone device type - * @trips: the number of trip points the thermal zone support + * @trips: a pointer to an array of thermal trips + * 
@num_trips: the number of trip points the thermal zone supports * @mask: a bit string indicating the writeability of trip points * @devdata: private device data * @ops: standard thermal zone device callbacks @@ -1184,10 +1188,10 @@ exit: * IS_ERR*() helpers. */ struct thermal_zone_device * -thermal_zone_device_register(const char *type, int trips, int mask, - void *devdata, struct thermal_zone_device_ops *ops, - struct thermal_zone_params *tzp, int passive_delay, - int polling_delay) +thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *trips, int num_trips, int mask, + void *devdata, struct thermal_zone_device_ops *ops, + struct thermal_zone_params *tzp, int passive_delay, + int polling_delay) { struct thermal_zone_device *tz; enum thermal_trip_type trip_type; @@ -1198,27 +1202,27 @@ thermal_zone_device_register(const char *type, int trips, int mask, struct thermal_governor *governor; if (!type || strlen(type) == 0) { - pr_err("Error: No thermal zone type defined\n"); + pr_err("No thermal zone type defined\n"); return ERR_PTR(-EINVAL); } if (type && strlen(type) >= THERMAL_NAME_LENGTH) { - pr_err("Error: Thermal zone name (%s) too long, should be under %d chars\n", + pr_err("Thermal zone name (%s) too long, should be under %d chars\n", type, THERMAL_NAME_LENGTH); return ERR_PTR(-EINVAL); } - if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips) { - pr_err("Error: Incorrect number of thermal trips\n"); + if (num_trips > THERMAL_MAX_TRIPS || num_trips < 0 || mask >> num_trips) { + pr_err("Incorrect number of thermal trips\n"); return ERR_PTR(-EINVAL); } if (!ops) { - pr_err("Error: Thermal zone device ops not defined\n"); + pr_err("Thermal zone device ops not defined\n"); return ERR_PTR(-EINVAL); } - if (trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp)) + if (num_trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp)) return ERR_PTR(-EINVAL); tz = kzalloc(sizeof(*tz), GFP_KERNEL); @@ -1228,7 +1232,7 @@ thermal_zone_device_register(const char *type, int trips, int mask, INIT_LIST_HEAD(&tz->thermal_instances); ida_init(&tz->ida); mutex_init(&tz->lock); - id = ida_simple_get(&thermal_tz_ida, 0, 0, GFP_KERNEL); + id = ida_alloc(&thermal_tz_ida, GFP_KERNEL); if (id < 0) { result = id; goto free_tz; } @@ -1249,6 +1253,7 @@ thermal_zone_device_register(const char *type, int trips, int mask, tz->device.class = &thermal_class; tz->devdata = devdata; tz->trips = trips; + tz->num_trips = num_trips; thermal_set_delay_jiffies(&tz->passive_delay_jiffies, passive_delay); thermal_set_delay_jiffies(&tz->polling_delay_jiffies, polling_delay); @@ -1266,7 +1271,7 @@ thermal_zone_device_register(const char *type, int trips, int mask, if (result) goto release_device; - for (count = 0; count < trips; count++) { + for (count = 0; count < num_trips; count++) { if (tz->ops->get_trip_type(tz, count, &trip_type) || tz->ops->get_trip_temp(tz, count, &trip_temp) || !trip_temp) @@ -1319,11 +1324,21 @@ release_device: put_device(&tz->device); tz = NULL; remove_id: - ida_simple_remove(&thermal_tz_ida, id); + ida_free(&thermal_tz_ida, id); free_tz: kfree(tz); return ERR_PTR(result); } + +struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask, + void *devdata, struct thermal_zone_device_ops *ops, + struct thermal_zone_params *tzp, int passive_delay, + int polling_delay) +{ + return thermal_zone_device_register_with_trips(type, NULL, ntrips, mask, + devdata, ops, tzp, + passive_delay, polling_delay); +}
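With the new entry point a driver can hand the core its trip table as plain data, while the old thermal_zone_device_register() becomes a wrapper passing a NULL array. Note that at this stage the get_trip_type()/get_trip_temp() ops are still mandatory whenever num_trips > 0, so the array complements rather than replaces them. A hedged usage sketch (names and values hypothetical):

#include <linux/kernel.h>
#include <linux/thermal.h>

static struct thermal_trip example_trips[] = {
        { .temperature = 75000, .hysteresis = 2000, .type = THERMAL_TRIP_PASSIVE },
        { .temperature = 95000, .hysteresis = 0,    .type = THERMAL_TRIP_CRITICAL },
};

static struct thermal_zone_device *
example_register(void *priv, struct thermal_zone_device_ops *ops)
{
        /* mask 0: no trip is writable from user space */
        return thermal_zone_device_register_with_trips("example",
                                                       example_trips,
                                                       ARRAY_SIZE(example_trips),
                                                       0, priv, ops, NULL,
                                                       0, 1000);
}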
EXPORT_SYMBOL_GPL(thermal_zone_device_register); /** @@ -1379,7 +1394,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) thermal_set_governor(tz, NULL); thermal_remove_hwmon_sysfs(tz); - ida_simple_remove(&thermal_tz_ida, tz->id); + ida_free(&thermal_tz_ida, tz->id); ida_destroy(&tz->ida); mutex_destroy(&tz->lock); device_unregister(&tz->device); diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 726e327b4205..c991bb290512 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -68,20 +68,6 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) void thermal_cdev_update(struct thermal_cooling_device *); void __thermal_cdev_update(struct thermal_cooling_device *cdev); -/** - * struct thermal_trip - representation of a point in temperature domain - * @np: pointer to struct device_node that this trip point was created from - * @temperature: temperature value in miliCelsius - * @hysteresis: relative hysteresis in miliCelsius - * @type: trip point type - */ -struct thermal_trip { - struct device_node *np; - int temperature; - int hysteresis; - enum thermal_trip_type type; -}; - int get_tz_trend(struct thermal_zone_device *tz, int trip); struct thermal_instance * @@ -126,7 +112,6 @@ int thermal_build_list_of_policies(char *buf); /* Helpers */ void thermal_zone_set_trips(struct thermal_zone_device *tz); -void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms); /* sysfs I/F */ int thermal_zone_create_device_groups(struct thermal_zone_device *, int); diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c index 3edd047e144f..690890f054a3 100644 --- a/drivers/thermal/thermal_helpers.c +++ b/drivers/thermal/thermal_helpers.c @@ -39,7 +39,6 @@ int get_tz_trend(struct thermal_zone_device *tz, int trip) return trend; } -EXPORT_SYMBOL(get_tz_trend); struct thermal_instance * get_thermal_instance(struct thermal_zone_device *tz, @@ -90,7 +89,7 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp) ret = tz->ops->get_temp(tz, temp); if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) { - for (count = 0; count < tz->trips; count++) { + for (count = 0; count < tz->num_trips; count++) { ret = tz->ops->get_trip_type(tz, count, &type); if (!ret && type == THERMAL_TRIP_CRITICAL) { ret = tz->ops->get_trip_temp(tz, count, @@ -138,7 +137,7 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz) if (!tz->ops->set_trips || !tz->ops->get_trip_hyst) goto exit; - for (i = 0; i < tz->trips; i++) { + for (i = 0; i < tz->num_trips; i++) { int trip_low; tz->ops->get_trip_temp(tz, i, &trip_temp); @@ -175,13 +174,6 @@ exit: mutex_unlock(&tz->lock); } -void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms) -{ - *delay_jiffies = msecs_to_jiffies(delay_ms); - if (delay_ms > 1000) - *delay_jiffies = round_jiffies(*delay_jiffies); -} - static void thermal_cdev_set_cur_state(struct thermal_cooling_device *cdev, int target) { @@ -228,7 +220,6 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev) } mutex_unlock(&cdev->lock); } -EXPORT_SYMBOL(thermal_cdev_update); /** * thermal_zone_get_slope - return the slope attribute of the thermal zone diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c index 32fea5174cc0..050d243a5fa1 100644 --- a/drivers/thermal/thermal_netlink.c +++ b/drivers/thermal/thermal_netlink.c @@ -469,7 +469,7 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p) 
mutex_lock(&tz->lock); - for (i = 0; i < tz->trips; i++) { + for (i = 0; i < tz->num_trips; i++) { enum thermal_trip_type type; int temp, hyst = 0; diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index b65d435cb92f..802c30b72a92 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -118,12 +118,7 @@ static int of_thermal_set_trips(struct thermal_zone_device *tz, */ int of_thermal_get_ntrips(struct thermal_zone_device *tz) { - struct __thermal_zone *data = tz->devdata; - - if (!data || IS_ERR(data)) - return -ENODEV; - - return data->ntrips; + return tz->num_trips; } EXPORT_SYMBOL_GPL(of_thermal_get_ntrips); @@ -139,9 +134,7 @@ EXPORT_SYMBOL_GPL(of_thermal_get_ntrips); */ bool of_thermal_is_trip_valid(struct thermal_zone_device *tz, int trip) { - struct __thermal_zone *data = tz->devdata; - - if (!data || trip >= data->ntrips || trip < 0) + if (trip >= tz->num_trips || trip < 0) return false; return true; @@ -161,12 +154,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid); const struct thermal_trip * of_thermal_get_trip_points(struct thermal_zone_device *tz) { - struct __thermal_zone *data = tz->devdata; - - if (!data) - return NULL; - - return data->trips; + return tz->trips; } EXPORT_SYMBOL_GPL(of_thermal_get_trip_points); @@ -281,12 +269,10 @@ static int of_thermal_unbind(struct thermal_zone_device *thermal, static int of_thermal_get_trip_type(struct thermal_zone_device *tz, int trip, enum thermal_trip_type *type) { - struct __thermal_zone *data = tz->devdata; - - if (trip >= data->ntrips || trip < 0) + if (trip >= tz->num_trips || trip < 0) return -EDOM; - *type = data->trips[trip].type; + *type = tz->trips[trip].type; return 0; } @@ -294,12 +280,10 @@ static int of_thermal_get_trip_type(struct thermal_zone_device *tz, int trip, static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip, int *temp) { - struct __thermal_zone *data = tz->devdata; - - if (trip >= data->ntrips || trip < 0) + if (trip >= tz->num_trips || trip < 0) return -EDOM; - *temp = data->trips[trip].temperature; + *temp = tz->trips[trip].temperature; return 0; } @@ -309,7 +293,7 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip, { struct __thermal_zone *data = tz->devdata; - if (trip >= data->ntrips || trip < 0) + if (trip >= tz->num_trips || trip < 0) return -EDOM; if (data->ops && data->ops->set_trip_temp) { @@ -321,7 +305,7 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip, } /* thermal framework should take care of data->mask & (1 << trip) */ - data->trips[trip].temperature = temp; + tz->trips[trip].temperature = temp; return 0; } @@ -329,12 +313,10 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip, static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip, int *hyst) { - struct __thermal_zone *data = tz->devdata; - - if (trip >= data->ntrips || trip < 0) + if (trip >= tz->num_trips || trip < 0) return -EDOM; - *hyst = data->trips[trip].hysteresis; + *hyst = tz->trips[trip].hysteresis; return 0; } @@ -342,13 +324,11 @@ static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip, static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip, int hyst) { - struct __thermal_zone *data = tz->devdata; - - if (trip >= data->ntrips || trip < 0) + if (trip >= tz->num_trips || trip < 0) return -EDOM; /* thermal framework should take care of data->mask & (1 << trip) */ - data->trips[trip].hysteresis = hyst; + 
tz->trips[trip].hysteresis = hyst; return 0; } @@ -356,12 +336,11 @@ static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip, static int of_thermal_get_crit_temp(struct thermal_zone_device *tz, int *temp) { - struct __thermal_zone *data = tz->devdata; int i; - for (i = 0; i < data->ntrips; i++) - if (data->trips[i].type == THERMAL_TRIP_CRITICAL) { - *temp = data->trips[i].temperature; + for (i = 0; i < tz->num_trips; i++) + if (tz->trips[i].type == THERMAL_TRIP_CRITICAL) { + *temp = tz->trips[i].temperature; return 0; } @@ -671,6 +650,35 @@ EXPORT_SYMBOL_GPL(devm_thermal_zone_of_sensor_unregister); /*** functions parsing device tree nodes ***/ +static int of_find_trip_id(struct device_node *np, struct device_node *trip) +{ + struct device_node *trips; + struct device_node *t; + int i = 0; + + trips = of_get_child_by_name(np, "trips"); + if (!trips) { + pr_err("Failed to find 'trips' node\n"); + return -EINVAL; + } + + /* + * Find the trip id point associated with the cooling device map + */ + for_each_child_of_node(trips, t) { + + if (t == trip) + goto out; + i++; + } + + i = -ENXIO; +out: + of_node_put(trips); + + return i; +} + /** * thermal_of_populate_bind_params - parse and fill cooling map data * @np: DT node containing a cooling-map node @@ -685,15 +693,15 @@ EXPORT_SYMBOL_GPL(devm_thermal_zone_of_sensor_unregister); * * Return: 0 on success, proper error code otherwise */ -static int thermal_of_populate_bind_params(struct device_node *np, - struct __thermal_bind_params *__tbp, - struct thermal_trip *trips, - int ntrips) +static int thermal_of_populate_bind_params(struct device_node *tz_np, + struct device_node *np, + struct __thermal_bind_params *__tbp) { struct of_phandle_args cooling_spec; struct __thermal_cooling_bind_param *__tcbp; struct device_node *trip; int ret, i, count; + int trip_id; u32 prop; /* Default weight. 
Usage is optional */ @@ -708,18 +716,14 @@ static int thermal_of_populate_bind_params(struct device_node *np, return -ENODEV; } - /* match using device_node */ - for (i = 0; i < ntrips; i++) - if (trip == trips[i].np) { - __tbp->trip_id = i; - break; - } - - if (i == ntrips) { - ret = -ENODEV; + trip_id = of_find_trip_id(tz_np, trip); + if (trip_id < 0) { + ret = trip_id; goto end; } + __tbp->trip_id = trip_id; + count = of_count_phandle_with_args(np, "cooling-device", "#cooling-cells"); if (count <= 0) { @@ -843,13 +847,56 @@ static int thermal_of_populate_trip(struct device_node *np, return ret; } - /* Required for cooling map matching */ - trip->np = np; - of_node_get(np); - return 0; } +static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *ntrips) +{ + struct thermal_trip *tt; + struct device_node *trips, *trip; + int ret, count; + + trips = of_get_child_by_name(np, "trips"); + if (!trips) { + pr_err("Failed to find 'trips' node\n"); + return ERR_PTR(-EINVAL); + } + + count = of_get_child_count(trips); + if (!count) { + pr_err("No trip point defined\n"); + ret = -EINVAL; + goto out_of_node_put; + } + + tt = kzalloc(sizeof(*tt) * count, GFP_KERNEL); + if (!tt) { + ret = -ENOMEM; + goto out_of_node_put; + } + + *ntrips = count; + + count = 0; + for_each_child_of_node(trips, trip) { + ret = thermal_of_populate_trip(trip, &tt[count++]); + if (ret) + goto out_kfree; + } + + of_node_put(trips); + + return tt; + +out_kfree: + kfree(tt); + *ntrips = 0; +out_of_node_put: + of_node_put(trips); + + return ERR_PTR(ret); +} + /** * thermal_of_build_thermal_zone - parse and fill one thermal zone data * @np: DT node containing a thermal zone node @@ -909,32 +956,12 @@ __init *thermal_of_build_thermal_zone(struct device_node *np) tz->offset = 0; } - /* trips */ - child = of_get_child_by_name(np, "trips"); - - /* No trips provided */ - if (!child) + tz->trips = thermal_of_trips_init(np, &tz->ntrips); + if (IS_ERR(tz->trips)) { + ret = PTR_ERR(tz->trips); goto finish; - - tz->ntrips = of_get_child_count(child); - if (tz->ntrips == 0) /* must have at least one child */ - goto finish; - - tz->trips = kcalloc(tz->ntrips, sizeof(*tz->trips), GFP_KERNEL); - if (!tz->trips) { - ret = -ENOMEM; - goto free_tz; } - i = 0; - for_each_child_of_node(child, gchild) { - ret = thermal_of_populate_trip(gchild, &tz->trips[i++]); - if (ret) - goto free_trips; - } - - of_node_put(child); - /* cooling-maps */ child = of_get_child_by_name(np, "cooling-maps"); @@ -954,10 +981,11 @@ __init *thermal_of_build_thermal_zone(struct device_node *np) i = 0; for_each_child_of_node(child, gchild) { - ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++], - tz->trips, tz->ntrips); - if (ret) + ret = thermal_of_populate_bind_params(np, gchild, &tz->tbps[i++]); + if (ret) { + of_node_put(gchild); goto free_tbps; + } } finish: @@ -978,10 +1006,7 @@ free_tbps: kfree(tz->tbps); free_trips: - for (i = 0; i < tz->ntrips; i++) - of_node_put(tz->trips[i].np); kfree(tz->trips); - of_node_put(gchild); free_tz: kfree(tz); of_node_put(child); @@ -1004,8 +1029,6 @@ static __init void of_thermal_free_zone(struct __thermal_zone *tz) } kfree(tz->tbps); - for (i = 0; i < tz->ntrips; i++) - of_node_put(tz->trips[i].np); kfree(tz->trips); kfree(tz); } @@ -1103,11 +1126,9 @@ int __init of_parse_thermal_zones(void) tzp->slope = tz->slope; tzp->offset = tz->offset; - zone = thermal_zone_device_register(child->name, tz->ntrips, - mask, tz, - ops, tzp, - tz->passive_delay, - tz->polling_delay); + zone = 
thermal_zone_device_register_with_trips(child->name, tz->trips, tz->ntrips, + mask, tz, ops, tzp, tz->passive_delay, + tz->polling_delay); if (IS_ERR(zone)) { pr_err("Failed to build %pOFn zone %ld\n", child, PTR_ERR(zone)); diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index 1c4aac8464a7..5018459e8dd9 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -416,15 +416,15 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask) int indx; /* This function works only for zones with at least one trip */ - if (tz->trips <= 0) + if (tz->num_trips <= 0) return -EINVAL; - tz->trip_type_attrs = kcalloc(tz->trips, sizeof(*tz->trip_type_attrs), + tz->trip_type_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_type_attrs), GFP_KERNEL); if (!tz->trip_type_attrs) return -ENOMEM; - tz->trip_temp_attrs = kcalloc(tz->trips, sizeof(*tz->trip_temp_attrs), + tz->trip_temp_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_temp_attrs), GFP_KERNEL); if (!tz->trip_temp_attrs) { kfree(tz->trip_type_attrs); @@ -432,7 +432,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask) } if (tz->ops->get_trip_hyst) { - tz->trip_hyst_attrs = kcalloc(tz->trips, + tz->trip_hyst_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_hyst_attrs), GFP_KERNEL); if (!tz->trip_hyst_attrs) { @@ -442,7 +442,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask) } } - attrs = kcalloc(tz->trips * 3 + 1, sizeof(*attrs), GFP_KERNEL); + attrs = kcalloc(tz->num_trips * 3 + 1, sizeof(*attrs), GFP_KERNEL); if (!attrs) { kfree(tz->trip_type_attrs); kfree(tz->trip_temp_attrs); @@ -451,7 +451,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask) return -ENOMEM; } - for (indx = 0; indx < tz->trips; indx++) { + for (indx = 0; indx < tz->num_trips; indx++) { /* create trip type attribute */ snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH, "trip_point_%d_type", indx); @@ -478,7 +478,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask) tz->trip_temp_attrs[indx].attr.store = trip_point_temp_store; } - attrs[indx + tz->trips] = &tz->trip_temp_attrs[indx].attr.attr; + attrs[indx + tz->num_trips] = &tz->trip_temp_attrs[indx].attr.attr; /* create Optional trip hyst attribute */ if (!tz->ops->get_trip_hyst) @@ -496,10 +496,10 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask) tz->trip_hyst_attrs[indx].attr.store = trip_point_hyst_store; } - attrs[indx + tz->trips * 2] = + attrs[indx + tz->num_trips * 2] = &tz->trip_hyst_attrs[indx].attr.attr; } - attrs[tz->trips * 3] = NULL; + attrs[tz->num_trips * 3] = NULL; tz->trips_attribute_group.attrs = attrs; @@ -540,7 +540,7 @@ int thermal_zone_create_device_groups(struct thermal_zone_device *tz, for (i = 0; i < size - 2; i++) groups[i] = thermal_zone_attribute_groups[i]; - if (tz->trips) { + if (tz->num_trips) { result = create_trip_attrs(tz, mask); if (result) { kfree(groups); @@ -561,7 +561,7 @@ void thermal_zone_destroy_device_groups(struct thermal_zone_device *tz) if (!tz) return; - if (tz->trips) + if (tz->num_trips) destroy_trip_attrs(tz); kfree(tz->device.groups); diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index ea0603b59309..67050a1a5b07 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c @@ -226,7 +226,7 @@ static irqreturn_t ti_bandgap_talert_irq_handler(int irq, void *data) /* * One TALERT interrupt: 
Two sources * If the interrupt is due to t_hot then mask t_hot and - * and unmask t_cold else mask t_cold and unmask t_hot + * unmask t_cold else mask t_cold and unmask t_hot */ if (t_hot) { ctrl &= ~tsr->mask_hot_mask; diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig index 4bfec8a28064..e76a6c173637 100644 --- a/drivers/thunderbolt/Kconfig +++ b/drivers/thunderbolt/Kconfig @@ -28,8 +28,10 @@ config USB4_DEBUGFS_WRITE this for production systems or distro kernels. config USB4_KUNIT_TEST - bool "KUnit tests" - depends on KUNIT=y + bool "KUnit tests" if !KUNIT_ALL_TESTS + depends on (USB4=m || KUNIT=y) + depends on KUNIT + default KUNIT_ALL_TESTS config USB4_DMA_TEST tristate "DMA traffic test driver" diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c index c89daac0ad8c..b1f0dc8df47c 100644 --- a/drivers/thunderbolt/acpi.c +++ b/drivers/thunderbolt/acpi.c @@ -301,37 +301,22 @@ static bool tb_acpi_bus_match(struct device *dev) return tb_is_switch(dev) || tb_is_usb4_port_device(dev); } -static struct acpi_device *tb_acpi_find_port(struct acpi_device *adev, - const struct tb_port *port) +static struct acpi_device *tb_acpi_switch_find_companion(struct tb_switch *sw) { - struct acpi_device *port_adev; - - if (!adev) - return NULL; + struct acpi_device *adev = NULL; + struct tb_switch *parent_sw; /* * Device routers exists under the downstream facing USB4 port * of the parent router. Their _ADR is always 0. */ - list_for_each_entry(port_adev, &adev->children, node) { - if (acpi_device_adr(port_adev) == port->port) - return port_adev; - } - - return NULL; -} - -static struct acpi_device *tb_acpi_switch_find_companion(struct tb_switch *sw) -{ - struct acpi_device *adev = NULL; - struct tb_switch *parent_sw; - parent_sw = tb_switch_parent(sw); if (parent_sw) { struct tb_port *port = tb_port_at(tb_route(sw), parent_sw); struct acpi_device *port_adev; - port_adev = tb_acpi_find_port(ACPI_COMPANION(&parent_sw->dev), port); + port_adev = acpi_find_child_by_adr(ACPI_COMPANION(&parent_sw->dev), + port->port); if (port_adev) adev = acpi_find_child_device(port_adev, 0, false); } else { @@ -364,8 +349,8 @@ static struct acpi_device *tb_acpi_find_companion(struct device *dev) if (tb_is_switch(dev)) return tb_acpi_switch_find_companion(tb_to_switch(dev)); else if (tb_is_usb4_port_device(dev)) - return tb_acpi_find_port(ACPI_COMPANION(dev->parent), - tb_to_usb4_port_device(dev)->port); + return acpi_find_child_by_adr(ACPI_COMPANION(dev->parent), + tb_to_usb4_port_device(dev)->port->port); return NULL; } diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index 2889a214dadc..99211f35a5cd 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c @@ -872,7 +872,6 @@ int tb_domain_init(void) { int ret; - tb_test_init(); tb_debugfs_init(); tb_acpi_init(); @@ -890,7 +889,6 @@ err_xdomain: err_acpi: tb_acpi_exit(); tb_debugfs_exit(); - tb_test_exit(); return ret; } @@ -903,5 +901,4 @@ void tb_domain_exit(void) tb_xdomain_exit(); tb_acpi_exit(); tb_debugfs_exit(); - tb_test_exit(); } diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 4602c69913fa..a831faa50f65 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -1271,12 +1271,4 @@ static inline void tb_service_debugfs_init(struct tb_service *svc) { } static inline void tb_service_debugfs_remove(struct tb_service *svc) { } #endif -#ifdef CONFIG_USB4_KUNIT_TEST -int tb_test_init(void); -void tb_test_exit(void); -#else -static inline int 
tb_test_init(void) { return 0; } -static inline void tb_test_exit(void) { } -#endif - #endif diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c index ee37f8b58f50..24c06e7354cd 100644 --- a/drivers/thunderbolt/test.c +++ b/drivers/thunderbolt/test.c @@ -2817,14 +2817,4 @@ static struct kunit_suite tb_test_suite = { .test_cases = tb_test_cases, }; -static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL }; - -int tb_test_init(void) -{ - return __kunit_test_suites_init(tb_test_suites); -} - -void tb_test_exit(void) -{ - return __kunit_test_suites_exit(tb_test_suites); -} +kunit_test_suite(tb_test_suite); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index a452748c69b2..7172cd1792df 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1099,8 +1099,8 @@ config SERIAL_TIMBERDALE config SERIAL_BCM63XX tristate "Broadcom BCM63xx/BCM33xx UART support" select SERIAL_CORE - depends on ARCH_BCM4908 || ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST - default ARCH_BCM4908 || ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC + depends on ARCH_BCM4908 || ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST + default ARCH_BCM4908 || ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC help This enables the driver for the onchip UART core found on the following chipsets: diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index c7b337480e3e..2b40174e93ac 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -2953,37 +2953,59 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int max_timeout) { - int err = 0; - unsigned long time_left; + unsigned long time_left = msecs_to_jiffies(max_timeout); unsigned long flags; + bool pending; + int err; +retry: time_left = wait_for_completion_timeout(hba->dev_cmd.complete, - msecs_to_jiffies(max_timeout)); + time_left); - spin_lock_irqsave(hba->host->host_lock, flags); - hba->dev_cmd.complete = NULL; if (likely(time_left)) { + /* + * The completion handler called complete() and the caller of + * this function still owns the @lrbp tag so the code below does + * not trigger any race conditions. + */ + hba->dev_cmd.complete = NULL; err = ufshcd_get_tr_ocs(lrbp); if (!err) err = ufshcd_dev_cmd_completion(hba, lrbp); - } - spin_unlock_irqrestore(hba->host->host_lock, flags); - - if (!time_left) { + } else { err = -ETIMEDOUT; dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", __func__, lrbp->task_tag); - if (!ufshcd_clear_cmds(hba, 1U << lrbp->task_tag)) + if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) { /* successfully cleared the command, retry if needed */ err = -EAGAIN; - /* - * in case of an error, after clearing the doorbell, - * we also need to clear the outstanding_request - * field in hba - */ - spin_lock_irqsave(&hba->outstanding_lock, flags); - __clear_bit(lrbp->task_tag, &hba->outstanding_reqs); - spin_unlock_irqrestore(&hba->outstanding_lock, flags); + /* + * Since clearing the command succeeded we also need to + * clear the task tag bit from the outstanding_reqs + * variable. 
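The thunderbolt test conversion above (and the nitro_enclaves one further below) replaces hand-rolled __kunit_test_suites_init()/__kunit_test_suites_exit() plumbing with the kunit_test_suite() macro, which registers the suite with the KUnit executor and removes any need for the subsystem's own module init/exit to know about tests. The complete pattern, with an illustrative suite:

#include <kunit/test.h>

static void example_case(struct kunit *test)
{
        KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}

static struct kunit_case example_cases[] = {
        KUNIT_CASE(example_case),
        {}
};

static struct kunit_suite example_suite = {
        .name = "example",
        .test_cases = example_cases,
};

kunit_test_suite(example_suite);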
+ */ + spin_lock_irqsave(&hba->outstanding_lock, flags); + pending = test_bit(lrbp->task_tag, + &hba->outstanding_reqs); + if (pending) { + hba->dev_cmd.complete = NULL; + __clear_bit(lrbp->task_tag, + &hba->outstanding_reqs); + } + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (!pending) { + /* + * The completion handler ran while we tried to + * clear the command. + */ + time_left = 1; + goto retry; + } + } else { + dev_err(hba->dev, "%s: failed to clear tag %d\n", + __func__, lrbp->task_tag); + } } return err; @@ -9487,7 +9509,7 @@ void ufshcd_remove(struct ufs_hba *hba) ufs_bsg_remove(hba); ufshpb_remove(hba); ufs_sysfs_remove_nodes(hba->dev); - blk_cleanup_queue(hba->tmf_queue); + blk_mq_destroy_queue(hba->tmf_queue); blk_mq_free_tag_set(&hba->tmf_tag_set); scsi_remove_host(hba->host); /* disable interrupts */ @@ -9783,7 +9805,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) return 0; free_tmf_queue: - blk_cleanup_queue(hba->tmf_queue); + blk_mq_destroy_queue(hba->tmf_queue); free_tmf_tag_set: blk_mq_free_tag_set(&hba->tmf_tag_set); out_remove_scsi_host: diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c index de2bb8401bc4..a1a7a1175a5a 100644 --- a/drivers/ufs/core/ufshpb.c +++ b/drivers/ufs/core/ufshpb.c @@ -433,9 +433,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) return 0; } -static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, - int rgn_idx, enum req_opf dir, - bool atomic) +static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, int rgn_idx, + enum req_op op, bool atomic) { struct ufshpb_req *rq; struct request *req; @@ -446,7 +445,7 @@ static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, return NULL; retry: - req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir, + req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, op, BLK_MQ_REQ_NOWAIT); if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) { diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c index e7332cc65b1f..173aea8e9997 100644 --- a/drivers/ufs/host/ufshcd-pltfrm.c +++ b/drivers/ufs/host/ufshcd-pltfrm.c @@ -108,9 +108,20 @@ out: return ret; } +static bool phandle_exists(const struct device_node *np, + const char *phandle_name, int index) +{ + struct device_node *parse_np = of_parse_phandle(np, phandle_name, index); + + if (parse_np) + of_node_put(parse_np); + + return parse_np != NULL; +} + #define MAX_PROP_SIZE 32 static int ufshcd_populate_vreg(struct device *dev, const char *name, - struct ufs_vreg **out_vreg) + struct ufs_vreg **out_vreg) { char prop_name[MAX_PROP_SIZE]; struct ufs_vreg *vreg = NULL; @@ -122,7 +133,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name, } snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name); - if (!of_parse_phandle(np, prop_name, 0)) { + if (!phandle_exists(np, prop_name, 0)) { dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n", __func__, prop_name); goto out; diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index d4dcaefd0ea4..6d93428432f1 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -124,22 +124,6 @@ out: */ #define USB_ACPI_LOCATION_VALID (1 << 31) -static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent, - int raw) -{ - struct acpi_device *adev; - - if (!parent) - return NULL; - - list_for_each_entry(adev, &parent->children, node) { - if (acpi_device_adr(adev) == raw) - return adev; - } - - return 
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index d4dcaefd0ea4..6d93428432f1 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -124,22 +124,6 @@ out:
  */
 #define USB_ACPI_LOCATION_VALID (1 << 31)
 
-static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
-					      int raw)
-{
-	struct acpi_device *adev;
-
-	if (!parent)
-		return NULL;
-
-	list_for_each_entry(adev, &parent->children, node) {
-		if (acpi_device_adr(adev) == raw)
-			return adev;
-	}
-
-	return acpi_find_child_device(parent, raw, false);
-}
-
 static struct acpi_device *
 usb_acpi_get_companion_for_port(struct usb_port *port_dev)
 {
@@ -170,7 +154,7 @@ usb_acpi_get_companion_for_port(struct usb_port *port_dev)
 		port1 = port_dev->portnum;
 	}
 
-	return usb_acpi_find_port(adev, port1);
+	return acpi_find_child_by_adr(adev, port1);
 }
 
 static struct acpi_device *
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index e60b06f2ac22..521a3eabf0e1 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1132,7 +1132,7 @@ static struct file *vfio_device_open(struct vfio_device *device)
 	 * Appears to be missing by lack of need rather than
 	 * explicitly prevented.  Now there's need.
 	 */
-	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
 
 	if (device->group->type == VFIO_NO_IOMMU)
 		dev_warn(device->dev, "vfio-noiommu device opened by user "
diff --git a/drivers/virt/nitro_enclaves/Kconfig b/drivers/virt/nitro_enclaves/Kconfig
index 2d3d98158121..ce91add81401 100644
--- a/drivers/virt/nitro_enclaves/Kconfig
+++ b/drivers/virt/nitro_enclaves/Kconfig
@@ -16,8 +16,9 @@ config NITRO_ENCLAVES
 	  The module will be called nitro_enclaves.
 
 config NITRO_ENCLAVES_MISC_DEV_TEST
-	bool "Tests for the misc device functionality of the Nitro Enclaves"
-	depends on NITRO_ENCLAVES && KUNIT=y
+	bool "Tests for the misc device functionality of the Nitro Enclaves" if !KUNIT_ALL_TESTS
+	depends on NITRO_ENCLAVES && KUNIT
+	default KUNIT_ALL_TESTS
 	help
 	  Enable KUnit tests for the misc device functionality of the Nitro
 	  Enclaves. Select this option only if you will boot the kernel for
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
index 20c881b6a4b6..241b94f62e56 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
@@ -1759,35 +1759,10 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 #if defined(CONFIG_NITRO_ENCLAVES_MISC_DEV_TEST)
 #include "ne_misc_dev_test.c"
-
-static inline int ne_misc_dev_test_init(void)
-{
-	return __kunit_test_suites_init(ne_misc_dev_test_suites);
-}
-
-static inline void ne_misc_dev_test_exit(void)
-{
-	__kunit_test_suites_exit(ne_misc_dev_test_suites);
-}
-#else
-static inline int ne_misc_dev_test_init(void)
-{
-	return 0;
-}
-
-static inline void ne_misc_dev_test_exit(void)
-{
-}
 #endif
 
 static int __init ne_init(void)
 {
-	int rc = 0;
-
-	rc = ne_misc_dev_test_init();
-	if (rc < 0)
-		return rc;
-
 	mutex_init(&ne_cpu_pool.mutex);
 
 	return pci_register_driver(&ne_pci_driver);
@@ -1798,8 +1773,6 @@ static void __exit ne_exit(void)
 	pci_unregister_driver(&ne_pci_driver);
 
 	ne_teardown_cpu_pool();
-
-	ne_misc_dev_test_exit();
 }
 
 module_init(ne_init);
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev_test.c b/drivers/virt/nitro_enclaves/ne_misc_dev_test.c
index 265797bed0ea..74df43b925be 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev_test.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev_test.c
@@ -151,7 +151,4 @@ static struct kunit_suite ne_misc_dev_test_suite = {
 	.test_cases = ne_misc_dev_test_cases,
 };
 
-static struct kunit_suite *ne_misc_dev_test_suites[] = {
-	&ne_misc_dev_test_suite,
-	NULL
-};
+kunit_test_suite(ne_misc_dev_test_suite);
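[Editor's note] The ne_misc_dev_test.c change above moves the Nitro Enclaves suite onto the kunit_test_suite() registration macro, which is what lets ne_init()/ne_exit() drop their hand-rolled __kunit_test_suites_init()/__kunit_test_suites_exit() plumbing: the macro emits the registration hooks itself. A minimal, self-contained sketch of a suite registered this way (the suite name, case, and assertion are hypothetical, for illustration only):

#include <kunit/test.h>

/* A trivial example case; real suites exercise driver code instead. */
static void example_arithmetic_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 4, 2 + 2);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_arithmetic_test),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};

/* Registers the suite with the KUnit executor; no module init/exit
 * wiring is required from the driver.
 */
kunit_test_suite(example_test_suite);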
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index b9737da6c4dd..bd360b91e9d3 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -17,9 +17,6 @@
 #include <linux/oom.h>
 #include <linux/wait.h>
 #include <linux/mm.h>
-#include <linux/mount.h>
-#include <linux/magic.h>
-#include <linux/pseudo_fs.h>
 #include <linux/page_reporting.h>
 
 /*
@@ -42,10 +39,6 @@
 	(1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
 #define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER)
 
-#ifdef CONFIG_BALLOON_COMPACTION
-static struct vfsmount *balloon_mnt;
-#endif
-
 enum virtio_balloon_vq {
 	VIRTIO_BALLOON_VQ_INFLATE,
 	VIRTIO_BALLOON_VQ_DEFLATE,
@@ -805,18 +798,6 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
 
 	return MIGRATEPAGE_SUCCESS;
 }
-
-static int balloon_init_fs_context(struct fs_context *fc)
-{
-	return init_pseudo(fc, BALLOON_KVM_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type balloon_fs = {
-	.name = "balloon-kvm",
-	.init_fs_context = balloon_init_fs_context,
-	.kill_sb = kill_anon_super,
-};
-
 #endif /* CONFIG_BALLOON_COMPACTION */
 
 static unsigned long shrink_free_pages(struct virtio_balloon *vb,
@@ -909,19 +890,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
 		goto out_free_vb;
 
 #ifdef CONFIG_BALLOON_COMPACTION
-	balloon_mnt = kern_mount(&balloon_fs);
-	if (IS_ERR(balloon_mnt)) {
-		err = PTR_ERR(balloon_mnt);
-		goto out_del_vqs;
-	}
-
 	vb->vb_dev_info.migratepage = virtballoon_migratepage;
-	vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
-	if (IS_ERR(vb->vb_dev_info.inode)) {
-		err = PTR_ERR(vb->vb_dev_info.inode);
-		goto out_kern_unmount;
-	}
-	vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
 #endif
 	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
 		/*
@@ -930,13 +899,13 @@ static int virtballoon_probe(struct virtio_device *vdev)
 		 */
 		if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
 			err = -ENOSPC;
-			goto out_iput;
+			goto out_del_vqs;
 		}
 		vb->balloon_wq = alloc_workqueue("balloon-wq",
 					WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
 		if (!vb->balloon_wq) {
 			err = -ENOMEM;
-			goto out_iput;
+			goto out_del_vqs;
 		}
 		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
 		vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
@@ -1030,13 +999,7 @@ out_unregister_shrinker:
 out_del_balloon_wq:
 	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
 		destroy_workqueue(vb->balloon_wq);
-out_iput:
-#ifdef CONFIG_BALLOON_COMPACTION
-	iput(vb->vb_dev_info.inode);
-out_kern_unmount:
-	kern_unmount(balloon_mnt);
 out_del_vqs:
-#endif
 	vdev->config->del_vqs(vdev);
 out_free_vb:
 	kfree(vb);
@@ -1083,12 +1046,6 @@ static void virtballoon_remove(struct virtio_device *vdev)
 	}
 	remove_common(vb);
-#ifdef CONFIG_BALLOON_COMPACTION
-	if (vb->vb_dev_info.inode)
-		iput(vb->vb_dev_info.inode);
-
-	kern_unmount(balloon_mnt);
-#endif
 	kfree(vb);
 }
 
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 46d9295d9a6e..5e8321f43cbd 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -528,9 +528,10 @@ static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
 	BUG_ON(irq == -1);
 
 	if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
-		cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
-		cpumask_copy(irq_get_effective_affinity_mask(irq),
-			     cpumask_of(cpu));
+		struct irq_data *data = irq_get_irq_data(irq);
+
+		irq_data_update_affinity(data, cpumask_of(cpu));
+		irq_data_update_effective_affinity(data, cpumask_of(cpu));
 	}
 
 	xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
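[Editor's note] The events_base.c hunk above stops writing through the pointer returned by irq_get_affinity_mask() and instead goes through the irq_data update helpers, so the affinity masks are only touched via the genirq accessors. A minimal sketch of the idiom, assuming a hypothetical caller that wants to pin an IRQ to a single CPU:

#include <linux/cpumask.h>
#include <linux/irq.h>

/* Hypothetical helper: pin an IRQ's affinity and effective affinity to
 * one CPU using the accessor helpers rather than copying into the
 * masks directly.
 */
static void example_pin_irq_to_cpu(unsigned int irq, unsigned int cpu)
{
	struct irq_data *data = irq_get_irq_data(irq);

	if (!data)
		return;

	irq_data_update_affinity(data, cpumask_of(cpu));
	irq_data_update_effective_affinity(data, cpumask_of(cpu));
}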