Diffstat (limited to 'drivers')
526 files changed, 11572 insertions, 5222 deletions
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index e42c1f9ffff8..e9a1cb779b30 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -23,6 +23,7 @@ #include <linux/wait.h> #include <drm/drm_file.h> #include <drm/drm_gem.h> +#include <drm/drm_prime.h> #include <drm/drm_print.h> #include <uapi/drm/qaic_accel.h> @@ -616,8 +617,7 @@ static void qaic_free_object(struct drm_gem_object *obj) if (obj->import_attach) { /* DMABUF/PRIME Path */ - dma_buf_detach(obj->import_attach->dmabuf, obj->import_attach); - dma_buf_put(obj->import_attach->dmabuf); + drm_prime_gem_destroy(obj, NULL); } else { /* Private buffer allocation path */ qaic_free_sgt(bo->sgt); diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index 2d0828db28d8..b5ba550a0c04 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -97,6 +97,7 @@ static int qaic_open(struct drm_device *dev, struct drm_file *file) cleanup_usr: cleanup_srcu_struct(&usr->qddev_lock); + ida_free(&qaic_usrs, usr->handle); free_usr: kfree(usr); dev_unlock: @@ -224,6 +225,9 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id) struct qaic_user *usr; qddev = qdev->qddev; + qdev->qddev = NULL; + if (!qddev) + return; /* * Existing users get unresolvable errors till they close FDs. diff --git a/drivers/acpi/acpi_ffh.c b/drivers/acpi/acpi_ffh.c index 19aff808bbb8..8d5126963dc7 100644 --- a/drivers/acpi/acpi_ffh.c +++ b/drivers/acpi/acpi_ffh.c @@ -9,8 +9,6 @@ #include <linux/idr.h> #include <linux/io.h> -#include <linux/arm-smccc.h> - static struct acpi_ffh_info ffh_ctx; int __weak acpi_ffh_address_space_arch_setup(void *handler_ctxt, diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 77186f084d3a..539e700de4d2 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -201,11 +201,19 @@ static void byt_i2c_setup(struct lpss_private_data *pdata) writel(0, pdata->mmio_base + LPSS_I2C_ENABLE); } -/* BSW PWM used for backlight control by the i915 driver */ +/* + * BSW PWM1 is used for backlight control by the i915 driver + * BSW PWM2 is used for backlight control for fixed (etched into the glass) + * touch controls on some models. These touch-controls have specialized + * drivers which know they need the "pwm_soc_lpss_2" con-id. + */ static struct pwm_lookup bsw_pwm_lookup[] = { PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0", "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL, "pwm-lpss-platform"), + PWM_LOOKUP_WITH_MODULE("80862289:00", 0, NULL, + "pwm_soc_lpss_2", 0, PWM_POLARITY_NORMAL, + "pwm-lpss-platform"), }; static void bsw_pwm_setup(struct lpss_private_data *pdata) diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 02f1a1b1143c..7a453c5ff303 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -66,6 +66,7 @@ static void power_saving_mwait_init(void) case X86_VENDOR_AMD: case X86_VENDOR_INTEL: case X86_VENDOR_ZHAOXIN: + case X86_VENDOR_CENTAUR: /* * AMD Fam10h TSC will tick in all * C/P/S0/S1 states when this bit is set. 
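The qaic_drv.c hunk above plugs a resource leak: qaic_open() allocates usr->handle with ida_alloc(), but the cleanup_usr error path freed everything except that ID. A minimal stand-alone sketch of the same reverse-order goto-unwind idiom in plain C; alloc_handle(), free_handle() and init_lock() are illustrative stand-ins, not functions from the driver.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for ida_alloc()/ida_free() and init_srcu_struct(). */
static int alloc_handle(void) { return 42; }
static void free_handle(int h) { printf("freed handle %d\n", h); }
static int init_lock(void) { return -1; }       /* force the error path */

static int open_user(void)
{
        int handle, ret;

        handle = alloc_handle();

        ret = init_lock();
        if (ret)
                goto cleanup_handle;    /* unwind in reverse acquisition order */

        return 0;

cleanup_handle:
        free_handle(handle);    /* the qaic fix adds the analogous ida_free() */
        return ret;
}

int main(void)
{
        return open_user() ? EXIT_FAILURE : EXIT_SUCCESS;
}

Each label on the unwind path releases exactly what was acquired before the failing step, so a newly added acquisition (like the IDA handle here) must also gain a release on every later error path, which is what the one-line ida_free() addition restores.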
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index ebf8fd373cf7..79bbfe00d241 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -101,8 +101,6 @@ acpi_status acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, acpi_event_status *event_status); -acpi_status acpi_hw_disable_all_gpes(void); - acpi_status acpi_hw_enable_all_runtime_gpes(void); acpi_status acpi_hw_enable_all_wakeup_gpes(void); diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index 7514e38d5640..5427e49e646b 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -34,7 +34,7 @@ #define ACPI_BERT_PRINT_MAX_RECORDS 5 #define ACPI_BERT_PRINT_MAX_LEN 1024 -static int bert_disable; +static int bert_disable __initdata; /* * Print "all" the error records in the BERT table, but avoid huge spam to diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 34ad071a64e9..ef59d6ea16da 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -152,7 +152,6 @@ struct ghes_vendor_record_entry { }; static struct gen_pool *ghes_estatus_pool; -static unsigned long ghes_estatus_pool_size_request; static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE]; static atomic_t ghes_estatus_cache_alloced; @@ -191,7 +190,6 @@ int ghes_estatus_pool_init(unsigned int num_ghes) len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX; len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE); - ghes_estatus_pool_size_request = PAGE_ALIGN(len); addr = (unsigned long)vmalloc(PAGE_ALIGN(len)); if (!addr) goto err_pool_alloc; @@ -1544,6 +1542,8 @@ struct list_head *ghes_get_devices(void) pr_warn_once("Force-loading ghes_edac on an unsupported platform. You're on your own!\n"); } + } else if (list_empty(&ghes_devs)) { + return NULL; } return &ghes_devs; diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index e21a9e84e394..f81fe24894b2 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -3,4 +3,4 @@ obj-$(CONFIG_ACPI_AGDI) += agdi.o obj-$(CONFIG_ACPI_IORT) += iort.o obj-$(CONFIG_ACPI_GTDT) += gtdt.o obj-$(CONFIG_ACPI_APMT) += apmt.o -obj-y += dma.o +obj-y += dma.o init.o diff --git a/drivers/acpi/arm64/agdi.c b/drivers/acpi/arm64/agdi.c index f605302395c3..8b3c7d42b41b 100644 --- a/drivers/acpi/arm64/agdi.c +++ b/drivers/acpi/arm64/agdi.c @@ -9,11 +9,11 @@ #define pr_fmt(fmt) "ACPI: AGDI: " fmt #include <linux/acpi.h> -#include <linux/acpi_agdi.h> #include <linux/arm_sdei.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/platform_device.h> +#include "init.h" struct agdi_data { int sdei_event; diff --git a/drivers/acpi/arm64/apmt.c b/drivers/acpi/arm64/apmt.c index 8cab69fa5d59..bb010f6164e5 100644 --- a/drivers/acpi/arm64/apmt.c +++ b/drivers/acpi/arm64/apmt.c @@ -10,10 +10,10 @@ #define pr_fmt(fmt) "ACPI: APMT: " fmt #include <linux/acpi.h> -#include <linux/acpi_apmt.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> +#include "init.h" #define DEV_NAME "arm-cs-arch-pmu" @@ -35,11 +35,13 @@ static int __init apmt_init_resources(struct resource *res, num_res++; - res[num_res].start = node->base_address1; - res[num_res].end = node->base_address1 + SZ_4K - 1; - res[num_res].flags = IORESOURCE_MEM; + if (node->flags & ACPI_APMT_FLAGS_DUAL_PAGE) { + res[num_res].start = node->base_address1; + res[num_res].end = node->base_address1 + SZ_4K - 1; + res[num_res].flags = IORESOURCE_MEM; - num_res++; + num_res++; + 
} if (node->ovflw_irq != 0) { trigger = (node->ovflw_irq_flags & ACPI_APMT_OVFLW_IRQ_FLAGS_MODE); diff --git a/drivers/acpi/arm64/init.c b/drivers/acpi/arm64/init.c new file mode 100644 index 000000000000..d3ce53dda122 --- /dev/null +++ b/drivers/acpi/arm64/init.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/acpi.h> +#include "init.h" + +void __init acpi_arm_init(void) +{ + if (IS_ENABLED(CONFIG_ACPI_AGDI)) + acpi_agdi_init(); + if (IS_ENABLED(CONFIG_ACPI_APMT)) + acpi_apmt_init(); + if (IS_ENABLED(CONFIG_ACPI_IORT)) + acpi_iort_init(); +} diff --git a/drivers/acpi/arm64/init.h b/drivers/acpi/arm64/init.h new file mode 100644 index 000000000000..a1715a2a34e9 --- /dev/null +++ b/drivers/acpi/arm64/init.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#include <linux/init.h> + +void __init acpi_agdi_init(void); +void __init acpi_apmt_init(void); +void __init acpi_iort_init(void); diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 38fb84974f35..3631230a61c8 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -19,6 +19,7 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/dma-map-ops.h> +#include "init.h" #define IORT_TYPE_MASK(type) (1 << (type)) #define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP) diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d161ff707de4..e3e0bd0c5a50 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -26,9 +26,6 @@ #include <asm/mpspec.h> #include <linux/dmi.h> #endif -#include <linux/acpi_agdi.h> -#include <linux/acpi_apmt.h> -#include <linux/acpi_iort.h> #include <linux/acpi_viot.h> #include <linux/pci.h> #include <acpi/apei.h> @@ -530,65 +527,30 @@ static void acpi_notify_device(acpi_handle handle, u32 event, void *data) acpi_drv->ops.notify(device, event); } -static void acpi_notify_device_fixed(void *data) -{ - struct acpi_device *device = data; - - /* Fixed hardware devices have no handles */ - acpi_notify_device(NULL, ACPI_FIXED_HARDWARE_EVENT, device); -} - -static u32 acpi_device_fixed_event(void *data) -{ - acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_notify_device_fixed, data); - return ACPI_INTERRUPT_HANDLED; -} - static int acpi_device_install_notify_handler(struct acpi_device *device, struct acpi_driver *acpi_drv) { - acpi_status status; - - if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) { - status = - acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, - acpi_device_fixed_event, - device); - } else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) { - status = - acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, - acpi_device_fixed_event, - device); - } else { - u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ? + u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ? 
ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY; + acpi_status status; - status = acpi_install_notify_handler(device->handle, type, - acpi_notify_device, - device); - } - + status = acpi_install_notify_handler(device->handle, type, + acpi_notify_device, device); if (ACPI_FAILURE(status)) return -EINVAL; + return 0; } static void acpi_device_remove_notify_handler(struct acpi_device *device, struct acpi_driver *acpi_drv) { - if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) { - acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, - acpi_device_fixed_event); - } else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) { - acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, - acpi_device_fixed_event); - } else { - u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ? + u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ? ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY; - acpi_remove_notify_handler(device->handle, type, - acpi_notify_device); - } + acpi_remove_notify_handler(device->handle, type, + acpi_notify_device); + acpi_os_wait_events_complete(); } @@ -1408,7 +1370,7 @@ static int __init acpi_init(void) acpi_init_ffh(); pci_mmcfg_late_init(); - acpi_iort_init(); + acpi_arm_init(); acpi_viot_early_init(); acpi_hest_init(); acpi_ghes_init(); @@ -1420,8 +1382,6 @@ static int __init acpi_init(void) acpi_debugger_init(); acpi_setup_sb_notify_handler(); acpi_viot_init(); - acpi_agdi_init(); - acpi_apmt_init(); return 0; } diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 475e1eddfa3b..1e76a64cce0a 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -78,6 +78,15 @@ static const struct dmi_system_id dmi_lid_quirks[] = { .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED, }, { + /* Nextbook Ares 8A tablet, _LID device always reports lid closed */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"), + DMI_MATCH(DMI_BIOS_VERSION, "M882"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED, + }, + { /* * Lenovo Yoga 9 14ITL5, initial notification of the LID device * never happens. 
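The new drivers/acpi/arm64/init.c above funnels the ARM64 ACPI table initializers through one acpi_arm_init() gated by IS_ENABLED() checks, which is why bus.c can drop the three linux/acpi_*.h includes and make a single call. A small stand-alone sketch of that pattern, assuming a CONFIG_FOO-style macro defined to 0 or 1 at build time; the kernel's real IS_ENABLED() is more elaborate, since it also recognizes =m module builds.

#include <stdio.h>

#ifndef CONFIG_FOO
#define CONFIG_FOO 0    /* pretend the feature is compiled out */
#endif

#define IS_ENABLED(option) (option)     /* simplified, illustrative version */

static void foo_init(void)
{
        puts("foo_init");
}

int main(void)
{
        /*
         * The condition is a compile-time constant, so the call is dropped
         * when CONFIG_FOO is 0, yet foo_init() is still parsed and
         * type-checked, catching bitrot that an #ifdef block would hide.
         */
        if (IS_ENABLED(CONFIG_FOO))
                foo_init();
        return 0;
}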
@@ -126,7 +135,6 @@ static const struct dmi_system_id dmi_lid_quirks[] = { static int acpi_button_add(struct acpi_device *device); static void acpi_button_remove(struct acpi_device *device); -static void acpi_button_notify(struct acpi_device *device, u32 event); #ifdef CONFIG_PM_SLEEP static int acpi_button_suspend(struct device *dev); @@ -144,7 +152,6 @@ static struct acpi_driver acpi_button_driver = { .ops = { .add = acpi_button_add, .remove = acpi_button_remove, - .notify = acpi_button_notify, }, .drv.pm = &acpi_button_pm, }; @@ -400,45 +407,65 @@ static void acpi_lid_initialize_state(struct acpi_device *device) button->lid_state_initialized = true; } -static void acpi_button_notify(struct acpi_device *device, u32 event) +static void acpi_lid_notify(acpi_handle handle, u32 event, void *data) { - struct acpi_button *button = acpi_driver_data(device); + struct acpi_device *device = data; + struct acpi_button *button; + + if (event != ACPI_BUTTON_NOTIFY_STATUS) { + acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n", + event); + return; + } + + button = acpi_driver_data(device); + if (!button->lid_state_initialized) + return; + + acpi_lid_update_state(device, true); +} + +static void acpi_button_notify(acpi_handle handle, u32 event, void *data) +{ + struct acpi_device *device = data; + struct acpi_button *button; struct input_dev *input; + int keycode; - switch (event) { - case ACPI_FIXED_HARDWARE_EVENT: - event = ACPI_BUTTON_NOTIFY_STATUS; - fallthrough; - case ACPI_BUTTON_NOTIFY_STATUS: - input = button->input; - if (button->type == ACPI_BUTTON_TYPE_LID) { - if (button->lid_state_initialized) - acpi_lid_update_state(device, true); - } else { - int keycode; - - acpi_pm_wakeup_event(&device->dev); - if (button->suspended) - break; - - keycode = test_bit(KEY_SLEEP, input->keybit) ? - KEY_SLEEP : KEY_POWER; - input_report_key(input, keycode, 1); - input_sync(input); - input_report_key(input, keycode, 0); - input_sync(input); - - acpi_bus_generate_netlink_event( - device->pnp.device_class, - dev_name(&device->dev), - event, ++button->pushed); - } - break; - default: + if (event != ACPI_BUTTON_NOTIFY_STATUS) { acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n", event); - break; + return; } + + acpi_pm_wakeup_event(&device->dev); + + button = acpi_driver_data(device); + if (button->suspended) + return; + + input = button->input; + keycode = test_bit(KEY_SLEEP, input->keybit) ? 
KEY_SLEEP : KEY_POWER; + + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), + event, ++button->pushed); +} + +static void acpi_button_notify_run(void *data) +{ + acpi_button_notify(NULL, ACPI_BUTTON_NOTIFY_STATUS, data); +} + +static u32 acpi_button_event(void *data) +{ + acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_button_notify_run, data); + return ACPI_INTERRUPT_HANDLED; } #ifdef CONFIG_PM_SLEEP @@ -480,11 +507,13 @@ static int acpi_lid_input_open(struct input_dev *input) static int acpi_button_add(struct acpi_device *device) { + acpi_notify_handler handler; struct acpi_button *button; struct input_dev *input; const char *hid = acpi_device_hid(device); + acpi_status status; char *name, *class; - int error; + int error = 0; if (!strcmp(hid, ACPI_BUTTON_HID_LID) && lid_init_state == ACPI_BUTTON_LID_INIT_DISABLED) @@ -508,17 +537,20 @@ static int acpi_button_add(struct acpi_device *device) if (!strcmp(hid, ACPI_BUTTON_HID_POWER) || !strcmp(hid, ACPI_BUTTON_HID_POWERF)) { button->type = ACPI_BUTTON_TYPE_POWER; + handler = acpi_button_notify; strcpy(name, ACPI_BUTTON_DEVICE_NAME_POWER); sprintf(class, "%s/%s", ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER); } else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEP) || !strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) { button->type = ACPI_BUTTON_TYPE_SLEEP; + handler = acpi_button_notify; strcpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP); sprintf(class, "%s/%s", ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP); } else if (!strcmp(hid, ACPI_BUTTON_HID_LID)) { button->type = ACPI_BUTTON_TYPE_LID; + handler = acpi_lid_notify; strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID); sprintf(class, "%s/%s", ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID); @@ -526,12 +558,15 @@ static int acpi_button_add(struct acpi_device *device) } else { pr_info("Unsupported hid [%s]\n", hid); error = -ENODEV; - goto err_free_input; } - error = acpi_button_add_fs(device); - if (error) - goto err_free_input; + if (!error) + error = acpi_button_add_fs(device); + + if (error) { + input_free_device(input); + goto err_free_button; + } snprintf(button->phys, sizeof(button->phys), "%s/button/input0", hid); @@ -559,6 +594,29 @@ static int acpi_button_add(struct acpi_device *device) error = input_register_device(input); if (error) goto err_remove_fs; + + switch (device->device_type) { + case ACPI_BUS_TYPE_POWER_BUTTON: + status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, + acpi_button_event, + device); + break; + case ACPI_BUS_TYPE_SLEEP_BUTTON: + status = acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, + acpi_button_event, + device); + break; + default: + status = acpi_install_notify_handler(device->handle, + ACPI_DEVICE_NOTIFY, handler, + device); + break; + } + if (ACPI_FAILURE(status)) { + error = -ENODEV; + goto err_input_unregister; + } + if (button->type == ACPI_BUTTON_TYPE_LID) { /* * This assumes there's only one lid device, or if there are @@ -571,11 +629,11 @@ static int acpi_button_add(struct acpi_device *device) pr_info("%s [%s]\n", name, acpi_device_bid(device)); return 0; - err_remove_fs: +err_input_unregister: + input_unregister_device(input); +err_remove_fs: acpi_button_remove_fs(device); - err_free_input: - input_free_device(input); - err_free_button: +err_free_button: kfree(button); return error; } @@ -584,6 +642,24 @@ static void acpi_button_remove(struct acpi_device *device) { struct acpi_button *button = 
acpi_driver_data(device); + switch (device->device_type) { + case ACPI_BUS_TYPE_POWER_BUTTON: + acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, + acpi_button_event); + break; + case ACPI_BUS_TYPE_SLEEP_BUTTON: + acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, + acpi_button_event); + break; + default: + acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + button->type == ACPI_BUTTON_TYPE_LID ? + acpi_lid_notify : + acpi_button_notify); + break; + } + acpi_os_wait_events_complete(); + acpi_button_remove_fs(device); input_unregister_device(button->input); kfree(button); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 928899ab9502..8569f55e55b6 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -662,21 +662,6 @@ static void advance_transaction(struct acpi_ec *ec, bool interrupt) ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id()); - /* - * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1 - * changes to always trigger a GPE interrupt. - * - * GPE STS is a W1C register, which means: - * - * 1. Software can clear it without worrying about clearing the other - * GPEs' STS bits when the hardware sets them in parallel. - * - * 2. As long as software can ensure only clearing it when it is set, - * hardware won't set it in parallel. - */ - if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec)) - acpi_clear_gpe(NULL, ec->gpe); - status = acpi_ec_read_status(ec); /* @@ -1287,6 +1272,22 @@ static void acpi_ec_handle_interrupt(struct acpi_ec *ec) unsigned long flags; spin_lock_irqsave(&ec->lock, flags); + + /* + * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1 + * changes to always trigger a GPE interrupt. + * + * GPE STS is a W1C register, which means: + * + * 1. Software can clear it without worrying about clearing the other + * GPEs' STS bits when the hardware sets them in parallel. + * + * 2. As long as software can ensure only clearing it when it is set, + * hardware won't set it in parallel. 
+ */ + if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec)) + acpi_clear_gpe(NULL, ec->gpe); + advance_transaction(ec, true); spin_unlock_irqrestore(&ec->lock, flags); } diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 6023ad61831a..573bc0de2990 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -347,4 +347,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev); bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus); extern struct device_attribute dev_attr_firmware_activate_noidle; +void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem); + #endif /* __NFIT_H__ */ diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 9718d07cc2a2..dc615ef6550a 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -597,10 +597,6 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) io_idle(cx->address); } else return -ENODEV; - -#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU) - cond_wakeup_cpu0(); -#endif } /* Never reached */ diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 0800a9d77558..1dd8d5aebf67 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -470,52 +470,6 @@ static const struct dmi_system_id asus_laptop[] = { { } }; -static const struct dmi_system_id lenovo_laptop[] = { - { - .ident = "LENOVO IdeaPad Flex 5 14ALC7", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "82R9"), - }, - }, - { - .ident = "LENOVO IdeaPad Flex 5 16ALC7", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "82RA"), - }, - }, - { } -}; - -static const struct dmi_system_id tongfang_gm_rg[] = { - { - .ident = "TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"), - }, - }, - { } -}; - -static const struct dmi_system_id maingear_laptop[] = { - { - .ident = "MAINGEAR Vector Pro 2 15", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"), - DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"), - } - }, - { - .ident = "MAINGEAR Vector Pro 2 17", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"), - DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"), - }, - }, - { } -}; - static const struct dmi_system_id lg_laptop[] = { { .ident = "LG Electronics 17U70P", @@ -539,10 +493,6 @@ struct irq_override_cmp { static const struct irq_override_cmp override_table[] = { { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, - { lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, - { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, - { tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, - { maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, { lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, }; @@ -562,16 +512,6 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, return entry->override; } -#ifdef CONFIG_X86 - /* - * IRQ override isn't needed on modern AMD Zen systems and - * this override breaks active low IRQs on AMD Ryzen 6000 and - * newer systems. Skip it. 
- */ - if (boot_cpu_has(X86_FEATURE_ZEN)) - return false; -#endif - return true; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 0c6f06abe3f4..1c3e1e2bb0b5 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2029,8 +2029,6 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) return count; } -static bool acpi_bus_scan_second_pass; - static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, struct acpi_device **adev_p) { @@ -2050,10 +2048,8 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, return AE_OK; /* Bail out if there are dependencies. */ - if (acpi_scan_check_dep(handle, check_dep) > 0) { - acpi_bus_scan_second_pass = true; + if (acpi_scan_check_dep(handle, check_dep) > 0) return AE_CTRL_DEPTH; - } fallthrough; case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */ @@ -2301,6 +2297,12 @@ static bool acpi_scan_clear_dep_queue(struct acpi_device *adev) return true; } +static void acpi_scan_delete_dep_data(struct acpi_dep_data *dep) +{ + list_del(&dep->node); + kfree(dep); +} + static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data) { struct acpi_device *adev = acpi_get_acpi_dev(dep->consumer); @@ -2311,8 +2313,10 @@ static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data) acpi_dev_put(adev); } - list_del(&dep->node); - kfree(dep); + if (dep->free_when_met) + acpi_scan_delete_dep_data(dep); + else + dep->met = true; return 0; } @@ -2406,6 +2410,55 @@ struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier, } EXPORT_SYMBOL_GPL(acpi_dev_get_next_consumer_dev); +static void acpi_scan_postponed_branch(acpi_handle handle) +{ + struct acpi_device *adev = NULL; + + if (ACPI_FAILURE(acpi_bus_check_add(handle, false, &adev))) + return; + + acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, + acpi_bus_check_add_2, NULL, NULL, (void **)&adev); + acpi_bus_attach(adev, NULL); +} + +static void acpi_scan_postponed(void) +{ + struct acpi_dep_data *dep, *tmp; + + mutex_lock(&acpi_dep_list_lock); + + list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) { + acpi_handle handle = dep->consumer; + + /* + * In case there are multiple acpi_dep_list entries with the + * same consumer, skip the current entry if the consumer device + * object corresponding to it is present already. + */ + if (!acpi_fetch_acpi_dev(handle)) { + /* + * Even though the lock is released here, tmp is + * guaranteed to be valid, because none of the list + * entries following dep is marked as "free when met" + * and so they cannot be deleted. + */ + mutex_unlock(&acpi_dep_list_lock); + + acpi_scan_postponed_branch(handle); + + mutex_lock(&acpi_dep_list_lock); + } + + if (dep->met) + acpi_scan_delete_dep_data(dep); + else + dep->free_when_met = true; + } + + mutex_unlock(&acpi_dep_list_lock); +} + /** * acpi_bus_scan - Add ACPI device node objects in a given namespace scope. * @handle: Root of the namespace scope to scan. @@ -2424,8 +2477,6 @@ int acpi_bus_scan(acpi_handle handle) { struct acpi_device *device = NULL; - acpi_bus_scan_second_pass = false; - /* Pass 1: Avoid enumerating devices with missing dependencies. */ if (ACPI_SUCCESS(acpi_bus_check_add(handle, true, &device))) @@ -2438,19 +2489,9 @@ int acpi_bus_scan(acpi_handle handle) acpi_bus_attach(device, (void *)true); - if (!acpi_bus_scan_second_pass) - return 0; - /* Pass 2: Enumerate all of the remaining devices. 
*/ - device = NULL; - - if (ACPI_SUCCESS(acpi_bus_check_add(handle, false, &device))) - acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, - acpi_bus_check_add_2, NULL, NULL, - (void **)&device); - - acpi_bus_attach(device, NULL); + acpi_scan_postponed(); return 0; } diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 72470b9f16c4..808484d11209 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -636,11 +636,19 @@ static int acpi_suspend_enter(suspend_state_t pm_state) } /* - * Disable and clear GPE status before interrupt is enabled. Some GPEs - * (like wakeup GPE) haven't handler, this can avoid such GPE misfire. - * acpi_leave_sleep_state will reenable specific GPEs later + * Disable all GPE and clear their status bits before interrupts are + * enabled. Some GPEs (like wakeup GPEs) have no handlers and this can + * prevent them from producing spurious interrupts. + * + * acpi_leave_sleep_state() will reenable specific GPEs later. + * + * Because this code runs on one CPU with disabled interrupts (all of + * the other CPUs are offline at this time), it need not acquire any + * sleeping locks which may trigger an implicit preemption point even + * if there is no contention, so avoid doing that by using a low-level + * library routine here. */ - acpi_disable_all_gpes(); + acpi_hw_disable_all_gpes(); /* Allow EC transactions to happen. */ acpi_ec_unblock_transactions(); @@ -840,7 +848,7 @@ void __weak acpi_s2idle_setup(void) s2idle_set_ops(&acpi_s2idle_ops); } -static void acpi_sleep_suspend_setup(void) +static void __init acpi_sleep_suspend_setup(void) { bool suspend_ops_needed = false; int i; diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 4720a3649a61..f9f6ebb08fdb 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -40,12 +40,35 @@ #define ACPI_THERMAL_NOTIFY_HOT 0xF1 #define ACPI_THERMAL_MODE_ACTIVE 0x00 -#define ACPI_THERMAL_MAX_ACTIVE 10 -#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65 +#define ACPI_THERMAL_MAX_ACTIVE 10 +#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65 -MODULE_AUTHOR("Paul Diefenbaugh"); -MODULE_DESCRIPTION("ACPI Thermal Zone Driver"); -MODULE_LICENSE("GPL"); +#define ACPI_TRIPS_CRITICAL BIT(0) +#define ACPI_TRIPS_HOT BIT(1) +#define ACPI_TRIPS_PASSIVE BIT(2) +#define ACPI_TRIPS_ACTIVE BIT(3) +#define ACPI_TRIPS_DEVICES BIT(4) + +#define ACPI_TRIPS_THRESHOLDS (ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE) + +#define ACPI_TRIPS_INIT (ACPI_TRIPS_CRITICAL | ACPI_TRIPS_HOT | \ + ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE | \ + ACPI_TRIPS_DEVICES) + +/* + * This exception is thrown out in two cases: + * 1.An invalid trip point becomes invalid or a valid trip point becomes invalid + * when re-evaluating the AML code. + * 2.TODO: Devices listed in _PSL, _ALx, _TZD may change. + * We need to re-bind the cooling devices of a thermal zone when this occurs.
+ */ +#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, tz, str) \ +do { \ + if (flags != ACPI_TRIPS_INIT) \ + acpi_handle_info(tz->device->handle, \ + "ACPI thermal trip point %s changed\n" \ + "Please report to linux-acpi@vger.kernel.org\n", str); \ +} while (0) static int act; module_param(act, int, 0644); @@ -73,75 +96,30 @@ MODULE_PARM_DESC(psv, "Disable or override all passive trip points."); static struct workqueue_struct *acpi_thermal_pm_queue; -static int acpi_thermal_add(struct acpi_device *device); -static void acpi_thermal_remove(struct acpi_device *device); -static void acpi_thermal_notify(struct acpi_device *device, u32 event); - -static const struct acpi_device_id thermal_device_ids[] = { - {ACPI_THERMAL_HID, 0}, - {"", 0}, -}; -MODULE_DEVICE_TABLE(acpi, thermal_device_ids); - -#ifdef CONFIG_PM_SLEEP -static int acpi_thermal_suspend(struct device *dev); -static int acpi_thermal_resume(struct device *dev); -#else -#define acpi_thermal_suspend NULL -#define acpi_thermal_resume NULL -#endif -static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, acpi_thermal_suspend, acpi_thermal_resume); - -static struct acpi_driver acpi_thermal_driver = { - .name = "thermal", - .class = ACPI_THERMAL_CLASS, - .ids = thermal_device_ids, - .ops = { - .add = acpi_thermal_add, - .remove = acpi_thermal_remove, - .notify = acpi_thermal_notify, - }, - .drv.pm = &acpi_thermal_pm, -}; - -struct acpi_thermal_state { - u8 critical:1; - u8 hot:1; - u8 passive:1; - u8 active:1; - u8 reserved:4; - int active_index; -}; - -struct acpi_thermal_state_flags { - u8 valid:1; - u8 enabled:1; - u8 reserved:6; -}; - struct acpi_thermal_critical { - struct acpi_thermal_state_flags flags; unsigned long temperature; + bool valid; }; struct acpi_thermal_hot { - struct acpi_thermal_state_flags flags; unsigned long temperature; + bool valid; }; struct acpi_thermal_passive { - struct acpi_thermal_state_flags flags; + struct acpi_handle_list devices; unsigned long temperature; unsigned long tc1; unsigned long tc2; unsigned long tsp; - struct acpi_handle_list devices; + bool valid; }; struct acpi_thermal_active { - struct acpi_thermal_state_flags flags; - unsigned long temperature; struct acpi_handle_list devices; + unsigned long temperature; + bool valid; + bool enabled; }; struct acpi_thermal_trips { @@ -151,12 +129,6 @@ struct acpi_thermal_trips { struct acpi_thermal_active active[ACPI_THERMAL_MAX_ACTIVE]; }; -struct acpi_thermal_flags { - u8 cooling_mode:1; /* _SCP */ - u8 devices:1; /* _TZD */ - u8 reserved:6; -}; - struct acpi_thermal { struct acpi_device *device; acpi_bus_id name; @@ -164,8 +136,6 @@ struct acpi_thermal { unsigned long last_temperature; unsigned long polling_frequency; volatile u8 zombie; - struct acpi_thermal_flags flags; - struct acpi_thermal_state state; struct acpi_thermal_trips trips; struct acpi_handle_list devices; struct thermal_zone_device *thermal_zone; @@ -220,52 +190,12 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz) return 0; } -static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode) -{ - if (!tz) - return -EINVAL; - - if (ACPI_FAILURE(acpi_execute_simple_method(tz->device->handle, - "_SCP", mode))) - return -ENODEV; - - return 0; -} - -#define ACPI_TRIPS_CRITICAL 0x01 -#define ACPI_TRIPS_HOT 0x02 -#define ACPI_TRIPS_PASSIVE 0x04 -#define ACPI_TRIPS_ACTIVE 0x08 -#define ACPI_TRIPS_DEVICES 0x10 - -#define ACPI_TRIPS_REFRESH_THRESHOLDS (ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE) -#define ACPI_TRIPS_REFRESH_DEVICES ACPI_TRIPS_DEVICES - -#define ACPI_TRIPS_INIT 
(ACPI_TRIPS_CRITICAL | ACPI_TRIPS_HOT | \ - ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE | \ - ACPI_TRIPS_DEVICES) - -/* - * This exception is thrown out in two cases: - * 1.An invalid trip point becomes invalid or a valid trip point becomes invalid - * when re-evaluating the AML code. - * 2.TODO: Devices listed in _PSL, _ALx, _TZD may change. - * We need to re-bind the cooling devices of a thermal zone when this occurs. - */ -#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, tz, str) \ -do { \ - if (flags != ACPI_TRIPS_INIT) \ - acpi_handle_info(tz->device->handle, \ - "ACPI thermal trip point %s changed\n" \ - "Please report to linux-acpi@vger.kernel.org\n", str); \ -} while (0) - static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) { acpi_status status; unsigned long long tmp; struct acpi_handle_list devices; - int valid = 0; + bool valid = false; int i; /* Critical Shutdown */ @@ -279,21 +209,21 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) * ... so lets discard those as invalid. */ if (ACPI_FAILURE(status)) { - tz->trips.critical.flags.valid = 0; + tz->trips.critical.valid = false; acpi_handle_debug(tz->device->handle, "No critical threshold\n"); } else if (tmp <= 2732) { pr_info(FW_BUG "Invalid critical threshold (%llu)\n", tmp); - tz->trips.critical.flags.valid = 0; + tz->trips.critical.valid = false; } else { - tz->trips.critical.flags.valid = 1; + tz->trips.critical.valid = true; acpi_handle_debug(tz->device->handle, "Found critical threshold [%lu]\n", tz->trips.critical.temperature); } - if (tz->trips.critical.flags.valid) { + if (tz->trips.critical.valid) { if (crt == -1) { - tz->trips.critical.flags.valid = 0; + tz->trips.critical.valid = false; } else if (crt > 0) { unsigned long crt_k = celsius_to_deci_kelvin(crt); @@ -312,12 +242,12 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) if (flag & ACPI_TRIPS_HOT) { status = acpi_evaluate_integer(tz->device->handle, "_HOT", NULL, &tmp); if (ACPI_FAILURE(status)) { - tz->trips.hot.flags.valid = 0; + tz->trips.hot.valid = false; acpi_handle_debug(tz->device->handle, "No hot threshold\n"); } else { tz->trips.hot.temperature = tmp; - tz->trips.hot.flags.valid = 1; + tz->trips.hot.valid = true; acpi_handle_debug(tz->device->handle, "Found hot threshold [%lu]\n", tz->trips.hot.temperature); @@ -325,9 +255,9 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) } /* Passive (optional) */ - if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.flags.valid) || + if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.valid) || flag == ACPI_TRIPS_INIT) { - valid = tz->trips.passive.flags.valid; + valid = tz->trips.passive.valid; if (psv == -1) { status = AE_SUPPORT; } else if (psv > 0) { @@ -339,44 +269,44 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) } if (ACPI_FAILURE(status)) { - tz->trips.passive.flags.valid = 0; + tz->trips.passive.valid = false; } else { tz->trips.passive.temperature = tmp; - tz->trips.passive.flags.valid = 1; + tz->trips.passive.valid = true; if (flag == ACPI_TRIPS_INIT) { status = acpi_evaluate_integer(tz->device->handle, "_TC1", NULL, &tmp); if (ACPI_FAILURE(status)) - tz->trips.passive.flags.valid = 0; + tz->trips.passive.valid = false; else tz->trips.passive.tc1 = tmp; status = acpi_evaluate_integer(tz->device->handle, "_TC2", NULL, &tmp); if (ACPI_FAILURE(status)) - tz->trips.passive.flags.valid = 0; + tz->trips.passive.valid = false; else tz->trips.passive.tc2 = tmp; status = 
acpi_evaluate_integer(tz->device->handle, "_TSP", NULL, &tmp); if (ACPI_FAILURE(status)) - tz->trips.passive.flags.valid = 0; + tz->trips.passive.valid = false; else tz->trips.passive.tsp = tmp; } } } - if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.flags.valid) { + if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.valid) { memset(&devices, 0, sizeof(struct acpi_handle_list)); status = acpi_evaluate_reference(tz->device->handle, "_PSL", NULL, &devices); if (ACPI_FAILURE(status)) { acpi_handle_info(tz->device->handle, "Invalid passive threshold\n"); - tz->trips.passive.flags.valid = 0; + tz->trips.passive.valid = false; } else { - tz->trips.passive.flags.valid = 1; + tz->trips.passive.valid = true; } if (memcmp(&tz->trips.passive.devices, &devices, @@ -387,24 +317,24 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) } } if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) { - if (valid != tz->trips.passive.flags.valid) + if (valid != tz->trips.passive.valid) ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state"); } /* Active (optional) */ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { char name[5] = { '_', 'A', 'C', ('0' + i), '\0' }; - valid = tz->trips.active[i].flags.valid; + valid = tz->trips.active[i].valid; if (act == -1) break; /* disable all active trip points */ if (flag == ACPI_TRIPS_INIT || ((flag & ACPI_TRIPS_ACTIVE) && - tz->trips.active[i].flags.valid)) { + tz->trips.active[i].valid)) { status = acpi_evaluate_integer(tz->device->handle, name, NULL, &tmp); if (ACPI_FAILURE(status)) { - tz->trips.active[i].flags.valid = 0; + tz->trips.active[i].valid = false; if (i == 0) break; @@ -426,21 +356,21 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) break; } else { tz->trips.active[i].temperature = tmp; - tz->trips.active[i].flags.valid = 1; + tz->trips.active[i].valid = true; } } name[2] = 'L'; - if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid) { + if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].valid) { memset(&devices, 0, sizeof(struct acpi_handle_list)); status = acpi_evaluate_reference(tz->device->handle, name, NULL, &devices); if (ACPI_FAILURE(status)) { acpi_handle_info(tz->device->handle, "Invalid active%d threshold\n", i); - tz->trips.active[i].flags.valid = 0; + tz->trips.active[i].valid = false; } else { - tz->trips.active[i].flags.valid = 1; + tz->trips.active[i].valid = true; } if (memcmp(&tz->trips.active[i].devices, &devices, @@ -451,10 +381,10 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) } } if ((flag & ACPI_TRIPS_ACTIVE) || (flag & ACPI_TRIPS_DEVICES)) - if (valid != tz->trips.active[i].flags.valid) + if (valid != tz->trips.active[i].valid) ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state"); - if (!tz->trips.active[i].flags.valid) + if (!tz->trips.active[i].valid) break; } @@ -474,17 +404,18 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) static int acpi_thermal_get_trip_points(struct acpi_thermal *tz) { - int i, valid, ret = acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT); + int i, ret = acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT); + bool valid; if (ret) return ret; - valid = tz->trips.critical.flags.valid | - tz->trips.hot.flags.valid | - tz->trips.passive.flags.valid; + valid = tz->trips.critical.valid | + tz->trips.hot.valid | + tz->trips.passive.valid; for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) - valid |= tz->trips.active[i].flags.valid; + valid = valid || tz->trips.active[i].valid; if (!valid) { pr_warn(FW_BUG "No 
valid trip found\n"); @@ -521,7 +452,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal, if (!tz || trip < 0) return -EINVAL; - if (tz->trips.critical.flags.valid) { + if (tz->trips.critical.valid) { if (!trip) { *type = THERMAL_TRIP_CRITICAL; return 0; @@ -529,7 +460,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal, trip--; } - if (tz->trips.hot.flags.valid) { + if (tz->trips.hot.valid) { if (!trip) { *type = THERMAL_TRIP_HOT; return 0; @@ -537,7 +468,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal, trip--; } - if (tz->trips.passive.flags.valid) { + if (tz->trips.passive.valid) { if (!trip) { *type = THERMAL_TRIP_PASSIVE; return 0; @@ -545,7 +476,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal, trip--; } - for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid; i++) { + for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].valid; i++) { if (!trip) { *type = THERMAL_TRIP_ACTIVE; return 0; @@ -565,7 +496,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, if (!tz || trip < 0) return -EINVAL; - if (tz->trips.critical.flags.valid) { + if (tz->trips.critical.valid) { if (!trip) { *temp = deci_kelvin_to_millicelsius_with_offset( tz->trips.critical.temperature, @@ -575,7 +506,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, trip--; } - if (tz->trips.hot.flags.valid) { + if (tz->trips.hot.valid) { if (!trip) { *temp = deci_kelvin_to_millicelsius_with_offset( tz->trips.hot.temperature, @@ -585,7 +516,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, trip--; } - if (tz->trips.passive.flags.valid) { + if (tz->trips.passive.valid) { if (!trip) { *temp = deci_kelvin_to_millicelsius_with_offset( tz->trips.passive.temperature, @@ -596,7 +527,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, } for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && - tz->trips.active[i].flags.valid; i++) { + tz->trips.active[i].valid; i++) { if (!trip) { *temp = deci_kelvin_to_millicelsius_with_offset( tz->trips.active[i].temperature, @@ -614,7 +545,7 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal, { struct acpi_thermal *tz = thermal_zone_device_priv(thermal); - if (tz->trips.critical.flags.valid) { + if (tz->trips.critical.valid) { *temperature = deci_kelvin_to_millicelsius_with_offset( tz->trips.critical.temperature, tz->kelvin_offset); @@ -700,13 +631,13 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal, int trip = -1; int result = 0; - if (tz->trips.critical.flags.valid) + if (tz->trips.critical.valid) trip++; - if (tz->trips.hot.flags.valid) + if (tz->trips.hot.valid) trip++; - if (tz->trips.passive.flags.valid) { + if (tz->trips.passive.valid) { trip++; for (i = 0; i < tz->trips.passive.devices.count; i++) { handle = tz->trips.passive.devices.handles[i]; @@ -731,7 +662,7 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal, } for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { - if (!tz->trips.active[i].flags.valid) + if (!tz->trips.active[i].valid) break; trip++; @@ -819,19 +750,19 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz) acpi_status status; int i; - if (tz->trips.critical.flags.valid) + if (tz->trips.critical.valid) trips++; - if (tz->trips.hot.flags.valid) + if (tz->trips.hot.valid) trips++; - if (tz->trips.passive.flags.valid) + if (tz->trips.passive.valid) trips++; - for (i = 0; i < 
ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid; + for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].valid; i++, trips++); - if (tz->trips.passive.flags.valid) + if (tz->trips.passive.valid) tz->thermal_zone = thermal_zone_device_register("acpitz", trips, 0, tz, &acpi_thermal_zone_ops, NULL, tz->trips.passive.tsp * 100, @@ -906,13 +837,13 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event) acpi_queue_thermal_check(tz); break; case ACPI_THERMAL_NOTIFY_THRESHOLDS: - acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS); + acpi_thermal_trips_update(tz, ACPI_TRIPS_THRESHOLDS); acpi_queue_thermal_check(tz); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); break; case ACPI_THERMAL_NOTIFY_DEVICES: - acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES); + acpi_thermal_trips_update(tz, ACPI_TRIPS_DEVICES); acpi_queue_thermal_check(tz); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); @@ -976,9 +907,8 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz) return result; /* Set the cooling mode [_SCP] to active cooling (default) */ - result = acpi_thermal_set_cooling_mode(tz, ACPI_THERMAL_MODE_ACTIVE); - if (!result) - tz->flags.cooling_mode = 1; + acpi_execute_simple_method(tz->device->handle, "_SCP", + ACPI_THERMAL_MODE_ACTIVE); /* Get default polling frequency [_TZP] (optional) */ if (tzp) @@ -1001,7 +931,7 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz) */ static void acpi_thermal_guess_offset(struct acpi_thermal *tz) { - if (tz->trips.critical.flags.valid && + if (tz->trips.critical.valid && (tz->trips.critical.temperature % 5) == 1) tz->kelvin_offset = 273100; else @@ -1110,27 +1040,48 @@ static int acpi_thermal_resume(struct device *dev) return -EINVAL; for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { - if (!tz->trips.active[i].flags.valid) + if (!tz->trips.active[i].valid) break; - tz->trips.active[i].flags.enabled = 1; + tz->trips.active[i].enabled = true; for (j = 0; j < tz->trips.active[i].devices.count; j++) { result = acpi_bus_update_power( tz->trips.active[i].devices.handles[j], &power_state); if (result || (power_state != ACPI_STATE_D0)) { - tz->trips.active[i].flags.enabled = 0; + tz->trips.active[i].enabled = false; break; } } - tz->state.active |= tz->trips.active[i].flags.enabled; } acpi_queue_thermal_check(tz); return AE_OK; } +#else +#define acpi_thermal_suspend NULL +#define acpi_thermal_resume NULL #endif +static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, acpi_thermal_suspend, acpi_thermal_resume); + +static const struct acpi_device_id thermal_device_ids[] = { + {ACPI_THERMAL_HID, 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, thermal_device_ids); + +static struct acpi_driver acpi_thermal_driver = { + .name = "thermal", + .class = ACPI_THERMAL_CLASS, + .ids = thermal_device_ids, + .ops = { + .add = acpi_thermal_add, + .remove = acpi_thermal_remove, + .notify = acpi_thermal_notify, + }, + .drv.pm = &acpi_thermal_pm, +}; static int thermal_act(const struct dmi_system_id *d) { if (act == 0) { @@ -1236,3 +1187,7 @@ static void __exit acpi_thermal_exit(void) module_init(acpi_thermal_init); module_exit(acpi_thermal_exit); + +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION("ACPI Thermal Zone Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/tiny-power-button.c b/drivers/acpi/tiny-power-button.c index 598f548b21f3..6353be6fec69 100644 --- a/drivers/acpi/tiny-power-button.c +++ b/drivers/acpi/tiny-power-button.c @@ 
-19,18 +19,52 @@ static const struct acpi_device_id tiny_power_button_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, tiny_power_button_device_ids); -static int acpi_noop_add(struct acpi_device *device) +static void acpi_tiny_power_button_notify(acpi_handle handle, u32 event, void *data) { - return 0; + kill_cad_pid(power_signal, 1); } -static void acpi_noop_remove(struct acpi_device *device) +static void acpi_tiny_power_button_notify_run(void *not_used) { + acpi_tiny_power_button_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, NULL); } -static void acpi_tiny_power_button_notify(struct acpi_device *device, u32 event) +static u32 acpi_tiny_power_button_event(void *not_used) { - kill_cad_pid(power_signal, 1); + acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_tiny_power_button_notify_run, NULL); + return ACPI_INTERRUPT_HANDLED; +} + +static int acpi_tiny_power_button_add(struct acpi_device *device) +{ + acpi_status status; + + if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) { + status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, + acpi_tiny_power_button_event, + NULL); + } else { + status = acpi_install_notify_handler(device->handle, + ACPI_DEVICE_NOTIFY, + acpi_tiny_power_button_notify, + NULL); + } + if (ACPI_FAILURE(status)) + return -ENODEV; + + return 0; +} + +static void acpi_tiny_power_button_remove(struct acpi_device *device) +{ + if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) { + acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, + acpi_tiny_power_button_event); + } else { + acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + acpi_tiny_power_button_notify); + } + acpi_os_wait_events_complete(); } static struct acpi_driver acpi_tiny_power_button_driver = { @@ -38,9 +72,8 @@ static struct acpi_driver acpi_tiny_power_button_driver = { .class = "tiny-power-button", .ids = tiny_power_button_device_ids, .ops = { - .add = acpi_noop_add, - .remove = acpi_noop_remove, - .notify = acpi_tiny_power_button_notify, + .add = acpi_tiny_power_button_add, + .remove = acpi_tiny_power_button_remove, }, }; diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index bcc25d457581..18cc08c858cf 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -471,6 +471,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, }, { + .callback = video_detect_force_native, + /* Lenovo ThinkPad X131e (3371 AMD version) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "3371"), + }, + }, + { + .callback = video_detect_force_native, + /* Apple iMac11,3 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac11,3"), + }, + }, + { /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */ .callback = video_detect_force_native, /* Apple MacBook Pro 12,1 */ @@ -514,6 +530,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_native, + /* Dell Studio 1569 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1569"), + }, + }, + { + .callback = video_detect_force_native, /* Acer Aspire 3830TG */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), @@ -828,6 +852,27 @@ enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto if (native_available) return acpi_backlight_native; + /* + * The vendor specific BIOS interfaces are only necessary for + * laptops from before ~2008. 
+ * + * For laptops from ~2008 till ~2023 this point is never reached + * because on those (video_caps & ACPI_VIDEO_BACKLIGHT) above is true. + * + * Laptops from after ~2023 no longer support ACPI_VIDEO_BACKLIGHT, + * if this point is reached on those, this likely means that + * the GPU kms driver which sets native_available has not loaded yet. + * + * Returning acpi_backlight_vendor in this case is known to sometimes + * cause a non working vendor specific /sys/class/backlight device to + * get registered. + * + * Return acpi_backlight_none on laptops with ACPI tables written + * for Windows 8 (laptops from after ~2012) to avoid this problem. + */ + if (acpi_osi_is_win8()) + return acpi_backlight_none; + /* No ACPI video/native (old hw), use vendor specific fw methods. */ return acpi_backlight_vendor; } diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index e499c60c4579..ce62e61a9605 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -59,6 +59,7 @@ static int lps0_dsm_func_mask; static guid_t lps0_dsm_guid_microsoft; static int lps0_dsm_func_mask_microsoft; +static int lps0_dsm_state; /* Device constraint entry structure */ struct lpi_device_info { @@ -320,6 +321,44 @@ static void lpi_check_constraints(void) } } +static bool acpi_s2idle_vendor_amd(void) +{ + return boot_cpu_data.x86_vendor == X86_VENDOR_AMD; +} + +static const char *acpi_sleep_dsm_state_to_str(unsigned int state) +{ + if (lps0_dsm_func_mask_microsoft || !acpi_s2idle_vendor_amd()) { + switch (state) { + case ACPI_LPS0_SCREEN_OFF: + return "screen off"; + case ACPI_LPS0_SCREEN_ON: + return "screen on"; + case ACPI_LPS0_ENTRY: + return "lps0 entry"; + case ACPI_LPS0_EXIT: + return "lps0 exit"; + case ACPI_LPS0_MS_ENTRY: + return "lps0 ms entry"; + case ACPI_LPS0_MS_EXIT: + return "lps0 ms exit"; + } + } else { + switch (state) { + case ACPI_LPS0_SCREEN_ON_AMD: + return "screen on"; + case ACPI_LPS0_SCREEN_OFF_AMD: + return "screen off"; + case ACPI_LPS0_ENTRY_AMD: + return "lps0 entry"; + case ACPI_LPS0_EXIT_AMD: + return "lps0 exit"; + } + } + + return "unknown"; +} + static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid) { union acpi_object *out_obj; @@ -331,14 +370,15 @@ static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, g rev_id, func, NULL); ACPI_FREE(out_obj); - acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n", - func, out_obj ? "successful" : "failed"); + lps0_dsm_state = func; + if (pm_debug_messages_on) { + acpi_handle_info(lps0_device_handle, + "%s transitioned to state %s\n", + out_obj ? 
"Successfully" : "Failed to", + acpi_sleep_dsm_state_to_str(lps0_dsm_state)); + } } -static bool acpi_s2idle_vendor_amd(void) -{ - return boot_cpu_data.x86_vendor == X86_VENDOR_AMD; -} static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid) { @@ -485,11 +525,11 @@ int acpi_s2idle_prepare_late(void) ACPI_LPS0_ENTRY, lps0_dsm_func_mask, lps0_dsm_guid); if (lps0_dsm_func_mask_microsoft > 0) { - acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY, - lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); /* modern standby entry */ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); + acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY, + lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); } list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) { @@ -524,11 +564,6 @@ void acpi_s2idle_restore_early(void) if (handler->restore) handler->restore(); - /* Modern standby exit */ - if (lps0_dsm_func_mask_microsoft > 0) - acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, - lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); - /* LPS0 exit */ if (lps0_dsm_func_mask > 0) acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ? @@ -539,6 +574,11 @@ void acpi_s2idle_restore_early(void) acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); + /* Modern standby exit */ + if (lps0_dsm_func_mask_microsoft > 0) + acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, + lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); + /* Screen on */ if (lps0_dsm_func_mask_microsoft > 0) acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON, diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 9c2d6f35f88a..c2b925f8cd4e 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -259,10 +259,11 @@ bool force_storage_d3(void) * drivers/platform/x86/x86-android-tablets.c kernel module. 
*/ #define ACPI_QUIRK_SKIP_I2C_CLIENTS BIT(0) -#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(1) -#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(2) -#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(3) -#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(4) +#define ACPI_QUIRK_UART1_SKIP BIT(1) +#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(2) +#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(3) +#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(4) +#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(5) static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { /* @@ -319,6 +320,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_UART1_SKIP | ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, @@ -365,7 +367,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), }, { - /* Nextbook Ares 8 */ + /* Nextbook Ares 8 (BYT version)*/ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"), @@ -375,6 +377,16 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { + /* Nextbook Ares 8A (CHT version)*/ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"), + DMI_MATCH(DMI_BIOS_VERSION, "M882"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + }, + { /* Whitelabel (sold as various brands) TM800A550L */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), @@ -392,6 +404,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS) static const struct acpi_device_id i2c_acpi_known_good_ids[] = { { "10EC5640", 0 }, /* RealTek ALC5640 audio codec */ + { "10EC5651", 0 }, /* RealTek ALC5651 audio codec */ { "INT33F4", 0 }, /* X-Powers AXP288 PMIC */ { "INT33FD", 0 }, /* Intel Crystal Cove PMIC */ { "INT34D3", 0 }, /* Intel Whiskey Cove PMIC */ @@ -438,6 +451,9 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s if (dmi_id) quirks = (unsigned long)dmi_id->driver_data; + if ((quirks & ACPI_QUIRK_UART1_SKIP) && uid == 1) + *skip = true; + if (quirks & ACPI_QUIRK_UART1_TTY_UART2_SKIP) { if (uid == 1) return -ENODEV; /* Create tty cdev instead of serdev */ diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 8bf612bdd61a..b4f246f0cac7 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5348,7 +5348,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host) mutex_init(&ap->scsi_scan_mutex); INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); - INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); + INIT_DELAYED_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); INIT_LIST_HEAD(&ap->eh_done_q); init_waitqueue_head(&ap->eh_wait_q); init_completion(&ap->park_req_pending); @@ -5954,6 +5954,7 @@ static void ata_port_detach(struct ata_port *ap) WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); cancel_delayed_work_sync(&ap->hotplug_task); + cancel_delayed_work_sync(&ap->scsi_rescan_task); skip_eh: /* clean up zpodd on port removal */ diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index a6c901811802..6f8d14191593 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2984,7 +2984,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, ehc->i.flags |= ATA_EHI_SETMODE; 
/* schedule the scsi_rescan_device() here */ - schedule_work(&(ap->scsi_rescan_task)); + schedule_delayed_work(&ap->scsi_rescan_task, 0); } else if (dev->class == ATA_DEV_UNKNOWN && ehc->tries[dev->devno] && ata_class_enabled(ehc->classes[dev->devno])) { diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 8ce90284eb34..551077cea4e4 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -4597,10 +4597,11 @@ int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, void ata_scsi_dev_rescan(struct work_struct *work) { struct ata_port *ap = - container_of(work, struct ata_port, scsi_rescan_task); + container_of(work, struct ata_port, scsi_rescan_task.work); struct ata_link *link; struct ata_device *dev; unsigned long flags; + bool delay_rescan = false; mutex_lock(&ap->scsi_scan_mutex); spin_lock_irqsave(ap->lock, flags); @@ -4614,6 +4615,21 @@ void ata_scsi_dev_rescan(struct work_struct *work) if (scsi_device_get(sdev)) continue; + /* + * If the rescan work was scheduled because of a resume + * event, the port is already fully resumed, but the + * SCSI device may not yet be fully resumed. In such + * case, executing scsi_rescan_device() may cause a + * deadlock with the PM code on device_lock(). Prevent + * this by giving up and retrying rescan after a short + * delay. + */ + delay_rescan = sdev->sdev_gendev.power.is_suspended; + if (delay_rescan) { + scsi_device_put(sdev); + break; + } + spin_unlock_irqrestore(ap->lock, flags); scsi_rescan_device(&(sdev->sdev_gendev)); scsi_device_put(sdev); @@ -4623,4 +4639,8 @@ void ata_scsi_dev_rescan(struct work_struct *work) spin_unlock_irqrestore(ap->lock, flags); mutex_unlock(&ap->scsi_scan_mutex); + + if (delay_rescan) + schedule_delayed_work(&ap->scsi_rescan_task, + msecs_to_jiffies(5)); } diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index 02425991c159..d44814b9562a 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -820,7 +820,7 @@ static const struct of_device_id ht16k33_of_match[] = { MODULE_DEVICE_TABLE(of, ht16k33_of_match); static struct i2c_driver ht16k33_driver = { - .probe_new = ht16k33_probe, + .probe = ht16k33_probe, .remove = ht16k33_remove, .driver = { .name = DRIVER_NAME, diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c index 135831a16514..6422be0dfe20 100644 --- a/drivers/auxdisplay/lcd2s.c +++ b/drivers/auxdisplay/lcd2s.c @@ -365,7 +365,7 @@ static struct i2c_driver lcd2s_i2c_driver = { .name = "lcd2s", .of_match_table = lcd2s_of_table, }, - .probe_new = lcd2s_i2c_probe, + .probe = lcd2s_i2c_probe, .remove = lcd2s_i2c_remove, .id_table = lcd2s_i2c_id, }; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 9c09ca5c4ab6..878aa7646b37 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -751,14 +751,12 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv) * * Should somehow figure out how to use a semaphore, not an atomic variable... 
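The two auxdisplay hunks above are part of the subsystem-wide i2c rename in which the single-argument probe callback moved from .probe_new back to .probe. A sketch of the resulting driver shape, with hypothetical demo names:

#include <linux/i2c.h>
#include <linux/module.h>

static int demo_probe(struct i2c_client *client)
{
        /* single-argument form; the old two-argument .probe is gone */
        return 0;
}

static struct i2c_driver demo_driver = {
        .driver = {
                .name = "demo",
        },
        .probe = demo_probe,    /* spelled .probe_new before the rename */
};
module_i2c_driver(demo_driver);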
*/ -int driver_probe_done(void) +bool __init driver_probe_done(void) { int local_probe_count = atomic_read(&probe_count); pr_debug("%s: probe_count = %d\n", __func__, local_probe_count); - if (local_probe_count) - return -EBUSY; - return 0; + return !local_probe_count; } /** diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 5c998cfac335..3df0025d12aa 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -29,10 +29,10 @@ struct devres { * Some archs want to perform DMA into kmalloc caches * and need a guaranteed alignment larger than * the alignment of a 64-bit integer. - * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same - * buffer alignment as if it was allocated by plain kmalloc(). + * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same + * alignment for struct devres when allocated by kmalloc(). */ - u8 __aligned(ARCH_KMALLOC_MINALIGN) data[]; + u8 __aligned(ARCH_DMA_MINALIGN) data[]; }; struct devres_group { diff --git a/drivers/base/node.c b/drivers/base/node.c index b46db17124f3..655975946ef6 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -449,6 +449,9 @@ static ssize_t node_read_meminfo(struct device *dev, "Node %d FileHugePages: %8lu kB\n" "Node %d FilePmdMapped: %8lu kB\n" #endif +#ifdef CONFIG_UNACCEPTED_MEMORY + "Node %d Unaccepted: %8lu kB\n" +#endif , nid, K(node_page_state(pgdat, NR_FILE_DIRTY)), nid, K(node_page_state(pgdat, NR_WRITEBACK)), @@ -478,6 +481,10 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(node_page_state(pgdat, NR_FILE_THPS)), nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED)) #endif +#ifdef CONFIG_UNACCEPTED_MEMORY + , + nid, K(sum_zone_node_page_state(nid, NR_UNACCEPTED)) +#endif ); len += hugetlb_report_node_meminfo(buf, len, nid); return len; diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 32084e38b73d..5cb2023581d4 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1632,9 +1632,6 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, dev_dbg(dev, "%s()\n", __func__); - if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) - return -EINVAL; - gpd_data = genpd_alloc_dev_data(dev, gd); if (IS_ERR(gpd_data)) return PTR_ERR(gpd_data); @@ -1676,6 +1673,9 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) { int ret; + if (!genpd || !dev) + return -EINVAL; + mutex_lock(&gpd_list_lock); ret = genpd_add_device(genpd, dev, dev); mutex_unlock(&gpd_list_lock); @@ -2523,6 +2523,9 @@ int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev) struct generic_pm_domain *genpd; int ret; + if (!dev) + return -EINVAL; + mutex_lock(&gpd_list_lock); genpd = genpd_get_from_provider(genpdspec); @@ -2939,10 +2942,10 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, err = of_property_read_u32(state_node, "min-residency-us", &residency); if (!err) - genpd_state->residency_ns = 1000 * residency; + genpd_state->residency_ns = 1000LL * residency; - genpd_state->power_on_latency_ns = 1000 * exit_latency; - genpd_state->power_off_latency_ns = 1000 * entry_latency; + genpd_state->power_on_latency_ns = 1000LL * exit_latency; + genpd_state->power_off_latency_ns = 1000LL * entry_latency; genpd_state->fwnode = &state_node->fwnode; return 0; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 7cc0c0cf8eaa..a917219feea6 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -19,11 +19,6 @@ #include "power.h" 
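The genpd hunk above switches the latency conversions to 1000LL because with a plain int constant the multiplication is performed in 32 bits and only the already-wrapped result is widened into the s64 field. A freestanding C illustration of the difference (userspace, for demonstration only):

#include <stdio.h>

int main(void)
{
        unsigned int residency_us = 5000000;       /* 5 s, as firmware might specify */
        long long wrong = 1000 * residency_us;     /* 32-bit multiply wraps, then widens */
        long long right = 1000LL * residency_us;   /* 64-bit multiply from the start */

        printf("wrong=%lld right=%lld\n", wrong, right);  /* 705032704 vs 5000000000 */
        return 0;
}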
-#ifndef CONFIG_SUSPEND -suspend_state_t pm_suspend_target_state; -#define pm_suspend_target_state (PM_SUSPEND_ON) -#endif - #define list_for_each_entry_rcu_locked(pos, head, member) \ list_for_each_entry_rcu(pos, head, member, \ srcu_read_lock_held(&wakeup_srcu)) diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 1f52646159df..28bc3ae9458a 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -284,6 +284,9 @@ bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg, { int ret; + if (!regmap_writeable(map, reg)) + return false; + /* If we don't know the chip just got reset, then sync everything. */ if (!map->no_sync_defaults) return true; diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c index 4c2b94b3e30b..6af692844c19 100644 --- a/drivers/base/regmap/regmap-spi-avmm.c +++ b/drivers/base/regmap/regmap-spi-avmm.c @@ -660,7 +660,7 @@ static const struct regmap_bus regmap_spi_avmm_bus = { .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, .val_format_endian_default = REGMAP_ENDIAN_NATIVE, .max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT, - .max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT, + .max_raw_write = SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT, .free_context = spi_avmm_bridge_ctx_free, }; diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 4c8b2ba579ee..e460c9799d9f 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1532,7 +1532,7 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, +static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { struct amiga_floppy_struct *p = bdev->bd_disk->private_data; @@ -1607,7 +1607,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, return 0; } -static int fd_ioctl(struct block_device *bdev, fmode_t mode, +static int fd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { int ret; @@ -1654,10 +1654,10 @@ static void fd_probe(int dev) * /dev/PS0 etc), and disallows simultaneous access to the same * drive with different device numbers. 
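The floppy-driver conversions that follow all target the same block-layer interface change: ->open() now receives the gendisk and blk_mode_t flags instead of a block_device and fmode_t, and ->release() drops its mode argument. A minimal sketch with demo names:

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/printk.h>

static int demo_open(struct gendisk *disk, blk_mode_t mode)
{
        /* media-change detection moved from the bdev to the gendisk */
        if ((mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) &&
            disk_check_media_change(disk))   /* replaces bdev_check_media_change() */
                pr_info("demo: media changed\n");
        return 0;
}

static void demo_release(struct gendisk *disk)
{
        /* no mode argument any more */
}

static const struct block_device_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .release = demo_release,
};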
*/ -static int floppy_open(struct block_device *bdev, fmode_t mode) +static int floppy_open(struct gendisk *disk, blk_mode_t mode) { - int drive = MINOR(bdev->bd_dev) & 3; - int system = (MINOR(bdev->bd_dev) & 4) >> 2; + int drive = disk->first_minor & 3; + int system = (disk->first_minor & 4) >> 2; int old_dev; unsigned long flags; @@ -1673,10 +1673,9 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) mutex_unlock(&amiflop_mutex); return -ENXIO; } - - if (mode & (FMODE_READ|FMODE_WRITE)) { - bdev_check_media_change(bdev); - if (mode & FMODE_WRITE) { + if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) { + disk_check_media_change(disk); + if (mode & BLK_OPEN_WRITE) { int wrprot; get_fdc(drive); @@ -1691,7 +1690,6 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) } } } - local_irq_save(flags); fd_ref[drive]++; fd_device[drive] = system; @@ -1709,7 +1707,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) return 0; } -static void floppy_release(struct gendisk *disk, fmode_t mode) +static void floppy_release(struct gendisk *disk) { struct amiga_floppy_struct *p = disk->private_data; int drive = p - unit; diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 128722cf6c3c..cf6883756155 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -204,9 +204,9 @@ aoedisk_rm_debugfs(struct aoedev *d) } static int -aoeblk_open(struct block_device *bdev, fmode_t mode) +aoeblk_open(struct gendisk *disk, blk_mode_t mode) { - struct aoedev *d = bdev->bd_disk->private_data; + struct aoedev *d = disk->private_data; ulong flags; if (!virt_addr_valid(d)) { @@ -232,7 +232,7 @@ aoeblk_open(struct block_device *bdev, fmode_t mode) } static void -aoeblk_release(struct gendisk *disk, fmode_t mode) +aoeblk_release(struct gendisk *disk) { struct aoedev *d = disk->private_data; ulong flags; @@ -285,7 +285,7 @@ aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo) } static int -aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg) +aoeblk_ioctl(struct block_device *bdev, blk_mode_t mode, uint cmd, ulong arg) { struct aoedev *d; diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c index 4c666f72203f..a42c4bcc85ba 100644 --- a/drivers/block/aoe/aoechr.c +++ b/drivers/block/aoe/aoechr.c @@ -49,7 +49,7 @@ static int emsgs_head_idx, emsgs_tail_idx; static struct completion emsgs_comp; static spinlock_t emsgs_lock; static int nblocked_emsgs_readers; -static struct class *aoe_class; + static struct aoe_chardev chardevs[] = { { MINOR_ERR, "err" }, { MINOR_DISCOVER, "discover" }, @@ -58,6 +58,16 @@ static struct aoe_chardev chardevs[] = { { MINOR_FLUSH, "flush" }, }; +static char *aoe_devnode(const struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev)); +} + +static const struct class aoe_class = { + .name = "aoe", + .devnode = aoe_devnode, +}; + static int discover(void) { @@ -273,11 +283,6 @@ static const struct file_operations aoe_fops = { .llseek = noop_llseek, }; -static char *aoe_devnode(const struct device *dev, umode_t *mode) -{ - return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev)); -} - int __init aoechr_init(void) { @@ -290,15 +295,14 @@ aoechr_init(void) } init_completion(&emsgs_comp); spin_lock_init(&emsgs_lock); - aoe_class = class_create("aoe"); - if (IS_ERR(aoe_class)) { + n = class_register(&aoe_class); + if (n) { unregister_chrdev(AOE_MAJOR, "aoechr"); - return PTR_ERR(aoe_class); + return n; } - aoe_class->devnode = aoe_devnode; for 
(i = 0; i < ARRAY_SIZE(chardevs); ++i) - device_create(aoe_class, NULL, + device_create(&aoe_class, NULL, MKDEV(AOE_MAJOR, chardevs[i].minor), NULL, chardevs[i].name); @@ -311,8 +315,8 @@ aoechr_exit(void) int i; for (i = 0; i < ARRAY_SIZE(chardevs); ++i) - device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor)); - class_destroy(aoe_class); + device_destroy(&aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor)); + class_unregister(&aoe_class); unregister_chrdev(AOE_MAJOR, "aoechr"); } diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 9deb4df6bdb8..cd738cab725f 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -442,13 +442,13 @@ static void fd_times_out(struct timer_list *unused); static void finish_fdc( void ); static void finish_fdc_done( int dummy ); static void setup_req_params( int drive ); -static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int - cmd, unsigned long param); +static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long param); static void fd_probe( int drive ); static int fd_test_drive_present( int drive ); static void config_types( void ); -static int floppy_open(struct block_device *bdev, fmode_t mode); -static void floppy_release(struct gendisk *disk, fmode_t mode); +static int floppy_open(struct gendisk *disk, blk_mode_t mode); +static void floppy_release(struct gendisk *disk); /************************* End of Prototypes **************************/ @@ -1581,7 +1581,7 @@ out: return BLK_STS_OK; } -static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, +static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { struct gendisk *disk = bdev->bd_disk; @@ -1760,15 +1760,15 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, /* invalidate the buffer track to force a reread */ BufferDrive = -1; set_bit(drive, &fake_change); - if (bdev_check_media_change(bdev)) - floppy_revalidate(bdev->bd_disk); + if (disk_check_media_change(disk)) + floppy_revalidate(disk); return 0; default: return -EINVAL; } } -static int fd_ioctl(struct block_device *bdev, fmode_t mode, +static int fd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { int ret; @@ -1915,32 +1915,31 @@ static void __init config_types( void ) * drive with different device numbers. 
*/ -static int floppy_open(struct block_device *bdev, fmode_t mode) +static int floppy_open(struct gendisk *disk, blk_mode_t mode) { - struct atari_floppy_struct *p = bdev->bd_disk->private_data; - int type = MINOR(bdev->bd_dev) >> 2; + struct atari_floppy_struct *p = disk->private_data; + int type = disk->first_minor >> 2; DPRINT(("fd_open: type=%d\n",type)); if (p->ref && p->type != type) return -EBUSY; - if (p->ref == -1 || (p->ref && mode & FMODE_EXCL)) + if (p->ref == -1 || (p->ref && mode & BLK_OPEN_EXCL)) return -EBUSY; - - if (mode & FMODE_EXCL) + if (mode & BLK_OPEN_EXCL) p->ref = -1; else p->ref++; p->type = type; - if (mode & FMODE_NDELAY) + if (mode & BLK_OPEN_NDELAY) return 0; - if (mode & (FMODE_READ|FMODE_WRITE)) { - if (bdev_check_media_change(bdev)) - floppy_revalidate(bdev->bd_disk); - if (mode & FMODE_WRITE) { + if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) { + if (disk_check_media_change(disk)) + floppy_revalidate(disk); + if (mode & BLK_OPEN_WRITE) { if (p->wpstat) { if (p->ref < 0) p->ref = 0; @@ -1953,18 +1952,18 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) return 0; } -static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) +static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode) { int ret; mutex_lock(&ataflop_mutex); - ret = floppy_open(bdev, mode); + ret = floppy_open(disk, mode); mutex_unlock(&ataflop_mutex); return ret; } -static void floppy_release(struct gendisk *disk, fmode_t mode) +static void floppy_release(struct gendisk *disk) { struct atari_floppy_struct *p = disk->private_data; mutex_lock(&ataflop_mutex); diff --git a/drivers/block/brd.c b/drivers/block/brd.c index bcad9b926b0c..970bd6ff38c4 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -19,7 +19,7 @@ #include <linux/highmem.h> #include <linux/mutex.h> #include <linux/pagemap.h> -#include <linux/radix-tree.h> +#include <linux/xarray.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/backing-dev.h> @@ -28,7 +28,7 @@ #include <linux/uaccess.h> /* - * Each block ramdisk device has a radix_tree brd_pages of pages that stores + * Each block ramdisk device has a xarray brd_pages of pages that stores * the pages containing the block device's contents. A brd page's ->index is * its offset in PAGE_SIZE units. This is similar to, but in no way connected * with, the kernel's pagecache or buffer cache (which sit above our block @@ -40,11 +40,9 @@ struct brd_device { struct list_head brd_list; /* - * Backing store of pages and lock to protect it. This is the contents - * of the block device. + * Backing store of pages. This is the contents of the block device. */ - spinlock_t brd_lock; - struct radix_tree_root brd_pages; + struct xarray brd_pages; u64 brd_nr_pages; }; @@ -56,21 +54,8 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) pgoff_t idx; struct page *page; - /* - * The page lifetime is protected by the fact that we have opened the - * device node -- brd pages will never be deleted under us, so we - * don't need any further locking or refcounting. - * - * This is strictly true for the radix-tree nodes as well (ie. we - * don't actually need the rcu_read_lock()), however that is not a - * documented feature of the radix-tree API so it is better to be - * safe here (we don't have total exclusion from radix tree updates - * here, only deletes). 
- */ - rcu_read_lock(); idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */ - page = radix_tree_lookup(&brd->brd_pages, idx); - rcu_read_unlock(); + page = xa_load(&brd->brd_pages, idx); BUG_ON(page && page->index != idx); @@ -83,7 +68,7 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp) { pgoff_t idx; - struct page *page; + struct page *page, *cur; int ret = 0; page = brd_lookup_page(brd, sector); @@ -94,71 +79,42 @@ static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp) if (!page) return -ENOMEM; - if (radix_tree_maybe_preload(gfp)) { - __free_page(page); - return -ENOMEM; - } + xa_lock(&brd->brd_pages); - spin_lock(&brd->brd_lock); idx = sector >> PAGE_SECTORS_SHIFT; page->index = idx; - if (radix_tree_insert(&brd->brd_pages, idx, page)) { + + cur = __xa_cmpxchg(&brd->brd_pages, idx, NULL, page, gfp); + + if (unlikely(cur)) { __free_page(page); - page = radix_tree_lookup(&brd->brd_pages, idx); - if (!page) - ret = -ENOMEM; - else if (page->index != idx) + ret = xa_err(cur); + if (!ret && (cur->index != idx)) ret = -EIO; } else { brd->brd_nr_pages++; } - spin_unlock(&brd->brd_lock); - radix_tree_preload_end(); + xa_unlock(&brd->brd_pages); + return ret; } /* - * Free all backing store pages and radix tree. This must only be called when + * Free all backing store pages and xarray. This must only be called when * there are no other users of the device. */ -#define FREE_BATCH 16 static void brd_free_pages(struct brd_device *brd) { - unsigned long pos = 0; - struct page *pages[FREE_BATCH]; - int nr_pages; - - do { - int i; - - nr_pages = radix_tree_gang_lookup(&brd->brd_pages, - (void **)pages, pos, FREE_BATCH); - - for (i = 0; i < nr_pages; i++) { - void *ret; - - BUG_ON(pages[i]->index < pos); - pos = pages[i]->index; - ret = radix_tree_delete(&brd->brd_pages, pos); - BUG_ON(!ret || ret != pages[i]); - __free_page(pages[i]); - } - - pos++; + struct page *page; + pgoff_t idx; - /* - * It takes 3.4 seconds to remove 80GiB ramdisk. - * So, we need cond_resched to avoid stalling the CPU. - */ + xa_for_each(&brd->brd_pages, idx, page) { + __free_page(page); cond_resched(); + } - /* - * This assumes radix_tree_gang_lookup always returns as - * many pages as possible. If the radix-tree code changes, - * so will this have to. - */ - } while (nr_pages == FREE_BATCH); + xa_destroy(&brd->brd_pages); } /* @@ -372,8 +328,7 @@ static int brd_alloc(int i) brd->brd_number = i; list_add_tail(&brd->brd_list, &brd_devices); - spin_lock_init(&brd->brd_lock); - INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC); + xa_init(&brd->brd_pages); snprintf(buf, DISK_NAME_LEN, "ram%d", i); if (!IS_ERR_OR_NULL(brd_debugfs_dir)) diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 6ac8c54b44c7..85ca000a0564 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -1043,9 +1043,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, GFP_NOIO, &drbd_md_io_bio_set); bio->bi_iter.bi_sector = on_disk_sector; - /* bio_add_page of a single page to an empty bio will always succeed, - * according to api. Do we want to assert that? 
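The brd rework above is a textbook radix-tree-to-xarray conversion: the external spinlock disappears because the xarray carries its own lock, and insert-versus-race detection collapses into a single __xa_cmpxchg() call. A simplified standalone sketch of the two core operations (demo names; brd's error handling differs slightly):

#include <linux/xarray.h>
#include <linux/mm.h>
#include <linux/sched.h>

static DEFINE_XARRAY(demo_pages);

static int demo_insert(pgoff_t idx, struct page *page, gfp_t gfp)
{
        struct page *cur;
        int ret = 0;

        xa_lock(&demo_pages);
        /* store only if the slot is still empty */
        cur = __xa_cmpxchg(&demo_pages, idx, NULL, page, gfp);
        if (cur) {
                ret = xa_err(cur);
                if (!ret)
                        ret = -EEXIST;  /* someone else populated the slot */
        }
        xa_unlock(&demo_pages);
        return ret;
}

static void demo_free_all(void)
{
        struct page *page;
        unsigned long idx;

        xa_for_each(&demo_pages, idx, page) {
                __free_page(page);
                cond_resched();  /* large ramdisks take a while to tear down */
        }
        xa_destroy(&demo_pages);
}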
*/ - bio_add_page(bio, page, len, 0); + __bio_add_page(bio, page, len, 0); bio->bi_private = ctx; bio->bi_end_io = drbd_bm_endio; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 83987e7a5ef2..965f672557f2 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -37,7 +37,6 @@ #include <linux/notifier.h> #include <linux/kthread.h> #include <linux/workqueue.h> -#define __KERNEL_SYSCALLS__ #include <linux/unistd.h> #include <linux/vmalloc.h> #include <linux/sched/signal.h> @@ -50,8 +49,8 @@ #include "drbd_debugfs.h" static DEFINE_MUTEX(drbd_main_mutex); -static int drbd_open(struct block_device *bdev, fmode_t mode); -static void drbd_release(struct gendisk *gd, fmode_t mode); +static int drbd_open(struct gendisk *disk, blk_mode_t mode); +static void drbd_release(struct gendisk *gd); static void md_sync_timer_fn(struct timer_list *t); static int w_bitmap_io(struct drbd_work *w, int unused); @@ -1883,9 +1882,9 @@ int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void return 0; } -static int drbd_open(struct block_device *bdev, fmode_t mode) +static int drbd_open(struct gendisk *disk, blk_mode_t mode) { - struct drbd_device *device = bdev->bd_disk->private_data; + struct drbd_device *device = disk->private_data; unsigned long flags; int rv = 0; @@ -1895,7 +1894,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode) * and no race with updating open_cnt */ if (device->state.role != R_PRIMARY) { - if (mode & FMODE_WRITE) + if (mode & BLK_OPEN_WRITE) rv = -EROFS; else if (!drbd_allow_oos) rv = -EMEDIUMTYPE; @@ -1909,9 +1908,10 @@ static int drbd_open(struct block_device *bdev, fmode_t mode) return rv; } -static void drbd_release(struct gendisk *gd, fmode_t mode) +static void drbd_release(struct gendisk *gd) { struct drbd_device *device = gd->private_data; + mutex_lock(&drbd_main_mutex); device->open_cnt--; mutex_unlock(&drbd_main_mutex); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 1a5d3d72d91d..cddae6f4b00f 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1640,8 +1640,8 @@ static struct block_device *open_backing_dev(struct drbd_device *device, struct block_device *bdev; int err = 0; - bdev = blkdev_get_by_path(bdev_path, - FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr); + bdev = blkdev_get_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE, + claim_ptr, NULL); if (IS_ERR(bdev)) { drbd_err(device, "open(\"%s\") failed with %ld\n", bdev_path, PTR_ERR(bdev)); @@ -1653,7 +1653,7 @@ static struct block_device *open_backing_dev(struct drbd_device *device, err = bd_link_disk_holder(bdev, device->vdisk); if (err) { - blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(bdev, claim_ptr); drbd_err(device, "bd_link_disk_holder(\"%s\", ...) 
failed with %d\n", bdev_path, err); bdev = ERR_PTR(err); @@ -1695,13 +1695,13 @@ static int open_backing_devices(struct drbd_device *device, } static void close_backing_dev(struct drbd_device *device, struct block_device *bdev, - bool do_bd_unlink) + void *claim_ptr, bool do_bd_unlink) { if (!bdev) return; if (do_bd_unlink) bd_unlink_disk_holder(bdev, device->vdisk); - blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(bdev, claim_ptr); } void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev) @@ -1709,8 +1709,11 @@ void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev * if (ldev == NULL) return; - close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev); - close_backing_dev(device, ldev->backing_bdev, true); + close_backing_dev(device, ldev->md_bdev, + ldev->md.meta_dev_idx < 0 ? + (void *)device : (void *)drbd_m_holder, + ldev->md_bdev != ldev->backing_bdev); + close_backing_dev(device, ldev->backing_bdev, device, true); kfree(ldev->disk_conf); kfree(ldev); @@ -2126,8 +2129,11 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) fail: conn_reconfig_done(connection); if (nbc) { - close_backing_dev(device, nbc->md_bdev, nbc->md_bdev != nbc->backing_bdev); - close_backing_dev(device, nbc->backing_bdev, true); + close_backing_dev(device, nbc->md_bdev, + nbc->disk_conf->meta_dev_idx < 0 ? + (void *)device : (void *)drbd_m_holder, + nbc->md_bdev != nbc->backing_bdev); + close_backing_dev(device, nbc->backing_bdev, device, true); kfree(nbc); } kfree(new_disk_conf); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 8c2bc47de473..0c9f54197768 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -27,7 +27,6 @@ #include <uapi/linux/sched/types.h> #include <linux/sched/signal.h> #include <linux/pkt_sched.h> -#define __KERNEL_SYSCALLS__ #include <linux/unistd.h> #include <linux/vmalloc.h> #include <linux/random.h> diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index cec2c20f5e59..2db9b186b977 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -402,7 +402,7 @@ static struct floppy_drive_struct drive_state[N_DRIVE]; static struct floppy_write_errors write_errors[N_DRIVE]; static struct timer_list motor_off_timer[N_DRIVE]; static struct blk_mq_tag_set tag_sets[N_DRIVE]; -static struct block_device *opened_bdev[N_DRIVE]; +static struct gendisk *opened_disk[N_DRIVE]; static DEFINE_MUTEX(open_lock); static struct floppy_raw_cmd *raw_cmd, default_raw_cmd; @@ -3210,13 +3210,13 @@ static int floppy_raw_cmd_ioctl(int type, int drive, int cmd, #endif -static int invalidate_drive(struct block_device *bdev) +static int invalidate_drive(struct gendisk *disk) { /* invalidate the buffer track to force a reread */ - set_bit((long)bdev->bd_disk->private_data, &fake_change); + set_bit((long)disk->private_data, &fake_change); process_fd_request(); - if (bdev_check_media_change(bdev)) - floppy_revalidate(bdev->bd_disk); + if (disk_check_media_change(disk)) + floppy_revalidate(disk); return 0; } @@ -3251,10 +3251,11 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, floppy_type[type].size + 1; process_fd_request(); for (cnt = 0; cnt < N_DRIVE; cnt++) { - struct block_device *bdev = opened_bdev[cnt]; - if (!bdev || ITYPE(drive_state[cnt].fd_device) != type) + struct gendisk *disk = opened_disk[cnt]; + + if (!disk || ITYPE(drive_state[cnt].fd_device) != type) continue; - 
__invalidate_device(bdev, true); + __invalidate_device(disk->part0, true); } mutex_unlock(&open_lock); } else { @@ -3287,7 +3288,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, drive_state[current_drive].maxtrack || ((user_params[drive].sect ^ oldStretch) & (FD_SWAPSIDES | FD_SECTBASEMASK))) - invalidate_drive(bdev); + invalidate_drive(bdev->bd_disk); else process_fd_request(); } @@ -3393,8 +3394,8 @@ static bool valid_floppy_drive_params(const short autodetect[FD_AUTODETECT_SIZE] return true; } -static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, - unsigned long param) +static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long param) { int drive = (long)bdev->bd_disk->private_data; int type = ITYPE(drive_state[drive].fd_device); @@ -3427,7 +3428,8 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int return ret; /* permission checks */ - if (((cmd & 0x40) && !(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL))) || + if (((cmd & 0x40) && + !(mode & (BLK_OPEN_WRITE | BLK_OPEN_WRITE_IOCTL))) || ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))) return -EPERM; @@ -3464,7 +3466,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int current_type[drive] = NULL; floppy_sizes[drive] = MAX_DISK_SIZE << 1; drive_state[drive].keep_data = 0; - return invalidate_drive(bdev); + return invalidate_drive(bdev->bd_disk); case FDSETPRM: case FDDEFPRM: return set_geometry(cmd, &inparam.g, drive, type, bdev); @@ -3503,7 +3505,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int case FDFLUSH: if (lock_fdc(drive)) return -EINTR; - return invalidate_drive(bdev); + return invalidate_drive(bdev->bd_disk); case FDSETEMSGTRESH: drive_params[drive].max_errors.reporting = (unsigned short)(param & 0x0f); return 0; @@ -3565,7 +3567,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int return 0; } -static int fd_ioctl(struct block_device *bdev, fmode_t mode, +static int fd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { int ret; @@ -3653,8 +3655,8 @@ struct compat_floppy_write_errors { #define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state) #define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors) -static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned int cmd, - struct compat_floppy_struct __user *arg) +static int compat_set_geometry(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, struct compat_floppy_struct __user *arg) { struct floppy_struct v; int drive, type; @@ -3663,7 +3665,7 @@ static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned BUILD_BUG_ON(offsetof(struct floppy_struct, name) != offsetof(struct compat_floppy_struct, name)); - if (!(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL))) + if (!(mode & (BLK_OPEN_WRITE | BLK_OPEN_WRITE_IOCTL))) return -EPERM; memset(&v, 0, sizeof(struct floppy_struct)); @@ -3860,8 +3862,8 @@ static int compat_werrorget(int drive, return 0; } -static int fd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, - unsigned long param) +static int fd_compat_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long param) { int drive = (long)bdev->bd_disk->private_data; switch (cmd) { @@ -3962,7 +3964,7 @@ static void __init config_types(void) pr_cont("\n"); } -static void floppy_release(struct gendisk 
*disk, fmode_t mode) +static void floppy_release(struct gendisk *disk) { int drive = (long)disk->private_data; @@ -3973,7 +3975,7 @@ static void floppy_release(struct gendisk *disk, fmode_t mode) drive_state[drive].fd_ref = 0; } if (!drive_state[drive].fd_ref) - opened_bdev[drive] = NULL; + opened_disk[drive] = NULL; mutex_unlock(&open_lock); mutex_unlock(&floppy_mutex); } @@ -3983,9 +3985,9 @@ static void floppy_release(struct gendisk *disk, fmode_t mode) * /dev/PS0 etc), and disallows simultaneous access to the same * drive with different device numbers. */ -static int floppy_open(struct block_device *bdev, fmode_t mode) +static int floppy_open(struct gendisk *disk, blk_mode_t mode) { - int drive = (long)bdev->bd_disk->private_data; + int drive = (long)disk->private_data; int old_dev, new_dev; int try; int res = -EBUSY; @@ -3994,7 +3996,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) mutex_lock(&floppy_mutex); mutex_lock(&open_lock); old_dev = drive_state[drive].fd_device; - if (opened_bdev[drive] && opened_bdev[drive] != bdev) + if (opened_disk[drive] && opened_disk[drive] != disk) goto out2; if (!drive_state[drive].fd_ref && (drive_params[drive].flags & FD_BROKEN_DCL)) { @@ -4004,7 +4006,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) drive_state[drive].fd_ref++; - opened_bdev[drive] = bdev; + opened_disk[drive] = disk; res = -ENXIO; @@ -4038,7 +4040,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) } } - new_dev = MINOR(bdev->bd_dev); + new_dev = disk->first_minor; drive_state[drive].fd_device = new_dev; set_capacity(disks[drive][ITYPE(new_dev)], floppy_sizes[new_dev]); if (old_dev != -1 && old_dev != new_dev) { @@ -4048,21 +4050,20 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) if (fdc_state[FDC(drive)].rawcmd == 1) fdc_state[FDC(drive)].rawcmd = 2; - - if (!(mode & FMODE_NDELAY)) { - if (mode & (FMODE_READ|FMODE_WRITE)) { + if (!(mode & BLK_OPEN_NDELAY)) { + if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) { drive_state[drive].last_checked = 0; clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags); - if (bdev_check_media_change(bdev)) - floppy_revalidate(bdev->bd_disk); + if (disk_check_media_change(disk)) + floppy_revalidate(disk); if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags)) goto out; if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags)) goto out; } res = -EROFS; - if ((mode & FMODE_WRITE) && + if ((mode & BLK_OPEN_WRITE) && !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags)) goto out; } @@ -4073,7 +4074,7 @@ out: drive_state[drive].fd_ref--; if (!drive_state[drive].fd_ref) - opened_bdev[drive] = NULL; + opened_disk[drive] = NULL; out2: mutex_unlock(&open_lock); mutex_unlock(&floppy_mutex); @@ -4147,7 +4148,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) cbdata.drive = drive; bio_init(&bio, bdev, &bio_vec, 1, REQ_OP_READ); - bio_add_page(&bio, page, block_size(bdev), 0); + __bio_add_page(&bio, page, block_size(bdev), 0); bio.bi_iter.bi_sector = 0; bio.bi_flags |= (1 << BIO_QUIET); @@ -4203,7 +4204,8 @@ static int floppy_revalidate(struct gendisk *disk) drive_state[drive].generation++; if (drive_no_geom(drive)) { /* auto-sensing */ - res = __floppy_read_block_0(opened_bdev[drive], drive); + res = __floppy_read_block_0(opened_disk[drive]->part0, + drive); } else { if (cf) poll_drive(false, FD_RAW_NEED_DISK); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index bc31bb7072a2..37511d2b2caf 100644 --- a/drivers/block/loop.c +++ 
b/drivers/block/loop.c @@ -990,7 +990,7 @@ loop_set_status_from_info(struct loop_device *lo, return 0; } -static int loop_configure(struct loop_device *lo, fmode_t mode, +static int loop_configure(struct loop_device *lo, blk_mode_t mode, struct block_device *bdev, const struct loop_config *config) { @@ -1014,8 +1014,8 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, * If we don't hold exclusive handle for the device, upgrade to it * here to avoid changing device under exclusive owner. */ - if (!(mode & FMODE_EXCL)) { - error = bd_prepare_to_claim(bdev, loop_configure); + if (!(mode & BLK_OPEN_EXCL)) { + error = bd_prepare_to_claim(bdev, loop_configure, NULL); if (error) goto out_putf; } @@ -1050,7 +1050,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, if (error) goto out_unlock; - if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || + if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) || !file->f_op->write_iter) lo->lo_flags |= LO_FLAGS_READ_ONLY; @@ -1116,7 +1116,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, if (partscan) loop_reread_partitions(lo); - if (!(mode & FMODE_EXCL)) + if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(bdev, loop_configure); return 0; @@ -1124,7 +1124,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, out_unlock: loop_global_unlock(lo, is_loop); out_bdev: - if (!(mode & FMODE_EXCL)) + if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(bdev, loop_configure); out_putf: fput(file); @@ -1528,7 +1528,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, return err; } -static int lo_ioctl(struct block_device *bdev, fmode_t mode, +static int lo_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct loop_device *lo = bdev->bd_disk->private_data; @@ -1563,24 +1563,22 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, return loop_clr_fd(lo); case LOOP_SET_STATUS: err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { + if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN)) err = loop_set_status_old(lo, argp); - } break; case LOOP_GET_STATUS: return loop_get_status_old(lo, argp); case LOOP_SET_STATUS64: err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { + if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN)) err = loop_set_status64(lo, argp); - } break; case LOOP_GET_STATUS64: return loop_get_status64(lo, argp); case LOOP_SET_CAPACITY: case LOOP_SET_DIRECT_IO: case LOOP_SET_BLOCK_SIZE: - if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN)) + if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN)) return -EPERM; fallthrough; default: @@ -1691,7 +1689,7 @@ loop_get_status_compat(struct loop_device *lo, return err; } -static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, +static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct loop_device *lo = bdev->bd_disk->private_data; @@ -1727,7 +1725,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, } #endif -static void lo_release(struct gendisk *disk, fmode_t mode) +static void lo_release(struct gendisk *disk) { struct loop_device *lo = disk->private_data; diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 815d77ba6381..b200950e8fb5 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3041,7 +3041,7 @@ static int rssd_disk_name_format(char *prefix, * structure 
pointer. */ static int mtip_block_ioctl(struct block_device *dev, - fmode_t mode, + blk_mode_t mode, unsigned cmd, unsigned long arg) { @@ -3079,7 +3079,7 @@ static int mtip_block_ioctl(struct block_device *dev, * structure pointer. */ static int mtip_block_compat_ioctl(struct block_device *dev, - fmode_t mode, + blk_mode_t mode, unsigned cmd, unsigned long arg) { diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 65ecde3e2a5b..8576d696c7a2 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1502,7 +1502,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, return -ENOTTY; } -static int nbd_ioctl(struct block_device *bdev, fmode_t mode, +static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct nbd_device *nbd = bdev->bd_disk->private_data; @@ -1553,13 +1553,13 @@ static struct nbd_config *nbd_alloc_config(void) return config; } -static int nbd_open(struct block_device *bdev, fmode_t mode) +static int nbd_open(struct gendisk *disk, blk_mode_t mode) { struct nbd_device *nbd; int ret = 0; mutex_lock(&nbd_index_mutex); - nbd = bdev->bd_disk->private_data; + nbd = disk->private_data; if (!nbd) { ret = -ENXIO; goto out; @@ -1587,17 +1587,17 @@ static int nbd_open(struct block_device *bdev, fmode_t mode) refcount_inc(&nbd->refs); mutex_unlock(&nbd->config_lock); if (max_part) - set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state); + set_bit(GD_NEED_PART_SCAN, &disk->state); } else if (nbd_disconnected(nbd->config)) { if (max_part) - set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state); + set_bit(GD_NEED_PART_SCAN, &disk->state); } out: mutex_unlock(&nbd_index_mutex); return ret; } -static void nbd_release(struct gendisk *disk, fmode_t mode) +static void nbd_release(struct gendisk *disk) { struct nbd_device *nbd = disk->private_data; @@ -1776,7 +1776,8 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs) if (err == -ENOSPC) err = -EEXIST; } else { - err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); + err = idr_alloc(&nbd_index_idr, nbd, 0, + (MINORMASK >> part_shift) + 1, GFP_KERNEL); if (err >= 0) index = err; } diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index d5d7884cedd4..a1428538bda5 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -46,47 +46,34 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/pktcdvd.h> -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> +#include <linux/backing-dev.h> #include <linux/compat.h> -#include <linux/kthread.h> +#include <linux/debugfs.h> +#include <linux/device.h> #include <linux/errno.h> -#include <linux/spinlock.h> #include <linux/file.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/miscdevice.h> #include <linux/freezer.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/miscdevice.h> +#include <linux/module.h> #include <linux/mutex.h> +#include <linux/nospec.h> +#include <linux/pktcdvd.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/slab.h> -#include <linux/backing-dev.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/uaccess.h> + +#include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_ioctl.h> -#include <scsi/scsi.h> -#include <linux/debugfs.h> -#include <linux/device.h> -#include <linux/nospec.h> -#include <linux/uaccess.h> -#define DRIVER_NAME "pktcdvd" +#include <asm/unaligned.h> -#define pkt_err(pd, fmt, ...) 
\ - pr_err("%s: " fmt, pd->name, ##__VA_ARGS__) -#define pkt_notice(pd, fmt, ...) \ - pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__) -#define pkt_info(pd, fmt, ...) \ - pr_info("%s: " fmt, pd->name, ##__VA_ARGS__) - -#define pkt_dbg(level, pd, fmt, ...) \ -do { \ - if (level == 2 && PACKET_DEBUG >= 2) \ - pr_notice("%s: %s():" fmt, \ - pd->name, __func__, ##__VA_ARGS__); \ - else if (level == 1 && PACKET_DEBUG >= 1) \ - pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \ -} while (0) +#define DRIVER_NAME "pktcdvd" #define MAX_SPEED 0xffff @@ -107,7 +94,6 @@ static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */ /* forward declaration */ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev); static int pkt_remove_dev(dev_t pkt_dev); -static int pkt_seq_show(struct seq_file *m, void *p); static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd) { @@ -253,15 +239,16 @@ static ssize_t congestion_off_store(struct device *dev, const char *buf, size_t len) { struct pktcdvd_device *pd = dev_get_drvdata(dev); - int val; + int val, ret; - if (sscanf(buf, "%d", &val) == 1) { - spin_lock(&pd->lock); - pd->write_congestion_off = val; - init_write_congestion_marks(&pd->write_congestion_off, - &pd->write_congestion_on); - spin_unlock(&pd->lock); - } + ret = kstrtoint(buf, 10, &val); + if (ret) + return ret; + + spin_lock(&pd->lock); + pd->write_congestion_off = val; + init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on); + spin_unlock(&pd->lock); return len; } static DEVICE_ATTR_RW(congestion_off); @@ -283,15 +270,16 @@ static ssize_t congestion_on_store(struct device *dev, const char *buf, size_t len) { struct pktcdvd_device *pd = dev_get_drvdata(dev); - int val; + int val, ret; - if (sscanf(buf, "%d", &val) == 1) { - spin_lock(&pd->lock); - pd->write_congestion_on = val; - init_write_congestion_marks(&pd->write_congestion_off, - &pd->write_congestion_on); - spin_unlock(&pd->lock); - } + ret = kstrtoint(buf, 10, &val); + if (ret) + return ret; + + spin_lock(&pd->lock); + pd->write_congestion_on = val; + init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on); + spin_unlock(&pd->lock); return len; } static DEVICE_ATTR_RW(congestion_on); @@ -319,7 +307,7 @@ static void pkt_sysfs_dev_new(struct pktcdvd_device *pd) if (class_is_registered(&class_pktcdvd)) { pd->dev = device_create_with_groups(&class_pktcdvd, NULL, MKDEV(0, 0), pd, pkt_groups, - "%s", pd->name); + "%s", pd->disk->disk_name); if (IS_ERR(pd->dev)) pd->dev = NULL; } @@ -349,8 +337,8 @@ static ssize_t device_map_show(const struct class *c, const struct class_attribu struct pktcdvd_device *pd = pkt_devs[idx]; if (!pd) continue; - n += sprintf(data+n, "%s %u:%u %u:%u\n", - pd->name, + n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n", + pd->disk->disk_name, MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev), MAJOR(pd->bdev->bd_dev), MINOR(pd->bdev->bd_dev)); @@ -428,34 +416,92 @@ static void pkt_sysfs_cleanup(void) *******************************************************************/ -static int pkt_debugfs_seq_show(struct seq_file *m, void *p) +static void pkt_count_states(struct pktcdvd_device *pd, int *states) { - return pkt_seq_show(m, p); + struct packet_data *pkt; + int i; + + for (i = 0; i < PACKET_NUM_STATES; i++) + states[i] = 0; + + spin_lock(&pd->cdrw.active_list_lock); + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + states[pkt->state]++; + } + spin_unlock(&pd->cdrw.active_list_lock); } -static int pkt_debugfs_fops_open(struct inode *inode, struct file 
*file) +static int pkt_seq_show(struct seq_file *m, void *p) { - return single_open(file, pkt_debugfs_seq_show, inode->i_private); -} + struct pktcdvd_device *pd = m->private; + char *msg; + int states[PACKET_NUM_STATES]; -static const struct file_operations debug_fops = { - .open = pkt_debugfs_fops_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; + seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name, pd->bdev); + + seq_printf(m, "\nSettings:\n"); + seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); + + if (pd->settings.write_type == 0) + msg = "Packet"; + else + msg = "Unknown"; + seq_printf(m, "\twrite type:\t\t%s\n", msg); + + seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable"); + seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss); + + seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode); + + if (pd->settings.block_mode == PACKET_BLOCK_MODE1) + msg = "Mode 1"; + else if (pd->settings.block_mode == PACKET_BLOCK_MODE2) + msg = "Mode 2"; + else + msg = "Unknown"; + seq_printf(m, "\tblock mode:\t\t%s\n", msg); + + seq_printf(m, "\nStatistics:\n"); + seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started); + seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended); + seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1); + seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1); + seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1); + + seq_printf(m, "\nMisc:\n"); + seq_printf(m, "\treference count:\t%d\n", pd->refcnt); + seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags); + seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed); + seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed); + seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset); + seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset); + + seq_printf(m, "\nQueue state:\n"); + seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size); + seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios)); + seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector); + + pkt_count_states(pd, states); + seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", + states[0], states[1], states[2], states[3], states[4], states[5]); + + seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n", + pd->write_congestion_off, + pd->write_congestion_on); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(pkt_seq); static void pkt_debugfs_dev_new(struct pktcdvd_device *pd) { if (!pkt_debugfs_root) return; - pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root); + pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root); if (!pd->dfs_d_root) return; - pd->dfs_f_info = debugfs_create_file("info", 0444, - pd->dfs_d_root, pd, &debug_fops); + pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root, + pd, &pkt_seq_fops); } static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd) @@ -484,9 +530,11 @@ static void pkt_debugfs_cleanup(void) static void pkt_bio_finished(struct pktcdvd_device *pd) { + struct device *ddev = disk_to_dev(pd->disk); + BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0); if (atomic_dec_and_test(&pd->cdrw.pending_bios)) { - pkt_dbg(2, pd, "queue empty\n"); + dev_dbg(ddev, "queue empty\n"); atomic_set(&pd->iosched.attention, 1); wake_up(&pd->wqueue); } @@ -717,15 +765,16 @@ static const char *sense_key_string(__u8 index) static void pkt_dump_sense(struct pktcdvd_device *pd, struct 
packet_command *cgc) { + struct device *ddev = disk_to_dev(pd->disk); struct scsi_sense_hdr *sshdr = cgc->sshdr; if (sshdr) - pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n", + dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n", CDROM_PACKET_SIZE, cgc->cmd, sshdr->sense_key, sshdr->asc, sshdr->ascq, sense_key_string(sshdr->sense_key)); else - pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd); + dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd); } /* @@ -762,10 +811,8 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.sshdr = &sshdr; cgc.cmd[0] = GPCMD_SET_SPEED; - cgc.cmd[2] = (read_speed >> 8) & 0xff; - cgc.cmd[3] = read_speed & 0xff; - cgc.cmd[4] = (write_speed >> 8) & 0xff; - cgc.cmd[5] = write_speed & 0xff; + put_unaligned_be16(read_speed, &cgc.cmd[2]); + put_unaligned_be16(write_speed, &cgc.cmd[4]); ret = pkt_generic_packet(pd, &cgc); if (ret) @@ -809,6 +856,7 @@ static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) */ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) { + struct device *ddev = disk_to_dev(pd->disk); if (atomic_read(&pd->iosched.attention) == 0) return; @@ -836,7 +884,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) need_write_seek = 0; if (need_write_seek && reads_queued) { if (atomic_read(&pd->cdrw.pending_bios) > 0) { - pkt_dbg(2, pd, "write, waiting\n"); + dev_dbg(ddev, "write, waiting\n"); break; } pkt_flush_cache(pd); @@ -845,7 +893,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) } else { if (!reads_queued && writes_queued) { if (atomic_read(&pd->cdrw.pending_bios) > 0) { - pkt_dbg(2, pd, "read, waiting\n"); + dev_dbg(ddev, "read, waiting\n"); break; } pd->iosched.writing = 1; @@ -892,25 +940,27 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) */ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) { - if ((pd->settings.size << 9) / CD_FRAMESIZE - <= queue_max_segments(q)) { + struct device *ddev = disk_to_dev(pd->disk); + + if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) { /* * The cdrom device can handle one segment/frame */ clear_bit(PACKET_MERGE_SEGS, &pd->flags); return 0; - } else if ((pd->settings.size << 9) / PAGE_SIZE - <= queue_max_segments(q)) { + } + + if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) { /* * We can handle this case at the expense of some extra memory * copies during write operations */ set_bit(PACKET_MERGE_SEGS, &pd->flags); return 0; - } else { - pkt_err(pd, "cdrom max_phys_segments too small\n"); - return -EIO; } + + dev_err(ddev, "cdrom max_phys_segments too small\n"); + return -EIO; } static void pkt_end_io_read(struct bio *bio) @@ -919,9 +969,8 @@ static void pkt_end_io_read(struct bio *bio) struct pktcdvd_device *pd = pkt->pd; BUG_ON(!pd); - pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", - bio, (unsigned long long)pkt->sector, - (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status); + dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n", + bio, pkt->sector, bio->bi_iter.bi_sector, bio->bi_status); if (bio->bi_status) atomic_inc(&pkt->io_errors); @@ -939,7 +988,7 @@ static void pkt_end_io_packet_write(struct bio *bio) struct pktcdvd_device *pd = pkt->pd; BUG_ON(!pd); - pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status); + dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status); pd->stats.pkt_ended++; @@ -955,6 
+1004,7 @@ static void pkt_end_io_packet_write(struct bio *bio) */ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) { + struct device *ddev = disk_to_dev(pd->disk); int frames_read = 0; struct bio *bio; int f; @@ -983,8 +1033,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) spin_unlock(&pkt->lock); if (pkt->cache_valid) { - pkt_dbg(2, pd, "zone %llx cached\n", - (unsigned long long)pkt->sector); + dev_dbg(ddev, "zone %llx cached\n", pkt->sector); goto out_account; } @@ -1005,8 +1054,8 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) p = (f * CD_FRAMESIZE) / PAGE_SIZE; offset = (f * CD_FRAMESIZE) % PAGE_SIZE; - pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n", - f, pkt->pages[p], offset); + dev_dbg(ddev, "Adding frame %d, page:%p offs:%d\n", f, + pkt->pages[p], offset); if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset)) BUG(); @@ -1016,8 +1065,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) } out_account: - pkt_dbg(2, pd, "need %d frames for zone %llx\n", - frames_read, (unsigned long long)pkt->sector); + dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read, pkt->sector); pd->stats.pkt_started++; pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9); } @@ -1051,17 +1099,17 @@ static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *p } } -static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state) +static inline void pkt_set_state(struct device *ddev, struct packet_data *pkt, + enum packet_data_state state) { -#if PACKET_DEBUG > 1 static const char *state_name[] = { "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED" }; enum packet_data_state old_state = pkt->state; - pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n", - pkt->id, (unsigned long long)pkt->sector, - state_name[old_state], state_name[state]); -#endif + + dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n", + pkt->id, pkt->sector, state_name[old_state], state_name[state]); + pkt->state = state; } @@ -1071,6 +1119,7 @@ static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state */ static int pkt_handle_queue(struct pktcdvd_device *pd) { + struct device *ddev = disk_to_dev(pd->disk); struct packet_data *pkt, *p; struct bio *bio = NULL; sector_t zone = 0; /* Suppress gcc warning */ @@ -1080,7 +1129,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd) atomic_set(&pd->scan_queue, 0); if (list_empty(&pd->cdrw.pkt_free_list)) { - pkt_dbg(2, pd, "no pkt\n"); + dev_dbg(ddev, "no pkt\n"); return 0; } @@ -1117,7 +1166,7 @@ try_next_bio: } spin_unlock(&pd->lock); if (!bio) { - pkt_dbg(2, pd, "no bio\n"); + dev_dbg(ddev, "no bio\n"); return 0; } @@ -1133,12 +1182,13 @@ try_next_bio: * to this packet. 
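The pktcdvd debugfs rework above deletes the hand-rolled open()/file_operations pair in favour of DEFINE_SHOW_ATTRIBUTE(), which expands a single show() routine into a complete single_open()-based fops. A sketch with demo names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_info_show(struct seq_file *m, void *p)
{
        /* m->private is the data pointer passed to debugfs_create_file() */
        seq_puts(m, "state: idle\n");
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo_info);   /* generates demo_info_fops */

/* usage: debugfs_create_file("info", 0444, parent_dir, data, &demo_info_fops); */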
*/ spin_lock(&pd->lock); - pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone); + dev_dbg(ddev, "looking for zone %llx\n", zone); while ((node = pkt_rbtree_find(pd, zone)) != NULL) { + sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd); + bio = node->bio; - pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long) - get_zone(bio->bi_iter.bi_sector, pd)); - if (get_zone(bio->bi_iter.bi_sector, pd) != zone) + dev_dbg(ddev, "found zone=%llx\n", tmp); + if (tmp != zone) break; pkt_rbtree_erase(pd, node); spin_lock(&pkt->lock); @@ -1157,7 +1207,7 @@ try_next_bio: spin_unlock(&pd->lock); pkt->sleep_time = max(PACKET_WAIT_TIME, 1); - pkt_set_state(pkt, PACKET_WAITING_STATE); + pkt_set_state(ddev, pkt, PACKET_WAITING_STATE); atomic_set(&pkt->run_sm, 1); spin_lock(&pd->cdrw.active_list_lock); @@ -1209,6 +1259,7 @@ static void bio_list_copy_data(struct bio *dst, struct bio *src) */ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) { + struct device *ddev = disk_to_dev(pd->disk); int f; bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames, @@ -1225,7 +1276,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset)) BUG(); } - pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt); + dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt); /* * Fill-in bvec with data from orig_bios. @@ -1233,11 +1284,10 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) spin_lock(&pkt->lock); bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head); - pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE); + pkt_set_state(ddev, pkt, PACKET_WRITE_WAIT_STATE); spin_unlock(&pkt->lock); - pkt_dbg(2, pd, "Writing %d frames for zone %llx\n", - pkt->write_size, (unsigned long long)pkt->sector); + dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size, pkt->sector); if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) pkt->cache_valid = 1; @@ -1265,7 +1315,9 @@ static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status) static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) { - pkt_dbg(2, pd, "pkt %d\n", pkt->id); + struct device *ddev = disk_to_dev(pd->disk); + + dev_dbg(ddev, "pkt %d\n", pkt->id); for (;;) { switch (pkt->state) { @@ -1275,7 +1327,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data pkt->sleep_time = 0; pkt_gather_data(pd, pkt); - pkt_set_state(pkt, PACKET_READ_WAIT_STATE); + pkt_set_state(ddev, pkt, PACKET_READ_WAIT_STATE); break; case PACKET_READ_WAIT_STATE: @@ -1283,7 +1335,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data return; if (atomic_read(&pkt->io_errors) > 0) { - pkt_set_state(pkt, PACKET_RECOVERY_STATE); + pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE); } else { pkt_start_write(pd, pkt); } @@ -1294,15 +1346,15 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data return; if (!pkt->w_bio->bi_status) { - pkt_set_state(pkt, PACKET_FINISHED_STATE); + pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE); } else { - pkt_set_state(pkt, PACKET_RECOVERY_STATE); + pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE); } break; case PACKET_RECOVERY_STATE: - pkt_dbg(2, pd, "No recovery possible\n"); - pkt_set_state(pkt, PACKET_FINISHED_STATE); + dev_dbg(ddev, "No recovery possible\n"); + pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE); break; case PACKET_FINISHED_STATE: @@ -1318,6 
+1370,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data static void pkt_handle_packets(struct pktcdvd_device *pd) { + struct device *ddev = disk_to_dev(pd->disk); struct packet_data *pkt, *next; /* @@ -1338,28 +1391,13 @@ static void pkt_handle_packets(struct pktcdvd_device *pd) if (pkt->state == PACKET_FINISHED_STATE) { list_del(&pkt->list); pkt_put_packet_data(pd, pkt); - pkt_set_state(pkt, PACKET_IDLE_STATE); + pkt_set_state(ddev, pkt, PACKET_IDLE_STATE); atomic_set(&pd->scan_queue, 1); } } spin_unlock(&pd->cdrw.active_list_lock); } -static void pkt_count_states(struct pktcdvd_device *pd, int *states) -{ - struct packet_data *pkt; - int i; - - for (i = 0; i < PACKET_NUM_STATES; i++) - states[i] = 0; - - spin_lock(&pd->cdrw.active_list_lock); - list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { - states[pkt->state]++; - } - spin_unlock(&pd->cdrw.active_list_lock); -} - /* * kcdrwd is woken up when writes have been queued for one of our * registered devices @@ -1367,7 +1405,9 @@ static void pkt_count_states(struct pktcdvd_device *pd, int *states) static int kcdrwd(void *foobar) { struct pktcdvd_device *pd = foobar; + struct device *ddev = disk_to_dev(pd->disk); struct packet_data *pkt; + int states[PACKET_NUM_STATES]; long min_sleep_time, residue; set_user_nice(current, MIN_NICE); @@ -1398,13 +1438,9 @@ static int kcdrwd(void *foobar) goto work_to_do; /* Otherwise, go to sleep */ - if (PACKET_DEBUG > 1) { - int states[PACKET_NUM_STATES]; - pkt_count_states(pd, states); - pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", - states[0], states[1], states[2], - states[3], states[4], states[5]); - } + pkt_count_states(pd, states); + dev_dbg(ddev, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", + states[0], states[1], states[2], states[3], states[4], states[5]); min_sleep_time = MAX_SCHEDULE_TIMEOUT; list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { @@ -1412,9 +1448,9 @@ static int kcdrwd(void *foobar) min_sleep_time = pkt->sleep_time; } - pkt_dbg(2, pd, "sleeping\n"); + dev_dbg(ddev, "sleeping\n"); residue = schedule_timeout(min_sleep_time); - pkt_dbg(2, pd, "wake up\n"); + dev_dbg(ddev, "wake up\n"); /* make swsusp happy with our thread */ try_to_freeze(); @@ -1462,7 +1498,7 @@ work_to_do: static void pkt_print_settings(struct pktcdvd_device *pd) { - pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n", + dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n", pd->settings.fp ? "Fixed" : "Variable", pd->settings.size >> 2, pd->settings.block_mode == 8 ? 
'1' : '2'); @@ -1474,8 +1510,7 @@ static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, cgc->cmd[0] = GPCMD_MODE_SENSE_10; cgc->cmd[2] = page_code | (page_control << 6); - cgc->cmd[7] = cgc->buflen >> 8; - cgc->cmd[8] = cgc->buflen & 0xff; + put_unaligned_be16(cgc->buflen, &cgc->cmd[7]); cgc->data_direction = CGC_DATA_READ; return pkt_generic_packet(pd, cgc); } @@ -1486,8 +1521,7 @@ static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc memset(cgc->buffer, 0, 2); cgc->cmd[0] = GPCMD_MODE_SELECT_10; cgc->cmd[1] = 0x10; /* PF */ - cgc->cmd[7] = cgc->buflen >> 8; - cgc->cmd[8] = cgc->buflen & 0xff; + put_unaligned_be16(cgc->buflen, &cgc->cmd[7]); cgc->data_direction = CGC_DATA_WRITE; return pkt_generic_packet(pd, cgc); } @@ -1528,8 +1562,7 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ); cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO; cgc.cmd[1] = type & 3; - cgc.cmd[4] = (track & 0xff00) >> 8; - cgc.cmd[5] = track & 0xff; + put_unaligned_be16(track, &cgc.cmd[4]); cgc.cmd[8] = 8; cgc.quiet = 1; @@ -1590,6 +1623,7 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, */ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) { + struct device *ddev = disk_to_dev(pd->disk); struct packet_command cgc; struct scsi_sense_hdr sshdr; write_param_page *wp; @@ -1609,8 +1643,8 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) return ret; } - size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff)); - pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff); + size = 2 + get_unaligned_be16(&buffer[0]); + pd->mode_offset = get_unaligned_be16(&buffer[6]); if (size > sizeof(buffer)) size = sizeof(buffer); @@ -1656,7 +1690,7 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) /* * paranoia */ - pkt_err(pd, "write mode wrong %d\n", wp->data_block_type); + dev_err(ddev, "write mode wrong %d\n", wp->data_block_type); return 1; } wp->packet_size = cpu_to_be32(pd->settings.size >> 2); @@ -1677,6 +1711,8 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) */ static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) { + struct device *ddev = disk_to_dev(pd->disk); + switch (pd->mmc3_profile) { case 0x1a: /* DVD+RW */ case 0x12: /* DVD-RAM */ @@ -1701,7 +1737,7 @@ static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) if (ti->rt == 1 && ti->blank == 0) return 1; - pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); + dev_err(ddev, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); return 0; } @@ -1710,6 +1746,8 @@ static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) */ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) { + struct device *ddev = disk_to_dev(pd->disk); + switch (pd->mmc3_profile) { case 0x0a: /* CD-RW */ case 0xffff: /* MMC3 not supported */ @@ -1719,8 +1757,7 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) case 0x12: /* DVD-RAM */ return 1; default: - pkt_dbg(2, pd, "Wrong disc profile (%x)\n", - pd->mmc3_profile); + dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile); return 0; } @@ -1729,22 +1766,22 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) * but i'm not sure, should we leave this to user apps? probably. 
*/ if (di->disc_type == 0xff) { - pkt_notice(pd, "unknown disc - no track?\n"); + dev_notice(ddev, "unknown disc - no track?\n"); return 0; } if (di->disc_type != 0x20 && di->disc_type != 0) { - pkt_err(pd, "wrong disc type (%x)\n", di->disc_type); + dev_err(ddev, "wrong disc type (%x)\n", di->disc_type); return 0; } if (di->erasable == 0) { - pkt_notice(pd, "disc not erasable\n"); + dev_err(ddev, "disc not erasable\n"); return 0; } if (di->border_status == PACKET_SESSION_RESERVED) { - pkt_err(pd, "can't write to last track (reserved)\n"); + dev_err(ddev, "can't write to last track (reserved)\n"); return 0; } @@ -1753,6 +1790,7 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) { + struct device *ddev = disk_to_dev(pd->disk); struct packet_command cgc; unsigned char buf[12]; disc_information di; @@ -1763,14 +1801,14 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) cgc.cmd[0] = GPCMD_GET_CONFIGURATION; cgc.cmd[8] = 8; ret = pkt_generic_packet(pd, &cgc); - pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7]; + pd->mmc3_profile = ret ? 0xffff : get_unaligned_be16(&buf[6]); memset(&di, 0, sizeof(disc_information)); memset(&ti, 0, sizeof(track_information)); ret = pkt_get_disc_info(pd, &di); if (ret) { - pkt_err(pd, "failed get_disc\n"); + dev_err(ddev, "failed get_disc\n"); return ret; } @@ -1782,12 +1820,12 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */ ret = pkt_get_track_info(pd, track, 1, &ti); if (ret) { - pkt_err(pd, "failed get_track\n"); + dev_err(ddev, "failed get_track\n"); return ret; } if (!pkt_writable_track(pd, &ti)) { - pkt_err(pd, "can't write to this track\n"); + dev_err(ddev, "can't write to this track\n"); return -EROFS; } @@ -1797,11 +1835,11 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) */ pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; if (pd->settings.size == 0) { - pkt_notice(pd, "detected zero packet size!\n"); + dev_notice(ddev, "detected zero packet size!\n"); return -ENXIO; } if (pd->settings.size > PACKET_MAX_SECTORS) { - pkt_err(pd, "packet size is too big\n"); + dev_err(ddev, "packet size is too big\n"); return -EROFS; } pd->settings.fp = ti.fp; @@ -1843,7 +1881,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) pd->settings.block_mode = PACKET_BLOCK_MODE2; break; default: - pkt_err(pd, "unknown data mode\n"); + dev_err(ddev, "unknown data mode\n"); return -EROFS; } return 0; @@ -1854,6 +1892,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) */ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd) { + struct device *ddev = disk_to_dev(pd->disk); struct packet_command cgc; struct scsi_sense_hdr sshdr; unsigned char buf[64]; @@ -1880,13 +1919,13 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd) */ buf[pd->mode_offset + 10] |= (set << 2); - cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff)); + cgc.buflen = cgc.cmd[8] = 2 + get_unaligned_be16(&buf[0]); ret = pkt_mode_select(pd, &cgc); if (ret) { - pkt_err(pd, "write caching control failed\n"); + dev_err(ddev, "write caching control failed\n"); pkt_dump_sense(pd, &cgc); } else if (!ret && set) - pkt_notice(pd, "enabled write caching\n"); + dev_notice(ddev, "enabled write caching\n"); return ret; } @@ -1935,12 +1974,12 @@ 
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, * Speed Performance Descriptor Block", use the information * in the first block. (contains the highest speed) */ - int num_spdb = (cap_buf[30] << 8) + cap_buf[31]; + int num_spdb = get_unaligned_be16(&cap_buf[30]); if (num_spdb > 0) offset = 34; } - *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1]; + *write_speed = get_unaligned_be16(&cap_buf[offset]); return 0; } @@ -1967,6 +2006,7 @@ static char us_clv_to_speed[16] = { static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) { + struct device *ddev = disk_to_dev(pd->disk); struct packet_command cgc; struct scsi_sense_hdr sshdr; unsigned char buf[64]; @@ -1984,7 +2024,7 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, pkt_dump_sense(pd, &cgc); return ret; } - size = ((unsigned int) buf[0]<<8) + buf[1] + 2; + size = 2 + get_unaligned_be16(&buf[0]); if (size > sizeof(buf)) size = sizeof(buf); @@ -2001,11 +2041,11 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, } if (!(buf[6] & 0x40)) { - pkt_notice(pd, "disc type is not CD-RW\n"); + dev_notice(ddev, "disc type is not CD-RW\n"); return 1; } if (!(buf[6] & 0x4)) { - pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n"); + dev_notice(ddev, "A1 values on media are not valid, maybe not CDRW?\n"); return 1; } @@ -2025,25 +2065,26 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, *speed = us_clv_to_speed[sp]; break; default: - pkt_notice(pd, "unknown disc sub-type %d\n", st); + dev_notice(ddev, "unknown disc sub-type %d\n", st); return 1; } if (*speed) { - pkt_info(pd, "maximum media speed: %d\n", *speed); + dev_info(ddev, "maximum media speed: %d\n", *speed); return 0; } else { - pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st); + dev_notice(ddev, "unknown speed %d for sub-type %d\n", sp, st); return 1; } } static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) { + struct device *ddev = disk_to_dev(pd->disk); struct packet_command cgc; struct scsi_sense_hdr sshdr; int ret; - pkt_dbg(2, pd, "Performing OPC\n"); + dev_dbg(ddev, "Performing OPC\n"); init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.sshdr = &sshdr; @@ -2058,18 +2099,19 @@ static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) static int pkt_open_write(struct pktcdvd_device *pd) { + struct device *ddev = disk_to_dev(pd->disk); int ret; unsigned int write_speed, media_write_speed, read_speed; ret = pkt_probe_settings(pd); if (ret) { - pkt_dbg(2, pd, "failed probe\n"); + dev_dbg(ddev, "failed probe\n"); return ret; } ret = pkt_set_write_settings(pd); if (ret) { - pkt_dbg(1, pd, "failed saving write settings\n"); + dev_notice(ddev, "failed saving write settings\n"); return -EIO; } @@ -2082,30 +2124,29 @@ static int pkt_open_write(struct pktcdvd_device *pd) case 0x13: /* DVD-RW */ case 0x1a: /* DVD+RW */ case 0x12: /* DVD-RAM */ - pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed); + dev_notice(ddev, "write speed %ukB/s\n", write_speed); break; default: ret = pkt_media_speed(pd, &media_write_speed); if (ret) media_write_speed = 16; write_speed = min(write_speed, media_write_speed * 177); - pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176); + dev_notice(ddev, "write speed %ux\n", write_speed / 176); break; } read_speed = write_speed; ret = pkt_set_speed(pd, write_speed, read_speed); if (ret) { - pkt_dbg(1, pd, "couldn't set write speed\n"); + dev_notice(ddev, "couldn't 
set write speed\n"); return -EIO; } pd->write_speed = write_speed; pd->read_speed = read_speed; ret = pkt_perform_opc(pd); - if (ret) { - pkt_dbg(1, pd, "Optimum Power Calibration failed\n"); - } + if (ret) + dev_notice(ddev, "Optimum Power Calibration failed\n"); return 0; } @@ -2113,8 +2154,9 @@ static int pkt_open_write(struct pktcdvd_device *pd) /* * called at open time. */ -static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) +static int pkt_open_dev(struct pktcdvd_device *pd, bool write) { + struct device *ddev = disk_to_dev(pd->disk); int ret; long lba; struct request_queue *q; @@ -2125,7 +2167,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) * to read/write from/to it. It is already opened in O_NONBLOCK mode * so open should not fail. */ - bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd); + bdev = blkdev_get_by_dev(pd->bdev->bd_dev, BLK_OPEN_READ, pd, NULL); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); goto out; @@ -2133,7 +2175,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) ret = pkt_get_last_written(pd, &lba); if (ret) { - pkt_err(pd, "pkt_get_last_written failed\n"); + dev_err(ddev, "pkt_get_last_written failed\n"); goto out_putdev; } @@ -2162,17 +2204,17 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) if (write) { if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { - pkt_err(pd, "not enough memory for buffers\n"); + dev_err(ddev, "not enough memory for buffers\n"); ret = -ENOMEM; goto out_putdev; } - pkt_info(pd, "%lukB available on disc\n", lba << 1); + dev_info(ddev, "%lukB available on disc\n", lba << 1); } return 0; out_putdev: - blkdev_put(bdev, FMODE_READ | FMODE_EXCL); + blkdev_put(bdev, pd); out: return ret; } @@ -2183,13 +2225,15 @@ out: */ static void pkt_release_dev(struct pktcdvd_device *pd, int flush) { + struct device *ddev = disk_to_dev(pd->disk); + if (flush && pkt_flush_cache(pd)) - pkt_dbg(1, pd, "not flushing cache\n"); + dev_notice(ddev, "not flushing cache\n"); pkt_lock_door(pd, 0); pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); - blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); + blkdev_put(pd->bdev, pd); pkt_shrink_pktlist(pd); } @@ -2203,14 +2247,14 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) return pkt_devs[dev_minor]; } -static int pkt_open(struct block_device *bdev, fmode_t mode) +static int pkt_open(struct gendisk *disk, blk_mode_t mode) { struct pktcdvd_device *pd = NULL; int ret; mutex_lock(&pktcdvd_mutex); mutex_lock(&ctl_mutex); - pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); + pd = pkt_find_dev_from_minor(disk->first_minor); if (!pd) { ret = -ENODEV; goto out; @@ -2219,22 +2263,21 @@ static int pkt_open(struct block_device *bdev, fmode_t mode) pd->refcnt++; if (pd->refcnt > 1) { - if ((mode & FMODE_WRITE) && + if ((mode & BLK_OPEN_WRITE) && !test_bit(PACKET_WRITABLE, &pd->flags)) { ret = -EBUSY; goto out_dec; } } else { - ret = pkt_open_dev(pd, mode & FMODE_WRITE); + ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE); if (ret) goto out_dec; /* * needed here as well, since ext2 (among others) may change * the blocksize at mount time */ - set_blocksize(bdev, CD_FRAMESIZE); + set_blocksize(disk->part0, CD_FRAMESIZE); } - mutex_unlock(&ctl_mutex); mutex_unlock(&pktcdvd_mutex); return 0; @@ -2247,7 +2290,7 @@ out: return ret; } -static void pkt_close(struct gendisk *disk, fmode_t mode) +static void pkt_release(struct gendisk *disk) { struct pktcdvd_device *pd = disk->private_data; @@ -2385,15 +2428,15 @@ static void 
pkt_make_request_write(struct request_queue *q, struct bio *bio) static void pkt_submit_bio(struct bio *bio) { struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata; + struct device *ddev = disk_to_dev(pd->disk); struct bio *split; bio = bio_split_to_limits(bio); if (!bio) return; - pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", - (unsigned long long)bio->bi_iter.bi_sector, - (unsigned long long)bio_end_sector(bio)); + dev_dbg(ddev, "start = %6llx stop = %6llx\n", + bio->bi_iter.bi_sector, bio_end_sector(bio)); /* * Clone READ bios so we can have our own bi_end_io callback. @@ -2404,13 +2447,12 @@ static void pkt_submit_bio(struct bio *bio) } if (!test_bit(PACKET_WRITABLE, &pd->flags)) { - pkt_notice(pd, "WRITE for ro device (%llu)\n", - (unsigned long long)bio->bi_iter.bi_sector); + dev_notice(ddev, "WRITE for ro device (%llu)\n", bio->bi_iter.bi_sector); goto end_io; } if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) { - pkt_err(pd, "wrong bio size\n"); + dev_err(ddev, "wrong bio size\n"); goto end_io; } @@ -2446,74 +2488,15 @@ static void pkt_init_queue(struct pktcdvd_device *pd) q->queuedata = pd; } -static int pkt_seq_show(struct seq_file *m, void *p) -{ - struct pktcdvd_device *pd = m->private; - char *msg; - int states[PACKET_NUM_STATES]; - - seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev); - - seq_printf(m, "\nSettings:\n"); - seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); - - if (pd->settings.write_type == 0) - msg = "Packet"; - else - msg = "Unknown"; - seq_printf(m, "\twrite type:\t\t%s\n", msg); - - seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable"); - seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss); - - seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode); - - if (pd->settings.block_mode == PACKET_BLOCK_MODE1) - msg = "Mode 1"; - else if (pd->settings.block_mode == PACKET_BLOCK_MODE2) - msg = "Mode 2"; - else - msg = "Unknown"; - seq_printf(m, "\tblock mode:\t\t%s\n", msg); - - seq_printf(m, "\nStatistics:\n"); - seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started); - seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended); - seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1); - seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1); - seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1); - - seq_printf(m, "\nMisc:\n"); - seq_printf(m, "\treference count:\t%d\n", pd->refcnt); - seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags); - seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed); - seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed); - seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset); - seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset); - - seq_printf(m, "\nQueue state:\n"); - seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size); - seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios)); - seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector); - - pkt_count_states(pd, states); - seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", - states[0], states[1], states[2], states[3], states[4], states[5]); - - seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n", - pd->write_congestion_off, - pd->write_congestion_on); - return 0; -} - static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) { + struct device *ddev = disk_to_dev(pd->disk); int i; struct block_device *bdev; struct scsi_device 
*sdev; if (pd->pkt_dev == dev) { - pkt_err(pd, "recursive setup not allowed\n"); + dev_err(ddev, "recursive setup not allowed\n"); return -EBUSY; } for (i = 0; i < MAX_WRITERS; i++) { @@ -2521,21 +2504,22 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) if (!pd2) continue; if (pd2->bdev->bd_dev == dev) { - pkt_err(pd, "%pg already setup\n", pd2->bdev); + dev_err(ddev, "%pg already setup\n", pd2->bdev); return -EBUSY; } if (pd2->pkt_dev == dev) { - pkt_err(pd, "can't chain pktcdvd devices\n"); + dev_err(ddev, "can't chain pktcdvd devices\n"); return -EBUSY; } } - bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL); + bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY, NULL, + NULL); if (IS_ERR(bdev)) return PTR_ERR(bdev); sdev = scsi_device_from_queue(bdev->bd_disk->queue); if (!sdev) { - blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); + blkdev_put(bdev, NULL); return -EINVAL; } put_device(&sdev->sdev_gendev); @@ -2549,30 +2533,31 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) pkt_init_queue(pd); atomic_set(&pd->cdrw.pending_bios, 0); - pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name); + pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name); if (IS_ERR(pd->cdrw.thread)) { - pkt_err(pd, "can't start kernel thread\n"); + dev_err(ddev, "can't start kernel thread\n"); goto out_mem; } - proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd); - pkt_dbg(1, pd, "writer mapped to %pg\n", bdev); + proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd); + dev_notice(ddev, "writer mapped to %pg\n", bdev); return 0; out_mem: - blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); + blkdev_put(bdev, NULL); /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); return -ENOMEM; } -static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) +static int pkt_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long arg) { struct pktcdvd_device *pd = bdev->bd_disk->private_data; + struct device *ddev = disk_to_dev(pd->disk); int ret; - pkt_dbg(2, pd, "cmd %x, dev %d:%d\n", - cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); + dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); mutex_lock(&pktcdvd_mutex); switch (cmd) { @@ -2598,7 +2583,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); break; default: - pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd); + dev_dbg(ddev, "Unknown ioctl (%x)\n", cmd); ret = -ENOTTY; } mutex_unlock(&pktcdvd_mutex); @@ -2631,7 +2616,7 @@ static const struct block_device_operations pktcdvd_ops = { .owner = THIS_MODULE, .submit_bio = pkt_submit_bio, .open = pkt_open, - .release = pkt_close, + .release = pkt_release, .ioctl = pkt_ioctl, .compat_ioctl = blkdev_compat_ptr_ioctl, .check_events = pkt_check_events, @@ -2676,7 +2661,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) spin_lock_init(&pd->iosched.lock); bio_list_init(&pd->iosched.read_queue); bio_list_init(&pd->iosched.write_queue); - sprintf(pd->name, DRIVER_NAME"%d", idx); init_waitqueue_head(&pd->wqueue); pd->bio_queue = RB_ROOT; @@ -2693,7 +2677,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) disk->minors = 1; disk->fops = &pktcdvd_ops; disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART; - strcpy(disk->disk_name, pd->name); + snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME"%d", idx); 
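
The pktcdvd hunks above also repeatedly swap open-coded big-endian byte packing for the put_unaligned_be16()/get_unaligned_be16() helpers. A minimal sketch of the idiom, with an illustrative function name that is not part of the patch:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Fill the 16-bit big-endian allocation-length field of a 10-byte CDB. */
static void example_fill_cdb_len(u8 *cmd, u16 buflen)
{
	/* replaces: cmd[7] = buflen >> 8; cmd[8] = buflen & 0xff; */
	put_unaligned_be16(buflen, &cmd[7]);
}

The same helper family covers the read direction, e.g. get_unaligned_be16(&buf[0]) in the pkt_set_write_settings() hunk above, and works regardless of the buffer's alignment.
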
disk->private_data = pd; pd->pkt_dev = MKDEV(pktdev_major, idx); @@ -2735,6 +2719,7 @@ out_mutex: static int pkt_remove_dev(dev_t pkt_dev) { struct pktcdvd_device *pd; + struct device *ddev; int idx; int ret = 0; @@ -2755,6 +2740,9 @@ static int pkt_remove_dev(dev_t pkt_dev) ret = -EBUSY; goto out; } + + ddev = disk_to_dev(pd->disk); + if (!IS_ERR(pd->cdrw.thread)) kthread_stop(pd->cdrw.thread); @@ -2763,10 +2751,10 @@ static int pkt_remove_dev(dev_t pkt_dev) pkt_debugfs_dev_remove(pd); pkt_sysfs_dev_remove(pd); - blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY); + blkdev_put(pd->bdev, NULL); - remove_proc_entry(pd->name, pkt_proc); - pkt_dbg(1, pd, "writer unmapped\n"); + remove_proc_entry(pd->disk->disk_name, pkt_proc); + dev_notice(ddev, "writer unmapped\n"); del_gendisk(pd->disk); put_disk(pd->disk); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 632751ddb287..bd0e075a5d89 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -660,9 +660,9 @@ static bool pending_result_dec(struct pending_result *pending, int *result) return true; } -static int rbd_open(struct block_device *bdev, fmode_t mode) +static int rbd_open(struct gendisk *disk, blk_mode_t mode) { - struct rbd_device *rbd_dev = bdev->bd_disk->private_data; + struct rbd_device *rbd_dev = disk->private_data; bool removing = false; spin_lock_irq(&rbd_dev->lock); @@ -679,7 +679,7 @@ static int rbd_open(struct block_device *bdev, fmode_t mode) return 0; } -static void rbd_release(struct gendisk *disk, fmode_t mode) +static void rbd_release(struct gendisk *disk) { struct rbd_device *rbd_dev = disk->private_data; unsigned long open_count_before; diff --git a/drivers/block/rnbd/Makefile b/drivers/block/rnbd/Makefile index 40b31630822c..208e5f865497 100644 --- a/drivers/block/rnbd/Makefile +++ b/drivers/block/rnbd/Makefile @@ -3,13 +3,11 @@ ccflags-y := -I$(srctree)/drivers/infiniband/ulp/rtrs rnbd-client-y := rnbd-clt.o \ - rnbd-clt-sysfs.o \ - rnbd-common.o + rnbd-clt-sysfs.o CFLAGS_rnbd-srv-trace.o = -I$(src) -rnbd-server-y := rnbd-common.o \ - rnbd-srv.o \ +rnbd-server-y := rnbd-srv.o \ rnbd-srv-sysfs.o \ rnbd-srv-trace.o diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c index 8c6087949794..c36d8b1ceeed 100644 --- a/drivers/block/rnbd/rnbd-clt-sysfs.c +++ b/drivers/block/rnbd/rnbd-clt-sysfs.c @@ -24,7 +24,9 @@ #include "rnbd-clt.h" static struct device *rnbd_dev; -static struct class *rnbd_dev_class; +static const struct class rnbd_dev_class = { + .name = "rnbd_client", +}; static struct kobject *rnbd_devs_kobj; enum { @@ -278,7 +280,7 @@ static ssize_t access_mode_show(struct kobject *kobj, dev = container_of(kobj, struct rnbd_clt_dev, kobj); - return sysfs_emit(page, "%s\n", rnbd_access_mode_str(dev->access_mode)); + return sysfs_emit(page, "%s\n", rnbd_access_modes[dev->access_mode].str); } static struct kobj_attribute rnbd_clt_access_mode = @@ -596,7 +598,7 @@ static ssize_t rnbd_clt_map_device_store(struct kobject *kobj, pr_info("Mapping device %s on session %s, (access_mode: %s, nr_poll_queues: %d)\n", pathname, sessname, - rnbd_access_mode_str(access_mode), + rnbd_access_modes[access_mode].str, nr_poll_queues); dev = rnbd_clt_map_device(sessname, paths, path_cnt, port_nr, pathname, @@ -646,11 +648,11 @@ int rnbd_clt_create_sysfs_files(void) { int err; - rnbd_dev_class = class_create("rnbd-client"); - if (IS_ERR(rnbd_dev_class)) - return PTR_ERR(rnbd_dev_class); + err = class_register(&rnbd_dev_class); + if (err) + return err; - rnbd_dev = 
device_create_with_groups(rnbd_dev_class, NULL, + rnbd_dev = device_create_with_groups(&rnbd_dev_class, NULL, MKDEV(0, 0), NULL, default_attr_groups, "ctl"); if (IS_ERR(rnbd_dev)) { @@ -666,9 +668,9 @@ int rnbd_clt_create_sysfs_files(void) return 0; dev_destroy: - device_destroy(rnbd_dev_class, MKDEV(0, 0)); + device_destroy(&rnbd_dev_class, MKDEV(0, 0)); cls_destroy: - class_destroy(rnbd_dev_class); + class_unregister(&rnbd_dev_class); return err; } @@ -678,6 +680,6 @@ void rnbd_clt_destroy_sysfs_files(void) sysfs_remove_group(&rnbd_dev->kobj, &default_attr_group); kobject_del(rnbd_devs_kobj); kobject_put(rnbd_devs_kobj); - device_destroy(rnbd_dev_class, MKDEV(0, 0)); - class_destroy(rnbd_dev_class); + device_destroy(&rnbd_dev_class, MKDEV(0, 0)); + class_unregister(&rnbd_dev_class); } diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index 5eb8c7855970..b0550b68645d 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -921,11 +921,11 @@ rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first) return sess; } -static int rnbd_client_open(struct block_device *block_device, fmode_t mode) +static int rnbd_client_open(struct gendisk *disk, blk_mode_t mode) { - struct rnbd_clt_dev *dev = block_device->bd_disk->private_data; + struct rnbd_clt_dev *dev = disk->private_data; - if (get_disk_ro(dev->gd) && (mode & FMODE_WRITE)) + if (get_disk_ro(dev->gd) && (mode & BLK_OPEN_WRITE)) return -EPERM; if (dev->dev_state == DEV_STATE_UNMAPPED || @@ -935,7 +935,7 @@ static int rnbd_client_open(struct block_device *block_device, fmode_t mode) return 0; } -static void rnbd_client_release(struct gendisk *gen, fmode_t mode) +static void rnbd_client_release(struct gendisk *gen) { struct rnbd_clt_dev *dev = gen->private_data; diff --git a/drivers/block/rnbd/rnbd-common.c b/drivers/block/rnbd/rnbd-common.c deleted file mode 100644 index 596c3f732403..000000000000 --- a/drivers/block/rnbd/rnbd-common.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * RDMA Network Block Driver - * - * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved. - * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved. - * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved. 
- */ -#include "rnbd-proto.h" - -const char *rnbd_access_mode_str(enum rnbd_access_mode mode) -{ - switch (mode) { - case RNBD_ACCESS_RO: - return "ro"; - case RNBD_ACCESS_RW: - return "rw"; - case RNBD_ACCESS_MIGRATION: - return "migration"; - default: - return "unknown"; - } -} diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h index da1d0542d7e2..e32f8f2c868a 100644 --- a/drivers/block/rnbd/rnbd-proto.h +++ b/drivers/block/rnbd/rnbd-proto.h @@ -61,6 +61,15 @@ enum rnbd_access_mode { RNBD_ACCESS_MIGRATION, }; +static const __maybe_unused struct { + enum rnbd_access_mode mode; + const char *str; +} rnbd_access_modes[] = { + [RNBD_ACCESS_RO] = {RNBD_ACCESS_RO, "ro"}, + [RNBD_ACCESS_RW] = {RNBD_ACCESS_RW, "rw"}, + [RNBD_ACCESS_MIGRATION] = {RNBD_ACCESS_MIGRATION, "migration"}, +}; + /** * struct rnbd_msg_sess_info - initial session info from client to server * @hdr: message header @@ -185,7 +194,6 @@ struct rnbd_msg_io { enum rnbd_io_flags { /* Operations */ - RNBD_OP_READ = 0, RNBD_OP_WRITE = 1, RNBD_OP_FLUSH = 2, @@ -193,15 +201,9 @@ enum rnbd_io_flags { RNBD_OP_SECURE_ERASE = 4, RNBD_OP_WRITE_SAME = 5, - RNBD_OP_LAST, - /* Flags */ - RNBD_F_SYNC = 1<<(RNBD_OP_BITS + 0), RNBD_F_FUA = 1<<(RNBD_OP_BITS + 1), - - RNBD_F_ALL = (RNBD_F_SYNC | RNBD_F_FUA) - }; static inline u32 rnbd_op(u32 flags) @@ -214,21 +216,6 @@ static inline u32 rnbd_flags(u32 flags) return flags & ~RNBD_OP_MASK; } -static inline bool rnbd_flags_supported(u32 flags) -{ - u32 op; - - op = rnbd_op(flags); - flags = rnbd_flags(flags); - - if (op >= RNBD_OP_LAST) - return false; - if (flags & ~RNBD_F_ALL) - return false; - - return true; -} - static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf) { blk_opf_t bio_opf; diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c index d5d9267e1fa5..cba6ba43c2c2 100644 --- a/drivers/block/rnbd/rnbd-srv-sysfs.c +++ b/drivers/block/rnbd/rnbd-srv-sysfs.c @@ -9,7 +9,6 @@ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt -#include <uapi/linux/limits.h> #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/stat.h> @@ -20,7 +19,9 @@ #include "rnbd-srv.h" static struct device *rnbd_dev; -static struct class *rnbd_dev_class; +static const struct class rnbd_dev_class = { + .name = "rnbd-server", +}; static struct kobject *rnbd_devs_kobj; static void rnbd_srv_dev_release(struct kobject *kobj) @@ -88,8 +89,7 @@ static ssize_t read_only_show(struct kobject *kobj, struct kobj_attribute *attr, sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj); - return sysfs_emit(page, "%d\n", - !(sess_dev->open_flags & FMODE_WRITE)); + return sysfs_emit(page, "%d\n", sess_dev->readonly); } static struct kobj_attribute rnbd_srv_dev_session_ro_attr = @@ -104,7 +104,7 @@ static ssize_t access_mode_show(struct kobject *kobj, sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj); return sysfs_emit(page, "%s\n", - rnbd_access_mode_str(sess_dev->access_mode)); + rnbd_access_modes[sess_dev->access_mode].str); } static struct kobj_attribute rnbd_srv_dev_session_access_mode_attr = @@ -215,12 +215,12 @@ int rnbd_srv_create_sysfs_files(void) { int err; - rnbd_dev_class = class_create("rnbd-server"); - if (IS_ERR(rnbd_dev_class)) - return PTR_ERR(rnbd_dev_class); + err = class_register(&rnbd_dev_class); + if (err) + return err; - rnbd_dev = device_create(rnbd_dev_class, NULL, - MKDEV(0, 0), NULL, "ctl"); + rnbd_dev = device_create(&rnbd_dev_class, NULL, + MKDEV(0, 0), NULL, "ctl"); if 
(IS_ERR(rnbd_dev)) { err = PTR_ERR(rnbd_dev); goto cls_destroy; @@ -234,9 +234,9 @@ int rnbd_srv_create_sysfs_files(void) return 0; dev_destroy: - device_destroy(rnbd_dev_class, MKDEV(0, 0)); + device_destroy(&rnbd_dev_class, MKDEV(0, 0)); cls_destroy: - class_destroy(rnbd_dev_class); + class_unregister(&rnbd_dev_class); return err; } @@ -245,6 +245,6 @@ void rnbd_srv_destroy_sysfs_files(void) { kobject_del(rnbd_devs_kobj); kobject_put(rnbd_devs_kobj); - device_destroy(rnbd_dev_class, MKDEV(0, 0)); - class_destroy(rnbd_dev_class); + device_destroy(&rnbd_dev_class, MKDEV(0, 0)); + class_unregister(&rnbd_dev_class); } diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index 2cfed2e58d64..c186df0ec641 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -96,7 +96,7 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess) ret = kref_get_unless_zero(&sess_dev->kref); rcu_read_unlock(); - if (!sess_dev || !ret) + if (!ret) return ERR_PTR(-ENXIO); return sess_dev; @@ -180,7 +180,7 @@ static void destroy_device(struct kref *kref) WARN_ONCE(!list_empty(&dev->sess_dev_list), "Device %s is being destroyed but still in use!\n", - dev->id); + dev->name); spin_lock(&dev_lock); list_del(&dev->list); @@ -219,10 +219,10 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id) rnbd_put_sess_dev(sess_dev); wait_for_completion(&dc); /* wait for inflights to drop to zero */ - blkdev_put(sess_dev->bdev, sess_dev->open_flags); + blkdev_put(sess_dev->bdev, NULL); mutex_lock(&sess_dev->dev->lock); list_del(&sess_dev->dev_list); - if (sess_dev->open_flags & FMODE_WRITE) + if (!sess_dev->readonly) sess_dev->dev->open_write_cnt--; mutex_unlock(&sess_dev->dev->lock); @@ -356,7 +356,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, const void *msg, size_t len, void *data, size_t datalen); -static int process_msg_sess_info(struct rnbd_srv_session *srv_sess, +static void process_msg_sess_info(struct rnbd_srv_session *srv_sess, const void *msg, size_t len, void *data, size_t datalen); @@ -384,8 +384,7 @@ static int rnbd_srv_rdma_ev(void *priv, struct rtrs_srv_op *id, ret = process_msg_open(srv_sess, usr, usrlen, data, datalen); break; case RNBD_MSG_SESS_INFO: - ret = process_msg_sess_info(srv_sess, usr, usrlen, data, - datalen); + process_msg_sess_info(srv_sess, usr, usrlen, data, datalen); break; default: pr_warn("Received unexpected message type %d from session %s\n", @@ -431,7 +430,7 @@ static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(struct block_device *bdev) if (!dev) return ERR_PTR(-ENOMEM); - snprintf(dev->id, sizeof(dev->id), "%pg", bdev); + snprintf(dev->name, sizeof(dev->name), "%pg", bdev); kref_init(&dev->kref); INIT_LIST_HEAD(&dev->sess_dev_list); mutex_init(&dev->lock); @@ -446,7 +445,7 @@ rnbd_srv_find_or_add_srv_dev(struct rnbd_srv_dev *new_dev) spin_lock(&dev_lock); list_for_each_entry(dev, &dev_list, list) { - if (!strncmp(dev->id, new_dev->id, sizeof(dev->id))) { + if (!strncmp(dev->name, new_dev->name, sizeof(dev->name))) { if (!kref_get_unless_zero(&dev->kref)) /* * We lost the race, device is almost dead. 
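
The rnbd client and server sysfs hunks above convert from the heap-allocating class_create()/class_destroy() API to a statically defined struct class. A minimal sketch of the pattern, using illustrative names rather than the driver's own:

#include <linux/device/class.h>
#include <linux/module.h>

static const struct class example_class = {
	.name = "example",	/* creates /sys/class/example */
};

static int __init example_init(void)
{
	/* no allocation: error paths only ever need class_unregister() */
	return class_register(&example_class);
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}

module_init(example_init);
module_exit(example_exit);

Because the class object is const and statically allocated, device_create() and device_destroy() simply take its address, which is exactly the substitution the converted rnbd code makes.
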
@@ -467,39 +466,38 @@ static int rnbd_srv_check_update_open_perm(struct rnbd_srv_dev *srv_dev, struct rnbd_srv_session *srv_sess, enum rnbd_access_mode access_mode) { - int ret = -EPERM; + int ret = 0; mutex_lock(&srv_dev->lock); switch (access_mode) { case RNBD_ACCESS_RO: - ret = 0; break; case RNBD_ACCESS_RW: if (srv_dev->open_write_cnt == 0) { srv_dev->open_write_cnt++; - ret = 0; } else { pr_err("Mapping device '%s' for session %s with RW permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n", - srv_dev->id, srv_sess->sessname, + srv_dev->name, srv_sess->sessname, srv_dev->open_write_cnt, - rnbd_access_mode_str(access_mode)); + rnbd_access_modes[access_mode].str); + ret = -EPERM; } break; case RNBD_ACCESS_MIGRATION: if (srv_dev->open_write_cnt < 2) { srv_dev->open_write_cnt++; - ret = 0; } else { pr_err("Mapping device '%s' for session %s with migration permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n", - srv_dev->id, srv_sess->sessname, + srv_dev->name, srv_sess->sessname, srv_dev->open_write_cnt, - rnbd_access_mode_str(access_mode)); + rnbd_access_modes[access_mode].str); + ret = -EPERM; } break; default: pr_err("Received mapping request for device '%s' on session %s with invalid access mode: %d\n", - srv_dev->id, srv_sess->sessname, access_mode); + srv_dev->name, srv_sess->sessname, access_mode); ret = -EINVAL; } @@ -561,7 +559,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp, static struct rnbd_srv_sess_dev * rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess, const struct rnbd_msg_open *open_msg, - struct block_device *bdev, fmode_t open_flags, + struct block_device *bdev, bool readonly, struct rnbd_srv_dev *srv_dev) { struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess); @@ -576,7 +574,7 @@ rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess, sdev->bdev = bdev; sdev->sess = srv_sess; sdev->dev = srv_dev; - sdev->open_flags = open_flags; + sdev->readonly = readonly; sdev->access_mode = open_msg->access_mode; return sdev; @@ -631,7 +629,7 @@ static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess, return full_path; } -static int process_msg_sess_info(struct rnbd_srv_session *srv_sess, +static void process_msg_sess_info(struct rnbd_srv_session *srv_sess, const void *msg, size_t len, void *data, size_t datalen) { @@ -644,8 +642,6 @@ static int process_msg_sess_info(struct rnbd_srv_session *srv_sess, rsp->hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO_RSP); rsp->ver = srv_sess->ver; - - return 0; } /** @@ -681,15 +677,14 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, struct rnbd_srv_sess_dev *srv_sess_dev; const struct rnbd_msg_open *open_msg = msg; struct block_device *bdev; - fmode_t open_flags; + blk_mode_t open_flags = BLK_OPEN_READ; char *full_path; struct rnbd_msg_open_rsp *rsp = data; trace_process_msg_open(srv_sess, open_msg); - open_flags = FMODE_READ; if (open_msg->access_mode != RNBD_ACCESS_RO) - open_flags |= FMODE_WRITE; + open_flags |= BLK_OPEN_WRITE; mutex_lock(&srv_sess->lock); @@ -719,7 +714,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, goto reject; } - bdev = blkdev_get_by_path(full_path, open_flags, THIS_MODULE); + bdev = blkdev_get_by_path(full_path, open_flags, NULL, NULL); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %d\n", @@ -736,9 +731,9 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, 
goto blkdev_put; } - srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg, - bdev, open_flags, - srv_dev); + srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg, bdev, + open_msg->access_mode == RNBD_ACCESS_RO, + srv_dev); if (IS_ERR(srv_sess_dev)) { pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %ld\n", full_path, srv_sess->sessname, PTR_ERR(srv_sess_dev)); @@ -774,7 +769,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, list_add(&srv_sess_dev->dev_list, &srv_dev->sess_dev_list); mutex_unlock(&srv_dev->lock); - rnbd_srv_info(srv_sess_dev, "Opened device '%s'\n", srv_dev->id); + rnbd_srv_info(srv_sess_dev, "Opened device '%s'\n", srv_dev->name); kfree(full_path); @@ -795,7 +790,7 @@ srv_dev_put: } rnbd_put_srv_dev(srv_dev); blkdev_put: - blkdev_put(bdev, open_flags); + blkdev_put(bdev, NULL); free_path: kfree(full_path); reject: @@ -808,7 +803,7 @@ static struct rtrs_srv_ctx *rtrs_ctx; static struct rtrs_srv_ops rtrs_ops; static int __init rnbd_srv_init_module(void) { - int err; + int err = 0; BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4); BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36); @@ -822,19 +817,17 @@ static int __init rnbd_srv_init_module(void) }; rtrs_ctx = rtrs_srv_open(&rtrs_ops, port_nr); if (IS_ERR(rtrs_ctx)) { - err = PTR_ERR(rtrs_ctx); pr_err("rtrs_srv_open(), err: %d\n", err); - return err; + return PTR_ERR(rtrs_ctx); } err = rnbd_srv_create_sysfs_files(); if (err) { pr_err("rnbd_srv_create_sysfs_files(), err: %d\n", err); rtrs_srv_close(rtrs_ctx); - return err; } - return 0; + return err; } static void __exit rnbd_srv_cleanup_module(void) diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h index f5962fd31d62..1027656dedb0 100644 --- a/drivers/block/rnbd/rnbd-srv.h +++ b/drivers/block/rnbd/rnbd-srv.h @@ -35,7 +35,7 @@ struct rnbd_srv_dev { struct kobject dev_kobj; struct kobject *dev_sessions_kobj; struct kref kref; - char id[NAME_MAX]; + char name[NAME_MAX]; /* List of rnbd_srv_sess_dev structs */ struct list_head sess_dev_list; struct mutex lock; @@ -52,7 +52,7 @@ struct rnbd_srv_sess_dev { struct kobject kobj; u32 device_id; bool keep_id; - fmode_t open_flags; + bool readonly; struct kref kref; struct completion *destroy_comp; char pathname[NAME_MAX]; diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 9fa821fa76b0..7bf4b48e2282 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -139,7 +139,7 @@ static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo) * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD. * Needed to be able to install inside an ldom from an iso image. 
*/ -static int vdc_ioctl(struct block_device *bdev, fmode_t mode, +static int vdc_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned command, unsigned long argument) { struct vdc_port *port = bdev->bd_disk->private_data; diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 42b4b6828690..f85b6af414b4 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -608,20 +608,18 @@ static void setup_medium(struct floppy_state *fs) } } -static int floppy_open(struct block_device *bdev, fmode_t mode) +static int floppy_open(struct gendisk *disk, blk_mode_t mode) { - struct floppy_state *fs = bdev->bd_disk->private_data; + struct floppy_state *fs = disk->private_data; struct swim __iomem *base = fs->swd->base; int err; - if (fs->ref_count == -1 || (fs->ref_count && mode & FMODE_EXCL)) + if (fs->ref_count == -1 || (fs->ref_count && mode & BLK_OPEN_EXCL)) return -EBUSY; - - if (mode & FMODE_EXCL) + if (mode & BLK_OPEN_EXCL) fs->ref_count = -1; else fs->ref_count++; - swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2); udelay(10); swim_drive(base, fs->location); @@ -636,13 +634,13 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) set_capacity(fs->disk, fs->total_secs); - if (mode & FMODE_NDELAY) + if (mode & BLK_OPEN_NDELAY) return 0; - if (mode & (FMODE_READ|FMODE_WRITE)) { - if (bdev_check_media_change(bdev) && fs->disk_in) + if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) { + if (disk_check_media_change(disk) && fs->disk_in) fs->ejected = 0; - if ((mode & FMODE_WRITE) && fs->write_protected) { + if ((mode & BLK_OPEN_WRITE) && fs->write_protected) { err = -EROFS; goto out; } @@ -659,18 +657,18 @@ out: return err; } -static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) +static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode) { int ret; mutex_lock(&swim_mutex); - ret = floppy_open(bdev, mode); + ret = floppy_open(disk, mode); mutex_unlock(&swim_mutex); return ret; } -static void floppy_release(struct gendisk *disk, fmode_t mode) +static void floppy_release(struct gendisk *disk) { struct floppy_state *fs = disk->private_data; struct swim __iomem *base = fs->swd->base; @@ -686,7 +684,7 @@ static void floppy_release(struct gendisk *disk, fmode_t mode) mutex_unlock(&swim_mutex); } -static int floppy_ioctl(struct block_device *bdev, fmode_t mode, +static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { struct floppy_state *fs = bdev->bd_disk->private_data; diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index da811a7da03f..dc43a63b3469 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -246,10 +246,9 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state, int interruptible); static void release_drive(struct floppy_state *fs); static int fd_eject(struct floppy_state *fs); -static int floppy_ioctl(struct block_device *bdev, fmode_t mode, +static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param); -static int floppy_open(struct block_device *bdev, fmode_t mode); -static void floppy_release(struct gendisk *disk, fmode_t mode); +static int floppy_open(struct gendisk *disk, blk_mode_t mode); static unsigned int floppy_check_events(struct gendisk *disk, unsigned int clearing); static int floppy_revalidate(struct gendisk *disk); @@ -883,7 +882,7 @@ static int fd_eject(struct floppy_state *fs) static struct floppy_struct floppy_type = { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */ 
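
The sunvdc, swim and swim3 hunks in this region are instances of the tree-wide block_device_operations signature change: ->open() now receives the gendisk and a blk_mode_t (BLK_OPEN_READ/WRITE/EXCL/NDELAY) instead of a block_device and fmode_t, and ->release() loses its mode argument. A sketch of the new prototypes, built around a hypothetical driver type:

#include <linux/blkdev.h>
#include <linux/module.h>

struct example_dev {
	bool read_only;		/* hypothetical per-device state */
};

static int example_open(struct gendisk *disk, blk_mode_t mode)
{
	struct example_dev *dev = disk->private_data;

	if ((mode & BLK_OPEN_WRITE) && dev->read_only)
		return -EROFS;
	return 0;
}

static void example_release(struct gendisk *disk)
{
	/* no mode argument: per-open mode must be remembered at open time */
}

static const struct block_device_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.release = example_release,
};

Media-change checks move with the signature: bdev_check_media_change(bdev) becomes disk_check_media_change(disk), as seen in the floppy_open() conversions above.
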
-static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode, +static int floppy_locked_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { struct floppy_state *fs = bdev->bd_disk->private_data; @@ -911,7 +910,7 @@ static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode, return -ENOTTY; } -static int floppy_ioctl(struct block_device *bdev, fmode_t mode, +static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { int ret; @@ -923,9 +922,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode, return ret; } -static int floppy_open(struct block_device *bdev, fmode_t mode) +static int floppy_open(struct gendisk *disk, blk_mode_t mode) { - struct floppy_state *fs = bdev->bd_disk->private_data; + struct floppy_state *fs = disk->private_data; struct swim3 __iomem *sw = fs->swim3; int n, err = 0; @@ -958,18 +957,18 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) swim3_action(fs, SETMFM); swim3_select(fs, RELAX); - } else if (fs->ref_count == -1 || mode & FMODE_EXCL) + } else if (fs->ref_count == -1 || mode & BLK_OPEN_EXCL) return -EBUSY; - if (err == 0 && (mode & FMODE_NDELAY) == 0 - && (mode & (FMODE_READ|FMODE_WRITE))) { - if (bdev_check_media_change(bdev)) - floppy_revalidate(bdev->bd_disk); + if (err == 0 && !(mode & BLK_OPEN_NDELAY) && + (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE))) { + if (disk_check_media_change(disk)) + floppy_revalidate(disk); if (fs->ejected) err = -ENXIO; } - if (err == 0 && (mode & FMODE_WRITE)) { + if (err == 0 && (mode & BLK_OPEN_WRITE)) { if (fs->write_prot < 0) fs->write_prot = swim3_readbit(fs, WRITE_PROT); if (fs->write_prot) @@ -985,7 +984,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) return err; } - if (mode & FMODE_EXCL) + if (mode & BLK_OPEN_EXCL) fs->ref_count = -1; else ++fs->ref_count; @@ -993,18 +992,18 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) return 0; } -static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) +static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode) { int ret; mutex_lock(&swim3_mutex); - ret = floppy_open(bdev, mode); + ret = floppy_open(disk, mode); mutex_unlock(&swim3_mutex); return ret; } -static void floppy_release(struct gendisk *disk, fmode_t mode) +static void floppy_release(struct gendisk *disk) { struct floppy_state *fs = disk->private_data; struct swim3 __iomem *sw = fs->swim3; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 33d3298a0da1..1c823750c95a 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -43,6 +43,7 @@ #include <asm/page.h> #include <linux/task_work.h> #include <linux/namei.h> +#include <linux/kref.h> #include <uapi/linux/ublk_cmd.h> #define UBLK_MINORS (1U << MINORBITS) @@ -54,7 +55,8 @@ | UBLK_F_USER_RECOVERY \ | UBLK_F_USER_RECOVERY_REISSUE \ | UBLK_F_UNPRIVILEGED_DEV \ - | UBLK_F_CMD_IOCTL_ENCODE) + | UBLK_F_CMD_IOCTL_ENCODE \ + | UBLK_F_USER_COPY) /* All UBLK_PARAM_TYPE_* should be included here */ #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | \ @@ -62,7 +64,8 @@ struct ublk_rq_data { struct llist_node node; - struct callback_head work; + + struct kref ref; }; struct ublk_uring_cmd_pdu { @@ -182,8 +185,13 @@ struct ublk_params_header { __u32 types; }; +static inline void __ublk_complete_rq(struct request *req); +static void ublk_complete_rq(struct kref *ref); + static dev_t ublk_chr_devt; -static struct class *ublk_chr_class; +static 
const struct class ublk_chr_class = { + .name = "ublk-char", +}; static DEFINE_IDR(ublk_index_idr); static DEFINE_SPINLOCK(ublk_idr_lock); @@ -202,6 +210,23 @@ static unsigned int ublks_added; /* protected by ublk_ctl_mutex */ static struct miscdevice ublk_misc; +static inline unsigned ublk_pos_to_hwq(loff_t pos) +{ + return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) & + UBLK_QID_BITS_MASK; +} + +static inline unsigned ublk_pos_to_buf_off(loff_t pos) +{ + return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK; +} + +static inline unsigned ublk_pos_to_tag(loff_t pos) +{ + return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) & + UBLK_TAG_BITS_MASK; +} + static void ublk_dev_param_basic_apply(struct ublk_device *ub) { struct request_queue *q = ub->ub_disk->queue; @@ -290,12 +315,52 @@ static int ublk_apply_params(struct ublk_device *ub) return 0; } -static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq) +static inline bool ublk_support_user_copy(const struct ublk_queue *ubq) { - if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) && - !(ubq->flags & UBLK_F_URING_CMD_COMP_IN_TASK)) - return true; - return false; + return ubq->flags & UBLK_F_USER_COPY; +} + +static inline bool ublk_need_req_ref(const struct ublk_queue *ubq) +{ + /* + * read()/write() is involved in user copy, so request reference + * has to be grabbed + */ + return ublk_support_user_copy(ubq); +} + +static inline void ublk_init_req_ref(const struct ublk_queue *ubq, + struct request *req) +{ + if (ublk_need_req_ref(ubq)) { + struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + + kref_init(&data->ref); + } +} + +static inline bool ublk_get_req_ref(const struct ublk_queue *ubq, + struct request *req) +{ + if (ublk_need_req_ref(ubq)) { + struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + + return kref_get_unless_zero(&data->ref); + } + + return true; +} + +static inline void ublk_put_req_ref(const struct ublk_queue *ubq, + struct request *req) +{ + if (ublk_need_req_ref(ubq)) { + struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + + kref_put(&data->ref, ublk_complete_rq); + } else { + __ublk_complete_rq(req); + } } static inline bool ublk_need_get_data(const struct ublk_queue *ubq) @@ -384,9 +449,9 @@ static void ublk_store_owner_uid_gid(unsigned int *owner_uid, *owner_gid = from_kgid(&init_user_ns, gid); } -static int ublk_open(struct block_device *bdev, fmode_t mode) +static int ublk_open(struct gendisk *disk, blk_mode_t mode) { - struct ublk_device *ub = bdev->bd_disk->private_data; + struct ublk_device *ub = disk->private_data; if (capable(CAP_SYS_ADMIN)) return 0; @@ -421,49 +486,39 @@ static const struct block_device_operations ub_fops = { #define UBLK_MAX_PIN_PAGES 32 -struct ublk_map_data { - const struct request *rq; - unsigned long ubuf; - unsigned int len; -}; - struct ublk_io_iter { struct page *pages[UBLK_MAX_PIN_PAGES]; - unsigned pg_off; /* offset in the 1st page in pages */ - int nr_pages; /* how many page pointers in pages */ struct bio *bio; struct bvec_iter iter; }; -static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data, - unsigned max_bytes, bool to_vm) +/* return how many pages are copied */ +static void ublk_copy_io_pages(struct ublk_io_iter *data, + size_t total, size_t pg_off, int dir) { - const unsigned total = min_t(unsigned, max_bytes, - PAGE_SIZE - data->pg_off + - ((data->nr_pages - 1) << PAGE_SHIFT)); unsigned done = 0; unsigned pg_idx = 0; while (done < total) { struct bio_vec bv = bio_iter_iovec(data->bio, data->iter); - const unsigned int bytes = min3(bv.bv_len, total 
- done, - (unsigned)(PAGE_SIZE - data->pg_off)); + unsigned int bytes = min3(bv.bv_len, (unsigned)total - done, + (unsigned)(PAGE_SIZE - pg_off)); void *bv_buf = bvec_kmap_local(&bv); void *pg_buf = kmap_local_page(data->pages[pg_idx]); - if (to_vm) - memcpy(pg_buf + data->pg_off, bv_buf, bytes); + if (dir == ITER_DEST) + memcpy(pg_buf + pg_off, bv_buf, bytes); else - memcpy(bv_buf, pg_buf + data->pg_off, bytes); + memcpy(bv_buf, pg_buf + pg_off, bytes); kunmap_local(pg_buf); kunmap_local(bv_buf); /* advance page array */ - data->pg_off += bytes; - if (data->pg_off == PAGE_SIZE) { + pg_off += bytes; + if (pg_off == PAGE_SIZE) { pg_idx += 1; - data->pg_off = 0; + pg_off = 0; } done += bytes; @@ -477,41 +532,58 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data, data->iter = data->bio->bi_iter; } } +} - return done; +static bool ublk_advance_io_iter(const struct request *req, + struct ublk_io_iter *iter, unsigned int offset) +{ + struct bio *bio = req->bio; + + for_each_bio(bio) { + if (bio->bi_iter.bi_size > offset) { + iter->bio = bio; + iter->iter = bio->bi_iter; + bio_advance_iter(iter->bio, &iter->iter, offset); + return true; + } + offset -= bio->bi_iter.bi_size; + } + return false; } -static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm) +/* + * Copy data between request pages and io_iter, and 'offset' + * is the start point of linear offset of request. + */ +static size_t ublk_copy_user_pages(const struct request *req, + unsigned offset, struct iov_iter *uiter, int dir) { - const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0; - const unsigned long start_vm = data->ubuf; - unsigned int done = 0; - struct ublk_io_iter iter = { - .pg_off = start_vm & (PAGE_SIZE - 1), - .bio = data->rq->bio, - .iter = data->rq->bio->bi_iter, - }; - const unsigned int nr_pages = round_up(data->len + - (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT; - - while (done < nr_pages) { - const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES, - nr_pages - done); - unsigned i, len; - - iter.nr_pages = get_user_pages_fast(start_vm + - (done << PAGE_SHIFT), to_pin, gup_flags, - iter.pages); - if (iter.nr_pages <= 0) - return done == 0 ? 
iter.nr_pages : done; - len = ublk_copy_io_pages(&iter, data->len, to_vm); - for (i = 0; i < iter.nr_pages; i++) { - if (to_vm) + struct ublk_io_iter iter; + size_t done = 0; + + if (!ublk_advance_io_iter(req, &iter, offset)) + return 0; + + while (iov_iter_count(uiter) && iter.bio) { + unsigned nr_pages; + ssize_t len; + size_t off; + int i; + + len = iov_iter_get_pages2(uiter, iter.pages, + iov_iter_count(uiter), + UBLK_MAX_PIN_PAGES, &off); + if (len <= 0) + return done; + + ublk_copy_io_pages(&iter, len, off, dir); + nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE); + for (i = 0; i < nr_pages; i++) { + if (dir == ITER_DEST) set_page_dirty(iter.pages[i]); put_page(iter.pages[i]); } - data->len -= len; - done += iter.nr_pages; + done += len; } return done; @@ -532,21 +604,23 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req, { const unsigned int rq_bytes = blk_rq_bytes(req); + if (ublk_support_user_copy(ubq)) + return rq_bytes; + /* * no zero copy, we delay copy WRITE request data into ublksrv * context and the big benefit is that pinning pages in current * context is pretty fast, see ublk_pin_user_pages */ if (ublk_need_map_req(req)) { - struct ublk_map_data data = { - .rq = req, - .ubuf = io->addr, - .len = rq_bytes, - }; + struct iov_iter iter; + struct iovec iov; + const int dir = ITER_DEST; - ublk_copy_user_pages(&data, true); + import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes, + &iov, &iter); - return rq_bytes - data.len; + return ublk_copy_user_pages(req, 0, &iter, dir); } return rq_bytes; } @@ -557,18 +631,19 @@ static int ublk_unmap_io(const struct ublk_queue *ubq, { const unsigned int rq_bytes = blk_rq_bytes(req); + if (ublk_support_user_copy(ubq)) + return rq_bytes; + if (ublk_need_unmap_req(req)) { - struct ublk_map_data data = { - .rq = req, - .ubuf = io->addr, - .len = io->res, - }; + struct iov_iter iter; + struct iovec iov; + const int dir = ITER_SOURCE; WARN_ON_ONCE(io->res > rq_bytes); - ublk_copy_user_pages(&data, false); - - return io->res - data.len; + import_single_range(dir, u64_to_user_ptr(io->addr), io->res, + &iov, &iter); + return ublk_copy_user_pages(req, 0, &iter, dir); } return rq_bytes; } @@ -648,13 +723,19 @@ static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq) } /* todo: handle partial completion */ -static void ublk_complete_rq(struct request *req) +static inline void __ublk_complete_rq(struct request *req) { struct ublk_queue *ubq = req->mq_hctx->driver_data; struct ublk_io *io = &ubq->ios[req->tag]; unsigned int unmapped_bytes; blk_status_t res = BLK_STS_OK; + /* called from ublk_abort_queue() code path */ + if (io->flags & UBLK_IO_FLAG_ABORTED) { + res = BLK_STS_IOERR; + goto exit; + } + /* failed read IO if nothing is read */ if (!io->res && req_op(req) == REQ_OP_READ) io->res = -EIO; @@ -694,6 +775,15 @@ exit: blk_mq_end_request(req, res); } +static void ublk_complete_rq(struct kref *ref) +{ + struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data, + ref); + struct request *req = blk_mq_rq_from_pdu(data); + + __ublk_complete_rq(req); +} + /* * Since __ublk_rq_task_work always fails requests immediately during * exiting, __ublk_fail_req() is only called from abort context during @@ -712,7 +802,7 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io, if (ublk_queue_can_use_recovery_reissue(ubq)) blk_mq_requeue_request(req, false); else - blk_mq_end_request(req, BLK_STS_IOERR); + ublk_put_req_ref(ubq, req); } } @@ -821,6 +911,7 @@ static inline void 
@@ -821,6 +911,7 @@ static inline void __ublk_rq_task_work(struct request *req, mapped_bytes >> 9; } + ublk_init_req_ref(ubq, req); ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags); } @@ -852,17 +943,6 @@ static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags) ublk_forward_io_cmds(ubq, issue_flags); } -static void ublk_rq_task_work_fn(struct callback_head *work) -{ - struct ublk_rq_data *data = container_of(work, - struct ublk_rq_data, work); - struct request *req = blk_mq_rq_from_pdu(data); - struct ublk_queue *ubq = req->mq_hctx->driver_data; - unsigned issue_flags = IO_URING_F_UNLOCKED; - - ublk_forward_io_cmds(ubq, issue_flags); -} - static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq) { struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq); @@ -886,10 +966,6 @@ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq) */ if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) { ublk_abort_io_cmds(ubq); - } else if (ublk_can_use_task_work(ubq)) { - if (task_work_add(ubq->ubq_daemon, &data->work, - TWA_SIGNAL_NO_IPI)) - ublk_abort_io_cmds(ubq); } else { struct io_uring_cmd *cmd = io->cmd; struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); @@ -961,19 +1037,9 @@ static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data, return 0; } -static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req, - unsigned int hctx_idx, unsigned int numa_node) -{ - struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); - - init_task_work(&data->work, ublk_rq_task_work_fn); - return 0; -} - static const struct blk_mq_ops ublk_mq_ops = { .queue_rq = ublk_queue_rq, .init_hctx = ublk_init_hctx, - .init_request = ublk_init_rq, .timeout = ublk_timeout, }; @@ -1050,7 +1116,7 @@ static void ublk_commit_completion(struct ublk_device *ub, req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag); if (req && likely(!blk_should_fake_timeout(req->q))) - ublk_complete_rq(req); + ublk_put_req_ref(ubq, req); } /* @@ -1295,6 +1361,14 @@ static inline int ublk_check_cmd_op(u32 cmd_op) return 0; } +static inline void ublk_fill_io_cmd(struct ublk_io *io, + struct io_uring_cmd *cmd, unsigned long buf_addr) +{ + io->cmd = cmd; + io->flags |= UBLK_IO_FLAG_ACTIVE; + io->addr = buf_addr; +} + static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags, const struct ublksrv_io_cmd *ub_cmd) @@ -1340,6 +1414,11 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA)) goto out; + if (ublk_support_user_copy(ubq) && ub_cmd->addr) { + ret = -EINVAL; + goto out; + } + ret = ublk_check_cmd_op(cmd_op); if (ret) goto out; @@ -1358,36 +1437,41 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, */ if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) goto out; - /* FETCH_RQ has to provide IO buffer if NEED GET DATA is not enabled */ - if (!ub_cmd->addr && !ublk_need_get_data(ubq)) - goto out; - io->cmd = cmd; - io->flags |= UBLK_IO_FLAG_ACTIVE; - io->addr = ub_cmd->addr; + if (!ublk_support_user_copy(ubq)) { + /* + * FETCH_RQ has to provide IO buffer if NEED GET + * DATA is not enabled + */ + if (!ub_cmd->addr && !ublk_need_get_data(ubq)) + goto out; + } + + ublk_fill_io_cmd(io, cmd, ub_cmd->addr); ublk_mark_io_ready(ub, ubq); break; case UBLK_IO_COMMIT_AND_FETCH_REQ: req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag); - /* - * COMMIT_AND_FETCH_REQ has to provide IO buffer if NEED GET DATA is - * not enabled or it is Read IO.
- */ - if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ)) - goto out; + if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) goto out; - io->addr = ub_cmd->addr; - io->flags |= UBLK_IO_FLAG_ACTIVE; - io->cmd = cmd; + + if (!ublk_support_user_copy(ubq)) { + /* + * COMMIT_AND_FETCH_REQ has to provide IO buffer if + * NEED GET DATA is not enabled or it is Read IO. + */ + if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || + req_op(req) == REQ_OP_READ)) + goto out; + } + ublk_fill_io_cmd(io, cmd, ub_cmd->addr); ublk_commit_completion(ub, ub_cmd); break; case UBLK_IO_NEED_GET_DATA: if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) goto out; - io->addr = ub_cmd->addr; - io->cmd = cmd; - io->flags |= UBLK_IO_FLAG_ACTIVE; + ublk_fill_io_cmd(io, cmd, ub_cmd->addr); ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag); break; default: @@ -1402,6 +1486,36 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, return -EIOCBQUEUED; } +static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, + struct ublk_queue *ubq, int tag, size_t offset) +{ + struct request *req; + + if (!ublk_need_req_ref(ubq)) + return NULL; + + req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag); + if (!req) + return NULL; + + if (!ublk_get_req_ref(ubq, req)) + return NULL; + + if (unlikely(!blk_mq_request_started(req) || req->tag != tag)) + goto fail_put; + + if (!ublk_rq_has_data(req)) + goto fail_put; + + if (offset > blk_rq_bytes(req)) + goto fail_put; + + return req; +fail_put: + ublk_put_req_ref(ubq, req); + return NULL; +} + static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) { /* @@ -1419,11 +1533,112 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd); } +static inline bool ublk_check_ubuf_dir(const struct request *req, + int ubuf_dir) +{ + /* copy ubuf to request pages */ + if (req_op(req) == REQ_OP_READ && ubuf_dir == ITER_SOURCE) + return true; + + /* copy request pages to ubuf */ + if (req_op(req) == REQ_OP_WRITE && ubuf_dir == ITER_DEST) + return true; + + return false; +} + +static struct request *ublk_check_and_get_req(struct kiocb *iocb, + struct iov_iter *iter, size_t *off, int dir) +{ + struct ublk_device *ub = iocb->ki_filp->private_data; + struct ublk_queue *ubq; + struct request *req; + size_t buf_off; + u16 tag, q_id; + + if (!ub) + return ERR_PTR(-EACCES); + + if (!user_backed_iter(iter)) + return ERR_PTR(-EACCES); + + if (ub->dev_info.state == UBLK_S_DEV_DEAD) + return ERR_PTR(-EACCES); + + tag = ublk_pos_to_tag(iocb->ki_pos); + q_id = ublk_pos_to_hwq(iocb->ki_pos); + buf_off = ublk_pos_to_buf_off(iocb->ki_pos); + + if (q_id >= ub->dev_info.nr_hw_queues) + return ERR_PTR(-EINVAL); + + ubq = ublk_get_queue(ub, q_id); + if (!ubq) + return ERR_PTR(-EINVAL); + + if (tag >= ubq->q_depth) + return ERR_PTR(-EINVAL); + + req = __ublk_check_and_get_req(ub, ubq, tag, buf_off); + if (!req) + return ERR_PTR(-EINVAL); + + if (!req->mq_hctx || !req->mq_hctx->driver_data) + goto fail; + + if (!ublk_check_ubuf_dir(req, dir)) + goto fail; + + *off = buf_off; + return req; +fail: + ublk_put_req_ref(ubq, req); + return ERR_PTR(-EACCES); +} + +static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to) +{ + struct ublk_queue *ubq; + struct request *req; + size_t buf_off; + size_t ret; + + req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST); 
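/*
 * How the pieces above fit together (a sketch inferred from this
 * patch, not text from it): with UBLK_F_USER_COPY the server stops
 * passing a buffer address in the FETCH/COMMIT commands (ub_cmd->addr
 * must be 0) and instead moves the payload with plain pread()/pwrite()
 * on /dev/ublkcN.  The file position encodes (queue id, tag, offset
 * within the request); ublk_pos_to_hwq()/ublk_pos_to_tag()/
 * ublk_pos_to_buf_off() above decode it, with UBLKSRV_IO_BUF_OFFSET as
 * the base of that window.  Roughly, from the daemon side, where
 * ublk_io_pos() is a stand-in for the UAPI bit packing:
 *
 *	off_t pos = ublk_io_pos(q_id, tag, 0);
 *	pwrite(cdev_fd, buf, len, pos);	// fill a READ request's pages
 *	pread(cdev_fd, buf, len, pos);	// drain a WRITE request's pages
 *
 * The kref added to struct ublk_rq_data keeps the request alive while
 * such a copy races with completion: ublk_check_and_get_req() takes a
 * reference, and the final ublk_put_req_ref() ends the request through
 * ublk_complete_rq().
 */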
+ ubq = req->mq_hctx->driver_data; + ublk_put_req_ref(ubq, req); + + return ret; +} + +static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from) +{ + struct ublk_queue *ubq; + struct request *req; + size_t buf_off; + size_t ret; + + req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE); + ubq = req->mq_hctx->driver_data; + ublk_put_req_ref(ubq, req); + + return ret; +} + static const struct file_operations ublk_ch_fops = { .owner = THIS_MODULE, .open = ublk_ch_open, .release = ublk_ch_release, .llseek = no_llseek, + .read_iter = ublk_ch_read_iter, + .write_iter = ublk_ch_write_iter, .uring_cmd = ublk_ch_uring_cmd, .mmap = ublk_ch_mmap, }; @@ -1547,7 +1762,7 @@ static int ublk_add_chdev(struct ublk_device *ub) dev->parent = ublk_misc.this_device; dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor); - dev->class = ublk_chr_class; + dev->class = &ublk_chr_class; dev->release = ublk_cdev_rel; device_initialize(dev); @@ -1818,10 +2033,12 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) */ ub->dev_info.flags &= UBLK_F_ALL; - if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK)) - ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK; + ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE | + UBLK_F_URING_CMD_COMP_IN_TASK; - ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE; + /* GET_DATA isn't needed any more with USER_COPY */ + if (ub->dev_info.flags & UBLK_F_USER_COPY) + ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA; /* We are not ready to support zero copy */ ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY; @@ -2133,6 +2350,21 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub, return ret; } +static int ublk_ctrl_get_features(struct io_uring_cmd *cmd) +{ + const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); + void __user *argp = (void __user *)(unsigned long)header->addr; + u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY; + + if (header->len != UBLK_FEATURES_LEN || !header->addr) + return -EINVAL; + + if (copy_to_user(argp, &features, UBLK_FEATURES_LEN)) + return -EFAULT; + + return 0; +} + /* * All control commands are sent via /dev/ublk-control, so we have to check * the destination device's permission @@ -2213,6 +2445,7 @@ static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub, case UBLK_CMD_GET_DEV_INFO2: case UBLK_CMD_GET_QUEUE_AFFINITY: case UBLK_CMD_GET_PARAMS: + case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)): mask = MAY_READ; break; case UBLK_CMD_START_DEV: @@ -2262,6 +2495,11 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, if (ret) goto out; + if (cmd_op == UBLK_U_CMD_GET_FEATURES) { + ret = ublk_ctrl_get_features(cmd); + goto out; + } + if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) { ret = -ENODEV; ub = ublk_get_device_from_id(header->dev_id); @@ -2337,6 +2575,9 @@ static int __init ublk_init(void) { int ret; + BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET + + UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET); + init_waitqueue_head(&ublk_idr_wq); ret = misc_register(&ublk_misc); @@ -2347,11 +2588,10 @@ static int __init ublk_init(void) if (ret) goto unregister_mis; - ublk_chr_class = class_create("ublk-char"); - if (IS_ERR(ublk_chr_class)) { - ret = PTR_ERR(ublk_chr_class); + ret = class_register(&ublk_chr_class); + if (ret) goto free_chrdev_region; - } + return 0; free_chrdev_region: @@ -2369,7 +2609,7 @@ static void __exit ublk_exit(void) idr_for_each_entry(&ublk_index_idr, ub, id) ublk_remove(ub); - class_destroy(ublk_chr_class); + 
class_unregister(&ublk_chr_class); misc_deregister(&ublk_misc); idr_destroy(&ublk_index_idr); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 2b918e28acaa..b47358da92a2 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -348,63 +348,33 @@ static inline void virtblk_request_done(struct request *req) blk_mq_end_request(req, status); } -static void virtblk_complete_batch(struct io_comp_batch *iob) -{ - struct request *req; - - rq_list_for_each(&iob->req_list, req) { - virtblk_unmap_data(req, blk_mq_rq_to_pdu(req)); - virtblk_cleanup_cmd(req); - } - blk_mq_end_request_batch(iob); -} - -static int virtblk_handle_req(struct virtio_blk_vq *vq, - struct io_comp_batch *iob) -{ - struct virtblk_req *vbr; - int req_done = 0; - unsigned int len; - - while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) { - struct request *req = blk_mq_rq_from_pdu(vbr); - - if (likely(!blk_should_fake_timeout(req->q)) && - !blk_mq_complete_request_remote(req) && - !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr), - virtblk_complete_batch)) - virtblk_request_done(req); - req_done++; - } - - return req_done; -} - static void virtblk_done(struct virtqueue *vq) { struct virtio_blk *vblk = vq->vdev->priv; - struct virtio_blk_vq *vblk_vq = &vblk->vqs[vq->index]; - int req_done = 0; + bool req_done = false; + int qid = vq->index; + struct virtblk_req *vbr; unsigned long flags; - DEFINE_IO_COMP_BATCH(iob); + unsigned int len; - spin_lock_irqsave(&vblk_vq->lock, flags); + spin_lock_irqsave(&vblk->vqs[qid].lock, flags); do { virtqueue_disable_cb(vq); - req_done += virtblk_handle_req(vblk_vq, &iob); + while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { + struct request *req = blk_mq_rq_from_pdu(vbr); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); + req_done = true; + } if (unlikely(virtqueue_is_broken(vq))) break; } while (!virtqueue_enable_cb(vq)); - if (req_done) { - if (!rq_list_empty(iob.req_list)) - iob.complete(&iob); - - /* In case queue is stopped waiting for more buffers. */ + /* In case queue is stopped waiting for more buffers. 
*/ + if (req_done) blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); - } - spin_unlock_irqrestore(&vblk_vq->lock, flags); + spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); } static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx) @@ -1283,15 +1253,37 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set) } } +static void virtblk_complete_batch(struct io_comp_batch *iob) +{ + struct request *req; + + rq_list_for_each(&iob->req_list, req) { + virtblk_unmap_data(req, blk_mq_rq_to_pdu(req)); + virtblk_cleanup_cmd(req); + } + blk_mq_end_request_batch(iob); +} + static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct virtio_blk *vblk = hctx->queue->queuedata; struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx); + struct virtblk_req *vbr; unsigned long flags; + unsigned int len; int found = 0; spin_lock_irqsave(&vq->lock, flags); - found = virtblk_handle_req(vq, iob); + + while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) { + struct request *req = blk_mq_rq_from_pdu(vbr); + + found++; + if (!blk_mq_complete_request_remote(req) && + !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr), + virtblk_complete_batch)) + virtblk_request_done(req); + } if (found) blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 4807af1d5805..bb66178c432b 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -473,7 +473,7 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev) static void xen_vbd_free(struct xen_vbd *vbd) { if (vbd->bdev) - blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE); + blkdev_put(vbd->bdev, NULL); vbd->bdev = NULL; } @@ -492,7 +492,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, vbd->pdevice = MKDEV(major, minor); bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ? 
- FMODE_READ : FMODE_WRITE, NULL); + BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL); if (IS_ERR(bdev)) { pr_warn("xen_vbd_create: device %08x could not be opened\n", diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index c1890c8a9f6e..434fab306777 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -509,7 +509,7 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) return 0; } -static int blkif_ioctl(struct block_device *bdev, fmode_t mode, +static int blkif_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned command, unsigned long argument) { struct blkfront_info *info = bdev->bd_disk->private_data; diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index c1e85f356e4d..11493167b0a8 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -140,16 +140,14 @@ static void get_chipram(void) return; } -static int z2_open(struct block_device *bdev, fmode_t mode) +static int z2_open(struct gendisk *disk, blk_mode_t mode) { - int device; + int device = disk->first_minor; int max_z2_map = (Z2RAM_SIZE / Z2RAM_CHUNKSIZE) * sizeof(z2ram_map[0]); int max_chip_map = (amiga_chip_size / Z2RAM_CHUNKSIZE) * sizeof(z2ram_map[0]); int rc = -ENOMEM; - device = MINOR(bdev->bd_dev); - mutex_lock(&z2ram_mutex); if (current_device != -1 && current_device != device) { rc = -EBUSY; @@ -290,7 +288,7 @@ err_out: return rc; } -static void z2_release(struct gendisk *disk, fmode_t mode) +static void z2_release(struct gendisk *disk) { mutex_lock(&z2ram_mutex); if (current_device == -1) { diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index f6d90f1ba5cf..5676e6dd5b16 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -420,7 +420,7 @@ static void reset_bdev(struct zram *zram) return; bdev = zram->bdev; - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(bdev, zram); /* hope filp_close flush all of IO */ filp_close(zram->backing_dev, NULL); zram->backing_dev = NULL; @@ -507,8 +507,8 @@ static ssize_t backing_dev_store(struct device *dev, goto out; } - bdev = blkdev_get_by_dev(inode->i_rdev, - FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram); + bdev = blkdev_get_by_dev(inode->i_rdev, BLK_OPEN_READ | BLK_OPEN_WRITE, + zram, NULL); if (IS_ERR(bdev)) { err = PTR_ERR(bdev); bdev = NULL; @@ -539,7 +539,7 @@ out: kvfree(bitmap); if (bdev) - blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(bdev, zram); if (backing_dev) filp_close(backing_dev, NULL); @@ -700,7 +700,7 @@ static ssize_t writeback_store(struct device *dev, bio_init(&bio, zram->bdev, &bio_vec, 1, REQ_OP_WRITE | REQ_SYNC); bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9); - bio_add_page(&bio, page, PAGE_SIZE, 0); + __bio_add_page(&bio, page, PAGE_SIZE, 0); /* * XXX: A single page IO would be inefficient for write @@ -1753,7 +1753,7 @@ static ssize_t recompress_store(struct device *dev, } } - if (threshold >= PAGE_SIZE) + if (threshold >= huge_class_size) return -EINVAL; down_read(&zram->init_lock); @@ -2097,19 +2097,16 @@ static ssize_t reset_store(struct device *dev, return len; } -static int zram_open(struct block_device *bdev, fmode_t mode) +static int zram_open(struct gendisk *disk, blk_mode_t mode) { - int ret = 0; - struct zram *zram; + struct zram *zram = disk->private_data; - WARN_ON(!mutex_is_locked(&bdev->bd_disk->open_mutex)); + WARN_ON(!mutex_is_locked(&disk->open_mutex)); - zram = bdev->bd_disk->private_data; /* zram was claimed to reset so open request fails */ if 
(zram->claim) - ret = -EBUSY; - - return ret; + return -EBUSY; + return 0; } static const struct block_device_operations zram_devops = { diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 416f723a2dbb..cc2839805983 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -264,6 +264,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/nospec.h> #include <linux/slab.h> #include <linux/cdrom.h> #include <linux/sysctl.h> @@ -978,15 +979,6 @@ static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi) cdi->media_written = 0; } -static int cdrom_close_write(struct cdrom_device_info *cdi) -{ -#if 0 - return cdrom_flush_cache(cdi); -#else - return 0; -#endif -} - /* badly broken, I know. Is due for a fixup anytime. */ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks) { @@ -1155,8 +1147,7 @@ clean_up_and_return: * is in their own interest: device control becomes a lot easier * this way. */ -int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, - fmode_t mode) +int cdrom_open(struct cdrom_device_info *cdi, blk_mode_t mode) { int ret; @@ -1165,7 +1156,7 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, /* if this was a O_NONBLOCK open and we should honor the flags, * do a quick open without drive/disc integrity checks. */ cdi->use_count++; - if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) { + if ((mode & BLK_OPEN_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) { ret = cdi->ops->open(cdi, 1); } else { ret = open_for_data(cdi); @@ -1173,7 +1164,7 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, goto err; if (CDROM_CAN(CDC_GENERIC_PACKET)) cdrom_mmc3_profile(cdi); - if (mode & FMODE_WRITE) { + if (mode & BLK_OPEN_WRITE) { ret = -EROFS; if (cdrom_open_write(cdi)) goto err_release; @@ -1182,6 +1173,7 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, ret = 0; cdi->media_written = 0; } + cdi->opened_for_data = true; } if (ret) @@ -1259,10 +1251,9 @@ static int check_for_audio_disc(struct cdrom_device_info *cdi, return 0; } -void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode) +void cdrom_release(struct cdrom_device_info *cdi) { const struct cdrom_device_ops *cdo = cdi->ops; - int opened_for_data; cd_dbg(CD_CLOSE, "entering cdrom_release\n"); @@ -1280,20 +1271,12 @@ void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode) } } - opened_for_data = !(cdi->options & CDO_USE_FFLAGS) || - !(mode & FMODE_NDELAY); - - /* - * flush cache on last write release - */ - if (CDROM_CAN(CDC_RAM) && !cdi->use_count && cdi->for_data) - cdrom_close_write(cdi); - cdo->release(cdi); - if (cdi->use_count == 0) { /* last process that closes dev*/ - if (opened_for_data && - cdi->options & CDO_AUTO_EJECT && CDROM_CAN(CDC_OPEN_TRAY)) + + if (cdi->use_count == 0 && cdi->opened_for_data) { + if (cdi->options & CDO_AUTO_EJECT && CDROM_CAN(CDC_OPEN_TRAY)) cdo->tray_move(cdi, 1); + cdi->opened_for_data = false; } } EXPORT_SYMBOL(cdrom_release); @@ -2329,6 +2312,9 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, if (arg >= cdi->capacity) return -EINVAL; + /* Prevent arg from speculatively bypassing the length check */ + barrier_nospec(); + info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; @@ -3337,7 +3323,7 @@ static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, * ATAPI / SCSI specific code now mainly resides in mmc_ioctl(). 
*/ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, - fmode_t mode, unsigned int cmd, unsigned long arg) + unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int ret;
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index ceded5772aac..3a46e27479ff 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -474,19 +474,19 @@ static const struct cdrom_device_ops gdrom_ops = { CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R, }; -static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode) +static int gdrom_bdops_open(struct gendisk *disk, blk_mode_t mode) { int ret; - bdev_check_media_change(bdev); + disk_check_media_change(disk); mutex_lock(&gdrom_mutex); - ret = cdrom_open(gd.cd_info, bdev, mode); + ret = cdrom_open(gd.cd_info, mode); mutex_unlock(&gdrom_mutex); return ret; } -static void gdrom_bdops_release(struct gendisk *disk, fmode_t mode) +static void gdrom_bdops_release(struct gendisk *disk) { mutex_lock(&gdrom_mutex); - cdrom_release(gd.cd_info, mode); + cdrom_release(gd.cd_info); mutex_unlock(&gdrom_mutex); @@ -499,13 +499,13 @@ static unsigned int gdrom_bdops_check_events(struct gendisk *disk, return cdrom_check_events(gd.cd_info, clearing); } -static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode, +static int gdrom_bdops_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned cmd, unsigned long arg) { int ret; mutex_lock(&gdrom_mutex); - ret = cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg); + ret = cdrom_ioctl(gd.cd_info, bdev, cmd, arg); mutex_unlock(&gdrom_mutex); return ret;
diff --git a/drivers/char/random.c b/drivers/char/random.c index 253f2ddb8913..3cb37760dfec 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1546,7 +1546,7 @@ const struct file_operations random_fops = { .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, - .splice_read = generic_file_splice_read, + .splice_read = copy_splice_read, .splice_write = iter_file_splice_write, }; @@ -1557,7 +1557,7 @@ const struct file_operations urandom_fops = { .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, - .splice_read = generic_file_splice_read, + .splice_read = copy_splice_read, .splice_write = iter_file_splice_write, };
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c index edfa94641bbf..66759fe28fad 100644 --- a/drivers/clk/clk-composite.c +++ b/drivers/clk/clk-composite.c @@ -119,7 +119,10 @@ static int clk_composite_determine_rate(struct clk_hw *hw, if (ret) continue; - rate_diff = abs(req->rate - tmp_req.rate); + if (req->rate >= tmp_req.rate) + rate_diff = req->rate - tmp_req.rate; + else + rate_diff = tmp_req.rate - req->rate; if (!rate_diff || !req->best_parent_hw || best_rate_diff > rate_diff) {
diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c index 70ae1dd2e474..bacdcbb287ac 100644 --- a/drivers/clk/clk-loongson2.c +++ b/drivers/clk/clk-loongson2.c @@ -40,7 +40,7 @@ static struct clk_hw *loongson2_clk_register(struct device *dev, { int ret; struct clk_hw *hw; - struct clk_init_data init; + struct clk_init_data init = { }; hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL); if (!hw)
diff --git a/drivers/clk/imx/clk-imx1.c b/drivers/clk/imx/clk-imx1.c index 22fc7491ba00..f6ea7e5052d5 100644 --- a/drivers/clk/imx/clk-imx1.c +++ b/drivers/clk/imx/clk-imx1.c @@ -10,7 +10,6 @@ #include <linux/of.h> #include <linux/of_address.h> #include <dt-bindings/clock/imx1-clock.h> -#include <soc/imx/timer.h> #include <asm/irq.h> #include "clk.h"
diff --git a/drivers/clk/imx/clk-imx27.c
b/drivers/clk/imx/clk-imx27.c index 5d177125728d..99618ded0939 100644 --- a/drivers/clk/imx/clk-imx27.c +++ b/drivers/clk/imx/clk-imx27.c @@ -8,7 +8,6 @@ #include <linux/of_address.h> #include <dt-bindings/clock/imx27-clock.h> #include <soc/imx/revision.h> -#include <soc/imx/timer.h> #include <asm/irq.h> #include "clk.h" diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c index c44e18c6f63f..4c8d9ff0b2ad 100644 --- a/drivers/clk/imx/clk-imx31.c +++ b/drivers/clk/imx/clk-imx31.c @@ -11,7 +11,6 @@ #include <linux/of.h> #include <linux/of_address.h> #include <soc/imx/revision.h> -#include <soc/imx/timer.h> #include <asm/irq.h> #include "clk.h" diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c index 7dcbaea3fea3..3b6fdb4e0be7 100644 --- a/drivers/clk/imx/clk-imx35.c +++ b/drivers/clk/imx/clk-imx35.c @@ -10,7 +10,6 @@ #include <linux/of.h> #include <linux/err.h> #include <soc/imx/revision.h> -#include <soc/imx/timer.h> #include <asm/irq.h> #include "clk.h" diff --git a/drivers/clk/mediatek/clk-mt8365.c b/drivers/clk/mediatek/clk-mt8365.c index 6b4e193f648d..c87a6c4a7967 100644 --- a/drivers/clk/mediatek/clk-mt8365.c +++ b/drivers/clk/mediatek/clk-mt8365.c @@ -23,6 +23,7 @@ static DEFINE_SPINLOCK(mt8365_clk_lock); static const struct mtk_fixed_clk top_fixed_clks[] = { + FIXED_CLK(CLK_TOP_CLK_NULL, "clk_null", NULL, 0), FIXED_CLK(CLK_TOP_I2S0_BCK, "i2s0_bck", NULL, 26000000), FIXED_CLK(CLK_TOP_DSI0_LNTC_DSICK, "dsi0_lntc_dsick", "clk26m", 75000000), @@ -559,6 +560,14 @@ static const struct mtk_clk_divider top_adj_divs[] = { 0x324, 16, 8, CLK_DIVIDER_ROUND_CLOSEST), DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV3, "apll12_ck_div3", "apll_i2s3_sel", 0x324, 24, 8, CLK_DIVIDER_ROUND_CLOSEST), + DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV4, "apll12_ck_div4", "apll_tdmout_sel", + 0x328, 0, 8, CLK_DIVIDER_ROUND_CLOSEST), + DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV4B, "apll12_ck_div4b", "apll_tdmout_sel", + 0x328, 8, 8, CLK_DIVIDER_ROUND_CLOSEST), + DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV5, "apll12_ck_div5", "apll_tdmin_sel", + 0x328, 16, 8, CLK_DIVIDER_ROUND_CLOSEST), + DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV5B, "apll12_ck_div5b", "apll_tdmin_sel", + 0x328, 24, 8, CLK_DIVIDER_ROUND_CLOSEST), DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV6, "apll12_ck_div6", "apll_spdif_sel", 0x32c, 0, 8, CLK_DIVIDER_ROUND_CLOSEST), }; @@ -583,15 +592,15 @@ static const struct mtk_gate_regs top2_cg_regs = { #define GATE_TOP0(_id, _name, _parent, _shift) \ GATE_MTK(_id, _name, _parent, &top0_cg_regs, \ - _shift, &mtk_clk_gate_ops_no_setclr_inv) + _shift, &mtk_clk_gate_ops_no_setclr) #define GATE_TOP1(_id, _name, _parent, _shift) \ GATE_MTK(_id, _name, _parent, &top1_cg_regs, \ - _shift, &mtk_clk_gate_ops_no_setclr) + _shift, &mtk_clk_gate_ops_no_setclr_inv) #define GATE_TOP2(_id, _name, _parent, _shift) \ GATE_MTK(_id, _name, _parent, &top2_cg_regs, \ - _shift, &mtk_clk_gate_ops_no_setclr) + _shift, &mtk_clk_gate_ops_no_setclr_inv) static const struct mtk_gate top_clk_gates[] = { GATE_TOP0(CLK_TOP_CONN_32K, "conn_32k", "clk32k", 10), @@ -696,6 +705,7 @@ static const struct mtk_gate ifr_clks[] = { GATE_IFR3(CLK_IFR_GCPU, "ifr_gcpu", "axi_sel", 8), GATE_IFR3(CLK_IFR_TRNG, "ifr_trng", "axi_sel", 9), GATE_IFR3(CLK_IFR_AUXADC, "ifr_auxadc", "clk26m", 10), + GATE_IFR3(CLK_IFR_CPUM, "ifr_cpum", "clk26m", 11), GATE_IFR3(CLK_IFR_AUXADC_MD, "ifr_auxadc_md", "clk26m", 14), GATE_IFR3(CLK_IFR_AP_DMA, "ifr_ap_dma", "axi_sel", 18), GATE_IFR3(CLK_IFR_DEBUGSYS, "ifr_debugsys", "axi_sel", 24), @@ -717,6 +727,8 @@ static const struct mtk_gate ifr_clks[] = { 
GATE_IFR5(CLK_IFR_PWRAP_TMR, "ifr_pwrap_tmr", "clk26m", 12), GATE_IFR5(CLK_IFR_PWRAP_SPI, "ifr_pwrap_spi", "clk26m", 13), GATE_IFR5(CLK_IFR_PWRAP_SYS, "ifr_pwrap_sys", "clk26m", 14), + GATE_MTK_FLAGS(CLK_IFR_MCU_PM_BK, "ifr_mcu_pm_bk", NULL, &ifr5_cg_regs, + 17, &mtk_clk_gate_ops_setclr, CLK_IGNORE_UNUSED), GATE_IFR5(CLK_IFR_IRRX_26M, "ifr_irrx_26m", "clk26m", 22), GATE_IFR5(CLK_IFR_IRRX_32K, "ifr_irrx_32k", "clk32k", 23), GATE_IFR5(CLK_IFR_I2C0_AXI, "ifr_i2c0_axi", "i2c_sel", 24),
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c index 42958a542662..621e298f101a 100644 --- a/drivers/clk/pxa/clk-pxa3xx.c +++ b/drivers/clk/pxa/clk-pxa3xx.c @@ -164,7 +164,7 @@ void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask) accr &= ~disable; accr |= enable; - writel(accr, ACCR); + writel(accr, clk_regs + ACCR); if (xclkcfg) __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 526382dc7482..c4d671a5a13d 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -612,6 +612,15 @@ config TIMER_IMX_SYS_CTR Enable this option to use i.MX system counter timer as a clockevent. +config CLKSRC_LOONGSON1_PWM + bool "Clocksource using Loongson1 PWM" + depends on MACH_LOONGSON32 || COMPILE_TEST + select MIPS_EXTERNAL_TIMER + select TIMER_OF + help + Enable this option to use the Loongson1 PWM timer as a clocksource + instead of the performance counter. + config CLKSRC_ST_LPC bool "Low power clocksource found in the LPC" if COMPILE_TEST select TIMER_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index f12d3987a960..5d93c9e3fc55 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -89,3 +89,4 @@ obj-$(CONFIG_MICROCHIP_PIT64B) += timer-microchip-pit64b.o obj-$(CONFIG_MSC313E_TIMER) += timer-msc313e.o obj-$(CONFIG_GOLDFISH_TIMER) += timer-goldfish.o obj-$(CONFIG_GXP_TIMER) += timer-gxp.o +obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index e09d4427f604..e733a2a1927a 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -191,22 +191,40 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, return val; } -static notrace u64 arch_counter_get_cntpct_stable(void) +static noinstr u64 raw_counter_get_cntpct_stable(void) { return __arch_counter_get_cntpct_stable(); } -static notrace u64 arch_counter_get_cntpct(void) +static notrace u64 arch_counter_get_cntpct_stable(void) +{ + u64 val; + preempt_disable_notrace(); + val = __arch_counter_get_cntpct_stable(); + preempt_enable_notrace(); + return val; +} + +static noinstr u64 arch_counter_get_cntpct(void) { return __arch_counter_get_cntpct(); } -static notrace u64 arch_counter_get_cntvct_stable(void) +static noinstr u64 raw_counter_get_cntvct_stable(void) { return __arch_counter_get_cntvct_stable(); } -static notrace u64 arch_counter_get_cntvct(void) +static notrace u64 arch_counter_get_cntvct_stable(void) +{ + u64 val; + preempt_disable_notrace(); + val = __arch_counter_get_cntvct_stable(); + preempt_enable_notrace(); + return val; +} + +static noinstr u64 arch_counter_get_cntvct(void) { return __arch_counter_get_cntvct(); } @@ -753,14 +771,14 @@ static int arch_timer_set_next_event_phys(unsigned long evt, return 0; }
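/*
 * Rationale sketch for the hunks below (inferred from this patch, not
 * stated in it): the reader handed to sched_clock_register() -- the
 * new 'scr' pointer in arch_counter_register() -- may be called from
 * noinstr code, so it must be noinstr itself.  Hence the "_stable"
 * workaround read is split in two above: a noinstr
 * raw_counter_get_*_stable() used only as the sched_clock reader, and
 * a notrace arch_counter_get_*_stable() that wraps the read in
 * preempt_disable_notrace()/preempt_enable_notrace() for the
 * clocksource path.  The MMIO variant below likewise trades
 * readl_relaxed(), whose instrumentation is not noinstr-safe, for
 * __raw_readl() plus an explicit little-endian conversion.
 */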
-static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo) +static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo) { u32 cnt_lo, cnt_hi, tmp_hi; do { - cnt_hi = readl_relaxed(t->base + offset_lo + 4); - cnt_lo = readl_relaxed(t->base + offset_lo); - tmp_hi = readl_relaxed(t->base + offset_lo + 4); + cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4)); + cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo)); + tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4)); } while (cnt_hi != tmp_hi); return ((u64) cnt_hi << 32) | cnt_lo; @@ -1060,7 +1078,7 @@ bool arch_timer_evtstrm_available(void) return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available); } -static u64 arch_counter_get_cntvct_mem(void) +static noinstr u64 arch_counter_get_cntvct_mem(void) { return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO); } @@ -1074,6 +1092,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void) static void __init arch_counter_register(unsigned type) { + u64 (*scr)(void); u64 start_count; int width; @@ -1083,21 +1102,28 @@ static void __init arch_counter_register(unsigned type) if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) { - if (arch_timer_counter_has_wa()) + if (arch_timer_counter_has_wa()) { rd = arch_counter_get_cntvct_stable; - else + scr = raw_counter_get_cntvct_stable; + } else { rd = arch_counter_get_cntvct; + scr = arch_counter_get_cntvct; + } } else { - if (arch_timer_counter_has_wa()) + if (arch_timer_counter_has_wa()) { rd = arch_counter_get_cntpct_stable; - else + scr = raw_counter_get_cntpct_stable; + } else { rd = arch_counter_get_cntpct; + scr = arch_counter_get_cntpct; + } } arch_timer_read_counter = rd; clocksource_counter.vdso_clock_mode = vdso_default; } else { arch_timer_read_counter = arch_counter_get_cntvct_mem; + scr = arch_counter_get_cntvct_mem; } width = arch_counter_get_width(); @@ -1113,7 +1139,7 @@ static void __init arch_counter_register(unsigned type) timecounter_init(&arch_timer_kvm_info.timecounter, &cyclecounter, start_count); - sched_clock_register(arch_timer_read_counter, width, arch_timer_rate); + sched_clock_register(scr, width, arch_timer_rate); } static void arch_timer_stop(struct clock_event_device *clk)
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index bcd9042a0c9f..e56307a81f4d 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -365,6 +365,20 @@ void hv_stimer_global_cleanup(void) } EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup); +static __always_inline u64 read_hv_clock_msr(void) +{ + /* + * Read the partition counter to get the current tick count. This count + * is set to 0 when the partition is created and is incremented in 100 + * nanosecond units. + * + * Use hv_raw_get_register() because this function is used from + * noinstr. Notably, while HV_REGISTER_TIME_REF_COUNT is a synthetic + * register it doesn't need the GHCB path. + */ + return hv_raw_get_register(HV_REGISTER_TIME_REF_COUNT); +} + /* * Code and definitions for the Hyper-V clocksources.
Two * clocksources are defined: one that reads the Hyper-V defined MSR, and @@ -393,14 +407,20 @@ struct ms_hyperv_tsc_page *hv_get_tsc_page(void) } EXPORT_SYMBOL_GPL(hv_get_tsc_page); -static u64 notrace read_hv_clock_tsc(void) +static __always_inline u64 read_hv_clock_tsc(void) { - u64 current_tick = hv_read_tsc_page(hv_get_tsc_page()); + u64 cur_tsc, time; - if (current_tick == U64_MAX) - current_tick = hv_get_register(HV_REGISTER_TIME_REF_COUNT); + /* + * The Hyper-V Top-Level Function Spec (TLFS), section Timers, + * subsection Reference Counter, guarantees that the TSC and MSR + * times are in sync and monotonic. Therefore we can fall back + * to the MSR in case the TSC page indicates unavailability. + */ + if (!hv_read_tsc_page_tsc(tsc_page, &cur_tsc, &time)) + time = read_hv_clock_msr(); - return current_tick; + return time; } static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg) @@ -408,7 +428,7 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg) return read_hv_clock_tsc(); } -static u64 notrace read_hv_sched_clock_tsc(void) +static u64 noinstr read_hv_sched_clock_tsc(void) { return (read_hv_clock_tsc() - hv_sched_clock_offset) * (NSEC_PER_SEC / HV_CLOCK_HZ); @@ -460,30 +480,14 @@ static struct clocksource hyperv_cs_tsc = { #endif }; -static u64 notrace read_hv_clock_msr(void) -{ - /* - * Read the partition counter to get the current tick count. This count - * is set to 0 when the partition is created and is incremented in - * 100 nanosecond units. - */ - return hv_get_register(HV_REGISTER_TIME_REF_COUNT); -} - static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg) { return read_hv_clock_msr(); } -static u64 notrace read_hv_sched_clock_msr(void) -{ - return (read_hv_clock_msr() - hv_sched_clock_offset) * - (NSEC_PER_SEC / HV_CLOCK_HZ); -} - static struct clocksource hyperv_cs_msr = { .name = "hyperv_clocksource_msr", - .rating = 500, + .rating = 495, .read = read_hv_clock_msr_cs, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, @@ -513,7 +517,7 @@ static __always_inline void hv_setup_sched_clock(void *sched_clock) static __always_inline void hv_setup_sched_clock(void *sched_clock) {} #endif /* CONFIG_GENERIC_SCHED_CLOCK */ -static bool __init hv_init_tsc_clocksource(void) +static void __init hv_init_tsc_clocksource(void) { union hv_reference_tsc_msr tsc_msr; @@ -524,17 +528,14 @@ static bool __init hv_init_tsc_clocksource(void) * Hyper-V Reference TSC rating, causing the generic TSC to be used. * TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference * TSC will be preferred over the virtualized ARM64 arch counter. - * While the Hyper-V MSR clocksource won't be used since the - * Reference TSC clocksource is present, change its rating as - * well for consistency. */ if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) { hyperv_cs_tsc.rating = 250; - hyperv_cs_msr.rating = 250; + hyperv_cs_msr.rating = 245; } if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE)) - return false; + return; hv_read_reference_counter = read_hv_clock_tsc; @@ -565,33 +566,34 @@ static bool __init hv_init_tsc_clocksource(void) clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100); - hv_sched_clock_offset = hv_read_reference_counter(); - hv_setup_sched_clock(read_hv_sched_clock_tsc); - - return true; + /* + * If TSC is invariant, then let it stay as the sched clock since it + * will be faster than reading the TSC page. But if not invariant, use + * the TSC page so that live migrations across hosts with different + * frequencies are handled correctly. + */ + if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT)) { + hv_sched_clock_offset = hv_read_reference_counter(); + hv_setup_sched_clock(read_hv_sched_clock_tsc); + } } void __init hv_init_clocksource(void) { /* - * Try to set up the TSC page clocksource. If it succeeds, we're - * done. Otherwise, set up the MSR clocksource. At least one of - * these will always be available except on very old versions of - * Hyper-V on x86. In that case we won't have a Hyper-V + * Try to set up the TSC page clocksource, then the MSR clocksource. + * At least one of these will always be available except on very old + * versions of Hyper-V on x86. In that case we won't have a Hyper-V * clocksource, but Linux will still run with a clocksource based * on the emulated PIT or LAPIC timer. + * + * Never use the MSR clocksource as sched clock. It's too slow. + * Better to use the native sched clock as the fallback. */ - if (hv_init_tsc_clocksource()) - return; - - if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE)) - return; - - hv_read_reference_counter = read_hv_clock_msr; - clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100); + hv_init_tsc_clocksource(); - hv_sched_clock_offset = hv_read_reference_counter(); - hv_setup_sched_clock(read_hv_sched_clock_msr); + if (ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE) + clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100); } void __init hv_remap_tsc_clocksource(void)
diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c index 089ce64b1c3f..154ee5f7954a 100644 --- a/drivers/clocksource/ingenic-timer.c +++ b/drivers/clocksource/ingenic-timer.c @@ -369,7 +369,7 @@ static int __init ingenic_tcu_probe(struct platform_device *pdev) return 0; } -static int __maybe_unused ingenic_tcu_suspend(struct device *dev) +static int ingenic_tcu_suspend(struct device *dev) { struct ingenic_tcu *tcu = dev_get_drvdata(dev); unsigned int cpu; @@ -382,7 +382,7 @@ static int __maybe_unused ingenic_tcu_suspend(struct device *dev) return 0; } -static int __maybe_unused ingenic_tcu_resume(struct device *dev) +static int ingenic_tcu_resume(struct device *dev) { struct ingenic_tcu *tcu = dev_get_drvdata(dev); unsigned int cpu; @@ -406,7 +406,7 @@ err_timer_clk_disable: return ret; } -static const struct dev_pm_ops __maybe_unused ingenic_tcu_pm_ops = { +static const struct dev_pm_ops ingenic_tcu_pm_ops = { /* _noirq: We want the TCU clocks to be gated last / ungated first */ .suspend_noirq = ingenic_tcu_suspend, .resume_noirq = ingenic_tcu_resume, @@ -415,9 +415,7 @@ static const struct dev_pm_ops __maybe_unused ingenic_tcu_pm_ops = { static struct platform_driver ingenic_tcu_driver = { .driver = { .name = "ingenic-tcu-timer", -#ifdef CONFIG_PM_SLEEP - .pm = &ingenic_tcu_pm_ops, -#endif + .pm = pm_sleep_ptr(&ingenic_tcu_pm_ops), .of_match_table = ingenic_tcu_of_match, }, };
diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c index 4efd0cf3b602..0d52e28fea4d 100644 --- a/drivers/clocksource/timer-cadence-ttc.c +++ b/drivers/clocksource/timer-cadence-ttc.c @@ -486,10 +486,10 @@ static int __init ttc_timer_probe(struct platform_device *pdev) * and use it.
Note that the event timer uses the interrupt and it's the * 2nd TTC hence the irq_of_parse_and_map(,1) */ - timer_baseaddr = of_iomap(timer, 0); - if (!timer_baseaddr) { + timer_baseaddr = devm_of_iomap(&pdev->dev, timer, 0, NULL); + if (IS_ERR(timer_baseaddr)) { pr_err("ERROR: invalid timer base address\n"); - return -ENXIO; + return PTR_ERR(timer_baseaddr); } irq = irq_of_parse_and_map(timer, 1); @@ -513,20 +513,27 @@ static int __init ttc_timer_probe(struct platform_device *pdev) clk_ce = of_clk_get(timer, clksel); if (IS_ERR(clk_ce)) { pr_err("ERROR: timer input clock not found\n"); - return PTR_ERR(clk_ce); + ret = PTR_ERR(clk_ce); + goto put_clk_cs; } ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width); if (ret) - return ret; + goto put_clk_ce; ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq); if (ret) - return ret; + goto put_clk_ce; pr_info("%pOFn #0 at %p, irq=%d\n", timer, timer_baseaddr, irq); return 0; + +put_clk_ce: + clk_put(clk_ce); +put_clk_cs: + clk_put(clk_cs); + return ret; } static const struct of_device_id ttc_timer_of_match[] = { diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c index ca3e4cbc80c6..28ab4f1a7c71 100644 --- a/drivers/clocksource/timer-imx-gpt.c +++ b/drivers/clocksource/timer-imx-gpt.c @@ -16,7 +16,6 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <soc/imx/timer.h> /* * There are 4 versions of the timer hardware on Freescale MXC hardware. @@ -25,6 +24,12 @@ * - MX25, MX31, MX35, MX37, MX51, MX6Q(rev1.0) * - MX6DL, MX6SX, MX6Q(rev1.1+) */ +enum imx_gpt_type { + GPT_TYPE_IMX1, /* i.MX1 */ + GPT_TYPE_IMX21, /* i.MX21/27 */ + GPT_TYPE_IMX31, /* i.MX31/35/25/37/51/6Q */ + GPT_TYPE_IMX6DL, /* i.MX6DL/SX/SL */ +}; /* defines common for all i.MX */ #define MXC_TCTL 0x00 @@ -93,13 +98,11 @@ static void imx1_gpt_irq_disable(struct imx_timer *imxtm) tmp = readl_relaxed(imxtm->base + MXC_TCTL); writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL); } -#define imx21_gpt_irq_disable imx1_gpt_irq_disable static void imx31_gpt_irq_disable(struct imx_timer *imxtm) { writel_relaxed(0, imxtm->base + V2_IR); } -#define imx6dl_gpt_irq_disable imx31_gpt_irq_disable static void imx1_gpt_irq_enable(struct imx_timer *imxtm) { @@ -108,13 +111,11 @@ static void imx1_gpt_irq_enable(struct imx_timer *imxtm) tmp = readl_relaxed(imxtm->base + MXC_TCTL); writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL); } -#define imx21_gpt_irq_enable imx1_gpt_irq_enable static void imx31_gpt_irq_enable(struct imx_timer *imxtm) { writel_relaxed(1<<0, imxtm->base + V2_IR); } -#define imx6dl_gpt_irq_enable imx31_gpt_irq_enable static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm) { @@ -131,7 +132,6 @@ static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm) { writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT); } -#define imx6dl_gpt_irq_acknowledge imx31_gpt_irq_acknowledge static void __iomem *sched_clock_reg; @@ -296,7 +296,6 @@ static void imx1_gpt_setup_tctl(struct imx_timer *imxtm) tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN; writel_relaxed(tctl_val, imxtm->base + MXC_TCTL); } -#define imx21_gpt_setup_tctl imx1_gpt_setup_tctl static void imx31_gpt_setup_tctl(struct imx_timer *imxtm) { @@ -343,10 +342,10 @@ static const struct imx_gpt_data imx21_gpt_data = { .reg_tstat = MX1_2_TSTAT, .reg_tcn = MX1_2_TCN, .reg_tcmp = MX1_2_TCMP, - .gpt_irq_enable = imx21_gpt_irq_enable, - .gpt_irq_disable = imx21_gpt_irq_disable, + .gpt_irq_enable = 
imx1_gpt_irq_enable, + .gpt_irq_disable = imx1_gpt_irq_disable, .gpt_irq_acknowledge = imx21_gpt_irq_acknowledge, - .gpt_setup_tctl = imx21_gpt_setup_tctl, + .gpt_setup_tctl = imx1_gpt_setup_tctl, .set_next_event = mx1_2_set_next_event, }; @@ -365,9 +364,9 @@ static const struct imx_gpt_data imx6dl_gpt_data = { .reg_tstat = V2_TSTAT, .reg_tcn = V2_TCN, .reg_tcmp = V2_TCMP, - .gpt_irq_enable = imx6dl_gpt_irq_enable, - .gpt_irq_disable = imx6dl_gpt_irq_disable, - .gpt_irq_acknowledge = imx6dl_gpt_irq_acknowledge, + .gpt_irq_enable = imx31_gpt_irq_enable, + .gpt_irq_disable = imx31_gpt_irq_disable, + .gpt_irq_acknowledge = imx31_gpt_irq_acknowledge, .gpt_setup_tctl = imx6dl_gpt_setup_tctl, .set_next_event = v2_set_next_event, };
diff --git a/drivers/clocksource/timer-loongson1-pwm.c b/drivers/clocksource/timer-loongson1-pwm.c new file mode 100644 index 000000000000..6335fee03017 --- /dev/null +++ b/drivers/clocksource/timer-loongson1-pwm.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Clocksource driver for Loongson-1 SoC + * + * Copyright (c) 2023 Keguang Zhang <keguang.zhang@gmail.com> + */ + +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/sizes.h> +#include "timer-of.h" + +/* Loongson-1 PWM Timer Register Definitions */ +#define PWM_CNTR 0x0 +#define PWM_HRC 0x4 +#define PWM_LRC 0x8 +#define PWM_CTRL 0xc + +/* PWM Control Register Bits */ +#define INT_LRC_EN BIT(11) +#define INT_HRC_EN BIT(10) +#define CNTR_RST BIT(7) +#define INT_SR BIT(6) +#define INT_EN BIT(5) +#define PWM_SINGLE BIT(4) +#define PWM_OE BIT(3) +#define CNT_EN BIT(0) + +#define CNTR_WIDTH 24 + +static DEFINE_RAW_SPINLOCK(ls1x_timer_lock); + +struct ls1x_clocksource { + void __iomem *reg_base; + unsigned long ticks_per_jiffy; + struct clocksource clksrc; +}; + +static inline struct ls1x_clocksource *to_ls1x_clksrc(struct clocksource *c) +{ + return container_of(c, struct ls1x_clocksource, clksrc); +} + +static inline void ls1x_pwmtimer_set_period(unsigned int period, + struct timer_of *to) +{ + writel(period, timer_of_base(to) + PWM_LRC); + writel(period, timer_of_base(to) + PWM_HRC); +} + +static inline void ls1x_pwmtimer_clear(struct timer_of *to) +{ + writel(0, timer_of_base(to) + PWM_CNTR); +} + +static inline void ls1x_pwmtimer_start(struct timer_of *to) +{ + writel((INT_EN | PWM_OE | CNT_EN), timer_of_base(to) + PWM_CTRL); +} + +static inline void ls1x_pwmtimer_stop(struct timer_of *to) +{ + writel(0, timer_of_base(to) + PWM_CTRL); +} + +static inline void ls1x_pwmtimer_irq_ack(struct timer_of *to) +{ + int val; + + val = readl(timer_of_base(to) + PWM_CTRL); + val |= INT_SR; + writel(val, timer_of_base(to) + PWM_CTRL); +} + +static irqreturn_t ls1x_clockevent_isr(int irq, void *dev_id) +{ + struct clock_event_device *clkevt = dev_id; + struct timer_of *to = to_timer_of(clkevt); + + ls1x_pwmtimer_irq_ack(to); + ls1x_pwmtimer_clear(to); + ls1x_pwmtimer_start(to); + + clkevt->event_handler(clkevt); + + return IRQ_HANDLED; +} + +static int ls1x_clockevent_set_state_periodic(struct clock_event_device *clkevt) +{ + struct timer_of *to = to_timer_of(clkevt); + + raw_spin_lock(&ls1x_timer_lock); + ls1x_pwmtimer_set_period(timer_of_period(to), to); + ls1x_pwmtimer_clear(to); + ls1x_pwmtimer_start(to); + raw_spin_unlock(&ls1x_timer_lock); + + return 0; +} + +static int ls1x_clockevent_tick_resume(struct clock_event_device *clkevt) +{ + raw_spin_lock(&ls1x_timer_lock); + ls1x_pwmtimer_start(to_timer_of(clkevt)); + raw_spin_unlock(&ls1x_timer_lock); + + return 0; +} + +static int ls1x_clockevent_set_state_shutdown(struct clock_event_device *clkevt) +{ + raw_spin_lock(&ls1x_timer_lock); + ls1x_pwmtimer_stop(to_timer_of(clkevt)); + raw_spin_unlock(&ls1x_timer_lock); + + return 0; +} + +static int ls1x_clockevent_set_next(unsigned long evt, + struct clock_event_device *clkevt) +{ + struct timer_of *to = to_timer_of(clkevt); + + raw_spin_lock(&ls1x_timer_lock); + ls1x_pwmtimer_set_period(evt, to); + ls1x_pwmtimer_clear(to); + ls1x_pwmtimer_start(to); + raw_spin_unlock(&ls1x_timer_lock); + + return 0; +} + +static struct timer_of ls1x_to = { + .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK, + .clkevt = { + .name = "ls1x-pwmtimer", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .rating = 300, + .set_next_event = ls1x_clockevent_set_next, + .set_state_periodic = ls1x_clockevent_set_state_periodic, + .set_state_oneshot = ls1x_clockevent_set_state_shutdown, + .set_state_shutdown = ls1x_clockevent_set_state_shutdown, + .tick_resume = ls1x_clockevent_tick_resume, + }, + .of_irq = { + .handler = ls1x_clockevent_isr, + .flags = IRQF_TIMER, + }, +}; + +/* + * Since the PWM timer overflows every two ticks, it's not very useful + * to just read by itself. So use jiffies to emulate a free + * running counter: + */ +static u64 ls1x_clocksource_read(struct clocksource *cs) +{ + struct ls1x_clocksource *ls1x_cs = to_ls1x_clksrc(cs); + unsigned long flags; + int count; + u32 jifs; + static int old_count; + static u32 old_jifs; + + raw_spin_lock_irqsave(&ls1x_timer_lock, flags); + /* + * Although our caller may have the read side of xtime_lock, + * this is now a seqlock, and we are cheating in this routine + * by having side effects on state that we cannot undo if + * there is a collision on the seqlock and our caller has to + * retry. (Namely, old_jifs and old_count.) So we must treat + * jiffies as volatile despite the lock. We read jiffies + * before latching the timer count to guarantee that although + * the jiffies value might be older than the count (that is, + * the counter may underflow between the last point where + * jiffies was incremented and the point where we latch the + * count), it cannot be newer. + */ + jifs = jiffies; + /* read the count */ + count = readl(ls1x_cs->reg_base + PWM_CNTR); + + /* + * It's possible for count to appear to go the wrong way for this + * reason: + * + * The timer counter underflows, but we haven't handled the resulting + * interrupt and incremented jiffies yet. + * + * Previous attempts to handle these cases intelligently were buggy, so + * we just do the simple thing now.
+ */ + if (count < old_count && jifs == old_jifs) + count = old_count; + + old_count = count; + old_jifs = jifs; + + raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags); + + return (u64)(jifs * ls1x_cs->ticks_per_jiffy) + count; +} + +static struct ls1x_clocksource ls1x_clocksource = { + .clksrc = { + .name = "ls1x-pwmtimer", + .rating = 300, + .read = ls1x_clocksource_read, + .mask = CLOCKSOURCE_MASK(CNTR_WIDTH), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + }, +}; + +static int __init ls1x_pwm_clocksource_init(struct device_node *np) +{ + struct timer_of *to = &ls1x_to; + int ret; + + ret = timer_of_init(np, to); + if (ret) + return ret; + + clockevents_config_and_register(&to->clkevt, timer_of_rate(to), + 0x1, GENMASK(CNTR_WIDTH - 1, 0)); + + ls1x_clocksource.reg_base = timer_of_base(to); + ls1x_clocksource.ticks_per_jiffy = timer_of_period(to); + + return clocksource_register_hz(&ls1x_clocksource.clksrc, + timer_of_rate(to)); +} + +TIMER_OF_DECLARE(ls1x_pwm_clocksource, "loongson,ls1b-pwmtimer", + ls1x_pwm_clocksource_init); diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 2c839bd2b051..a1c51abddbc5 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -38,7 +38,7 @@ choice prompt "Default CPUFreq governor" default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1110_CPUFREQ default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if ARM64 || ARM - default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if X86_INTEL_PSTATE && SMP + default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if (X86_INTEL_PSTATE || X86_AMD_PSTATE) && SMP default CPU_FREQ_DEFAULT_GOV_PERFORMANCE help This option sets which CPUFreq governor shall be loaded at diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 00476e94db90..438c9e75a04d 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -51,6 +51,23 @@ config X86_AMD_PSTATE If in doubt, say N. +config X86_AMD_PSTATE_DEFAULT_MODE + int "AMD Processor P-State default mode" + depends on X86_AMD_PSTATE + default 3 if X86_AMD_PSTATE + range 1 4 + help + Select the default mode the amd-pstate driver will use on + supported hardware. + The value set has the following meanings: + 1 -> Disabled + 2 -> Passive + 3 -> Active (EPP) + 4 -> Guided + + For details, take a look at: + <file:Documentation/admin-guide/pm/amd-pstate.rst>. 
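# Illustration of how this default interacts with the boot parameter
# (inferred from the driver changes below, not part of the patch): the
# value selected here is fed to amd_pstate_set_driver() only when
# amd_pstate_init() still sees cppc_state == AMD_PSTATE_UNDEFINED, i.e.
# when nothing was requested on the command line.  An explicit option
# always wins, e.g.:
#
#   amd_pstate=passive    # mode 2
#   amd_pstate=active     # mode 3 (EPP)
#   amd_pstate=guided     # mode 4
#
# and undefined, server, and shared-memory (no X86_FEATURE_CPPC)
# platforms are still refused at init time regardless of this default.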
+ config X86_AMD_PSTATE_UT tristate "selftest for AMD Processor P-State driver" depends on X86 && ACPI_PROCESSOR diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index ddd346a239e0..81fba0dcbee9 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -62,7 +62,8 @@ static struct cpufreq_driver *current_pstate_driver; static struct cpufreq_driver amd_pstate_driver; static struct cpufreq_driver amd_pstate_epp_driver; -static int cppc_state = AMD_PSTATE_DISABLE; +static int cppc_state = AMD_PSTATE_UNDEFINED; +static bool cppc_enabled; /* * AMD Energy Preference Performance (EPP) @@ -228,7 +229,28 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata, static inline int pstate_enable(bool enable) { - return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable); + int ret, cpu; + unsigned long logical_proc_id_mask = 0; + + if (enable == cppc_enabled) + return 0; + + for_each_present_cpu(cpu) { + unsigned long logical_id = topology_logical_die_id(cpu); + + if (test_bit(logical_id, &logical_proc_id_mask)) + continue; + + set_bit(logical_id, &logical_proc_id_mask); + + ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE, + enable); + if (ret) + return ret; + } + + cppc_enabled = enable; + return 0; } static int cppc_enable(bool enable) @@ -236,6 +258,9 @@ static int cppc_enable(bool enable) int cpu, ret = 0; struct cppc_perf_ctrls perf_ctrls; + if (enable == cppc_enabled) + return 0; + for_each_present_cpu(cpu) { ret = cppc_set_enable(cpu, enable); if (ret) @@ -251,6 +276,7 @@ static int cppc_enable(bool enable) } } + cppc_enabled = enable; return ret; } @@ -1045,6 +1071,26 @@ static const struct attribute_group amd_pstate_global_attr_group = { .attrs = pstate_global_attributes, }; +static bool amd_pstate_acpi_pm_profile_server(void) +{ + switch (acpi_gbl_FADT.preferred_profile) { + case PM_ENTERPRISE_SERVER: + case PM_SOHO_SERVER: + case PM_PERFORMANCE_SERVER: + return true; + } + return false; +} + +static bool amd_pstate_acpi_pm_profile_undefined(void) +{ + if (acpi_gbl_FADT.preferred_profile == PM_UNSPECIFIED) + return true; + if (acpi_gbl_FADT.preferred_profile >= NR_PM_PROFILES) + return true; + return false; +} + static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) { int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret; @@ -1102,10 +1148,14 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) policy->max = policy->cpuinfo.max_freq; /* - * Set the policy to powersave to provide a valid fallback value in case + * Set the policy to provide a valid fallback value in case * the default cpufreq governor is neither powersave nor performance. 
*/ - policy->policy = CPUFREQ_POLICY_POWERSAVE; + if (amd_pstate_acpi_pm_profile_server() || + amd_pstate_acpi_pm_profile_undefined()) + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + else + policy->policy = CPUFREQ_POLICY_POWERSAVE; if (boot_cpu_has(X86_FEATURE_CPPC)) { ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); @@ -1356,10 +1406,29 @@ static struct cpufreq_driver amd_pstate_epp_driver = { .online = amd_pstate_epp_cpu_online, .suspend = amd_pstate_epp_suspend, .resume = amd_pstate_epp_resume, - .name = "amd_pstate_epp", + .name = "amd-pstate-epp", .attr = amd_pstate_epp_attr, }; +static int __init amd_pstate_set_driver(int mode_idx) +{ + if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) { + cppc_state = mode_idx; + if (cppc_state == AMD_PSTATE_DISABLE) + pr_info("driver is explicitly disabled\n"); + + if (cppc_state == AMD_PSTATE_ACTIVE) + current_pstate_driver = &amd_pstate_epp_driver; + + if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED) + current_pstate_driver = &amd_pstate_driver; + + return 0; + } + + return -EINVAL; +} + static int __init amd_pstate_init(void) { struct device *dev_root; @@ -1367,15 +1436,6 @@ static int __init amd_pstate_init(void) if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) return -ENODEV; - /* - * by default the pstate driver is disabled to load - * enable the amd_pstate passive mode driver explicitly - * with amd_pstate=passive or other modes in kernel command line - */ - if (cppc_state == AMD_PSTATE_DISABLE) { - pr_info("driver load is disabled, boot with specific mode to enable this\n"); - return -ENODEV; - } if (!acpi_cpc_valid()) { pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n"); @@ -1386,6 +1446,33 @@ static int __init amd_pstate_init(void) if (cpufreq_get_current_driver()) return -EEXIST; + switch (cppc_state) { + case AMD_PSTATE_UNDEFINED: + /* Disable on the following configs by default: + * 1. Undefined platforms + * 2. Server platforms + * 3. 
Shared memory designs + */ + if (amd_pstate_acpi_pm_profile_undefined() || + amd_pstate_acpi_pm_profile_server() || + !boot_cpu_has(X86_FEATURE_CPPC)) { + pr_info("driver load is disabled, boot with specific mode to enable this\n"); + return -ENODEV; + } + ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE); + if (ret) + return ret; + break; + case AMD_PSTATE_DISABLE: + return -ENODEV; + case AMD_PSTATE_PASSIVE: + case AMD_PSTATE_ACTIVE: + case AMD_PSTATE_GUIDED: + break; + default: + return -EINVAL; + } + /* capability check */ if (boot_cpu_has(X86_FEATURE_CPPC)) { pr_debug("AMD CPPC MSR based functionality is supported\n"); @@ -1438,21 +1525,7 @@ static int __init amd_pstate_param(char *str) size = strlen(str); mode_idx = get_mode_idx_from_str(str, size); - if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) { - cppc_state = mode_idx; - if (cppc_state == AMD_PSTATE_DISABLE) - pr_info("driver is explicitly disabled\n"); - - if (cppc_state == AMD_PSTATE_ACTIVE) - current_pstate_driver = &amd_pstate_epp_driver; - - if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED) - current_pstate_driver = &amd_pstate_driver; - - return 0; - } - - return -EINVAL; + return amd_pstate_set_driver(mode_idx); } early_param("amd_pstate", amd_pstate_param); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 6b52ebe5a890..50bbc969ffe5 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2828,7 +2828,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) (driver_data->setpolicy && (driver_data->target_index || driver_data->target)) || (!driver_data->get_intermediate != !driver_data->target_intermediate) || - (!driver_data->online != !driver_data->offline)) + (!driver_data->online != !driver_data->offline) || + (driver_data->adjust_perf && !driver_data->fast_switch)) return -EINVAL; pr_debug("trying to register driver %s\n", driver_data->name); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 2548ec92faa2..f29182512b98 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -824,6 +824,8 @@ static ssize_t store_energy_performance_preference( err = cpufreq_start_governor(policy); if (!ret) ret = err; + } else { + ret = 0; } } diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 8e929f6602ce..737a026ef58a 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -145,7 +145,7 @@ static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv, instrumentation_begin(); - time_start = ns_to_ktime(local_clock()); + time_start = ns_to_ktime(local_clock_noinstr()); tick_freeze(); /* @@ -169,7 +169,7 @@ static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv, tick_unfreeze(); start_critical_timings(); - time_end = ns_to_ktime(local_clock()); + time_end = ns_to_ktime(local_clock_noinstr()); dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start); dev->states_usage[index].s2idle_usage++; @@ -243,7 +243,7 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev, sched_idle_set_state(target_state); trace_cpu_idle(index, dev->cpu); - time_start = ns_to_ktime(local_clock()); + time_start = ns_to_ktime(local_clock_noinstr()); stop_critical_timings(); if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) { @@ -276,7 +276,7 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev, start_critical_timings(); sched_clock_idle_wakeup_event(); - time_end = ns_to_ktime(local_clock()); + time_end = 
ns_to_ktime(local_clock_noinstr()); trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); /* The cpu is no longer idle or about to enter idle. */ diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c index bdcfeaecd228..9b6d90a72601 100644 --- a/drivers/cpuidle/poll_state.c +++ b/drivers/cpuidle/poll_state.c @@ -15,7 +15,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev, { u64 time_start; - time_start = local_clock(); + time_start = local_clock_noinstr(); dev->poll_time_limit = false; @@ -32,7 +32,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev, continue; loop_count = 0; - if (local_clock() - time_start > limit) { + if (local_clock_noinstr() - time_start > limit) { dev->poll_time_limit = true; break; } diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c index 10fe9f73a5fb..f2dd66706c10 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c @@ -8,7 +8,7 @@ * keysize in CBC and ECB mode. * Add support also for DES and 3DES in CBC and ECB mode. * - * You could find the datasheet in Documentation/arm/sunxi.rst + * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ #include "sun4i-ss.h" diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c index 006e40133c28..51a3a7b5b985 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c @@ -6,7 +6,7 @@ * * Core file which registers crypto algorithms supported by the SS. * - * You could find a link for the datasheet in Documentation/arm/sunxi.rst + * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include <linux/clk.h> #include <linux/crypto.h> diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c index d28292762b32..f7893e4ac59d 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c @@ -6,7 +6,7 @@ * * This file add support for MD5 and SHA1. * - * You could find the datasheet in Documentation/arm/sunxi.rst + * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ #include "sun4i-ss.h" #include <asm/unaligned.h> diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h index ba59c7a48825..6c5d4aa6453c 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h @@ -8,7 +8,7 @@ * Support MD5 and SHA1 hash algorithms. * Support DES and 3DES * - * You could find the datasheet in Documentation/arm/sunxi.rst + * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ #include <linux/clk.h> diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 74b4e910a38d..c13550090785 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -8,7 +8,7 @@ * This file add support for AES cipher with 128,192,256 bits keysize in * CBC and ECB mode. 
* - * You could find a link for the datasheet in Documentation/arm/sunxi.rst + * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include <linux/bottom_half.h> diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index a6865ff4d400..07ea0cc82b16 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -7,7 +7,7 @@ * * Core file which registers crypto algorithms supported by the CryptoEngine. * - * You could find a link for the datasheet in Documentation/arm/sunxi.rst + * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include <linux/clk.h> #include <linux/crypto.h> diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 8b5b9b9d04c3..930ad157004c 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -7,7 +7,7 @@ * * This file add support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512. * - * You could find the datasheet in Documentation/arm/sunxi.rst + * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ #include <linux/bottom_half.h> #include <linux/dma-mapping.h> diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c index b3cc43ea6c8a..80815379f6fc 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c @@ -7,7 +7,7 @@ * * This file handle the PRNG * - * You could find a link for the datasheet in Documentation/arm/sunxi.rst + * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include "sun8i-ce.h" #include <linux/dma-mapping.h> diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c index e2b9b9104694..9c35f2a83eda 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c @@ -7,7 +7,7 @@ * * This file handle the TRNG * - * You could find a link for the datasheet in Documentation/arm/sunxi.rst + * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include "sun8i-ce.h" #include <linux/dma-mapping.h> diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 16966cc94e24..381a90fbeaff 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -8,7 +8,7 @@ * This file add support for AES cipher with 128,192,256 bits keysize in * CBC and ECB mode. 
* - * You could find a link for the datasheet in Documentation/arm/sunxi.rst + * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include <linux/bottom_half.h> diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index c9dc06f97857..3dd844b40ff7 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -7,7 +7,7 @@ * * Core file which registers crypto algorithms supported by the SecuritySystem * - * You could find a link for the datasheet in Documentation/arm/sunxi.rst + * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include <linux/clk.h> #include <linux/crypto.h> diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 577bf636f7fb..a4b67d130d11 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -7,7 +7,7 @@ * * This file add support for MD5 and SHA1/SHA224/SHA256. * - * You could find the datasheet in Documentation/arm/sunxi.rst + * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ #include <linux/bottom_half.h> #include <linux/dma-mapping.h> diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c index 70c7b5d571b8..a923cfc6553f 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c @@ -7,7 +7,7 @@ * * This file handle the PRNG found in the SS * - * You could find a link for the datasheet in Documentation/arm/sunxi.rst + * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include "sun8i-ss.h" #include <linux/dma-mapping.h> diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c index ddf6e913c1c4..30e6acfc93d9 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c @@ -357,9 +357,9 @@ static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs) u64 vfpf_mbox_base; int err, i; - cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox", - WQ_UNBOUND | WQ_HIGHPRI | - WQ_MEM_RECLAIM, 1); + cptpf->vfpf_mbox_wq = + alloc_ordered_workqueue("cpt_vfpf_mailbox", + WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!cptpf->vfpf_mbox_wq) return -ENOMEM; @@ -453,9 +453,9 @@ static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf) resource_size_t offset; int err; - cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox", - WQ_UNBOUND | WQ_HIGHPRI | - WQ_MEM_RECLAIM, 1); + cptpf->afpf_mbox_wq = + alloc_ordered_workqueue("cpt_afpf_mailbox", + WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!cptpf->afpf_mbox_wq) return -ENOMEM; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c index 392e9fee05e8..6023a7adb70c 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c @@ -75,9 +75,9 @@ static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf) resource_size_t offset, size; int ret; - cptvf->pfvf_mbox_wq = alloc_workqueue("cpt_pfvf_mailbox", - WQ_UNBOUND | WQ_HIGHPRI | - WQ_MEM_RECLAIM, 1); + cptvf->pfvf_mbox_wq = + alloc_ordered_workqueue("cpt_pfvf_mailbox", + WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!cptvf->pfvf_mbox_wq) return -ENOMEM; diff --git a/drivers/devfreq/exynos-bus.c 
b/drivers/devfreq/exynos-bus.c index 88414445adf3..245898f1a88e 100644 --- a/drivers/devfreq/exynos-bus.c +++ b/drivers/devfreq/exynos-bus.c @@ -518,6 +518,7 @@ static struct platform_driver exynos_bus_platdrv = { }; module_platform_driver(exynos_bus_platdrv); +MODULE_SOFTDEP("pre: exynos_ppmu"); MODULE_DESCRIPTION("Generic Exynos Bus frequency driver"); MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c index e5458ada5197..6354622eda65 100644 --- a/drivers/devfreq/mtk-cci-devfreq.c +++ b/drivers/devfreq/mtk-cci-devfreq.c @@ -127,7 +127,7 @@ static int mtk_ccifreq_target(struct device *dev, unsigned long *freq, u32 flags) { struct mtk_ccifreq_drv *drv = dev_get_drvdata(dev); - struct clk *cci_pll = clk_get_parent(drv->cci_clk); + struct clk *cci_pll; struct dev_pm_opp *opp; unsigned long opp_rate; int voltage, pre_voltage, inter_voltage, target_voltage, ret; @@ -139,6 +139,7 @@ static int mtk_ccifreq_target(struct device *dev, unsigned long *freq, return 0; inter_voltage = drv->inter_voltage; + cci_pll = clk_get_parent(drv->cci_clk); opp_rate = *freq; opp = devfreq_recommended_opp(dev, &opp_rate, 1); diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 01f2e86f3f7c..12cf6bb2e3ce 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -12,7 +12,6 @@ #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/udmabuf.h> -#include <linux/hugetlb.h> #include <linux/vmalloc.h> #include <linux/iosys-map.h> @@ -207,9 +206,7 @@ static long udmabuf_create(struct miscdevice *device, struct udmabuf *ubuf; struct dma_buf *buf; pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit; - struct page *page, *hpage = NULL; - pgoff_t subpgoff, maxsubpgs; - struct hstate *hpstate; + struct page *page; int seals, ret = -EINVAL; u32 i, flags; @@ -245,7 +242,7 @@ static long udmabuf_create(struct miscdevice *device, if (!memfd) goto err; mapping = memfd->f_mapping; - if (!shmem_mapping(mapping) && !is_file_hugepages(memfd)) + if (!shmem_mapping(mapping)) goto err; seals = memfd_fcntl(memfd, F_GET_SEALS, 0); if (seals == -EINVAL) @@ -256,48 +253,16 @@ static long udmabuf_create(struct miscdevice *device, goto err; pgoff = list[i].offset >> PAGE_SHIFT; pgcnt = list[i].size >> PAGE_SHIFT; - if (is_file_hugepages(memfd)) { - hpstate = hstate_file(memfd); - pgoff = list[i].offset >> huge_page_shift(hpstate); - subpgoff = (list[i].offset & - ~huge_page_mask(hpstate)) >> PAGE_SHIFT; - maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT; - } for (pgidx = 0; pgidx < pgcnt; pgidx++) { - if (is_file_hugepages(memfd)) { - if (!hpage) { - hpage = find_get_page_flags(mapping, pgoff, - FGP_ACCESSED); - if (!hpage) { - ret = -EINVAL; - goto err; - } - } - page = hpage + subpgoff; - get_page(page); - subpgoff++; - if (subpgoff == maxsubpgs) { - put_page(hpage); - hpage = NULL; - subpgoff = 0; - pgoff++; - } - } else { - page = shmem_read_mapping_page(mapping, - pgoff + pgidx); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - goto err; - } + page = shmem_read_mapping_page(mapping, pgoff + pgidx); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + goto err; } ubuf->pages[pgbuf++] = page; } fput(memfd); memfd = NULL; - if (hpage) { - put_page(hpage); - hpage = NULL; - } } exp_info.ops = &udmabuf_ops; diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 68f576700911..110e99b86a66 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -550,4 +550,15 @@ config EDAC_ZYNQMP 
Xilinx ZynqMP OCM (On Chip Memory) controller. It can also be built as a module. In that case it will be called zynqmp_edac. +config EDAC_NPCM + tristate "Nuvoton NPCM DDR Memory Controller" + depends on (ARCH_NPCM || COMPILE_TEST) + help + Support for error detection and correction on the Nuvoton NPCM DDR + memory controller. + + The memory controller supports single bit error correction, double bit + error detection (in-line ECC in which a section 1/8th of the memory + device used to store data is used for ECC storage). + endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 9b025c5b3061..61945d3113cc 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -84,4 +84,5 @@ obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o +obj-$(CONFIG_EDAC_NPCM) += npcm_edac.o obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 5c4292e65b96..597dae7692b1 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -975,6 +975,74 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr) return csrow; } +/* + * See AMD PPR DF::LclNodeTypeMap + * + * This register gives information for nodes of the same type within a system. + * + * Reading this register from a GPU node will tell how many GPU nodes are in the + * system and what the lowest AMD Node ID value is for the GPU nodes. Use this + * info to fixup the Linux logical "Node ID" value set in the AMD NB code and EDAC. + */ +static struct local_node_map { + u16 node_count; + u16 base_node_id; +} gpu_node_map; + +#define PCI_DEVICE_ID_AMD_MI200_DF_F1 0x14d1 +#define REG_LOCAL_NODE_TYPE_MAP 0x144 + +/* Local Node Type Map (LNTM) fields */ +#define LNTM_NODE_COUNT GENMASK(27, 16) +#define LNTM_BASE_NODE_ID GENMASK(11, 0) + +static int gpu_get_node_map(void) +{ + struct pci_dev *pdev; + int ret; + u32 tmp; + + /* + * Node ID 0 is reserved for CPUs. + * Therefore, a non-zero Node ID means we've already cached the values. + */ + if (gpu_node_map.base_node_id) + return 0; + + pdev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F1, NULL); + if (!pdev) { + ret = -ENODEV; + goto out; + } + + ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp); + if (ret) + goto out; + + gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp); + gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp); + +out: + pci_dev_put(pdev); + return ret; +} + +static int fixup_node_id(int node_id, struct mce *m) +{ + /* MCA_IPID[InstanceIdHi] give the AMD Node ID for the bank. */ + u8 nid = (m->ipid >> 44) & 0xF; + + if (smca_get_bank_type(m->extcpu, m->bank) != SMCA_UMC_V2) + return node_id; + + /* Nodes below the GPU base node are CPU nodes and don't need a fixup. */ + if (nid < gpu_node_map.base_node_id) + return node_id; + + /* Convert the hardware-provided AMD Node ID to a Linux logical one. */ + return nid - gpu_node_map.base_node_id + 1; +} + /* Protect the PCI config register pairs used for DF indirect access. 
*/ static DEFINE_MUTEX(df_indirect_mutex); @@ -1426,12 +1494,47 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt) return cs_mode; } +static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode, + int csrow_nr, int dimm) +{ + u32 msb, weight, num_zero_bits; + u32 addr_mask_deinterleaved; + int size = 0; + + /* + * The number of zero bits in the mask is equal to the number of bits + * in a full mask minus the number of bits in the current mask. + * + * The MSB is the number of bits in the full mask because BIT[0] is + * always 0. + * + * In the special 3 Rank interleaving case, a single bit is flipped + * without swapping with the most significant bit. This can be handled + * by keeping the MSB where it is and ignoring the single zero bit. + */ + msb = fls(addr_mask_orig) - 1; + weight = hweight_long(addr_mask_orig); + num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE); + + /* Take the number of zero bits off from the top of the mask. */ + addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1); + + edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm); + edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig); + edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved); + + /* Register [31:1] = Address [39:9]. Size is in kBs here. */ + size = (addr_mask_deinterleaved >> 2) + 1; + + /* Return size in MBs. */ + return size >> 10; +} + static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc, unsigned int cs_mode, int csrow_nr) { - u32 addr_mask_orig, addr_mask_deinterleaved; - u32 msb, weight, num_zero_bits; int cs_mask_nr = csrow_nr; + u32 addr_mask_orig; int dimm, size = 0; /* No Chip Selects are enabled. */ @@ -1475,33 +1578,7 @@ static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc, else addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr]; - /* - * The number of zero bits in the mask is equal to the number of bits - * in a full mask minus the number of bits in the current mask. - * - * The MSB is the number of bits in the full mask because BIT[0] is - * always 0. - * - * In the special 3 Rank interleaving case, a single bit is flipped - * without swapping with the most significant bit. This can be handled - * by keeping the MSB where it is and ignoring the single zero bit. - */ - msb = fls(addr_mask_orig) - 1; - weight = hweight_long(addr_mask_orig); - num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE); - - /* Take the number of zero bits off from the top of the mask. */ - addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1); - - edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm); - edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig); - edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved); - - /* Register [31:1] = Address [39:9]. Size is in kBs here. */ - size = (addr_mask_deinterleaved >> 2) + 1; - - /* Return size in MBs. */ - return size >> 10; + return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm); } static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) @@ -2992,6 +3069,8 @@ static void decode_umc_error(int node_id, struct mce *m) struct err_info err; u64 sys_addr; + node_id = fixup_node_id(node_id, m); + mci = edac_mc_find(node_id); if (!mci) return; @@ -3675,6 +3754,227 @@ static int umc_hw_info_get(struct amd64_pvt *pvt) return 0; } +/* + * The CPUs have one channel per UMC, so UMC number is equivalent to a + * channel number. 
The GPUs have 8 channels per UMC, so the UMC number no + * longer works as a channel number. + * + * The channel number within a GPU UMC is given in MCA_IPID[15:12]. + * However, the IDs are split such that two UMC values go to one UMC, and + * the channel numbers are split in two groups of four. + * + * Refer to comment on gpu_get_umc_base(). + * + * For example, + * UMC0 CH[3:0] = 0x0005[3:0]000 + * UMC0 CH[7:4] = 0x0015[3:0]000 + * UMC1 CH[3:0] = 0x0025[3:0]000 + * UMC1 CH[7:4] = 0x0035[3:0]000 + */ +static void gpu_get_err_info(struct mce *m, struct err_info *err) +{ + u8 ch = (m->ipid & GENMASK(31, 0)) >> 20; + u8 phy = ((m->ipid >> 12) & 0xf); + + err->channel = ch % 2 ? phy + 4 : phy; + err->csrow = phy; +} + +static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc, + unsigned int cs_mode, int csrow_nr) +{ + u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr]; + + return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1); +} + +static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) +{ + int size, cs_mode, cs = 0; + + edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl); + + cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY; + + for_each_chip_select(cs, ctrl, pvt) { + size = gpu_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs); + amd64_info(EDAC_MC ": %d: %5dMB\n", cs, size); + } +} + +static void gpu_dump_misc_regs(struct amd64_pvt *pvt) +{ + struct amd64_umc *umc; + u32 i; + + for_each_umc(i) { + umc = &pvt->umc[i]; + + edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg); + edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl); + edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl); + edac_dbg(1, "UMC%d All HBMs support ECC: yes\n", i); + + gpu_debug_display_dimm_sizes(pvt, i); + } +} + +static u32 gpu_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) +{ + u32 nr_pages; + int cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY; + + nr_pages = gpu_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr); + nr_pages <<= 20 - PAGE_SHIFT; + + edac_dbg(0, "csrow: %d, channel: %d\n", csrow_nr, dct); + edac_dbg(0, "nr_pages/channel: %u\n", nr_pages); + + return nr_pages; +} + +static void gpu_init_csrows(struct mem_ctl_info *mci) +{ + struct amd64_pvt *pvt = mci->pvt_info; + struct dimm_info *dimm; + u8 umc, cs; + + for_each_umc(umc) { + for_each_chip_select(cs, umc, pvt) { + if (!csrow_enabled(cs, umc, pvt)) + continue; + + dimm = mci->csrows[umc]->channels[cs]->dimm; + + edac_dbg(1, "MC node: %d, csrow: %d\n", + pvt->mc_node_id, cs); + + dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs); + dimm->edac_mode = EDAC_SECDED; + dimm->mtype = MEM_HBM2; + dimm->dtype = DEV_X16; + dimm->grain = 64; + } + } +} + +static void gpu_setup_mci_misc_attrs(struct mem_ctl_info *mci) +{ + struct amd64_pvt *pvt = mci->pvt_info; + + mci->mtype_cap = MEM_FLAG_HBM2; + mci->edac_ctl_cap = EDAC_FLAG_SECDED; + + mci->edac_cap = EDAC_FLAG_EC; + mci->mod_name = EDAC_MOD_STR; + mci->ctl_name = pvt->ctl_name; + mci->dev_name = pci_name(pvt->F3); + mci->ctl_page_to_phys = NULL; + + gpu_init_csrows(mci); +} + +/* ECC is enabled by default on GPU nodes */ +static bool gpu_ecc_enabled(struct amd64_pvt *pvt) +{ + return true; +} + +static inline u32 gpu_get_umc_base(u8 umc, u8 channel) +{ + /* + * On CPUs, there is one channel per UMC, so UMC numbering equals + * channel numbering. On GPUs, there are eight channels per UMC, + * so the channel numbering is different from UMC numbering. 
+ * + * On CPU nodes channels are selected in 6th nibble + * UMC chY[3:0]= [(chY*2 + 1) : (chY*2)]50000; + * + * On GPU nodes channels are selected in 3rd nibble + * HBM chX[3:0]= [Y ]5X[3:0]000; + * HBM chX[7:4]= [Y+1]5X[3:0]000 + */ + umc *= 2; + + if (channel >= 4) + umc++; + + return 0x50000 + (umc << 20) + ((channel % 4) << 12); +} + +static void gpu_read_mc_regs(struct amd64_pvt *pvt) +{ + u8 nid = pvt->mc_node_id; + struct amd64_umc *umc; + u32 i, umc_base; + + /* Read registers from each UMC */ + for_each_umc(i) { + umc_base = gpu_get_umc_base(i, 0); + umc = &pvt->umc[i]; + + amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); + amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl); + amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl); + } +} + +static void gpu_read_base_mask(struct amd64_pvt *pvt) +{ + u32 base_reg, mask_reg; + u32 *base, *mask; + int umc, cs; + + for_each_umc(umc) { + for_each_chip_select(cs, umc, pvt) { + base_reg = gpu_get_umc_base(umc, cs) + UMCCH_BASE_ADDR; + base = &pvt->csels[umc].csbases[cs]; + + if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) { + edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n", + umc, cs, *base, base_reg); + } + + mask_reg = gpu_get_umc_base(umc, cs) + UMCCH_ADDR_MASK; + mask = &pvt->csels[umc].csmasks[cs]; + + if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) { + edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n", + umc, cs, *mask, mask_reg); + } + } + } +} + +static void gpu_prep_chip_selects(struct amd64_pvt *pvt) +{ + int umc; + + for_each_umc(umc) { + pvt->csels[umc].b_cnt = 8; + pvt->csels[umc].m_cnt = 8; + } +} + +static int gpu_hw_info_get(struct amd64_pvt *pvt) +{ + int ret; + + ret = gpu_get_node_map(); + if (ret) + return ret; + + pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL); + if (!pvt->umc) + return -ENOMEM; + + gpu_prep_chip_selects(pvt); + gpu_read_base_mask(pvt); + gpu_read_mc_regs(pvt); + + return 0; +} + static void hw_info_put(struct amd64_pvt *pvt) { pci_dev_put(pvt->F1); @@ -3690,6 +3990,14 @@ static struct low_ops umc_ops = { .get_err_info = umc_get_err_info, }; +static struct low_ops gpu_ops = { + .hw_info_get = gpu_hw_info_get, + .ecc_enabled = gpu_ecc_enabled, + .setup_mci_misc_attrs = gpu_setup_mci_misc_attrs, + .dump_misc_regs = gpu_dump_misc_regs, + .get_err_info = gpu_get_err_info, +}; + /* Use Family 16h versions for defaults and adjust as needed below. */ static struct low_ops dct_ops = { .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -3813,9 +4121,27 @@ static int per_family_init(struct amd64_pvt *pvt) case 0x20 ... 0x2f: pvt->ctl_name = "F19h_M20h"; break; + case 0x30 ... 0x3f: + if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) { + pvt->ctl_name = "MI200"; + pvt->max_mcs = 4; + pvt->ops = &gpu_ops; + } else { + pvt->ctl_name = "F19h_M30h"; + pvt->max_mcs = 8; + } + break; case 0x50 ... 0x5f: pvt->ctl_name = "F19h_M50h"; break; + case 0x60 ... 0x6f: + pvt->ctl_name = "F19h_M60h"; + pvt->flags.zn_regs_v2 = 1; + break; + case 0x70 ... 0x7f: + pvt->ctl_name = "F19h_M70h"; + pvt->flags.zn_regs_v2 = 1; + break; case 0xa0 ... 0xaf: pvt->ctl_name = "F19h_MA0h"; pvt->max_mcs = 12; @@ -3846,11 +4172,17 @@ static int init_one_instance(struct amd64_pvt *pvt) struct edac_mc_layer layers[2]; int ret = -ENOMEM; + /* + * For Heterogeneous family EDAC CHIP_SELECT and CHANNEL layers should + * be swapped to fit into the layers. 
+ */ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; - layers[0].size = pvt->csels[0].b_cnt; + layers[0].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ? + pvt->max_mcs : pvt->csels[0].b_cnt; layers[0].is_virt_csrow = true; layers[1].type = EDAC_MC_LAYER_CHANNEL; - layers[1].size = pvt->max_mcs; + layers[1].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ? + pvt->csels[0].b_cnt : pvt->max_mcs; layers[1].is_virt_csrow = false; mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0); @@ -4074,8 +4406,6 @@ static int __init amd64_edac_init(void) amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR); #endif - printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION); - return 0; err_pci: @@ -4121,7 +4451,7 @@ module_exit(amd64_edac_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, Dave Peterson, Thayne Harbaugh; AMD"); -MODULE_DESCRIPTION("MC support for AMD64 memory controllers - " EDAC_AMD64_VERSION); +MODULE_DESCRIPTION("MC support for AMD64 memory controllers"); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index e84fe0d4120a..5a4e4a59682b 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -16,6 +16,7 @@ #include <linux/slab.h> #include <linux/mmzone.h> #include <linux/edac.h> +#include <linux/bitfield.h> #include <asm/cpu_device_id.h> #include <asm/msr.h> #include "edac_module.h" @@ -85,7 +86,6 @@ * sections 3.5.4 and 3.5.5 for more information. */ -#define EDAC_AMD64_VERSION "3.5.0" #define EDAC_MOD_STR "amd64_edac" /* Extended Model from CPUID, for CPU Revision numbers */ diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index cc5c63feb26a..9215c06783df 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -1186,7 +1186,8 @@ static void decode_smca_error(struct mce *m) if (xec < smca_mce_descs[bank_type].num_descs) pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]); - if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc) + if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) && + xec == 0 && decode_dram_ecc) decode_dram_ecc(topology_die_id(m->extcpu), m); } diff --git a/drivers/edac/npcm_edac.c b/drivers/edac/npcm_edac.c new file mode 100644 index 000000000000..12b95be3e989 --- /dev/null +++ b/drivers/edac/npcm_edac.c @@ -0,0 +1,543 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (c) 2022 Nuvoton Technology Corporation + +#include <linux/debugfs.h> +#include <linux/iopoll.h> +#include <linux/of_device.h> +#include <linux/regmap.h> +#include "edac_module.h" + +#define EDAC_MOD_NAME "npcm-edac" +#define EDAC_MSG_SIZE 256 + +/* chip serials */ +#define NPCM7XX_CHIP BIT(0) +#define NPCM8XX_CHIP BIT(1) + +/* syndrome values */ +#define UE_SYNDROME 0x03 + +/* error injection */ +#define ERROR_TYPE_CORRECTABLE 0 +#define ERROR_TYPE_UNCORRECTABLE 1 +#define ERROR_LOCATION_DATA 0 +#define ERROR_LOCATION_CHECKCODE 1 +#define ERROR_BIT_DATA_MAX 63 +#define ERROR_BIT_CHECKCODE_MAX 7 + +static char data_synd[] = { + 0xf4, 0xf1, 0xec, 0xea, 0xe9, 0xe6, 0xe5, 0xe3, + 0xdc, 0xda, 0xd9, 0xd6, 0xd5, 0xd3, 0xce, 0xcb, + 0xb5, 0xb0, 0xad, 0xab, 0xa8, 0xa7, 0xa4, 0xa2, + 0x9d, 0x9b, 0x98, 0x97, 0x94, 0x92, 0x8f, 0x8a, + 0x75, 0x70, 0x6d, 0x6b, 0x68, 0x67, 0x64, 0x62, + 0x5e, 0x5b, 0x58, 0x57, 0x54, 0x52, 0x4f, 0x4a, + 0x34, 0x31, 0x2c, 0x2a, 0x29, 0x26, 0x25, 0x23, + 0x1c, 0x1a, 0x19, 0x16, 0x15, 0x13, 0x0e, 0x0b 
+}; + +static struct regmap *npcm_regmap; + +struct npcm_platform_data { + /* chip serials */ + int chip; + + /* memory controller registers */ + u32 ctl_ecc_en; + u32 ctl_int_status; + u32 ctl_int_ack; + u32 ctl_int_mask_master; + u32 ctl_int_mask_ecc; + u32 ctl_ce_addr_l; + u32 ctl_ce_addr_h; + u32 ctl_ce_data_l; + u32 ctl_ce_data_h; + u32 ctl_ce_synd; + u32 ctl_ue_addr_l; + u32 ctl_ue_addr_h; + u32 ctl_ue_data_l; + u32 ctl_ue_data_h; + u32 ctl_ue_synd; + u32 ctl_source_id; + u32 ctl_controller_busy; + u32 ctl_xor_check_bits; + + /* masks and shifts */ + u32 ecc_en_mask; + u32 int_status_ce_mask; + u32 int_status_ue_mask; + u32 int_ack_ce_mask; + u32 int_ack_ue_mask; + u32 int_mask_master_non_ecc_mask; + u32 int_mask_master_global_mask; + u32 int_mask_ecc_non_event_mask; + u32 ce_addr_h_mask; + u32 ce_synd_mask; + u32 ce_synd_shift; + u32 ue_addr_h_mask; + u32 ue_synd_mask; + u32 ue_synd_shift; + u32 source_id_ce_mask; + u32 source_id_ce_shift; + u32 source_id_ue_mask; + u32 source_id_ue_shift; + u32 controller_busy_mask; + u32 xor_check_bits_mask; + u32 xor_check_bits_shift; + u32 writeback_en_mask; + u32 fwc_mask; +}; + +struct priv_data { + void __iomem *reg; + char message[EDAC_MSG_SIZE]; + const struct npcm_platform_data *pdata; + + /* error injection */ + struct dentry *debugfs; + u8 error_type; + u8 location; + u8 bit; +}; + +static void handle_ce(struct mem_ctl_info *mci) +{ + struct priv_data *priv = mci->pvt_info; + const struct npcm_platform_data *pdata; + u32 val_h = 0, val_l, id, synd; + u64 addr = 0, data = 0; + + pdata = priv->pdata; + regmap_read(npcm_regmap, pdata->ctl_ce_addr_l, &val_l); + if (pdata->chip == NPCM8XX_CHIP) { + regmap_read(npcm_regmap, pdata->ctl_ce_addr_h, &val_h); + val_h &= pdata->ce_addr_h_mask; + } + addr = ((addr | val_h) << 32) | val_l; + + regmap_read(npcm_regmap, pdata->ctl_ce_data_l, &val_l); + if (pdata->chip == NPCM8XX_CHIP) + regmap_read(npcm_regmap, pdata->ctl_ce_data_h, &val_h); + data = ((data | val_h) << 32) | val_l; + + regmap_read(npcm_regmap, pdata->ctl_source_id, &id); + id = (id & pdata->source_id_ce_mask) >> pdata->source_id_ce_shift; + + regmap_read(npcm_regmap, pdata->ctl_ce_synd, &synd); + synd = (synd & pdata->ce_synd_mask) >> pdata->ce_synd_shift; + + snprintf(priv->message, EDAC_MSG_SIZE, + "addr = 0x%llx, data = 0x%llx, id = 0x%x", addr, data, id); + + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, addr >> PAGE_SHIFT, + addr & ~PAGE_MASK, synd, 0, 0, -1, priv->message, ""); +} + +static void handle_ue(struct mem_ctl_info *mci) +{ + struct priv_data *priv = mci->pvt_info; + const struct npcm_platform_data *pdata; + u32 val_h = 0, val_l, id, synd; + u64 addr = 0, data = 0; + + pdata = priv->pdata; + regmap_read(npcm_regmap, pdata->ctl_ue_addr_l, &val_l); + if (pdata->chip == NPCM8XX_CHIP) { + regmap_read(npcm_regmap, pdata->ctl_ue_addr_h, &val_h); + val_h &= pdata->ue_addr_h_mask; + } + addr = ((addr | val_h) << 32) | val_l; + + regmap_read(npcm_regmap, pdata->ctl_ue_data_l, &val_l); + if (pdata->chip == NPCM8XX_CHIP) + regmap_read(npcm_regmap, pdata->ctl_ue_data_h, &val_h); + data = ((data | val_h) << 32) | val_l; + + regmap_read(npcm_regmap, pdata->ctl_source_id, &id); + id = (id & pdata->source_id_ue_mask) >> pdata->source_id_ue_shift; + + regmap_read(npcm_regmap, pdata->ctl_ue_synd, &synd); + synd = (synd & pdata->ue_synd_mask) >> pdata->ue_synd_shift; + + snprintf(priv->message, EDAC_MSG_SIZE, + "addr = 0x%llx, data = 0x%llx, id = 0x%x", addr, data, id); + + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, addr 
>> PAGE_SHIFT, + addr & ~PAGE_MASK, synd, 0, 0, -1, priv->message, ""); +} + +static irqreturn_t edac_ecc_isr(int irq, void *dev_id) +{ + const struct npcm_platform_data *pdata; + struct mem_ctl_info *mci = dev_id; + u32 status; + + pdata = ((struct priv_data *)mci->pvt_info)->pdata; + regmap_read(npcm_regmap, pdata->ctl_int_status, &status); + if (status & pdata->int_status_ce_mask) { + handle_ce(mci); + + /* acknowledge the CE interrupt */ + regmap_write(npcm_regmap, pdata->ctl_int_ack, + pdata->int_ack_ce_mask); + return IRQ_HANDLED; + } else if (status & pdata->int_status_ue_mask) { + handle_ue(mci); + + /* acknowledge the UE interrupt */ + regmap_write(npcm_regmap, pdata->ctl_int_ack, + pdata->int_ack_ue_mask); + return IRQ_HANDLED; + } + + WARN_ON_ONCE(1); + return IRQ_NONE; +} + +static ssize_t force_ecc_error(struct file *file, const char __user *data, + size_t count, loff_t *ppos) +{ + struct device *dev = file->private_data; + struct mem_ctl_info *mci = to_mci(dev); + struct priv_data *priv = mci->pvt_info; + const struct npcm_platform_data *pdata; + u32 val, syndrome; + int ret; + + pdata = priv->pdata; + edac_printk(KERN_INFO, EDAC_MOD_NAME, + "force an ECC error, type = %d, location = %d, bit = %d\n", + priv->error_type, priv->location, priv->bit); + + /* ensure no pending writes */ + ret = regmap_read_poll_timeout(npcm_regmap, pdata->ctl_controller_busy, + val, !(val & pdata->controller_busy_mask), + 1000, 10000); + if (ret) { + edac_printk(KERN_INFO, EDAC_MOD_NAME, + "wait pending writes timeout\n"); + return count; + } + + regmap_read(npcm_regmap, pdata->ctl_xor_check_bits, &val); + val &= ~pdata->xor_check_bits_mask; + + /* write syndrome to XOR_CHECK_BITS */ + if (priv->error_type == ERROR_TYPE_CORRECTABLE) { + if (priv->location == ERROR_LOCATION_DATA && + priv->bit > ERROR_BIT_DATA_MAX) { + edac_printk(KERN_INFO, EDAC_MOD_NAME, + "data bit should not exceed %d (%d)\n", + ERROR_BIT_DATA_MAX, priv->bit); + return count; + } + + if (priv->location == ERROR_LOCATION_CHECKCODE && + priv->bit > ERROR_BIT_CHECKCODE_MAX) { + edac_printk(KERN_INFO, EDAC_MOD_NAME, + "checkcode bit should not exceed %d (%d)\n", + ERROR_BIT_CHECKCODE_MAX, priv->bit); + return count; + } + + syndrome = priv->location ? 1 << priv->bit + : data_synd[priv->bit]; + + regmap_write(npcm_regmap, pdata->ctl_xor_check_bits, + val | (syndrome << pdata->xor_check_bits_shift) | + pdata->writeback_en_mask); + } else if (priv->error_type == ERROR_TYPE_UNCORRECTABLE) { + regmap_write(npcm_regmap, pdata->ctl_xor_check_bits, + val | (UE_SYNDROME << pdata->xor_check_bits_shift)); + } + + /* force write check */ + regmap_update_bits(npcm_regmap, pdata->ctl_xor_check_bits, + pdata->fwc_mask, pdata->fwc_mask); + + return count; +} + +static const struct file_operations force_ecc_error_fops = { + .open = simple_open, + .write = force_ecc_error, + .llseek = generic_file_llseek, +}; + +/* + * Setup debugfs for error injection. + * + * Nodes: + * error_type - 0: CE, 1: UE + * location - 0: data, 1: checkcode + * bit - 0 ~ 63 for data and 0 ~ 7 for checkcode + * force_ecc_error - trigger + * + * Examples: + * 1. Inject a correctable error (CE) at checkcode bit 7. + * ~# echo 0 > /sys/kernel/debug/edac/npcm-edac/error_type + * ~# echo 1 > /sys/kernel/debug/edac/npcm-edac/location + * ~# echo 7 > /sys/kernel/debug/edac/npcm-edac/bit + * ~# echo 1 > /sys/kernel/debug/edac/npcm-edac/force_ecc_error + * + * 2. Inject an uncorrectable error (UE). 
+ * ~# echo 1 > /sys/kernel/debug/edac/npcm-edac/error_type + * ~# echo 1 > /sys/kernel/debug/edac/npcm-edac/force_ecc_error + */ +static void setup_debugfs(struct mem_ctl_info *mci) +{ + struct priv_data *priv = mci->pvt_info; + + priv->debugfs = edac_debugfs_create_dir(mci->mod_name); + if (!priv->debugfs) + return; + + edac_debugfs_create_x8("error_type", 0644, priv->debugfs, &priv->error_type); + edac_debugfs_create_x8("location", 0644, priv->debugfs, &priv->location); + edac_debugfs_create_x8("bit", 0644, priv->debugfs, &priv->bit); + edac_debugfs_create_file("force_ecc_error", 0200, priv->debugfs, + &mci->dev, &force_ecc_error_fops); +} + +static int setup_irq(struct mem_ctl_info *mci, struct platform_device *pdev) +{ + const struct npcm_platform_data *pdata; + int ret, irq; + + pdata = ((struct priv_data *)mci->pvt_info)->pdata; + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + edac_printk(KERN_ERR, EDAC_MOD_NAME, "IRQ not defined in DTS\n"); + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, edac_ecc_isr, 0, + dev_name(&pdev->dev), mci); + if (ret < 0) { + edac_printk(KERN_ERR, EDAC_MOD_NAME, "failed to request IRQ\n"); + return ret; + } + + /* enable the functional group of ECC and mask the others */ + regmap_write(npcm_regmap, pdata->ctl_int_mask_master, + pdata->int_mask_master_non_ecc_mask); + + if (pdata->chip == NPCM8XX_CHIP) + regmap_write(npcm_regmap, pdata->ctl_int_mask_ecc, + pdata->int_mask_ecc_non_event_mask); + + return 0; +} + +static const struct regmap_config npcm_regmap_cfg = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, +}; + +static int edac_probe(struct platform_device *pdev) +{ + const struct npcm_platform_data *pdata; + struct device *dev = &pdev->dev; + struct edac_mc_layer layers[1]; + struct mem_ctl_info *mci; + struct priv_data *priv; + void __iomem *reg; + u32 val; + int rc; + + reg = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + npcm_regmap = devm_regmap_init_mmio(dev, reg, &npcm_regmap_cfg); + if (IS_ERR(npcm_regmap)) + return PTR_ERR(npcm_regmap); + + pdata = of_device_get_match_data(dev); + if (!pdata) + return -EINVAL; + + /* bail out if ECC is not enabled */ + regmap_read(npcm_regmap, pdata->ctl_ecc_en, &val); + if (!(val & pdata->ecc_en_mask)) { + edac_printk(KERN_ERR, EDAC_MOD_NAME, "ECC is not enabled\n"); + return -EPERM; + } + + edac_op_state = EDAC_OPSTATE_INT; + + layers[0].type = EDAC_MC_LAYER_ALL_MEM; + layers[0].size = 1; + + mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, + sizeof(struct priv_data)); + if (!mci) + return -ENOMEM; + + mci->pdev = &pdev->dev; + priv = mci->pvt_info; + priv->reg = reg; + priv->pdata = pdata; + platform_set_drvdata(pdev, mci); + + mci->mtype_cap = MEM_FLAG_DDR4; + mci->edac_ctl_cap = EDAC_FLAG_SECDED; + mci->scrub_cap = SCRUB_FLAG_HW_SRC; + mci->scrub_mode = SCRUB_HW_SRC; + mci->edac_cap = EDAC_FLAG_SECDED; + mci->ctl_name = "npcm_ddr_controller"; + mci->dev_name = dev_name(&pdev->dev); + mci->mod_name = EDAC_MOD_NAME; + mci->ctl_page_to_phys = NULL; + + rc = setup_irq(mci, pdev); + if (rc) + goto free_edac_mc; + + rc = edac_mc_add_mc(mci); + if (rc) + goto free_edac_mc; + + if (IS_ENABLED(CONFIG_EDAC_DEBUG) && pdata->chip == NPCM8XX_CHIP) + setup_debugfs(mci); + + return rc; + +free_edac_mc: + edac_mc_free(mci); + return rc; +} + +static int edac_remove(struct platform_device *pdev) +{ + struct mem_ctl_info *mci = platform_get_drvdata(pdev); + struct priv_data *priv = mci->pvt_info; + const struct npcm_platform_data *pdata; + + pdata = 
priv->pdata; + if (IS_ENABLED(CONFIG_EDAC_DEBUG) && pdata->chip == NPCM8XX_CHIP) + edac_debugfs_remove_recursive(priv->debugfs); + + edac_mc_del_mc(&pdev->dev); + edac_mc_free(mci); + + regmap_write(npcm_regmap, pdata->ctl_int_mask_master, + pdata->int_mask_master_global_mask); + regmap_update_bits(npcm_regmap, pdata->ctl_ecc_en, pdata->ecc_en_mask, 0); + + return 0; +} + +static const struct npcm_platform_data npcm750_edac = { + .chip = NPCM7XX_CHIP, + + /* memory controller registers */ + .ctl_ecc_en = 0x174, + .ctl_int_status = 0x1d0, + .ctl_int_ack = 0x1d4, + .ctl_int_mask_master = 0x1d8, + .ctl_ce_addr_l = 0x188, + .ctl_ce_data_l = 0x190, + .ctl_ce_synd = 0x18c, + .ctl_ue_addr_l = 0x17c, + .ctl_ue_data_l = 0x184, + .ctl_ue_synd = 0x180, + .ctl_source_id = 0x194, + + /* masks and shifts */ + .ecc_en_mask = BIT(24), + .int_status_ce_mask = GENMASK(4, 3), + .int_status_ue_mask = GENMASK(6, 5), + .int_ack_ce_mask = GENMASK(4, 3), + .int_ack_ue_mask = GENMASK(6, 5), + .int_mask_master_non_ecc_mask = GENMASK(30, 7) | GENMASK(2, 0), + .int_mask_master_global_mask = BIT(31), + .ce_synd_mask = GENMASK(6, 0), + .ce_synd_shift = 0, + .ue_synd_mask = GENMASK(6, 0), + .ue_synd_shift = 0, + .source_id_ce_mask = GENMASK(29, 16), + .source_id_ce_shift = 16, + .source_id_ue_mask = GENMASK(13, 0), + .source_id_ue_shift = 0, +}; + +static const struct npcm_platform_data npcm845_edac = { + .chip = NPCM8XX_CHIP, + + /* memory controller registers */ + .ctl_ecc_en = 0x16c, + .ctl_int_status = 0x228, + .ctl_int_ack = 0x244, + .ctl_int_mask_master = 0x220, + .ctl_int_mask_ecc = 0x260, + .ctl_ce_addr_l = 0x18c, + .ctl_ce_addr_h = 0x190, + .ctl_ce_data_l = 0x194, + .ctl_ce_data_h = 0x198, + .ctl_ce_synd = 0x190, + .ctl_ue_addr_l = 0x17c, + .ctl_ue_addr_h = 0x180, + .ctl_ue_data_l = 0x184, + .ctl_ue_data_h = 0x188, + .ctl_ue_synd = 0x180, + .ctl_source_id = 0x19c, + .ctl_controller_busy = 0x20c, + .ctl_xor_check_bits = 0x174, + + /* masks and shifts */ + .ecc_en_mask = GENMASK(17, 16), + .int_status_ce_mask = GENMASK(1, 0), + .int_status_ue_mask = GENMASK(3, 2), + .int_ack_ce_mask = GENMASK(1, 0), + .int_ack_ue_mask = GENMASK(3, 2), + .int_mask_master_non_ecc_mask = GENMASK(30, 3) | GENMASK(1, 0), + .int_mask_master_global_mask = BIT(31), + .int_mask_ecc_non_event_mask = GENMASK(8, 4), + .ce_addr_h_mask = GENMASK(1, 0), + .ce_synd_mask = GENMASK(15, 8), + .ce_synd_shift = 8, + .ue_addr_h_mask = GENMASK(1, 0), + .ue_synd_mask = GENMASK(15, 8), + .ue_synd_shift = 8, + .source_id_ce_mask = GENMASK(29, 16), + .source_id_ce_shift = 16, + .source_id_ue_mask = GENMASK(13, 0), + .source_id_ue_shift = 0, + .controller_busy_mask = BIT(0), + .xor_check_bits_mask = GENMASK(23, 16), + .xor_check_bits_shift = 16, + .writeback_en_mask = BIT(24), + .fwc_mask = BIT(8), +}; + +static const struct of_device_id npcm_edac_of_match[] = { + { + .compatible = "nuvoton,npcm750-memory-controller", + .data = &npcm750_edac + }, + { + .compatible = "nuvoton,npcm845-memory-controller", + .data = &npcm845_edac + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, npcm_edac_of_match); + +static struct platform_driver npcm_edac_driver = { + .driver = { + .name = "npcm-edac", + .of_match_table = npcm_edac_of_match, + }, + .probe = edac_probe, + .remove = edac_remove, +}; + +module_platform_driver(npcm_edac_driver); + +MODULE_AUTHOR("Medad CChien <medadyoung@gmail.com>"); +MODULE_AUTHOR("Marvin Lin <kflin@nuvoton.com>"); +MODULE_DESCRIPTION("Nuvoton NPCM EDAC Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/edac/thunderx_edac.c 
b/drivers/edac/thunderx_edac.c index 0bcd9f02c84a..b9c5772da959 100644 --- a/drivers/edac/thunderx_edac.c +++ b/drivers/edac/thunderx_edac.c @@ -481,7 +481,7 @@ static int thunderx_create_debugfs_nodes(struct dentry *parent, ent = edac_debugfs_create_file(attrs[i]->name, attrs[i]->mode, parent, data, &attrs[i]->fops); - if (!ent) + if (IS_ERR(ent)) break; } diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index e4ccfb6a8fa5..ec056f6f40ce 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -2124,6 +2124,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware file, blocks, le32_to_cpu(blk->len), type, le32_to_cpu(blk->id)); + region_name = cs_dsp_mem_region_name(type); mem = cs_dsp_find_region(dsp, type); if (!mem) { cs_dsp_err(dsp, "No base for region %x\n", type); @@ -2147,8 +2148,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware reg = dsp->ops->region_to_reg(mem, reg); reg += offset; } else { - cs_dsp_err(dsp, "No %x for algorithm %x\n", - type, le32_to_cpu(blk->id)); + cs_dsp_err(dsp, "No %s for algorithm %x\n", + region_name, le32_to_cpu(blk->id)); } break; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 043ca31c114e..231f1c70d1db 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -269,6 +269,20 @@ config EFI_COCO_SECRET virt/coco/efi_secret module to access the secrets, which in turn allows userspace programs to access the injected secrets. +config UNACCEPTED_MEMORY + bool + depends on EFI_STUB + help + Some Virtual Machine platforms, such as Intel TDX, require + some memory to be "accepted" by the guest before it can be used. + This mechanism helps prevent malicious hosts from making changes + to guest memory. + + UEFI specification v2.9 introduced EFI_UNACCEPTED_MEMORY memory type. + + This option adds support for unaccepted memory and makes such memory + usable by the kernel. 
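The bitmap sizing behind this option is spelled out in the new libstub/unaccepted_memory.c further down: one bitmap bit covers one unit_size chunk of physical address space (2MiB on x86), so eight chunks fit in a byte. A freestanding sketch of that arithmetic, assuming a unit-aligned range as the stub guarantees; the helper name here is illustrative, not from the patch:

#include <stdint.h>

/* One bit per unit_size chunk of address space; eight chunks per bitmap byte. */
static uint64_t unaccepted_bitmap_bytes(uint64_t start, uint64_t end,
					uint64_t unit_size)
{
	uint64_t units = (end - start) / unit_size;	/* range is unit-aligned */

	return (units + 7) / 8;
}

With unit_size = 2MiB, a 64GiB range gives unaccepted_bitmap_bytes(0, 64ULL << 30, 2 << 20) = 4096 bytes, which is the "4k bitmap can track 64GiB" figure quoted in allocate_unaccepted_bitmap() below.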
+ config EFI_EMBEDDED_FIRMWARE bool select CRYPTO_LIB_SHA256 diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index b51f2a4c821e..e489fefd23da 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -41,3 +41,4 @@ obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o obj-$(CONFIG_EFI_EARLYCON) += earlycon.o obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o obj-$(CONFIG_UEFI_CPER_X86) += cper-x86.o +obj-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index abeff7dc0b58..3a6ee7bb06f1 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -50,6 +50,9 @@ struct efi __read_mostly efi = { #ifdef CONFIG_EFI_COCO_SECRET .coco_secret = EFI_INVALID_TABLE_ADDR, #endif +#ifdef CONFIG_UNACCEPTED_MEMORY + .unaccepted = EFI_INVALID_TABLE_ADDR, +#endif }; EXPORT_SYMBOL(efi); @@ -361,24 +364,6 @@ static void __init efi_debugfs_init(void) static inline void efi_debugfs_init(void) {} #endif -static void refresh_nv_rng_seed(struct work_struct *work) -{ - u8 seed[EFI_RANDOM_SEED_SIZE]; - - get_random_bytes(seed, sizeof(seed)); - efi.set_variable(L"RandomSeed", &LINUX_EFI_RANDOM_SEED_TABLE_GUID, - EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | - EFI_VARIABLE_RUNTIME_ACCESS, sizeof(seed), seed); - memzero_explicit(seed, sizeof(seed)); -} -static int refresh_nv_rng_seed_notification(struct notifier_block *nb, unsigned long action, void *data) -{ - static DECLARE_WORK(work, refresh_nv_rng_seed); - schedule_work(&work); - return NOTIFY_DONE; -} -static struct notifier_block refresh_nv_rng_seed_nb = { .notifier_call = refresh_nv_rng_seed_notification }; - /* * We register the efi subsystem with the firmware subsystem and the * efivars subsystem with the efi subsystem, if the system was booted with @@ -451,9 +436,6 @@ static int __init efisubsys_init(void) platform_device_register_simple("efi_secret", 0, NULL, 0); #endif - if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) - execute_with_initialized_rng(&refresh_nv_rng_seed_nb); - return 0; err_remove_group: @@ -605,6 +587,9 @@ static const efi_config_table_type_t common_tables[] __initconst = { #ifdef CONFIG_EFI_COCO_SECRET {LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" }, #endif +#ifdef CONFIG_UNACCEPTED_MEMORY + {LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID, &efi.unaccepted, "Unaccepted" }, +#endif #ifdef CONFIG_EFI_GENERIC_STUB {LINUX_EFI_SCREEN_INFO_TABLE_GUID, &screen_info_table }, #endif @@ -759,6 +744,25 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, } } + if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) && + efi.unaccepted != EFI_INVALID_TABLE_ADDR) { + struct efi_unaccepted_memory *unaccepted; + + unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted)); + if (unaccepted) { + unsigned long size; + + if (unaccepted->version == 1) { + size = sizeof(*unaccepted) + unaccepted->size; + memblock_reserve(efi.unaccepted, size); + } else { + efi.unaccepted = EFI_INVALID_TABLE_ADDR; + } + + early_memunmap(unaccepted, sizeof(*unaccepted)); + } + } + return 0; } @@ -843,6 +847,7 @@ static __initdata char memory_type_name[][13] = { "MMIO Port", "PAL Code", "Persistent", + "Unaccepted", }; char * __init efi_md_typeattr_format(char *buf, size_t size, diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 3abb2b357482..16d64a34d1e1 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ 
-96,6 +96,8 @@ CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) zboot-obj-$(CONFIG_RISCV) := lib-clz_ctz.o lib-ashldi3.o lib-$(CONFIG_EFI_ZBOOT) += zboot.o $(zboot-obj-y) +lib-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o bitmap.o find.o + extra-y := $(lib-y) lib-y := $(patsubst %.o,%.stub.o,$(lib-y)) diff --git a/drivers/firmware/efi/libstub/bitmap.c b/drivers/firmware/efi/libstub/bitmap.c new file mode 100644 index 000000000000..5c9bba0d549b --- /dev/null +++ b/drivers/firmware/efi/libstub/bitmap.c @@ -0,0 +1,41 @@ +#include <linux/bitmap.h> + +void __bitmap_set(unsigned long *map, unsigned int start, int len) +{ + unsigned long *p = map + BIT_WORD(start); + const unsigned int size = start + len; + int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); + + while (len - bits_to_set >= 0) { + *p |= mask_to_set; + len -= bits_to_set; + bits_to_set = BITS_PER_LONG; + mask_to_set = ~0UL; + p++; + } + if (len) { + mask_to_set &= BITMAP_LAST_WORD_MASK(size); + *p |= mask_to_set; + } +} + +void __bitmap_clear(unsigned long *map, unsigned int start, int len) +{ + unsigned long *p = map + BIT_WORD(start); + const unsigned int size = start + len; + int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); + + while (len - bits_to_clear >= 0) { + *p &= ~mask_to_clear; + len -= bits_to_clear; + bits_to_clear = BITS_PER_LONG; + mask_to_clear = ~0UL; + p++; + } + if (len) { + mask_to_clear &= BITMAP_LAST_WORD_MASK(size); + *p &= ~mask_to_clear; + } +} diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 54a2822cae77..6aa38a1bf126 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -1136,4 +1136,10 @@ void efi_remap_image(unsigned long image_base, unsigned alloc_size, asmlinkage efi_status_t __efiapi efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab); +efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc, + struct efi_boot_memmap *map); +void process_unaccepted_memory(u64 start, u64 end); +void accept_memory(phys_addr_t start, phys_addr_t end); +void arch_accept_memory(phys_addr_t start, phys_addr_t end); + #endif diff --git a/drivers/firmware/efi/libstub/find.c b/drivers/firmware/efi/libstub/find.c new file mode 100644 index 000000000000..4e7740d28987 --- /dev/null +++ b/drivers/firmware/efi/libstub/find.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/bitmap.h> +#include <linux/math.h> +#include <linux/minmax.h> + +/* + * Common helper for find_next_bit() function family + * @FETCH: The expression that fetches and pre-processes each word of bitmap(s) + * @MUNGE: The expression that post-processes a word containing found bit (may be empty) + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + */ +#define FIND_NEXT_BIT(FETCH, MUNGE, size, start) \ +({ \ + unsigned long mask, idx, tmp, sz = (size), __start = (start); \ + \ + if (unlikely(__start >= sz)) \ + goto out; \ + \ + mask = MUNGE(BITMAP_FIRST_WORD_MASK(__start)); \ + idx = __start / BITS_PER_LONG; \ + \ + for (tmp = (FETCH) & mask; !tmp; tmp = (FETCH)) { \ + if ((idx + 1) * BITS_PER_LONG >= sz) \ + goto out; \ + idx++; \ + } \ + \ + sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz); \ +out: \ + sz; \ +}) + +unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start) +{ + return FIND_NEXT_BIT(addr[idx], 
/* nop */, nbits, start); +} + +unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits, + unsigned long start) +{ + return FIND_NEXT_BIT(~addr[idx], /* nop */, nbits, start); +} diff --git a/drivers/firmware/efi/libstub/unaccepted_memory.c b/drivers/firmware/efi/libstub/unaccepted_memory.c new file mode 100644 index 000000000000..ca61f4733ea5 --- /dev/null +++ b/drivers/firmware/efi/libstub/unaccepted_memory.c @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/efi.h> +#include <asm/efi.h> +#include "efistub.h" + +struct efi_unaccepted_memory *unaccepted_table; + +efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc, + struct efi_boot_memmap *map) +{ + efi_guid_t unaccepted_table_guid = LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID; + u64 unaccepted_start = ULLONG_MAX, unaccepted_end = 0, bitmap_size; + efi_status_t status; + int i; + + /* Check if the table is already installed */ + unaccepted_table = get_efi_config_table(unaccepted_table_guid); + if (unaccepted_table) { + if (unaccepted_table->version != 1) { + efi_err("Unknown version of unaccepted memory table\n"); + return EFI_UNSUPPORTED; + } + return EFI_SUCCESS; + } + + /* Check if there's any unaccepted memory and find the max address */ + for (i = 0; i < nr_desc; i++) { + efi_memory_desc_t *d; + unsigned long m = (unsigned long)map->map; + + d = efi_early_memdesc_ptr(m, map->desc_size, i); + if (d->type != EFI_UNACCEPTED_MEMORY) + continue; + + unaccepted_start = min(unaccepted_start, d->phys_addr); + unaccepted_end = max(unaccepted_end, + d->phys_addr + d->num_pages * PAGE_SIZE); + } + + if (unaccepted_start == ULLONG_MAX) + return EFI_SUCCESS; + + unaccepted_start = round_down(unaccepted_start, + EFI_UNACCEPTED_UNIT_SIZE); + unaccepted_end = round_up(unaccepted_end, EFI_UNACCEPTED_UNIT_SIZE); + + /* + * If unaccepted memory is present, allocate a bitmap to track what + * memory has to be accepted before access. + * + * One bit in the bitmap represents 2MiB in the address space: + * A 4k bitmap can track 64GiB of physical address space. + * + * In the worst case scenario -- a huge hole in the middle of the + * address space -- It needs 256MiB to handle 4PiB of the address + * space. + * + * The bitmap will be populated in setup_e820() according to the memory + * map after efi_exit_boot_services(). + */ + bitmap_size = DIV_ROUND_UP(unaccepted_end - unaccepted_start, + EFI_UNACCEPTED_UNIT_SIZE * BITS_PER_BYTE); + + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + sizeof(*unaccepted_table) + bitmap_size, + (void **)&unaccepted_table); + if (status != EFI_SUCCESS) { + efi_err("Failed to allocate unaccepted memory config table\n"); + return status; + } + + unaccepted_table->version = 1; + unaccepted_table->unit_size = EFI_UNACCEPTED_UNIT_SIZE; + unaccepted_table->phys_base = unaccepted_start; + unaccepted_table->size = bitmap_size; + memset(unaccepted_table->bitmap, 0, bitmap_size); + + status = efi_bs_call(install_configuration_table, + &unaccepted_table_guid, unaccepted_table); + if (status != EFI_SUCCESS) { + efi_bs_call(free_pool, unaccepted_table); + efi_err("Failed to install unaccepted memory config table!\n"); + } + + return status; +} + +/* + * The accepted memory bitmap only works at unit_size granularity. Take + * unaligned start/end addresses and either: + * 1. Accepts the memory immediately and in its entirety + * 2. Accepts unaligned parts, and marks *some* aligned part unaccepted + * + * The function will never reach the bitmap_set() with zero bits to set. 
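
The bitmap sizing in allocate_unaccepted_bitmap() above is plain arithmetic: one bit covers unit_size bytes, so one byte covers unit_size * 8. A standalone sketch of the same computation, assuming a 2 MiB unit as the comment describes (the value of EFI_UNACCEPTED_UNIT_SIZE is not restated in this hunk, so the constant here is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define UNIT_SIZE     (2ULL << 20)  // assumed unit: 2 MiB per bit
    #define BITS_PER_BYTE 8

    // Mirrors the kernel's DIV_ROUND_UP().
    static uint64_t div_round_up(uint64_t n, uint64_t d)
    {
            return (n + d - 1) / d;
    }

    int main(void)
    {
            uint64_t range = 64ULL << 30;  // 64 GiB of unaccepted span
            uint64_t bytes = div_round_up(range, UNIT_SIZE * BITS_PER_BYTE);

            printf("%llu\n", (unsigned long long)bytes);  // 4096 (one page)
            return 0;
    }

At 16 MiB tracked per byte, 64 GiB needs exactly one 4 KiB page of bitmap, and 4 PiB needs 256 MiB, matching the worst case the comment describes.
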
+ */ +void process_unaccepted_memory(u64 start, u64 end) +{ + u64 unit_size = unaccepted_table->unit_size; + u64 unit_mask = unaccepted_table->unit_size - 1; + u64 bitmap_size = unaccepted_table->size; + + /* + * Ensure that at least one bit will be set in the bitmap by + * immediately accepting all regions under 2*unit_size. This is + * imprecise and may immediately accept some areas that could + * have been represented in the bitmap. But, results in simpler + * code below + * + * Consider case like this (assuming unit_size == 2MB): + * + * | 4k | 2044k | 2048k | + * ^ 0x0 ^ 2MB ^ 4MB + * + * Only the first 4k has been accepted. The 0MB->2MB region can not be + * represented in the bitmap. The 2MB->4MB region can be represented in + * the bitmap. But, the 0MB->4MB region is <2*unit_size and will be + * immediately accepted in its entirety. + */ + if (end - start < 2 * unit_size) { + arch_accept_memory(start, end); + return; + } + + /* + * No matter how the start and end are aligned, at least one unaccepted + * unit_size area will remain to be marked in the bitmap. + */ + + /* Immediately accept a <unit_size piece at the start: */ + if (start & unit_mask) { + arch_accept_memory(start, round_up(start, unit_size)); + start = round_up(start, unit_size); + } + + /* Immediately accept a <unit_size piece at the end: */ + if (end & unit_mask) { + arch_accept_memory(round_down(end, unit_size), end); + end = round_down(end, unit_size); + } + + /* + * Accept part of the range that before phys_base and cannot be recorded + * into the bitmap. + */ + if (start < unaccepted_table->phys_base) { + arch_accept_memory(start, + min(unaccepted_table->phys_base, end)); + start = unaccepted_table->phys_base; + } + + /* Nothing to record */ + if (end < unaccepted_table->phys_base) + return; + + /* Translate to offsets from the beginning of the bitmap */ + start -= unaccepted_table->phys_base; + end -= unaccepted_table->phys_base; + + /* Accept memory that doesn't fit into bitmap */ + if (end > bitmap_size * unit_size * BITS_PER_BYTE) { + unsigned long phys_start, phys_end; + + phys_start = bitmap_size * unit_size * BITS_PER_BYTE + + unaccepted_table->phys_base; + phys_end = end + unaccepted_table->phys_base; + + arch_accept_memory(phys_start, phys_end); + end = bitmap_size * unit_size * BITS_PER_BYTE; + } + + /* + * 'start' and 'end' are now both unit_size-aligned. + * Record the range as being unaccepted: + */ + bitmap_set(unaccepted_table->bitmap, + start / unit_size, (end - start) / unit_size); +} + +void accept_memory(phys_addr_t start, phys_addr_t end) +{ + unsigned long range_start, range_end; + unsigned long bitmap_size; + u64 unit_size; + + if (!unaccepted_table) + return; + + unit_size = unaccepted_table->unit_size; + + /* + * Only care for the part of the range that is represented + * in the bitmap. 
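
The trimming rules of process_unaccepted_memory() can be traced in isolation. A userspace sketch, assuming a 2 MiB unit and ignoring the phys_base clipping and bitmap-overflow handling the real function performs afterwards; accept() stands in for arch_accept_memory():

    #include <stdint.h>
    #include <stdio.h>

    #define UNIT (2ULL << 20)  // assumed unit_size: 2 MiB

    static void accept(uint64_t s, uint64_t e)
    {
            printf("accept now:      [%#llx, %#llx)\n",
                   (unsigned long long)s, (unsigned long long)e);
    }

    static void process(uint64_t start, uint64_t end)
    {
            if (end - start < 2 * UNIT) {  // small range: accept eagerly
                    accept(start, end);
                    return;
            }
            if (start % UNIT) {            // unaligned head
                    accept(start, (start / UNIT + 1) * UNIT);
                    start = (start / UNIT + 1) * UNIT;
            }
            if (end % UNIT) {              // unaligned tail
                    accept((end / UNIT) * UNIT, end);
                    end = (end / UNIT) * UNIT;
            }
            printf("defer to bitmap: [%#llx, %#llx)\n",
                   (unsigned long long)start, (unsigned long long)end);
    }

    int main(void)
    {
            process(0x1000, 2 * UNIT);  // the 4k..4MiB case: all eager
            process(0x1000, 4 * UNIT);  // head eager, [2MiB, 8MiB) deferred
            return 0;
    }

The first call reproduces the worked example in the comment: the whole sub-2*unit_size range is accepted immediately rather than partially recorded.
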
+ */ + if (start < unaccepted_table->phys_base) + start = unaccepted_table->phys_base; + if (end < unaccepted_table->phys_base) + return; + + /* Translate to offsets from the beginning of the bitmap */ + start -= unaccepted_table->phys_base; + end -= unaccepted_table->phys_base; + + /* Make sure not to overrun the bitmap */ + if (end > unaccepted_table->size * unit_size * BITS_PER_BYTE) + end = unaccepted_table->size * unit_size * BITS_PER_BYTE; + + range_start = start / unit_size; + bitmap_size = DIV_ROUND_UP(end, unit_size); + + for_each_set_bitrange_from(range_start, range_end, + unaccepted_table->bitmap, bitmap_size) { + unsigned long phys_start, phys_end; + + phys_start = range_start * unit_size + unaccepted_table->phys_base; + phys_end = range_end * unit_size + unaccepted_table->phys_base; + + arch_accept_memory(phys_start, phys_end); + bitmap_clear(unaccepted_table->bitmap, + range_start, range_end - range_start); + } +} diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index a0bfd31358ba..220be75a5cdc 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -26,6 +26,17 @@ const efi_dxe_services_table_t *efi_dxe_table; u32 image_offset __section(".data"); static efi_loaded_image_t *image = NULL; +typedef union sev_memory_acceptance_protocol sev_memory_acceptance_protocol_t; +union sev_memory_acceptance_protocol { + struct { + efi_status_t (__efiapi * allow_unaccepted_memory)( + sev_memory_acceptance_protocol_t *); + }; + struct { + u32 allow_unaccepted_memory; + } mixed_mode; +}; + static efi_status_t preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) { @@ -310,6 +321,29 @@ setup_memory_protection(unsigned long image_base, unsigned long image_size) #endif } +static void setup_unaccepted_memory(void) +{ + efi_guid_t mem_acceptance_proto = OVMF_SEV_MEMORY_ACCEPTANCE_PROTOCOL_GUID; + sev_memory_acceptance_protocol_t *proto; + efi_status_t status; + + if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY)) + return; + + /* + * Enable unaccepted memory before calling exit boot services in order + * for the UEFI to not accept all memory on EBS. 
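
The loop at the end of the stub's accept_memory() above relies on for_each_set_bitrange_from() handing back maximal runs of set bits. The mechanics in miniature, with a single 64-bit word standing in for the bitmap (illustration only, not the kernel implementation):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t bitmap = 0x0F30;  // bits 4-5 and 8-11 set
            unsigned i = 0, bits = 64;

            while (i < bits) {
                    if (!(bitmap >> i & 1)) {  // skip clear bits
                            i++;
                            continue;
                    }
                    unsigned start = i;
                    while (i < bits && (bitmap >> i & 1))  // extend the run
                            i++;
                    printf("accept units [%u, %u)\n", start, i);
                    for (unsigned j = start; j < i; j++)   // bitmap_clear()
                            bitmap &= ~(1ULL << j);
            }
            return 0;
    }

Each run is accepted as one contiguous physical range and then cleared, so repeat calls over the same span become cheap no-ops, which is why accept_memory() can be called defensively.
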
+ */ + status = efi_bs_call(locate_protocol, &mem_acceptance_proto, NULL, + (void **)&proto); + if (status != EFI_SUCCESS) + return; + + status = efi_call_proto(proto, allow_unaccepted_memory); + if (status != EFI_SUCCESS) + efi_err("Memory acceptance protocol failed\n"); +} + static const efi_char16_t apple[] = L"Apple"; static void setup_quirks(struct boot_params *boot_params, @@ -613,6 +647,16 @@ setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_s e820_type = E820_TYPE_PMEM; break; + case EFI_UNACCEPTED_MEMORY: + if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY)) { + efi_warn_once( +"The system has unaccepted memory, but kernel does not support it\nConsider enabling CONFIG_UNACCEPTED_MEMORY\n"); + continue; + } + e820_type = E820_TYPE_RAM; + process_unaccepted_memory(d->phys_addr, + d->phys_addr + PAGE_SIZE * d->num_pages); + break; default: continue; } @@ -681,28 +725,27 @@ static efi_status_t allocate_e820(struct boot_params *params, struct setup_data **e820ext, u32 *e820ext_size) { - unsigned long map_size, desc_size, map_key; + struct efi_boot_memmap *map; efi_status_t status; - __u32 nr_desc, desc_version; - - /* Only need the size of the mem map and size of each mem descriptor */ - map_size = 0; - status = efi_bs_call(get_memory_map, &map_size, NULL, &map_key, - &desc_size, &desc_version); - if (status != EFI_BUFFER_TOO_SMALL) - return (status != EFI_SUCCESS) ? status : EFI_UNSUPPORTED; + __u32 nr_desc; - nr_desc = map_size / desc_size + EFI_MMAP_NR_SLACK_SLOTS; + status = efi_get_memory_map(&map, false); + if (status != EFI_SUCCESS) + return status; - if (nr_desc > ARRAY_SIZE(params->e820_table)) { - u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table); + nr_desc = map->map_size / map->desc_size; + if (nr_desc > ARRAY_SIZE(params->e820_table) - EFI_MMAP_NR_SLACK_SLOTS) { + u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table) + + EFI_MMAP_NR_SLACK_SLOTS; status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size); - if (status != EFI_SUCCESS) - return status; } - return EFI_SUCCESS; + if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) && status == EFI_SUCCESS) + status = allocate_unaccepted_bitmap(nr_desc, map); + + efi_bs_call(free_pool, map); + return status; } struct exit_boot_struct { @@ -899,6 +942,8 @@ asmlinkage unsigned long efi_main(efi_handle_t handle, setup_quirks(boot_params, bzimage_addr, buffer_end - buffer_start); + setup_unaccepted_memory(); + status = exit_boot(boot_params, handle); if (status != EFI_SUCCESS) { efi_err("exit_boot() failed!\n"); diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c new file mode 100644 index 000000000000..853f7dc3c21d --- /dev/null +++ b/drivers/firmware/efi/unaccepted_memory.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/efi.h> +#include <linux/memblock.h> +#include <linux/spinlock.h> +#include <asm/unaccepted_memory.h> + +/* Protects unaccepted memory bitmap */ +static DEFINE_SPINLOCK(unaccepted_memory_lock); + +/* + * accept_memory() -- Consult bitmap and accept the memory if needed. + * + * Only memory that is explicitly marked as unaccepted in the bitmap requires + * an action. All the remaining memory is implicitly accepted and doesn't need + * acceptance. 
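
The reworked allocate_e820() above sizes the extended map with slack so descriptors that appear between now and exit_boot_services() still fit. The arithmetic, with illustrative constants (the real values of ARRAY_SIZE(params->e820_table) and EFI_MMAP_NR_SLACK_SLOTS are not restated in this hunk, so 128 and 8 here are assumptions):

    #include <stdio.h>

    #define E820_TABLE_SLOTS 128  // assumed capacity of params->e820_table
    #define SLACK              8  // assumed EFI_MMAP_NR_SLACK_SLOTS

    int main(void)
    {
            unsigned nr_desc = 140;  // descriptors in the current EFI map

            if (nr_desc > E820_TABLE_SLOTS - SLACK) {
                    unsigned nr_e820ext = nr_desc - E820_TABLE_SLOTS + SLACK;

                    printf("e820ext needed for %u entries\n", nr_e820ext);
            }
            return 0;
    }

The slack covers map growth caused by the stub's own later allocations, so the extension never has to be resized after the map has been finalized.
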
+ * + * No need to accept: + * - anything if the system has no unaccepted table; + * - memory that is below phys_base; + * - memory that is above the memory that addressable by the bitmap; + */ +void accept_memory(phys_addr_t start, phys_addr_t end) +{ + struct efi_unaccepted_memory *unaccepted; + unsigned long range_start, range_end; + unsigned long flags; + u64 unit_size; + + unaccepted = efi_get_unaccepted_table(); + if (!unaccepted) + return; + + unit_size = unaccepted->unit_size; + + /* + * Only care for the part of the range that is represented + * in the bitmap. + */ + if (start < unaccepted->phys_base) + start = unaccepted->phys_base; + if (end < unaccepted->phys_base) + return; + + /* Translate to offsets from the beginning of the bitmap */ + start -= unaccepted->phys_base; + end -= unaccepted->phys_base; + + /* + * load_unaligned_zeropad() can lead to unwanted loads across page + * boundaries. The unwanted loads are typically harmless. But, they + * might be made to totally unrelated or even unmapped memory. + * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now + * #VE) to recover from these unwanted loads. + * + * But, this approach does not work for unaccepted memory. For TDX, a + * load from unaccepted memory will not lead to a recoverable exception + * within the guest. The guest will exit to the VMM where the only + * recourse is to terminate the guest. + * + * There are two parts to fix this issue and comprehensively avoid + * access to unaccepted memory. Together these ensure that an extra + * "guard" page is accepted in addition to the memory that needs to be + * used: + * + * 1. Implicitly extend the range_contains_unaccepted_memory(start, end) + * checks up to end+unit_size if 'end' is aligned on a unit_size + * boundary. + * + * 2. Implicitly extend accept_memory(start, end) to end+unit_size if + * 'end' is aligned on a unit_size boundary. (immediately following + * this comment) + */ + if (!(end % unit_size)) + end += unit_size; + + /* Make sure not to overrun the bitmap */ + if (end > unaccepted->size * unit_size * BITS_PER_BYTE) + end = unaccepted->size * unit_size * BITS_PER_BYTE; + + range_start = start / unit_size; + + spin_lock_irqsave(&unaccepted_memory_lock, flags); + for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap, + DIV_ROUND_UP(end, unit_size)) { + unsigned long phys_start, phys_end; + unsigned long len = range_end - range_start; + + phys_start = range_start * unit_size + unaccepted->phys_base; + phys_end = range_end * unit_size + unaccepted->phys_base; + + arch_accept_memory(phys_start, phys_end); + bitmap_clear(unaccepted->bitmap, range_start, len); + } + spin_unlock_irqrestore(&unaccepted_memory_lock, flags); +} + +bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end) +{ + struct efi_unaccepted_memory *unaccepted; + unsigned long flags; + bool ret = false; + u64 unit_size; + + unaccepted = efi_get_unaccepted_table(); + if (!unaccepted) + return false; + + unit_size = unaccepted->unit_size; + + /* + * Only care for the part of the range that is represented + * in the bitmap. + */ + if (start < unaccepted->phys_base) + start = unaccepted->phys_base; + if (end < unaccepted->phys_base) + return false; + + /* Translate to offsets from the beginning of the bitmap */ + start -= unaccepted->phys_base; + end -= unaccepted->phys_base; + + /* + * Also consider the unaccepted state of the *next* page. See fix #1 in + * the comment on load_unaligned_zeropad() in accept_memory(). 
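
Fix #2 in the comment above reduces to a one-line rule: when the queried range ends exactly on a unit boundary, widen it by one unit so the page after the range is handled too. In isolation, again assuming a 2 MiB unit:

    #include <stdint.h>
    #include <stdio.h>

    #define UNIT (2ULL << 20)  // assumed unit_size: 2 MiB

    static uint64_t widen(uint64_t end)
    {
            if (!(end % UNIT))  // aligned end: cover the next unit as well
                    end += UNIT;
            return end;
    }

    int main(void)
    {
            printf("%#llx\n", (unsigned long long)widen(4 * UNIT));      // 0xa00000
            printf("%#llx\n", (unsigned long long)widen(4 * UNIT + 1));  // 0x800001
            return 0;
    }

The widened value is immediately clamped against the bitmap size afterwards, so the guard never walks past the tracked region.
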
+ */ + if (!(end % unit_size)) + end += unit_size; + + /* Make sure not to overrun the bitmap */ + if (end > unaccepted->size * unit_size * BITS_PER_BYTE) + end = unaccepted->size * unit_size * BITS_PER_BYTE; + + spin_lock_irqsave(&unaccepted_memory_lock, flags); + while (start < end) { + if (test_bit(start / unit_size, unaccepted->bitmap)) { + ret = true; + break; + } + + start += unit_size; + } + spin_unlock_irqrestore(&unaccepted_memory_lock, flags); + + return ret; +} diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index 94b49ccd23ac..71f51303c2ba 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c @@ -42,8 +42,6 @@ static const struct { }; #define IBFT_SIGN_LEN 4 -#define IBFT_START 0x80000 /* 512kB */ -#define IBFT_END 0x100000 /* 1MB */ #define VGA_MEM 0xA0000 /* VGA buffer */ #define VGA_SIZE 0x20000 /* 128kB */ @@ -52,9 +50,9 @@ static const struct { */ void __init reserve_ibft_region(void) { - unsigned long pos; + unsigned long pos, virt_pos = 0; unsigned int len = 0; - void *virt; + void *virt = NULL; int i; ibft_phys_addr = 0; @@ -70,13 +68,20 @@ void __init reserve_ibft_region(void) * so skip that area */ if (pos == VGA_MEM) pos += VGA_SIZE; - virt = isa_bus_to_virt(pos); + + /* Map page by page */ + if (offset_in_page(pos) == 0) { + if (virt) + early_memunmap(virt, PAGE_SIZE); + virt = early_memremap_ro(pos, PAGE_SIZE); + virt_pos = pos; + } for (i = 0; i < ARRAY_SIZE(ibft_signs); i++) { - if (memcmp(virt, ibft_signs[i].sign, IBFT_SIGN_LEN) == - 0) { + if (memcmp(virt + (pos - virt_pos), ibft_signs[i].sign, + IBFT_SIGN_LEN) == 0) { unsigned long *addr = - (unsigned long *)isa_bus_to_virt(pos + 4); + (unsigned long *)(virt + pos - virt_pos + 4); len = *addr; /* if the length of the table extends past 1M, * the table cannot be valid. */ @@ -84,9 +89,12 @@ void __init reserve_ibft_region(void) ibft_phys_addr = pos; memblock_reserve(ibft_phys_addr, PAGE_ALIGN(len)); pr_info("iBFT found at %pa.\n", &ibft_phys_addr); - return; + goto out; } } } } + +out: + early_memunmap(virt, PAGE_SIZE); } diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c index 98939cd4a71e..745e5f67254e 100644 --- a/drivers/gpio/gpio-sifive.c +++ b/drivers/gpio/gpio-sifive.c @@ -221,8 +221,12 @@ static int sifive_gpio_probe(struct platform_device *pdev) return -ENODEV; } - for (i = 0; i < ngpio; i++) - chip->irq_number[i] = platform_get_irq(pdev, i); + for (i = 0; i < ngpio; i++) { + ret = platform_get_irq(pdev, i); + if (ret < 0) + return ret; + chip->irq_number[i] = ret; + } ret = bgpio_init(&chip->gc, dev, 4, chip->base + SIFIVE_GPIO_INPUT_VAL, diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a7220e04a93e..5be8ad61523e 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1745,7 +1745,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gc) } /* Remove all IRQ mappings and delete the domain */ - if (gc->irq.domain) { + if (!gc->irq.domain_is_allocated_externally && gc->irq.domain) { unsigned int irq; for (offset = 0; offset < gc->ngpio; offset++) { @@ -1791,6 +1791,15 @@ int gpiochip_irqchip_add_domain(struct gpio_chip *gc, gc->to_irq = gpiochip_to_irq; gc->irq.domain = domain; + gc->irq.domain_is_allocated_externally = true; + + /* + * Using barrier() here to prevent compiler from reordering + * gc->irq.initialized before adding irqdomain. 
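
The ordering that barrier() enforces can be shown in miniature: a reader that tests initialized before dereferencing domain must never observe the flag set while the pointer is still stale. A sketch of the publish order (not the gpiolib code itself; barrier() only constrains the compiler, and smp_wmb()/WRITE_ONCE() would be the stronger cross-CPU form):

    struct irq_state {
            void *domain;
            int   initialized;
    };

    static void publish(struct irq_state *st, void *domain)
    {
            st->domain = domain;                    // 1: expose the data
            __asm__ __volatile__("" ::: "memory");  // barrier(): keep order
            st->initialized = 1;                    // 2: then set the flag
    }
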
+ */ + barrier(); + + gc->irq.initialized = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b1ca1ab6d6ad..393b6fb7a71d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1615,6 +1615,7 @@ static const u16 amdgpu_unsupported_pciidlist[] = { 0x5874, 0x5940, 0x5941, + 0x5b70, 0x5b72, 0x5b73, 0x5b74, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 3b225be89cb7..a70103ac0026 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -140,7 +140,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) places[c].lpfn = visible_pfn; - else if (adev->gmc.real_vram_size != adev->gmc.visible_vram_size) + else places[c].flags |= TTM_PL_FLAG_TOPDOWN; if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 9d7e6e0e73ed..a150b7a4b4aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -3548,6 +3548,9 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, void *fw_pri_cpu_addr; int ret; + if (adev->psp.vbflash_image_size == 0) + return -EINVAL; + dev_info(adev->dev, "VBIOS flash to PSP started"); ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, @@ -3599,13 +3602,13 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev, } static const struct bin_attribute psp_vbflash_bin_attr = { - .attr = {.name = "psp_vbflash", .mode = 0664}, + .attr = {.name = "psp_vbflash", .mode = 0660}, .size = 0, .write = amdgpu_psp_vbflash_write, .read = amdgpu_psp_vbflash_read, }; -static DEVICE_ATTR(psp_vbflash_status, 0444, amdgpu_psp_vbflash_status, NULL); +static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL); int amdgpu_psp_sysfs_init(struct amdgpu_device *adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index dc474b809604..49de3a3eebc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -581,3 +581,21 @@ void amdgpu_ring_ib_end(struct amdgpu_ring *ring) if (ring->is_sw_ring) amdgpu_sw_ring_ib_end(ring); } + +void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring) +{ + if (ring->is_sw_ring) + amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL); +} + +void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring) +{ + if (ring->is_sw_ring) + amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE); +} + +void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring) +{ + if (ring->is_sw_ring) + amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index d8749444b689..2474cb71e476 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -227,6 +227,9 @@ struct amdgpu_ring_funcs { int (*preempt_ib)(struct amdgpu_ring *ring); void (*emit_mem_sync)(struct amdgpu_ring *ring); void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable); + void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset); + void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset); + void (*patch_de)(struct amdgpu_ring *ring, unsigned offset); }; struct amdgpu_ring 
{ @@ -318,10 +321,16 @@ struct amdgpu_ring { #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) #define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r) +#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o))) +#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o))) +#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o))) int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); void amdgpu_ring_ib_begin(struct amdgpu_ring *ring); void amdgpu_ring_ib_end(struct amdgpu_ring *ring); +void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring); +void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring); +void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring); void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c index 62079f0e3ee8..73516abef662 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c @@ -105,6 +105,16 @@ static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux) amdgpu_fence_update_start_timestamp(e->ring, chunk->sync_seq, ktime_get()); + if (chunk->sync_seq == + le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) { + if (chunk->cntl_offset <= e->ring->buf_mask) + amdgpu_ring_patch_cntl(e->ring, + chunk->cntl_offset); + if (chunk->ce_offset <= e->ring->buf_mask) + amdgpu_ring_patch_ce(e->ring, chunk->ce_offset); + if (chunk->de_offset <= e->ring->buf_mask) + amdgpu_ring_patch_de(e->ring, chunk->de_offset); + } amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring, chunk->start, chunk->end); @@ -407,6 +417,17 @@ void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring) amdgpu_ring_mux_end_ib(mux, ring); } +void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ring_mux *mux = &adev->gfx.muxer; + unsigned offset; + + offset = ring->wptr & ring->buf_mask; + + amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type); +} + void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) { struct amdgpu_mux_entry *e; @@ -429,6 +450,10 @@ void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *r } chunk->start = ring->wptr; + /* the initialized value used to check if they are set by the ib submission*/ + chunk->cntl_offset = ring->buf_mask + 1; + chunk->de_offset = ring->buf_mask + 1; + chunk->ce_offset = ring->buf_mask + 1; list_add_tail(&chunk->entry, &e->list); } @@ -454,6 +479,41 @@ static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct a } } +void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux, + struct amdgpu_ring *ring, u64 offset, + enum amdgpu_ring_mux_offset_type type) +{ + struct amdgpu_mux_entry *e; + struct amdgpu_mux_chunk *chunk; + + e = amdgpu_ring_mux_sw_entry(mux, ring); + if (!e) { + DRM_ERROR("cannot find entry!\n"); + return; + } + + chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry); + if (!chunk) { + DRM_ERROR("cannot find chunk!\n"); + return; + } + + switch (type) { + case AMDGPU_MUX_OFFSET_TYPE_CONTROL: + chunk->cntl_offset = offset; + break; + case AMDGPU_MUX_OFFSET_TYPE_DE: + chunk->de_offset = offset; + break; + case AMDGPU_MUX_OFFSET_TYPE_CE: + chunk->ce_offset = 
offset; + break; + default: + DRM_ERROR("invalid type (%d)\n", type); + break; + } +} + void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) { struct amdgpu_mux_entry *e; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h index 4be45fc14954..b22d4fb2a847 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h @@ -50,6 +50,12 @@ struct amdgpu_mux_entry { struct list_head list; }; +enum amdgpu_ring_mux_offset_type { + AMDGPU_MUX_OFFSET_TYPE_CONTROL, + AMDGPU_MUX_OFFSET_TYPE_DE, + AMDGPU_MUX_OFFSET_TYPE_CE, +}; + struct amdgpu_ring_mux { struct amdgpu_ring *real_ring; @@ -72,12 +78,18 @@ struct amdgpu_ring_mux { * @sync_seq: the fence seqno related with the saved IB. * @start:- start location on the software ring. * @end:- end location on the software ring. + * @control_offset:- the PRE_RESUME bit position used for resubmission. + * @de_offset:- the anchor in write_data for de meta of resubmission. + * @ce_offset:- the anchor in write_data for ce meta of resubmission. */ struct amdgpu_mux_chunk { struct list_head entry; uint32_t sync_seq; u64 start; u64 end; + u64 cntl_offset; + u64 de_offset; + u64 ce_offset; }; int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, @@ -89,6 +101,8 @@ u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ri u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring); void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring); void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring); +void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, + u64 offset, enum amdgpu_ring_mux_offset_type type); bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux); u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring); @@ -97,6 +111,7 @@ void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring); void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring); void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring); +void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type); const char *amdgpu_sw_ring_name(int idx); unsigned int amdgpu_sw_ring_priority(int idx); diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 1c5d9388ad0b..5f610e9a5f0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -1509,7 +1509,7 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) str = CSTR(idx); if (*str != '\0') { pr_info("ATOM BIOS: %s\n", str); - strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version)); + strscpy(ctx->vbios_version, str, sizeof(ctx->vbios_version)); } atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index e7f2b7bf0ff5..a674c8a58dc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -755,7 +755,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev); static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev); -static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume); +static void 
gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds); static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); @@ -5127,7 +5127,8 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, gfx_v9_0_ring_emit_de_meta(ring, (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? - true : false); + true : false, + job->gds_size > 0 && job->gds_base != 0); } amdgpu_ring_write(ring, header); @@ -5138,9 +5139,83 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, #endif lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_ib_on_emit_cntl(ring); amdgpu_ring_write(ring, control); } +static void gfx_v9_0_ring_patch_cntl(struct amdgpu_ring *ring, + unsigned offset) +{ + u32 control = ring->ring[offset]; + + control |= INDIRECT_BUFFER_PRE_RESUME(1); + ring->ring[offset] = control; +} + +static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring, + unsigned offset) +{ + struct amdgpu_device *adev = ring->adev; + void *ce_payload_cpu_addr; + uint64_t payload_offset, payload_size; + + payload_size = sizeof(struct v9_ce_ib_state); + + if (ring->is_mes_queue) { + payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data, + gfx[0].gfx_meta_data) + + offsetof(struct v9_gfx_meta_data, ce_payload); + ce_payload_cpu_addr = + amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset); + } else { + payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload); + ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset; + } + + if (offset + (payload_size >> 2) <= ring->buf_mask + 1) { + memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size); + } else { + memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, + (ring->buf_mask + 1 - offset) << 2); + payload_size -= (ring->buf_mask + 1 - offset) << 2; + memcpy((void *)&ring->ring[0], + ce_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2), + payload_size); + } +} + +static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring, + unsigned offset) +{ + struct amdgpu_device *adev = ring->adev; + void *de_payload_cpu_addr; + uint64_t payload_offset, payload_size; + + payload_size = sizeof(struct v9_de_ib_state); + + if (ring->is_mes_queue) { + payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data, + gfx[0].gfx_meta_data) + + offsetof(struct v9_gfx_meta_data, de_payload); + de_payload_cpu_addr = + amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset); + } else { + payload_offset = offsetof(struct v9_gfx_meta_data, de_payload); + de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset; + } + + if (offset + (payload_size >> 2) <= ring->buf_mask + 1) { + memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size); + } else { + memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, + (ring->buf_mask + 1 - offset) << 2); + payload_size -= (ring->buf_mask + 1 - offset) << 2; + memcpy((void *)&ring->ring[0], + de_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2), + payload_size); + } +} + static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, @@ -5336,6 +5411,8 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume) amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr)); + amdgpu_ring_ib_on_emit_ce(ring); + if (resume) amdgpu_ring_write_multiple(ring, 
ce_payload_cpu_addr, sizeof(ce_payload) >> 2); @@ -5369,10 +5446,6 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring) amdgpu_ring_alloc(ring, 13); gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT); - /*reset the CP_VMID_PREEMPT after trailing fence*/ - amdgpu_ring_emit_wreg(ring, - SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT), - 0x0); /* assert IB preemption, emit the trailing fence */ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, @@ -5395,6 +5468,10 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring) DRM_WARN("ring %d timeout to preempt ib\n", ring->idx); } + /*reset the CP_VMID_PREEMPT after trailing fence*/ + amdgpu_ring_emit_wreg(ring, + SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT), + 0x0); amdgpu_ring_commit(ring); /* deassert preemption condition */ @@ -5402,7 +5479,7 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring) return r; } -static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) +static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds) { struct amdgpu_device *adev = ring->adev; struct v9_de_ib_state de_payload = {0}; @@ -5433,8 +5510,10 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) PAGE_SIZE); } - de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); - de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); + if (usegds) { + de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); + de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); + } cnt = (sizeof(de_payload) >> 2) + 4 - 2; amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); @@ -5445,6 +5524,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr)); + amdgpu_ring_ib_on_emit_de(ring); if (resume) amdgpu_ring_write_multiple(ring, de_payload_cpu_addr, sizeof(de_payload) >> 2); @@ -6855,6 +6935,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = { .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, .soft_recovery = gfx_v9_0_ring_soft_recovery, .emit_mem_sync = gfx_v9_0_emit_mem_sync, + .patch_cntl = gfx_v9_0_ring_patch_cntl, + .patch_de = gfx_v9_0_ring_patch_de_meta, + .patch_ce = gfx_v9_0_ring_patch_ce_meta, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index e5fd1e00914d..da126ff8bcbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -129,7 +129,11 @@ static int vcn_v4_0_sw_init(void *handle) if (adev->vcn.harvest_config & (1 << i)) continue; - atomic_set(&adev->vcn.inst[i].sched_score, 0); + /* Init instance 0 sched_score to 1, so it's scheduled after other instances */ + if (i == 0) + atomic_set(&adev->vcn.inst[i].sched_score, 1); + else + atomic_set(&adev->vcn.inst[i].sched_score, 0); /* VCN UNIFIED TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d5cec03eaa8d..7acd73e5004f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7196,7 +7196,13 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) drm_add_modes_noedid(connector, 
1920, 1080); } else { amdgpu_dm_connector_ddc_get_modes(connector, edid); - amdgpu_dm_connector_add_common_modes(encoder, connector); + /* most eDP supports only timings from its edid, + * usually only detailed timings are available + * from eDP edid. timings which are not from edid + * may damage eDP + */ + if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) + amdgpu_dm_connector_add_common_modes(encoder, connector); amdgpu_dm_connector_add_freesync_modes(connector, edid); } amdgpu_dm_fbc_init(connector); @@ -8198,6 +8204,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) bundle->stream_update.abm_level = &acrtc_state->abm_level; + mutex_lock(&dm->dc_lock); + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_settings.psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream); + mutex_unlock(&dm->dc_lock); + /* * If FreeSync state on the stream has changed then we need to * re-adjust the min/max bounds now that DC doesn't handle this @@ -8211,10 +8223,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } mutex_lock(&dm->dc_lock); - if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && - acrtc_state->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_disable(acrtc_state->stream); - update_planes_and_stream_adapter(dm->dc, acrtc_state->update_type, planes_count, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index a131e30fd7d6..d471d58aba92 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -980,6 +980,11 @@ static bool detect_link_and_local_sink(struct dc_link *link, (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)) converter_disable_audio = true; + + /* limited link rate to HBR3 for DPIA until we implement USB4 V2 */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->reported_link_cap.link_rate > LINK_RATE_HIGH3) + link->reported_link_cap.link_rate = LINK_RATE_HIGH3; break; } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index d3fe149d8476..81fb4e5dd804 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -794,7 +794,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev) struct i2c_board_info info = { }; const char *name = pp_lib_thermal_controller_names[controller->ucType]; info.addr = controller->ucI2cAddress >> 1; - strlcpy(info.type, name, sizeof(info.type)); + strscpy(info.type, name, sizeof(info.type)); i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info); } } else { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 09405ef1e3c8..08577d1b84ec 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1696,10 +1696,39 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, } } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE && + (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) || + ((smu->adev->pdev->device == 0x744C) && 
(smu->adev->pdev->revision == 0xCC)))) { + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_COMPUTE_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } + + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_CUSTOM); + } else { + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_WORKLOAD, smu->power_profile_mode); + } + if (workload_type < 0) return -EINVAL; diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 7a748785c545..4676cf2900df 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -298,6 +298,10 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata) if (refclk_lut[i] == refclk_rate) break; + /* avoid buffer overflow and "1" is the default rate in the datasheet. */ + if (i >= refclk_lut_size) + i = 1; + regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK, REFCLK_FREQ(i)); diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index 16565a0a5da6..e6a78fd32380 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -2103,7 +2103,7 @@ int drm_dp_aux_register(struct drm_dp_aux *aux) aux->ddc.owner = THIS_MODULE; aux->ddc.dev.parent = aux->dev; - strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), + strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), sizeof(aux->ddc.name)); ret = drm_dp_aux_register_devnode(aux); diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 38dab76ae69e..943a00db77d4 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -3404,7 +3404,7 @@ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr, /* Skip failed payloads */ if (payload->vc_start_slot == -1) { - drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n", + drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n", payload->port->connector->name); return -EIO; } @@ -5702,7 +5702,7 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port) aux->ddc.dev.parent = parent_dev; aux->ddc.dev.of_node = parent_dev->of_node; - strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev), + strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev), sizeof(aux->ddc.name)); return i2c_add_adapter(&aux->ddc); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 1a5a2cd0d4ec..78dcae201cc6 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -496,13 +496,13 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj) EXPORT_SYMBOL(drm_gem_create_mmap_offset); /* - * Move pages to appropriate lru and release the pagevec, decrementing the - * ref count of those pages. + * Move folios to appropriate lru and release the folios, decrementing the + * ref count of those folios. 
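
struct folio_batch is the folio-based successor to struct pagevec, with the same capacity and the same fill-then-flush usage, which is why the conversion in this and the following hunks is almost mechanical. The shared idiom as a kernel-style sketch (plain folio_batch_release() here; the drm helper additionally moves the folios to the right LRU first):

    #include <linux/pagevec.h>

    static void release_folios(struct folio *folios[], int n)
    {
            struct folio_batch fbatch;
            int i;

            folio_batch_init(&fbatch);
            for (i = 0; i < n; i++) {
                    // folio_batch_add() returns the space left; 0 = full.
                    if (!folio_batch_add(&fbatch, folios[i]))
                            folio_batch_release(&fbatch);  // put refs, reinit
            }
            if (folio_batch_count(&fbatch))
                    folio_batch_release(&fbatch);
    }
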
*/ -static void drm_gem_check_release_pagevec(struct pagevec *pvec) +static void drm_gem_check_release_batch(struct folio_batch *fbatch) { - check_move_unevictable_pages(pvec); - __pagevec_release(pvec); + check_move_unevictable_folios(fbatch); + __folio_batch_release(fbatch); cond_resched(); } @@ -534,10 +534,10 @@ static void drm_gem_check_release_pagevec(struct pagevec *pvec) struct page **drm_gem_get_pages(struct drm_gem_object *obj) { struct address_space *mapping; - struct page *p, **pages; - struct pagevec pvec; - int i, npages; - + struct page **pages; + struct folio *folio; + struct folio_batch fbatch; + int i, j, npages; if (WARN_ON(!obj->filp)) return ERR_PTR(-EINVAL); @@ -559,11 +559,14 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj) mapping_set_unevictable(mapping); - for (i = 0; i < npages; i++) { - p = shmem_read_mapping_page(mapping, i); - if (IS_ERR(p)) + i = 0; + while (i < npages) { + folio = shmem_read_folio_gfp(mapping, i, + mapping_gfp_mask(mapping)); + if (IS_ERR(folio)) goto fail; - pages[i] = p; + for (j = 0; j < folio_nr_pages(folio); j++, i++) + pages[i] = folio_file_page(folio, i); /* Make sure shmem keeps __GFP_DMA32 allocated pages in the * correct region during swapin. Note that this requires @@ -571,23 +574,26 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj) * so shmem can relocate pages during swapin if required. */ BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) && - (page_to_pfn(p) >= 0x00100000UL)); + (folio_pfn(folio) >= 0x00100000UL)); } return pages; fail: mapping_clear_unevictable(mapping); - pagevec_init(&pvec); - while (i--) { - if (!pagevec_add(&pvec, pages[i])) - drm_gem_check_release_pagevec(&pvec); + folio_batch_init(&fbatch); + j = 0; + while (j < i) { + struct folio *f = page_folio(pages[j]); + if (!folio_batch_add(&fbatch, f)) + drm_gem_check_release_batch(&fbatch); + j += folio_nr_pages(f); } - if (pagevec_count(&pvec)) - drm_gem_check_release_pagevec(&pvec); + if (fbatch.nr) + drm_gem_check_release_batch(&fbatch); kvfree(pages); - return ERR_CAST(p); + return ERR_CAST(folio); } EXPORT_SYMBOL(drm_gem_get_pages); @@ -603,7 +609,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, { int i, npages; struct address_space *mapping; - struct pagevec pvec; + struct folio_batch fbatch; mapping = file_inode(obj->filp)->i_mapping; mapping_clear_unevictable(mapping); @@ -616,23 +622,27 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, npages = obj->size >> PAGE_SHIFT; - pagevec_init(&pvec); + folio_batch_init(&fbatch); for (i = 0; i < npages; i++) { + struct folio *folio; + if (!pages[i]) continue; + folio = page_folio(pages[i]); if (dirty) - set_page_dirty(pages[i]); + folio_mark_dirty(folio); if (accessed) - mark_page_accessed(pages[i]); + folio_mark_accessed(folio); /* Undo the reference we took when populating the table */ - if (!pagevec_add(&pvec, pages[i])) - drm_gem_check_release_pagevec(&pvec); + if (!folio_batch_add(&fbatch, folio)) + drm_gem_check_release_batch(&fbatch); + i += folio_nr_pages(folio) - 1; } - if (pagevec_count(&pvec)) - drm_gem_check_release_pagevec(&pvec); + if (folio_batch_count(&fbatch)) + drm_gem_check_release_batch(&fbatch); kvfree(pages); } diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c index c21c3f623033..5423ad883729 100644 --- a/drivers/gpu/drm/drm_managed.c +++ b/drivers/gpu/drm/drm_managed.c @@ -49,10 +49,10 @@ struct drmres { * Some archs want to perform DMA into kmalloc caches * and need a guaranteed 
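
The effect of the annotation above is that data[] starts at an offset aligned for DMA even though struct drmres itself comes from kmalloc(): once arm64 relaxed ARCH_KMALLOC_MINALIGN, only ARCH_DMA_MINALIGN still guarantees a DMA-safe boundary. A model of the layout, with 128 assumed as a stand-in for ARCH_DMA_MINALIGN (its arm64 value):

    #include <stddef.h>
    #include <stdio.h>

    struct drmres_model {
            void *node[3];  // stand-in for the drmres bookkeeping
            unsigned char data[] __attribute__((aligned(128)));
    };

    int main(void)
    {
            // The header pads out so the payload lands on the boundary.
            printf("%zu\n", offsetof(struct drmres_model, data));  // 128
            return 0;
    }
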
alignment larger than * the alignment of a 64-bit integer. - * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same - * buffer alignment as if it was allocated by plain kmalloc(). + * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same + * alignment for struct drmres when allocated by kmalloc(). */ - u8 __aligned(ARCH_KMALLOC_MINALIGN) data[]; + u8 __aligned(ARCH_DMA_MINALIGN) data[]; }; static void free_dr(struct drmres *dr) diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 3fd6c733ff4e..6252ac01e945 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -223,7 +223,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host, device_set_node(&dsi->dev, of_fwnode_handle(info->node)); dsi->channel = info->channel; - strlcpy(dsi->name, info->type, sizeof(dsi->name)); + strscpy(dsi->name, info->type, sizeof(dsi->name)); ret = mipi_dsi_device_add(dsi); if (ret) { diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index db5c9343a3d2..0918d80672bb 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -1951,7 +1951,7 @@ static int tda998x_create(struct device *dev) * offset. */ memset(&cec_info, 0, sizeof(cec_info)); - strlcpy(cec_info.type, "tda9950", sizeof(cec_info.type)); + strscpy(cec_info.type, "tda9950", sizeof(cec_info.type)); cec_info.addr = priv->cec_addr; cec_info.platform_data = &priv->cec_glue; cec_info.irq = client->irq; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 37d1efcd3ca6..adf1154c0e10 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -19,13 +19,13 @@ #include "i915_trace.h" /* - * Move pages to appropriate lru and release the pagevec, decrementing the - * ref count of those pages. + * Move folios to appropriate lru and release the batch, decrementing the + * ref count of those folios. 
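
The strlcpy() -> strscpy() conversions scattered through this series all trade away the same hazards: strlcpy() returns the full source length, so it must read the whole source even past the destination size, and callers routinely misused that return value. strscpy() always NUL-terminates, stops reading at the destination bound, and reports truncation as -E2BIG. A userspace model of the semantics (the kernel implementation works word-at-a-time but behaves the same):

    #include <stdio.h>

    #define E2BIG 7

    static long strscpy_model(char *dst, const char *src, unsigned long size)
    {
            unsigned long i;

            if (!size)
                    return -E2BIG;
            for (i = 0; i + 1 < size && src[i]; i++)
                    dst[i] = src[i];
            dst[i] = '\0';
            return src[i] ? -E2BIG : (long)i;  // truncated? error : count
    }

    int main(void)
    {
            char buf[8];

            printf("%ld\n", strscpy_model(buf, "tda9950", sizeof(buf)));  // 7
            printf("%ld\n", strscpy_model(buf, "mediatek-hdmi-ddc", 8));  // -7
            return 0;
    }
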
*/ -static void check_release_pagevec(struct pagevec *pvec) +static void check_release_folio_batch(struct folio_batch *fbatch) { - check_move_unevictable_pages(pvec); - __pagevec_release(pvec); + check_move_unevictable_folios(fbatch); + __folio_batch_release(fbatch); cond_resched(); } @@ -33,24 +33,29 @@ void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping, bool dirty, bool backup) { struct sgt_iter sgt_iter; - struct pagevec pvec; + struct folio_batch fbatch; + struct folio *last = NULL; struct page *page; mapping_clear_unevictable(mapping); - pagevec_init(&pvec); + folio_batch_init(&fbatch); for_each_sgt_page(page, sgt_iter, st) { - if (dirty) - set_page_dirty(page); + struct folio *folio = page_folio(page); + if (folio == last) + continue; + last = folio; + if (dirty) + folio_mark_dirty(folio); if (backup) - mark_page_accessed(page); + folio_mark_accessed(folio); - if (!pagevec_add(&pvec, page)) - check_release_pagevec(&pvec); + if (!folio_batch_add(&fbatch, folio)) + check_release_folio_batch(&fbatch); } - if (pagevec_count(&pvec)) - check_release_pagevec(&pvec); + if (fbatch.nr) + check_release_folio_batch(&fbatch); sg_free_table(st); } @@ -63,8 +68,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, unsigned int page_count; /* restricted by sg_alloc_table */ unsigned long i; struct scatterlist *sg; - struct page *page; - unsigned long last_pfn = 0; /* suppress gcc warning */ + unsigned long next_pfn = 0; /* suppress gcc warning */ gfp_t noreclaim; int ret; @@ -95,6 +99,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, sg = st->sgl; st->nents = 0; for (i = 0; i < page_count; i++) { + struct folio *folio; const unsigned int shrink[] = { I915_SHRINK_BOUND | I915_SHRINK_UNBOUND, 0, @@ -103,12 +108,12 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, do { cond_resched(); - page = shmem_read_mapping_page_gfp(mapping, i, gfp); - if (!IS_ERR(page)) + folio = shmem_read_folio_gfp(mapping, i, gfp); + if (!IS_ERR(folio)) break; if (!*s) { - ret = PTR_ERR(page); + ret = PTR_ERR(folio); goto err_sg; } @@ -147,19 +152,21 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, if (!i || sg->length >= max_segment || - page_to_pfn(page) != last_pfn + 1) { + folio_pfn(folio) != next_pfn) { if (i) sg = sg_next(sg); st->nents++; - sg_set_page(sg, page, PAGE_SIZE, 0); + sg_set_folio(sg, folio, folio_size(folio), 0); } else { - sg->length += PAGE_SIZE; + /* XXX: could overflow? */ + sg->length += folio_size(folio); } - last_pfn = page_to_pfn(page); + next_pfn = folio_pfn(folio) + folio_nr_pages(folio); + i += folio_nr_pages(folio) - 1; /* Check that the i965g/gm workaround works. 
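
The rewritten contiguity test above lets one scatterlist entry absorb whole folios: a new segment starts only when the incoming folio does not begin at next_pfn, the pfn immediately after the previous chunk (the max_segment length cap is kept in the real code, elided here). The coalescing rule in isolation:

    #include <stdint.h>
    #include <stdio.h>

    struct seg { uint64_t pfn, pages; };

    int main(void)
    {
            struct seg folios[] = { { 100, 4 }, { 104, 4 }, { 300, 1 } };
            struct seg segs[3];
            int nsegs = 0;
            uint64_t next_pfn = 0;

            for (int i = 0; i < 3; i++) {
                    if (!nsegs || folios[i].pfn != next_pfn)
                            segs[nsegs++] = folios[i];  // start new entry
                    else
                            segs[nsegs - 1].pages += folios[i].pages;
                    next_pfn = folios[i].pfn + folios[i].pages;
            }
            for (int i = 0; i < nsegs; i++)  // prints two segments
                    printf("pfn %llu: %llu pages\n",
                           (unsigned long long)segs[i].pfn,
                           (unsigned long long)segs[i].pages);
            return 0;
    }
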
*/ - GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL); + GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL); } if (sg) /* loop terminated early; short sg table */ sg_mark_end(sg); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 56279908ed30..01e271b6ad21 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -1681,7 +1681,9 @@ static int igt_mmap_gpu(void *arg) static int check_present_pte(pte_t *pte, unsigned long addr, void *data) { - if (!pte_present(*pte) || pte_none(*pte)) { + pte_t ptent = ptep_get(pte); + + if (!pte_present(ptent) || pte_none(ptent)) { pr_err("missing PTE:%lx\n", (addr - (unsigned long)data) >> PAGE_SHIFT); return -EINVAL; @@ -1692,7 +1694,9 @@ static int check_present_pte(pte_t *pte, unsigned long addr, void *data) static int check_absent_pte(pte_t *pte, unsigned long addr, void *data) { - if (pte_present(*pte) && !pte_none(*pte)) { + pte_t ptent = ptep_get(pte); + + if (pte_present(ptent) && !pte_none(ptent)) { pr_err("present PTE:%lx; expected to be revoked\n", (addr - (unsigned long)data) >> PAGE_SHIFT); return -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index f020c0086fbc..35f70bb8e4fb 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -187,64 +187,64 @@ i915_error_printer(struct drm_i915_error_state_buf *e) } /* single threaded page allocator with a reserved stash for emergencies */ -static void pool_fini(struct pagevec *pv) +static void pool_fini(struct folio_batch *fbatch) { - pagevec_release(pv); + folio_batch_release(fbatch); } -static int pool_refill(struct pagevec *pv, gfp_t gfp) +static int pool_refill(struct folio_batch *fbatch, gfp_t gfp) { - while (pagevec_space(pv)) { - struct page *p; + while (folio_batch_space(fbatch)) { + struct folio *folio; - p = alloc_page(gfp); - if (!p) + folio = folio_alloc(gfp, 0); + if (!folio) return -ENOMEM; - pagevec_add(pv, p); + folio_batch_add(fbatch, folio); } return 0; } -static int pool_init(struct pagevec *pv, gfp_t gfp) +static int pool_init(struct folio_batch *fbatch, gfp_t gfp) { int err; - pagevec_init(pv); + folio_batch_init(fbatch); - err = pool_refill(pv, gfp); + err = pool_refill(fbatch, gfp); if (err) - pool_fini(pv); + pool_fini(fbatch); return err; } -static void *pool_alloc(struct pagevec *pv, gfp_t gfp) +static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp) { - struct page *p; + struct folio *folio; - p = alloc_page(gfp); - if (!p && pagevec_count(pv)) - p = pv->pages[--pv->nr]; + folio = folio_alloc(gfp, 0); + if (!folio && folio_batch_count(fbatch)) + folio = fbatch->folios[--fbatch->nr]; - return p ? page_address(p) : NULL; + return folio ? 
folio_address(folio) : NULL; } -static void pool_free(struct pagevec *pv, void *addr) +static void pool_free(struct folio_batch *fbatch, void *addr) { - struct page *p = virt_to_page(addr); + struct folio *folio = virt_to_folio(addr); - if (pagevec_space(pv)) - pagevec_add(pv, p); + if (folio_batch_space(fbatch)) + folio_batch_add(fbatch, folio); else - __free_page(p); + folio_put(folio); } #ifdef CONFIG_DRM_I915_COMPRESS_ERROR struct i915_vma_compress { - struct pagevec pool; + struct folio_batch pool; struct z_stream_s zstream; void *tmp; }; @@ -381,7 +381,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) #else struct i915_vma_compress { - struct pagevec pool; + struct folio_batch pool; }; static bool compress_init(struct i915_vma_compress *c) diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c index 2fc9214ffa82..4d39ea0a05ca 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c @@ -295,7 +295,7 @@ static int mtk_hdmi_ddc_probe(struct platform_device *pdev) return ret; } - strlcpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name)); + strscpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name)); ddc->adap.owner = THIS_MODULE; ddc->adap.class = I2C_CLASS_DDC; ddc->adap.algo = &mtk_hdmi_ddc_algorithm; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 8cf096f841a9..a2ae8c21e4dc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -220,6 +220,9 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out int optimus_funcs; struct pci_dev *parent_pdev; + if (pdev->vendor != PCI_VENDOR_ID_NVIDIA) + return; + *has_pr3 = false; parent_pdev = pci_upstream_bridge(pdev); if (parent_pdev) { diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 086b66b60d91..f75c6f09dd2a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -730,7 +730,8 @@ out: #endif nouveau_connector_set_edid(nv_connector, edid); - nouveau_connector_set_encoder(connector, nv_encoder); + if (nv_encoder) + nouveau_connector_set_encoder(connector, nv_encoder); return status; } @@ -966,7 +967,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) /* Determine display colour depth for everything except LVDS now, * DP requires this before mode_valid() is called. */ - if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) + if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode) nouveau_connector_detect_depth(connector); /* Find the native mode if this is a digital panel, if we didn't @@ -987,7 +988,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) * "native" mode as some VBIOS tables require us to use the * pixel clock as part of the lookup... 
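
The nouveau_cli_work_ready() rework earlier in this series serializes the signaled test against the fence's own lock instead of using the lockless helper, so a concurrent signaler cannot race the check. Its shape, as a kernel-style sketch assuming <linux/dma-fence.h>:

    #include <linux/dma-fence.h>

    static bool work_ready(struct dma_fence *fence)
    {
            bool signaled;

            spin_lock_irq(fence->lock);
            signaled = dma_fence_is_signaled_locked(fence);
            spin_unlock_irq(fence->lock);

            if (signaled)
                    dma_fence_put(fence);  // drop our ref only once done
            return signaled;
    }

Keeping the dma_fence_put() outside the critical section avoids dropping what may be the final reference while the fence's lock is still held.
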
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c index 2fc9214ffa82..4d39ea0a05ca 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c @@ -295,7 +295,7 @@ static int mtk_hdmi_ddc_probe(struct platform_device *pdev) return ret; } - strlcpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name)); + strscpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name)); ddc->adap.owner = THIS_MODULE; ddc->adap.class = I2C_CLASS_DDC; ddc->adap.algo = &mtk_hdmi_ddc_algorithm; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 8cf096f841a9..a2ae8c21e4dc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -220,6 +220,9 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out int optimus_funcs; struct pci_dev *parent_pdev; + if (pdev->vendor != PCI_VENDOR_ID_NVIDIA) + return; + *has_pr3 = false; parent_pdev = pci_upstream_bridge(pdev); if (parent_pdev) { diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 086b66b60d91..f75c6f09dd2a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -730,7 +730,8 @@ out: #endif nouveau_connector_set_edid(nv_connector, edid); - nouveau_connector_set_encoder(connector, nv_encoder); + if (nv_encoder) + nouveau_connector_set_encoder(connector, nv_encoder); return status; } @@ -966,7 +967,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) /* Determine display colour depth for everything except LVDS now, * DP requires this before mode_valid() is called. */ - if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) + if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode) nouveau_connector_detect_depth(connector); /* Find the native mode if this is a digital panel, if we didn't @@ -987,7 +988,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) * "native" mode as some VBIOS tables require us to use the * pixel clock as part of the lookup... */ - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode) nouveau_connector_detect_depth(connector); if (nv_encoder->dcb->type == DCB_OUTPUT_TV) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index cc7c5b4a05fd..7aac9384600e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -137,10 +137,16 @@ nouveau_name(struct drm_device *dev) static inline bool nouveau_cli_work_ready(struct dma_fence *fence) { - if (!dma_fence_is_signaled(fence)) - return false; - dma_fence_put(fence); - return true; + bool ret = true; + + spin_lock_irq(fence->lock); + if (!dma_fence_is_signaled_locked(fence)) + ret = false; + spin_unlock_irq(fence->lock); + + if (ret == true) + dma_fence_put(fence); + return ret; } static void diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 4ad5a328d920..bf3c411a55c5 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -2105,7 +2105,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) const char *name = thermal_controller_names[power_info->info. ucOverdriveThermalController]; info.addr = power_info->info.ucOverdriveControllerAddress >> 1; - strlcpy(info.type, name, sizeof(info.type)); + strscpy(info.type, name, sizeof(info.type)); i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info); } } @@ -2355,7 +2355,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r struct i2c_board_info info = { }; const char *name = pp_lib_thermal_controller_names[controller->ucType]; info.addr = controller->ucI2cAddress >> 1; - strlcpy(info.type, name, sizeof(info.type)); + strscpy(info.type, name, sizeof(info.type)); i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info); } } else { diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 783a6b8802d5..795c3667f6d6 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2702,7 +2702,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) struct i2c_board_info info = { }; const char *name = thermal_controller_names[thermal_controller]; info.addr = i2c_addr >> 1; - strlcpy(info.type, name, sizeof(info.type)); + strscpy(info.type, name, sizeof(info.type)); i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info); } } @@ -2719,7 +2719,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) struct i2c_board_info info = { }; const char *name = "f75375"; info.addr = 0x28; - strlcpy(info.type, name, sizeof(info.type)); + strscpy(info.type, name, sizeof(info.type)); i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info); DRM_INFO("Possible %s thermal controller at 0x%02x\n", name, info.addr); diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c index fe76e29910ef..8f6c3aef0962 100644 --- a/drivers/gpu/drm/radeon/radeon_fbdev.c +++ b/drivers/gpu/drm/radeon/radeon_fbdev.c @@ -307,6 +307,7 @@ static void radeon_fbdev_client_unregister(struct drm_client_dev *client) if (fb_helper->info) { vga_switcheroo_client_fb_set(rdev->pdev, NULL); + drm_helper_force_disable_all(dev); drm_fb_helper_unregister_info(fb_helper); } else { drm_client_release(&fb_helper->client); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index
2220cdf6a3f6..3a9db030f98f 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -359,7 +359,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm struct page **pages = ttm->pages + pinned; r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0, - pages, NULL); + pages); if (r < 0) goto release_pages; diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index f51774866f41..9afb889963c1 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -797,7 +797,7 @@ static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi) adap->dev.parent = hdmi->dev; adap->dev.of_node = hdmi->dev->of_node; adap->algo = &inno_hdmi_algorithm; - strlcpy(adap->name, "Inno HDMI", sizeof(adap->name)); + strscpy(adap->name, "Inno HDMI", sizeof(adap->name)); i2c_set_adapdata(adap, hdmi); ret = i2c_add_adapter(adap); diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c index 90145ad96984..b5d042ee052f 100644 --- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c +++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c @@ -730,7 +730,7 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi) adap->dev.parent = hdmi->dev; adap->dev.of_node = hdmi->dev->of_node; adap->algo = &rk3066_hdmi_algorithm; - strlcpy(adap->name, "RK3066 HDMI", sizeof(adap->name)); + strscpy(adap->name, "RK3066 HDMI", sizeof(adap->name)); i2c_set_adapdata(adap, hdmi); ret = i2c_add_adapter(adap); diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c index c7d7e9fff91c..d1a65a921f5a 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c @@ -304,7 +304,7 @@ int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi) adap->owner = THIS_MODULE; adap->class = I2C_CLASS_DDC; adap->algo = &sun4i_hdmi_i2c_algorithm; - strlcpy(adap->name, "sun4i_hdmi_i2c adapter", sizeof(adap->name)); + strscpy(adap->name, "sun4i_hdmi_i2c adapter", sizeof(adap->name)); i2c_set_adapdata(adap, hdmi); ret = i2c_add_adapter(adap); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h index 0b74ca2dfb7b..23899d743a90 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h @@ -105,10 +105,14 @@ flags, magic, bp, \ eax, ebx, ecx, edx, si, di) \ ({ \ - asm volatile ("push %%rbp;" \ + asm volatile ( \ + UNWIND_HINT_SAVE \ + "push %%rbp;" \ + UNWIND_HINT_UNDEFINED \ "mov %12, %%rbp;" \ VMWARE_HYPERCALL_HB_OUT \ - "pop %%rbp;" : \ + "pop %%rbp;" \ + UNWIND_HINT_RESTORE : \ "=a"(eax), \ "=b"(ebx), \ "=c"(ecx), \ @@ -130,10 +134,14 @@ flags, magic, bp, \ eax, ebx, ecx, edx, si, di) \ ({ \ - asm volatile ("push %%rbp;" \ + asm volatile ( \ + UNWIND_HINT_SAVE \ + "push %%rbp;" \ + UNWIND_HINT_UNDEFINED \ "mov %12, %%rbp;" \ VMWARE_HYPERCALL_HB_IN \ - "pop %%rbp" : \ + "pop %%rbp;" \ + UNWIND_HINT_RESTORE : \ "=a"(eax), \ "=b"(ebx), \ "=c"(ecx), \ diff --git a/drivers/greybus/connection.c b/drivers/greybus/connection.c index e3799a53a193..9c88861986c8 100644 --- a/drivers/greybus/connection.c +++ b/drivers/greybus/connection.c @@ -187,8 +187,8 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id, spin_lock_init(&connection->lock); INIT_LIST_HEAD(&connection->operations); - connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1, - dev_name(&hd->dev), hd_cport_id); + connection->wq = 
alloc_ordered_workqueue("%s:%d", 0, dev_name(&hd->dev), + hd_cport_id); if (!connection->wq) { ret = -ENOMEM; goto err_free_connection; diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c index 16cced80867a..0d7e749174a4 100644 --- a/drivers/greybus/svc.c +++ b/drivers/greybus/svc.c @@ -1318,7 +1318,7 @@ struct gb_svc *gb_svc_create(struct gb_host_device *hd) if (!svc) return NULL; - svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev)); + svc->wq = alloc_ordered_workqueue("%s:svc", 0, dev_name(&hd->dev)); if (!svc->wq) { kfree(svc); return NULL; diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 007f26d5f1a4..2f4d09ce027a 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -829,11 +829,22 @@ static void vmbus_wait_for_unload(void) if (completion_done(&vmbus_connection.unload_event)) goto completed; - for_each_online_cpu(cpu) { + for_each_present_cpu(cpu) { struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); + /* + * In a CoCo VM the synic_message_page is not allocated + * in hv_synic_alloc(). Instead it is set/cleared in + * hv_synic_enable_regs() and hv_synic_disable_regs() + * such that it is set only when the CPU is online. If + * not all present CPUs are online, the message page + * might be NULL, so skip such CPUs. + */ page_addr = hv_cpu->synic_message_page; + if (!page_addr) + continue; + msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; @@ -867,11 +878,14 @@ completed: * maybe-pending messages on all CPUs to be able to receive new * messages after we reconnect. */ - for_each_online_cpu(cpu) { + for_each_present_cpu(cpu) { struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); page_addr = hv_cpu->synic_message_page; + if (!page_addr) + continue; + msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; msg->header.message_type = HVMSG_NONE; } diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index 64f9ceca887b..542a1d53b303 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -364,13 +364,20 @@ int hv_common_cpu_init(unsigned int cpu) flags = irqs_disabled() ? 
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 007f26d5f1a4..2f4d09ce027a 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -829,11 +829,22 @@ static void vmbus_wait_for_unload(void) if (completion_done(&vmbus_connection.unload_event)) goto completed; - for_each_online_cpu(cpu) { + for_each_present_cpu(cpu) { struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); + /* + * In a CoCo VM the synic_message_page is not allocated + * in hv_synic_alloc(). Instead it is set/cleared in + * hv_synic_enable_regs() and hv_synic_disable_regs() + * such that it is set only when the CPU is online. If + * not all present CPUs are online, the message page + * might be NULL, so skip such CPUs. + */ page_addr = hv_cpu->synic_message_page; + if (!page_addr) + continue; + msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; @@ -867,11 +878,14 @@ completed: * maybe-pending messages on all CPUs to be able to receive new * messages after we reconnect. */ - for_each_online_cpu(cpu) { + for_each_present_cpu(cpu) { struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); page_addr = hv_cpu->synic_message_page; + if (!page_addr) + continue; + msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; msg->header.message_type = HVMSG_NONE; } diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index 64f9ceca887b..542a1d53b303 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -364,13 +364,20 @@ int hv_common_cpu_init(unsigned int cpu) flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL; inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); - *inputarg = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags); - if (!(*inputarg)) - return -ENOMEM; - if (hv_root_partition) { - outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg); - *outputarg = (char *)(*inputarg) + HV_HYP_PAGE_SIZE; + /* + * hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory is already + * allocated if this CPU was previously online and then taken offline + */ + if (!*inputarg) { + *inputarg = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags); + if (!(*inputarg)) + return -ENOMEM; + + if (hv_root_partition) { + outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg); + *outputarg = (char *)(*inputarg) + HV_HYP_PAGE_SIZE; + } } msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX); @@ -385,24 +392,17 @@ int hv_common_cpu_init(unsigned int cpu) int hv_common_cpu_die(unsigned int cpu) { - unsigned long flags; - void **inputarg, **outputarg; - void *mem; - - local_irq_save(flags); - - inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); - mem = *inputarg; - *inputarg = NULL; - - if (hv_root_partition) { - outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg); - *outputarg = NULL; - } - - local_irq_restore(flags); - - kfree(mem); + /* + * The hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory + * is not freed when the CPU goes offline as the hyperv_pcpu_input_arg + * may be used by the Hyper-V vPCI driver in reassigning interrupts + * as part of the offlining process. The interrupt reassignment + * happens *after* the CPUHP_AP_HYPERV_ONLINE state has run and + * called this function. + * + * If a previously offlined CPU is brought back online again, the + * originally allocated memory is reused in hv_common_cpu_init(). + */ return 0; } diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 1c65a6dfb9fa..67f95a29aeca 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1372,7 +1372,7 @@ static int vmbus_bus_init(void) ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online", hv_synic_init, hv_synic_cleanup); if (ret < 0) - goto err_cpuhp; + goto err_alloc; hyperv_cpuhp_online = ret; ret = vmbus_connect(); @@ -1392,9 +1392,8 @@ static int vmbus_bus_init(void) err_connect: cpuhp_remove_state(hyperv_cpuhp_online); -err_cpuhp: - hv_synic_free(); err_alloc: + hv_synic_free(); if (vmbus_irq == -1) { hv_remove_vmbus_handler(); } else { diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c index 1fc4fd79a1c6..1bab91ce8e95 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.c +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -218,7 +218,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr) * Enable the TRBE without clearing LIMITPTR which * might be required for fetching the buffer limits.
*/ - trblimitr &= ~TRBLIMITR_ENABLE; + trblimitr &= ~TRBLIMITR_EL1_E; write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); if (trbe_needs_drain_after_disable(cpudata)) @@ -582,12 +582,12 @@ static void clr_trbe_status(void) u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1); WARN_ON(is_trbe_enabled()); - trbsr &= ~TRBSR_IRQ; - trbsr &= ~TRBSR_TRG; - trbsr &= ~TRBSR_WRAP; - trbsr &= ~(TRBSR_EC_MASK << TRBSR_EC_SHIFT); - trbsr &= ~(TRBSR_BSC_MASK << TRBSR_BSC_SHIFT); - trbsr &= ~TRBSR_STOP; + trbsr &= ~TRBSR_EL1_IRQ; + trbsr &= ~TRBSR_EL1_TRG; + trbsr &= ~TRBSR_EL1_WRAP; + trbsr &= ~TRBSR_EL1_EC_MASK; + trbsr &= ~TRBSR_EL1_BSC_MASK; + trbsr &= ~TRBSR_EL1_S; write_sysreg_s(trbsr, SYS_TRBSR_EL1); } @@ -596,13 +596,13 @@ static void set_trbe_limit_pointer_enabled(struct trbe_buf *buf) u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); unsigned long addr = buf->trbe_limit; - WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT))); + WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_EL1_LIMIT_SHIFT))); WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); - trblimitr &= ~TRBLIMITR_NVM; - trblimitr &= ~(TRBLIMITR_FILL_MODE_MASK << TRBLIMITR_FILL_MODE_SHIFT); - trblimitr &= ~(TRBLIMITR_TRIG_MODE_MASK << TRBLIMITR_TRIG_MODE_SHIFT); - trblimitr &= ~(TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT); + trblimitr &= ~TRBLIMITR_EL1_nVM; + trblimitr &= ~TRBLIMITR_EL1_FM_MASK; + trblimitr &= ~TRBLIMITR_EL1_TM_MASK; + trblimitr &= ~TRBLIMITR_EL1_LIMIT_MASK; /* * Fill trace buffer mode is used here while configuring the @@ -613,14 +613,15 @@ static void set_trbe_limit_pointer_enabled(struct trbe_buf *buf) * trace data in the interrupt handler, before reconfiguring * the TRBE. */ - trblimitr |= (TRBE_FILL_MODE_FILL & TRBLIMITR_FILL_MODE_MASK) << TRBLIMITR_FILL_MODE_SHIFT; + trblimitr |= (TRBLIMITR_EL1_FM_FILL << TRBLIMITR_EL1_FM_SHIFT) & + TRBLIMITR_EL1_FM_MASK; /* * Trigger mode is not used here while configuring the TRBE for * the trace capture. Hence just keep this in the ignore mode. 
*/ - trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) << - TRBLIMITR_TRIG_MODE_SHIFT; + trblimitr |= (TRBLIMITR_EL1_TM_IGNR << TRBLIMITR_EL1_TM_SHIFT) & + TRBLIMITR_EL1_TM_MASK; trblimitr |= (addr & PAGE_MASK); set_trbe_enabled(buf->cpudata, trblimitr); } diff --git a/drivers/hwtracing/coresight/coresight-trbe.h b/drivers/hwtracing/coresight/coresight-trbe.h index 98ff1b17ad07..77cbb5c63878 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.h +++ b/drivers/hwtracing/coresight/coresight-trbe.h @@ -30,7 +30,7 @@ static inline bool is_trbe_enabled(void) { u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); - return trblimitr & TRBLIMITR_ENABLE; + return trblimitr & TRBLIMITR_EL1_E; } #define TRBE_EC_OTHERS 0 @@ -39,7 +39,7 @@ static inline bool is_trbe_enabled(void) static inline int get_trbe_ec(u64 trbsr) { - return (trbsr >> TRBSR_EC_SHIFT) & TRBSR_EC_MASK; + return (trbsr & TRBSR_EL1_EC_MASK) >> TRBSR_EL1_EC_SHIFT; } #define TRBE_BSC_NOT_STOPPED 0 @@ -48,63 +48,55 @@ static inline int get_trbe_ec(u64 trbsr) static inline int get_trbe_bsc(u64 trbsr) { - return (trbsr >> TRBSR_BSC_SHIFT) & TRBSR_BSC_MASK; + return (trbsr & TRBSR_EL1_BSC_MASK) >> TRBSR_EL1_BSC_SHIFT; } static inline void clr_trbe_irq(void) { u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1); - trbsr &= ~TRBSR_IRQ; + trbsr &= ~TRBSR_EL1_IRQ; write_sysreg_s(trbsr, SYS_TRBSR_EL1); } static inline bool is_trbe_irq(u64 trbsr) { - return trbsr & TRBSR_IRQ; + return trbsr & TRBSR_EL1_IRQ; } static inline bool is_trbe_trg(u64 trbsr) { - return trbsr & TRBSR_TRG; + return trbsr & TRBSR_EL1_TRG; } static inline bool is_trbe_wrap(u64 trbsr) { - return trbsr & TRBSR_WRAP; + return trbsr & TRBSR_EL1_WRAP; } static inline bool is_trbe_abort(u64 trbsr) { - return trbsr & TRBSR_ABORT; + return trbsr & TRBSR_EL1_EA; } static inline bool is_trbe_running(u64 trbsr) { - return !(trbsr & TRBSR_STOP); + return !(trbsr & TRBSR_EL1_S); } -#define TRBE_TRIG_MODE_STOP 0 -#define TRBE_TRIG_MODE_IRQ 1 -#define TRBE_TRIG_MODE_IGNORE 3 - -#define TRBE_FILL_MODE_FILL 0 -#define TRBE_FILL_MODE_WRAP 1 -#define TRBE_FILL_MODE_CIRCULAR_BUFFER 3 - static inline bool get_trbe_flag_update(u64 trbidr) { - return trbidr & TRBIDR_FLAG; + return trbidr & TRBIDR_EL1_F; } static inline bool is_trbe_programmable(u64 trbidr) { - return !(trbidr & TRBIDR_PROG); + return !(trbidr & TRBIDR_EL1_P); } static inline int get_trbe_address_align(u64 trbidr) { - return (trbidr >> TRBIDR_ALIGN_SHIFT) & TRBIDR_ALIGN_MASK; + return (trbidr & TRBIDR_EL1_Align_MASK) >> TRBIDR_EL1_Align_SHIFT; } static inline unsigned long get_trbe_write_pointer(void) @@ -121,7 +113,7 @@ static inline void set_trbe_write_pointer(unsigned long addr) static inline unsigned long get_trbe_limit_pointer(void) { u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); - unsigned long addr = trblimitr & (TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT); + unsigned long addr = trblimitr & TRBLIMITR_EL1_LIMIT_MASK; WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); return addr; @@ -130,7 +122,7 @@ static inline unsigned long get_trbe_limit_pointer(void) static inline unsigned long get_trbe_base_pointer(void) { u64 trbbaser = read_sysreg_s(SYS_TRBBASER_EL1); - unsigned long addr = trbbaser & (TRBBASER_BASE_MASK << TRBBASER_BASE_SHIFT); + unsigned long addr = trbbaser & TRBBASER_EL1_BASE_MASK; WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); return addr; @@ -139,7 +131,7 @@ static inline unsigned long get_trbe_base_pointer(void) static inline void set_trbe_base_pointer(unsigned long addr) { WARN_ON(is_trbe_enabled()); - 
WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_BASE_SHIFT))); + WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_EL1_BASE_SHIFT))); WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); write_sysreg_s(addr, SYS_TRBBASER_EL1); }
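The TRBE conversion above moves from hand-rolled, unshifted TRBSR_*/TRBLIMITR_* mask and shift pairs to the generated TRBSR_EL1_*/TRBLIMITR_EL1_* definitions, whose *_MASK values are already shifted into register position; extraction therefore becomes (reg & MASK) >> SHIFT instead of (reg >> SHIFT) & MASK. A sketch of the two conventions on an invented field (DEMO_* is illustrative, not a real TRBE layout):

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define DEMO_EC_SHIFT   26
    #define DEMO_EC_MASK    GENMASK_ULL(31, 26)     /* pre-shifted, new style */

    static inline unsigned int demo_get_ec(u64 reg)
    {
            /* old style: (reg >> DEMO_EC_SHIFT) & 0x3f, with an unshifted mask */
            return (reg & DEMO_EC_MASK) >> DEMO_EC_SHIFT;
    }

    static inline unsigned int demo_get_ec_fieldget(u64 reg)
    {
            return FIELD_GET(DEMO_EC_MASK, reg);    /* equivalent helper */
    }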
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 1af0a637d7f1..4d24ceb57ee7 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -201,8 +201,8 @@ static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx) /* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */ static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx) { - u8 prescale, filt, sethold, clkhi, clklo, datavd; - unsigned int clk_rate, clk_cycle; + u8 prescale, filt, sethold, datavd; + unsigned int clk_rate, clk_cycle, clkhi, clklo; enum lpi2c_imx_pincfg pincfg; unsigned int temp; diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 2e153f2f71b6..78682388e02e 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -1752,16 +1752,21 @@ nodma: if (!clk_freq || clk_freq > I2C_MAX_FAST_MODE_PLUS_FREQ) { dev_err(qup->dev, "clock frequency not supported %d\n", clk_freq); - return -EINVAL; + ret = -EINVAL; + goto fail_dma; } qup->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(qup->base)) - return PTR_ERR(qup->base); + if (IS_ERR(qup->base)) { + ret = PTR_ERR(qup->base); + goto fail_dma; + } qup->irq = platform_get_irq(pdev, 0); - if (qup->irq < 0) - return qup->irq; + if (qup->irq < 0) { + ret = qup->irq; + goto fail_dma; + } if (has_acpi_companion(qup->dev)) { ret = device_property_read_u32(qup->dev, @@ -1775,13 +1780,15 @@ nodma: qup->clk = devm_clk_get(qup->dev, "core"); if (IS_ERR(qup->clk)) { dev_err(qup->dev, "Could not get core clock\n"); - return PTR_ERR(qup->clk); + ret = PTR_ERR(qup->clk); + goto fail_dma; } qup->pclk = devm_clk_get(qup->dev, "iface"); if (IS_ERR(qup->pclk)) { dev_err(qup->dev, "Could not get iface clock\n"); - return PTR_ERR(qup->pclk); + ret = PTR_ERR(qup->pclk); + goto fail_dma; } qup_i2c_enable_clocks(qup); src_clk_freq = clk_get_rate(qup->clk); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index aa2d19db2b1d..34201d7ef33e 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -199,6 +199,43 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev, return __intel_idle(dev, drv, index); } +static __always_inline int __intel_idle_hlt(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + raw_safe_halt(); + raw_local_irq_disable(); + return index; +} + +/** + * intel_idle_hlt - Ask the processor to enter the given idle state using hlt. + * @dev: cpuidle device of the target CPU. + * @drv: cpuidle driver (assumed to point to intel_idle_driver). + * @index: Target idle state index. + * + * Use the HLT instruction to notify the processor that the CPU represented by + * @dev is idle and it can try to enter the idle state corresponding to @index. + * + * Must be called under local_irq_disable(). + */ +static __cpuidle int intel_idle_hlt(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + return __intel_idle_hlt(dev, drv, index); +} + +static __cpuidle int intel_idle_hlt_irq_on(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + int ret; + + raw_local_irq_enable(); + ret = __intel_idle_hlt(dev, drv, index); + raw_local_irq_disable(); + + return ret; +} + /** * intel_idle_s2idle - Ask the processor to enter the given idle state. * @dev: cpuidle device of the target CPU. @@ -1242,6 +1279,25 @@ static struct cpuidle_state snr_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state vmguest_cstates[] __initdata = { + { + .name = "C1", + .desc = "HLT", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE, + .exit_latency = 5, + .target_residency = 10, + .enter = &intel_idle_hlt, }, + { + .name = "C1L", + .desc = "Long HLT", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 5, + .target_residency = 200, + .enter = &intel_idle_hlt, }, + { + .enter = NULL } +}; + static const struct idle_cpu idle_cpu_nehalem __initconst = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, @@ -1839,6 +1895,66 @@ static bool __init intel_idle_verify_cstate(unsigned int mwait_hint) return true; } +static void state_update_enter_method(struct cpuidle_state *state, int cstate) +{ + if (state->enter == intel_idle_hlt) { + if (force_irq_on) { + pr_info("forced intel_idle_irq for state %d\n", cstate); + state->enter = intel_idle_hlt_irq_on; + } + return; + } + if (state->enter == intel_idle_hlt_irq_on) + return; /* no update scenarios */ + + if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) { + /* + * Combining XSTATE with IBRS or IRQ_ENABLE flags + * is not currently supported by this driver. + */ + WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IBRS); + WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE); + state->enter = intel_idle_xstate; + return; + } + + if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) && + state->flags & CPUIDLE_FLAG_IBRS) { + /* + * IBRS mitigation requires that C-states are entered + * with interrupts disabled. + */ + WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE); + state->enter = intel_idle_ibrs; + return; + } + + if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) { + state->enter = intel_idle_irq; + return; + } + + if (force_irq_on) { + pr_info("forced intel_idle_irq for state %d\n", cstate); + state->enter = intel_idle_irq; + } +} + +/* + * For mwait based states, we want to verify the cpuid data to see if the state + * is actually supported by this specific CPU. + * For non-mwait based states, this check should be skipped. + */ +static bool should_verify_mwait(struct cpuidle_state *state) +{ + if (state->enter == intel_idle_hlt) + return false; + if (state->enter == intel_idle_hlt_irq_on) + return false; + + return true; +} + static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) { int cstate; @@ -1887,35 +2003,15 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) } mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); - if (!intel_idle_verify_cstate(mwait_hint)) + if (should_verify_mwait(&cpuidle_state_table[cstate]) && !intel_idle_verify_cstate(mwait_hint)) continue; /* Structure copy. */ drv->states[drv->state_count] = cpuidle_state_table[cstate]; state = &drv->states[drv->state_count]; - if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) { - /* - * Combining with XSTATE with IBRS or IRQ_ENABLE flags - * is not currently supported but this driver. - */ - WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IBRS); - WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE); - state->enter = intel_idle_xstate; - } else if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) && state->flags & CPUIDLE_FLAG_IBRS) { - /* - * IBRS mitigation requires that C-states are entered - * with interrupts disabled.
- */ - WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE); - state->enter = intel_idle_ibrs; - } else if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) { - state->enter = intel_idle_irq; - } else if (force_irq_on) { - pr_info("forced intel_idle_irq for state %d\n", cstate); - state->enter = intel_idle_irq; - } + state_update_enter_method(state, cstate); + if ((disabled_states_mask & BIT(drv->state_count)) || ((icpu->use_acpi || force_use_acpi) && @@ -2041,6 +2137,93 @@ static void __init intel_idle_cpuidle_devices_uninit(void) cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i)); } +/* + * Match up the latency and break even point of the bare metal (cpu based) + * states with the deepest VM available state. + * + * We only want to do this for the deepest states, the ones that have + * the TLB_FLUSHED flag set. + * + * All our short idle states are dominated by vmexit/vmenter latencies, + * not the underlying hardware latencies so we keep our values for these. + */ +static void matchup_vm_state_with_baremetal(void) +{ + int cstate; + + for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { + int matching_cstate; + + if (intel_idle_max_cstate_reached(cstate)) + break; + + if (!cpuidle_state_table[cstate].enter) + break; + + if (!(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_TLB_FLUSHED)) + continue; + + for (matching_cstate = 0; matching_cstate < CPUIDLE_STATE_MAX; ++matching_cstate) { + if (!icpu->state_table[matching_cstate].enter) + break; + if (icpu->state_table[matching_cstate].exit_latency > cpuidle_state_table[cstate].exit_latency) { + cpuidle_state_table[cstate].exit_latency = icpu->state_table[matching_cstate].exit_latency; + cpuidle_state_table[cstate].target_residency = icpu->state_table[matching_cstate].target_residency; + } + } + + } +} + + +static int __init intel_idle_vminit(const struct x86_cpu_id *id) +{ + int retval; + + cpuidle_state_table = vmguest_cstates; + + icpu = (const struct idle_cpu *)id->driver_data; + + pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n", + boot_cpu_data.x86_model); + + intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); + if (!intel_idle_cpuidle_devices) + return -ENOMEM; + + /* + * We don't know exactly what the host will do when we go idle, but as a worst estimate + * we can assume that the exit latency of the deepest host state will be hit for our + * deep (long duration) guest idle state. + * The same logic applies to the break even point for the long duration guest idle state. + * So let's copy these two properties from the table we found for the host CPU type. + */ + matchup_vm_state_with_baremetal(); + + intel_idle_cpuidle_driver_init(&intel_idle_driver); + + retval = cpuidle_register_driver(&intel_idle_driver); + if (retval) { + struct cpuidle_driver *drv = cpuidle_get_driver(); + printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"), + drv ?
drv->name : "none"); + goto init_driver_fail; + } + + retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online", + intel_idle_cpu_online, NULL); + if (retval < 0) + goto hp_setup_fail; + + return 0; +hp_setup_fail: + intel_idle_cpuidle_devices_uninit(); + cpuidle_unregister_driver(&intel_idle_driver); +init_driver_fail: + free_percpu(intel_idle_cpuidle_devices); + return retval; +} + static int __init intel_idle_init(void) { const struct x86_cpu_id *id; @@ -2059,6 +2242,8 @@ static int __init intel_idle_init(void) id = x86_match_cpu(intel_idle_ids); if (id) { if (!boot_cpu_has(X86_FEATURE_MWAIT)) { + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return intel_idle_vminit(id); pr_debug("Please enable MWAIT in BIOS SETUP\n"); return -ENODEV; } diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 93a1c48d0c32..6b3f4384e46a 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -3295,7 +3295,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) route->path_rec->traffic_class = tos; route->path_rec->mtu = iboe_get_mtu(ndev->mtu); route->path_rec->rate_selector = IB_SA_EQ; - route->path_rec->rate = iboe_get_rate(ndev); + route->path_rec->rate = IB_RATE_PORT_CURRENT; dev_put(ndev); route->path_rec->packet_life_time_selector = IB_SA_EQ; /* In case ACK timeout is set, use this value to calculate @@ -4964,7 +4964,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, if (!ndev) return -ENODEV; - ib.rec.rate = iboe_get_rate(ndev); + ib.rec.rate = IB_RATE_PORT_CURRENT; ib.rec.hop_limit = 1; ib.rec.mtu = iboe_get_mtu(ndev->mtu); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 4796f6a8828c..e836c9c477f6 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1850,8 +1850,13 @@ static int modify_qp(struct uverbs_attr_bundle *attrs, attr->path_mtu = cmd->base.path_mtu; if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE) attr->path_mig_state = cmd->base.path_mig_state; - if (cmd->base.attr_mask & IB_QP_QKEY) + if (cmd->base.attr_mask & IB_QP_QKEY) { + if (cmd->base.qkey & IB_QP_SET_QKEY && !capable(CAP_NET_RAW)) { + ret = -EPERM; + goto release_qp; + } attr->qkey = cmd->base.qkey; + } if (cmd->base.attr_mask & IB_QP_RQ_PSN) attr->rq_psn = cmd->base.rq_psn; if (cmd->base.attr_mask & IB_QP_SQ_PSN) diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index fbace69672ca..7c9c79c13941 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -222,8 +222,12 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue, spin_lock_irq(&ev_queue->lock); while (list_empty(&ev_queue->event_list)) { - spin_unlock_irq(&ev_queue->lock); + if (ev_queue->is_closed) { + spin_unlock_irq(&ev_queue->lock); + return -EIO; + } + spin_unlock_irq(&ev_queue->lock); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; @@ -233,12 +237,6 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue, return -ERESTARTSYS; spin_lock_irq(&ev_queue->lock); - - /* If device was disassociated and no event exists set an error */ - if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) { - spin_unlock_irq(&ev_queue->lock); - return -EIO; - } } event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list); diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 5a2baf49ecaa..2c95e6f3d47a 100644 
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 5a2baf49ecaa..2c95e6f3d47a 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -135,8 +135,6 @@ struct bnxt_re_dev { struct delayed_work worker; u8 cur_prio_map; - u16 active_speed; - u8 active_width; /* FP Notification Queue (CQ & SRQ) */ struct tasklet_struct nq_task; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index b1c36412025f..952811c40c54 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -199,6 +199,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num, { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; + int rc; memset(port_attr, 0, sizeof(*port_attr)); @@ -228,10 +229,10 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num, port_attr->sm_sl = 0; port_attr->subnet_timeout = 0; port_attr->init_type_reply = 0; - port_attr->active_speed = rdev->active_speed; - port_attr->active_width = rdev->active_width; + rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed, + &port_attr->active_width); - return 0; + return rc; } int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num, diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index e34eccd178d0..3073398a2183 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1077,8 +1077,6 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev) return rc; } dev_info(rdev_to_dev(rdev), "Device registered with IB successfully"); - ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, - &rdev->active_width); set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ? diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 1c06920505d2..93257fa5aae8 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -209,7 +209,8 @@ static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev, !vport_qcounters_supported(dev)) || !port_num) return &dev->port[0].cnts; - return &dev->port[port_num - 1].cnts; + return is_mdev_switchdev_mode(dev->mdev) ?
+ &dev->port[1].cnts : &dev->port[port_num - 1].cnts; } /** @@ -262,7 +263,7 @@ static struct rdma_hw_stats * mlx5_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - const struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts; + const struct mlx5_ib_counters *cnts = get_counters(dev, port_num); return do_alloc_stats(cnts); } @@ -329,6 +330,7 @@ static int mlx5_ib_query_q_counters_vport(struct mlx5_ib_dev *dev, { u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {}; u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {}; + struct mlx5_core_dev *mdev; __be32 val; int ret, i; @@ -336,12 +338,16 @@ static int mlx5_ib_query_q_counters_vport(struct mlx5_ib_dev *dev, dev->port[port_num].rep->vport == MLX5_VPORT_UPLINK) return 0; + mdev = mlx5_eswitch_get_core_dev(dev->port[port_num].rep->esw); + if (!mdev) + return -EOPNOTSUPP; + MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); MLX5_SET(query_q_counter_in, in, other_vport, 1); MLX5_SET(query_q_counter_in, in, vport_number, dev->port[port_num].rep->vport); MLX5_SET(query_q_counter_in, in, aggregate, 1); - ret = mlx5_cmd_exec_inout(dev->mdev, query_q_counter, in, out); + ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out); if (ret) return ret; @@ -575,43 +581,53 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, bool is_vport = is_mdev_switchdev_mode(dev->mdev) && port_num != MLX5_VPORT_PF; const struct mlx5_ib_counter *names; - int j = 0, i; + int j = 0, i, size; names = is_vport ? vport_basic_q_cnts : basic_q_cnts; - for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) { + size = is_vport ? ARRAY_SIZE(vport_basic_q_cnts) : + ARRAY_SIZE(basic_q_cnts); + for (i = 0; i < size; i++, j++) { descs[j].name = names[i].name; - offsets[j] = basic_q_cnts[i].offset; + offsets[j] = names[i].offset; } names = is_vport ? vport_out_of_seq_q_cnts : out_of_seq_q_cnts; + size = is_vport ? ARRAY_SIZE(vport_out_of_seq_q_cnts) : + ARRAY_SIZE(out_of_seq_q_cnts); if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) { - for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) { + for (i = 0; i < size; i++, j++) { descs[j].name = names[i].name; - offsets[j] = out_of_seq_q_cnts[i].offset; + offsets[j] = names[i].offset; } } names = is_vport ? vport_retrans_q_cnts : retrans_q_cnts; + size = is_vport ? ARRAY_SIZE(vport_retrans_q_cnts) : + ARRAY_SIZE(retrans_q_cnts); if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { - for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) { + for (i = 0; i < size; i++, j++) { descs[j].name = names[i].name; - offsets[j] = retrans_q_cnts[i].offset; + offsets[j] = names[i].offset; } } names = is_vport ? vport_extended_err_cnts : extended_err_cnts; + size = is_vport ? ARRAY_SIZE(vport_extended_err_cnts) : + ARRAY_SIZE(extended_err_cnts); if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) { - for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) { + for (i = 0; i < size; i++, j++) { descs[j].name = names[i].name; - offsets[j] = extended_err_cnts[i].offset; + offsets[j] = names[i].offset; } } names = is_vport ? vport_roce_accl_cnts : roce_accl_cnts; + size = is_vport ? 
ARRAY_SIZE(vport_roce_accl_cnts) : + ARRAY_SIZE(roce_accl_cnts); if (MLX5_CAP_GEN(dev->mdev, roce_accl)) { - for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) { + for (i = 0; i < size; i++, j++) { descs[j].name = names[i].name; - offsets[j] = roce_accl_cnts[i].offset; + offsets[j] = names[i].offset; } } @@ -661,25 +677,37 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev, struct mlx5_ib_counters *cnts, u32 port_num) { - u32 num_counters, num_op_counters = 0; + bool is_vport = is_mdev_switchdev_mode(dev->mdev) && + port_num != MLX5_VPORT_PF; + u32 num_counters, num_op_counters = 0, size; - num_counters = ARRAY_SIZE(basic_q_cnts); + size = is_vport ? ARRAY_SIZE(vport_basic_q_cnts) : + ARRAY_SIZE(basic_q_cnts); + num_counters = size; + size = is_vport ? ARRAY_SIZE(vport_out_of_seq_q_cnts) : + ARRAY_SIZE(out_of_seq_q_cnts); if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) - num_counters += ARRAY_SIZE(out_of_seq_q_cnts); + num_counters += size; + size = is_vport ? ARRAY_SIZE(vport_retrans_q_cnts) : + ARRAY_SIZE(retrans_q_cnts); if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) - num_counters += ARRAY_SIZE(retrans_q_cnts); + num_counters += size; + size = is_vport ? ARRAY_SIZE(vport_extended_err_cnts) : + ARRAY_SIZE(extended_err_cnts); if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) - num_counters += ARRAY_SIZE(extended_err_cnts); + num_counters += size; + size = is_vport ? ARRAY_SIZE(vport_roce_accl_cnts) : + ARRAY_SIZE(roce_accl_cnts); if (MLX5_CAP_GEN(dev->mdev, roce_accl)) - num_counters += ARRAY_SIZE(roce_accl_cnts); + num_counters += size; cnts->num_q_counters = num_counters; - if (is_mdev_switchdev_mode(dev->mdev) && port_num != MLX5_VPORT_PF) + if (is_vport) goto skip_non_qcounters; if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { @@ -725,11 +753,11 @@ err: static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) { u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {}; - int num_cnt_ports; + int num_cnt_ports = dev->num_ports; int i, j; - num_cnt_ports = (!is_mdev_switchdev_mode(dev->mdev) || - vport_qcounters_supported(dev)) ? dev->num_ports : 1; + if (is_mdev_switchdev_mode(dev->mdev)) + num_cnt_ports = min(2, num_cnt_ports); MLX5_SET(dealloc_q_counter_in, in, opcode, MLX5_CMD_OP_DEALLOC_Q_COUNTER); @@ -761,15 +789,22 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) { u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {}; u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {}; - int num_cnt_ports; + int num_cnt_ports = dev->num_ports; int err = 0; int i; bool is_shared; MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0; - num_cnt_ports = (!is_mdev_switchdev_mode(dev->mdev) || - vport_qcounters_supported(dev)) ? dev->num_ports : 1; + + /* + * In switchdev mode we need to allocate two sets of port counters: one + * holds the real Q_counters of this device, while the other serves as + * a helper that the PF uses to query all other vports.
+ */ + if (is_mdev_switchdev_mode(dev->mdev)) + num_cnt_ports = min(2, num_cnt_ports); for (i = 0; i < num_cnt_ports; i++) { err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts, i); diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c index 3008632a6c20..1e419e080b53 100644 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@ -695,8 +695,6 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev, struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_table *ft; - if (mlx5_ib_shared_ft_allowed(&dev->ib_dev)) - ft_attr.uid = MLX5_SHARED_RESOURCE_UID; ft_attr.prio = priority; ft_attr.max_fte = num_entries; ft_attr.flags = flags; @@ -2025,6 +2023,237 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject, return 0; } +static int steering_anchor_create_ft(struct mlx5_ib_dev *dev, + struct mlx5_ib_flow_prio *ft_prio, + enum mlx5_flow_namespace_type ns_type) +{ + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *ft; + + if (ft_prio->anchor.ft) + return 0; + + ns = mlx5_get_flow_namespace(dev->mdev, ns_type); + if (!ns) + return -EOPNOTSUPP; + + ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED; + ft_attr.uid = MLX5_SHARED_RESOURCE_UID; + ft_attr.prio = 0; + ft_attr.max_fte = 2; + ft_attr.level = 1; + + ft = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(ft)) + return PTR_ERR(ft); + + ft_prio->anchor.ft = ft; + + return 0; +} + +static void steering_anchor_destroy_ft(struct mlx5_ib_flow_prio *ft_prio) +{ + if (ft_prio->anchor.ft) { + mlx5_destroy_flow_table(ft_prio->anchor.ft); + ft_prio->anchor.ft = NULL; + } +} + +static int +steering_anchor_create_fg_drop(struct mlx5_ib_flow_prio *ft_prio) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *fg; + void *flow_group_in; + int err = 0; + + if (ft_prio->anchor.fg_drop) + return 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); + + fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in); + if (IS_ERR(fg)) { + err = PTR_ERR(fg); + goto out; + } + + ft_prio->anchor.fg_drop = fg; + +out: + kvfree(flow_group_in); + + return err; +} + +static void +steering_anchor_destroy_fg_drop(struct mlx5_ib_flow_prio *ft_prio) +{ + if (ft_prio->anchor.fg_drop) { + mlx5_destroy_flow_group(ft_prio->anchor.fg_drop); + ft_prio->anchor.fg_drop = NULL; + } +} + +static int +steering_anchor_create_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *fg; + void *flow_group_in; + int err = 0; + + if (ft_prio->anchor.fg_goto_table) + return 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in); + if (IS_ERR(fg)) { + err = PTR_ERR(fg); + goto out; + } + ft_prio->anchor.fg_goto_table = fg; + +out: + kvfree(flow_group_in); + + return err; +} + +static void +steering_anchor_destroy_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio) +{ + if (ft_prio->anchor.fg_goto_table) { + mlx5_destroy_flow_group(ft_prio->anchor.fg_goto_table); + ft_prio->anchor.fg_goto_table = NULL; + } +} + +static int +steering_anchor_create_rule_drop(struct mlx5_ib_flow_prio *ft_prio) +{ + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *handle; + + if 
(ft_prio->anchor.rule_drop) + return 0; + + flow_act.fg = ft_prio->anchor.fg_drop; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + + handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act, + NULL, 0); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + ft_prio->anchor.rule_drop = handle; + + return 0; +} + +static void steering_anchor_destroy_rule_drop(struct mlx5_ib_flow_prio *ft_prio) +{ + if (ft_prio->anchor.rule_drop) { + mlx5_del_flow_rules(ft_prio->anchor.rule_drop); + ft_prio->anchor.rule_drop = NULL; + } +} + +static int +steering_anchor_create_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio) +{ + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *handle; + + if (ft_prio->anchor.rule_goto_table) + return 0; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + flow_act.fg = ft_prio->anchor.fg_goto_table; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = ft_prio->flow_table; + + handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act, + &dest, 1); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + ft_prio->anchor.rule_goto_table = handle; + + return 0; +} + +static void +steering_anchor_destroy_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio) +{ + if (ft_prio->anchor.rule_goto_table) { + mlx5_del_flow_rules(ft_prio->anchor.rule_goto_table); + ft_prio->anchor.rule_goto_table = NULL; + } +} + +static int steering_anchor_create_res(struct mlx5_ib_dev *dev, + struct mlx5_ib_flow_prio *ft_prio, + enum mlx5_flow_namespace_type ns_type) +{ + int err; + + err = steering_anchor_create_ft(dev, ft_prio, ns_type); + if (err) + return err; + + err = steering_anchor_create_fg_drop(ft_prio); + if (err) + goto destroy_ft; + + err = steering_anchor_create_fg_goto_table(ft_prio); + if (err) + goto destroy_fg_drop; + + err = steering_anchor_create_rule_drop(ft_prio); + if (err) + goto destroy_fg_goto_table; + + err = steering_anchor_create_rule_goto_table(ft_prio); + if (err) + goto destroy_rule_drop; + + return 0; + +destroy_rule_drop: + steering_anchor_destroy_rule_drop(ft_prio); +destroy_fg_goto_table: + steering_anchor_destroy_fg_goto_table(ft_prio); +destroy_fg_drop: + steering_anchor_destroy_fg_drop(ft_prio); +destroy_ft: + steering_anchor_destroy_ft(ft_prio); + + return err; +} + +static void mlx5_steering_anchor_destroy_res(struct mlx5_ib_flow_prio *ft_prio) +{ + steering_anchor_destroy_rule_goto_table(ft_prio); + steering_anchor_destroy_rule_drop(ft_prio); + steering_anchor_destroy_fg_goto_table(ft_prio); + steering_anchor_destroy_fg_drop(ft_prio); + steering_anchor_destroy_ft(ft_prio); +} + static int steering_anchor_cleanup(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) @@ -2035,6 +2264,9 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject, return -EBUSY; mutex_lock(&obj->dev->flow_db->lock); + if (!--obj->ft_prio->anchor.rule_goto_table_ref) + steering_anchor_destroy_rule_goto_table(obj->ft_prio); + put_flow_table(obj->dev, obj->ft_prio, true); mutex_unlock(&obj->dev->flow_db->lock); @@ -2042,6 +2274,24 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject, return 0; } +static void fs_cleanup_anchor(struct mlx5_ib_flow_prio *prio, + int count) +{ + while (count--) + mlx5_steering_anchor_destroy_res(&prio[count]); +} + +void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev) +{ + fs_cleanup_anchor(dev->flow_db->prios, MLX5_IB_NUM_FLOW_FT); + 
fs_cleanup_anchor(dev->flow_db->egress_prios, MLX5_IB_NUM_FLOW_FT); + fs_cleanup_anchor(dev->flow_db->sniffer, MLX5_IB_NUM_SNIFFER_FTS); + fs_cleanup_anchor(dev->flow_db->egress, MLX5_IB_NUM_EGRESS_FTS); + fs_cleanup_anchor(dev->flow_db->fdb, MLX5_IB_NUM_FDB_FTS); + fs_cleanup_anchor(dev->flow_db->rdma_rx, MLX5_IB_NUM_FLOW_FT); + fs_cleanup_anchor(dev->flow_db->rdma_tx, MLX5_IB_NUM_FLOW_FT); +} + static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs, struct mlx5_ib_flow_matcher *obj) { @@ -2182,21 +2432,31 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)( return -ENOMEM; mutex_lock(&dev->flow_db->lock); + ft_prio = _get_flow_table(dev, priority, ns_type, 0); if (IS_ERR(ft_prio)) { - mutex_unlock(&dev->flow_db->lock); err = PTR_ERR(ft_prio); goto free_obj; } ft_prio->refcount++; - ft_id = mlx5_flow_table_id(ft_prio->flow_table); - mutex_unlock(&dev->flow_db->lock); + + if (!ft_prio->anchor.rule_goto_table_ref) { + err = steering_anchor_create_res(dev, ft_prio, ns_type); + if (err) + goto put_flow_table; + } + + ft_prio->anchor.rule_goto_table_ref++; + + ft_id = mlx5_flow_table_id(ft_prio->anchor.ft); + err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID, &ft_id, sizeof(ft_id)); if (err) - goto put_flow_table; + goto destroy_res; + + mutex_unlock(&dev->flow_db->lock); uobj->object = obj; obj->dev = dev; @@ -2205,8 +2465,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)( return 0; +destroy_res: + --ft_prio->anchor.rule_goto_table_ref; + mlx5_steering_anchor_destroy_res(ft_prio); put_flow_table: - mutex_lock(&dev->flow_db->lock); put_flow_table(dev, ft_prio, true); mutex_unlock(&dev->flow_db->lock); free_obj: diff --git a/drivers/infiniband/hw/mlx5/fs.h b/drivers/infiniband/hw/mlx5/fs.h index ad320adaf321..b9734904f5f0 100644 --- a/drivers/infiniband/hw/mlx5/fs.h +++ b/drivers/infiniband/hw/mlx5/fs.h @@ -10,6 +10,7 @@ #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) int mlx5_ib_fs_init(struct mlx5_ib_dev *dev); +void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev); #else static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev) { @@ -21,9 +22,24 @@ static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev) mutex_init(&dev->flow_db->lock); return 0; } + +inline void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev) {} #endif + static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev) { + /* When a steering anchor is created, a special flow table is also + * created for the user to reference. Since the user can reference it, + * the kernel cannot trust that when the user destroys the steering + * anchor, they no longer reference the flow table. + * + * To address this issue, when a user destroys a steering anchor, only + * the flow steering rule in the table is destroyed, but the table + * itself is kept to deal with the above scenario. The remaining + * resources are only removed when the RDMA device is destroyed; by then + * it is safe to assume that all references are gone.
+ */ + mlx5_ib_fs_cleanup_anchor(dev); kfree(dev->flow_db); } #endif /* _MLX5_IB_FS_H */ diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 5d45de223c43..f0b394ed7452 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4275,6 +4275,9 @@ const struct mlx5_ib_profile raw_eth_profile = { STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, mlx5_ib_stage_post_ib_reg_umr_init, NULL), + STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, + mlx5_ib_stage_delay_drop_init, + mlx5_ib_stage_delay_drop_cleanup), STAGE_CREATE(MLX5_IB_STAGE_RESTRACK, mlx5_ib_restrack_init, NULL), diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index efa4dc6e7dee..2dfa6f49a6f4 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -237,8 +237,19 @@ enum { #define MLX5_IB_NUM_SNIFFER_FTS 2 #define MLX5_IB_NUM_EGRESS_FTS 1 #define MLX5_IB_NUM_FDB_FTS MLX5_BY_PASS_NUM_REGULAR_PRIOS + +struct mlx5_ib_anchor { + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg_goto_table; + struct mlx5_flow_group *fg_drop; + struct mlx5_flow_handle *rule_goto_table; + struct mlx5_flow_handle *rule_drop; + unsigned int rule_goto_table_ref; +}; + struct mlx5_ib_flow_prio { struct mlx5_flow_table *flow_table; + struct mlx5_ib_anchor anchor; unsigned int refcount; }; @@ -1587,6 +1598,9 @@ static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev) MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass)) return 0; + if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active) + return 0; + return dev->lag_active || (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 && MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity)); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 70ca8ffa9256..78b96bfb4e6a 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1237,6 +1237,9 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev, MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid); MLX5_SET(tisc, tisc, transport_domain, tdn); + if (!mlx5_ib_lag_should_assign_affinity(dev) && + mlx5_lag_is_lacp_owner(dev->mdev)) + MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); if (qp->flags & IB_QP_CREATE_SOURCE_QPN) MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn); diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index f693bc753b6b..1bb7507325bc 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -111,7 +111,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages, ret = pin_user_pages(start_page + got * PAGE_SIZE, num_pages - got, FOLL_LONGTERM | FOLL_WRITE, - p + got, NULL); + p + got); if (ret < 0) { mmap_read_unlock(current->mm); goto bail_release; } diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 2a5cac2658ec..84e0f41e7dfa 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -140,7 +140,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, ret = pin_user_pages(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof(struct page *)), - gup_flags, page_list, NULL); + gup_flags, page_list); if (ret < 0) goto out;
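The qib, usnic and siw hunks (like the radeon_ttm one earlier) drop the trailing NULL because the vmas output parameter was removed from get_user_pages()/pin_user_pages(); callers now pass only the pages array. A sketch of an updated caller (demo_* is illustrative; the caller is assumed to hold mmap_read_lock() as the GUP API requires):

    #include <linux/mm.h>

    static long demo_pin(unsigned long uaddr, unsigned long nr_pages,
                         struct page **pages)
    {
            /* before this change the call took a fifth argument:
             * pin_user_pages(uaddr, nr_pages, flags, pages, NULL);
             */
            return pin_user_pages(uaddr, nr_pages,
                                  FOLL_WRITE | FOLL_LONGTERM, pages);
    }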
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index 20ff0c0c4605..6ca2a05b6a2a 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -113,8 +113,6 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT); - spin_unlock_irqrestore(&cq->cq_lock, flags); - if ((cq->notify == IB_CQ_NEXT_COMP) || (cq->notify == IB_CQ_SOLICITED && solicited)) { cq->notify = 0; @@ -122,6 +120,8 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } + spin_unlock_irqrestore(&cq->cq_lock, flags); + return 0; } diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index a38fab19bed1..cd59666158b1 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -159,6 +159,9 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb) pkt->mask = RXE_GRH_MASK; pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph); + /* remove udp header */ + skb_pull(skb, sizeof(struct udphdr)); + rxe_rcv(skb); return 0; @@ -401,6 +404,9 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt) return -EIO; } + /* remove udp header */ + skb_pull(skb, sizeof(struct udphdr)); + rxe_rcv(skb); return 0; diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 61a2eb77d999..a0f206431cf8 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -176,6 +176,9 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, spin_lock_init(&qp->rq.producer_lock); spin_lock_init(&qp->rq.consumer_lock); + skb_queue_head_init(&qp->req_pkts); + skb_queue_head_init(&qp->resp_pkts); + atomic_set(&qp->ssn, 0); atomic_set(&qp->skb_out, 0); } @@ -234,8 +237,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, qp->req.opcode = -1; qp->comp.opcode = -1; - skb_queue_head_init(&qp->req_pkts); - rxe_init_task(&qp->req.task, qp, rxe_requester); rxe_init_task(&qp->comp.task, qp, rxe_completer); @@ -279,8 +280,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, } } - skb_queue_head_init(&qp->resp_pkts); - rxe_init_task(&qp->resp.task, qp, rxe_responder); qp->resp.opcode = OPCODE_NONE; diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 1da044f6b7d4..ee68306555b9 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -489,8 +489,9 @@ static enum resp_states check_rkey(struct rxe_qp *qp, if (mw->access & IB_ZERO_BASED) qp->resp.offset = mw->addr; - rxe_put(mw); rxe_get(mr); + rxe_put(mw); + mw = NULL; } else { mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE); if (!mr) { diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 4d8f6b8051ff..83093e16b6c6 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -1357,7 +1357,7 @@ static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) if (cleanup_err) rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err); - kfree_rcu(mr); + kfree_rcu_mightsleep(mr); return 0; err_out: diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c index f51ab2ccf151..e6e25f15567d 100644 --- a/drivers/infiniband/sw/siw/siw_mem.c +++ b/drivers/infiniband/sw/siw/siw_mem.c @@ -422,7 +422,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable) umem->page_chunk[i].plist = plist; while (nents) { rv = pin_user_pages(first_page_va, nents, foll_flags, - plist, NULL); +
plist); if (rv < 0) goto out_sem_up; diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index f290cd49698e..92e1e7587af8 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -657,9 +657,13 @@ static int isert_connect_error(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn = cma_id->qp->qp_context; + struct isert_np *isert_np = cma_id->context; ib_drain_qp(isert_conn->qp); + + mutex_lock(&isert_np->mutex); list_del_init(&isert_conn->node); + mutex_unlock(&isert_np->mutex); isert_conn->cm_id = NULL; isert_put_conn(isert_conn); @@ -2431,6 +2435,7 @@ isert_free_np(struct iscsi_np *np) { struct isert_np *isert_np = np->np_context; struct isert_conn *isert_conn, *n; + LIST_HEAD(drop_conn_list); if (isert_np->cm_id) rdma_destroy_id(isert_np->cm_id); @@ -2450,7 +2455,7 @@ isert_free_np(struct iscsi_np *np) node) { isert_info("cleaning isert_conn %p state (%d)\n", isert_conn, isert_conn->state); - isert_connect_release(isert_conn); + list_move_tail(&isert_conn->node, &drop_conn_list); } } @@ -2461,11 +2466,16 @@ isert_free_np(struct iscsi_np *np) node) { isert_info("cleaning isert_conn %p state (%d)\n", isert_conn, isert_conn->state); - isert_connect_release(isert_conn); + list_move_tail(&isert_conn->node, &drop_conn_list); } } mutex_unlock(&isert_np->mutex); + list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) { + list_del_init(&isert_conn->node); + isert_connect_release(isert_conn); + } + np->np_context = NULL; kfree(isert_np); } @@ -2560,8 +2570,6 @@ static void isert_wait_conn(struct iscsit_conn *conn) isert_put_unsol_pending_cmds(conn); isert_wait4cmds(conn); isert_wait4logout(isert_conn); - - queue_work(isert_release_wq, &isert_conn->release_work); } static void isert_free_conn(struct iscsit_conn *conn) diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index edb2e3a25880..cfb50bfe53c3 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -2040,6 +2040,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, return 0; } +/* The caller should do the cleanup in case of error */ static int create_cm(struct rtrs_clt_con *con) { struct rtrs_path *s = con->c.path; @@ -2062,14 +2063,14 @@ static int create_cm(struct rtrs_clt_con *con) err = rdma_set_reuseaddr(cm_id, 1); if (err != 0) { rtrs_err(s, "Set address reuse failed, err: %d\n", err); - goto destroy_cm; + return err; } err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr, (struct sockaddr *)&clt_path->s.dst_addr, RTRS_CONNECT_TIMEOUT_MS); if (err) { rtrs_err(s, "Failed to resolve address, err: %d\n", err); - goto destroy_cm; + return err; } /* * Combine connection status and session events. 
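The isert_free_np() rework above is a pattern worth noting: instead of calling isert_connect_release() while holding isert_np->mutex, connections are moved onto a stack-local drop_conn_list and released only after the mutex is dropped. A minimal self-contained sketch of that idiom using the kernel list API; the conn type and conn_release() helper are hypothetical stand-ins:

#include <linux/list.h>
#include <linux/mutex.h>

struct conn {
        struct list_head node;
};

/* Hypothetical teardown helper that may sleep or take other locks. */
void conn_release(struct conn *c);

static void drain_conns(struct mutex *lock, struct list_head *live)
{
        LIST_HEAD(drop_list);   /* stack-local list needs no locking */
        struct conn *c, *n;

        mutex_lock(lock);
        list_for_each_entry_safe(c, n, live, node)
                list_move_tail(&c->node, &drop_list);
        mutex_unlock(lock);

        /* Heavy teardown now runs without the mutex held. */
        list_for_each_entry_safe(c, n, &drop_list, node) {
                list_del_init(&c->node);
                conn_release(c);
        }
}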
This is needed @@ -2084,29 +2085,15 @@ static int create_cm(struct rtrs_clt_con *con) if (err == 0) err = -ETIMEDOUT; /* Timedout or interrupted */ - goto errr; - } - if (con->cm_err < 0) { - err = con->cm_err; - goto errr; + return err; } - if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) { + if (con->cm_err < 0) + return con->cm_err; + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) /* Device removal */ - err = -ECONNABORTED; - goto errr; - } + return -ECONNABORTED; return 0; - -errr: - stop_cm(con); - mutex_lock(&con->con_mutex); - destroy_con_cq_qp(con); - mutex_unlock(&con->con_mutex); -destroy_cm: - destroy_cm(con); - - return err; } static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path) @@ -2334,7 +2321,7 @@ static void rtrs_clt_close_work(struct work_struct *work) static int init_conns(struct rtrs_clt_path *clt_path) { unsigned int cid; - int err; + int err, i; /* * On every new session connections increase reconnect counter @@ -2350,10 +2337,8 @@ static int init_conns(struct rtrs_clt_path *clt_path) goto destroy; err = create_cm(to_clt_con(clt_path->s.con[cid])); - if (err) { - destroy_con(to_clt_con(clt_path->s.con[cid])); + if (err) goto destroy; - } } err = alloc_path_reqs(clt_path); if (err) @@ -2364,15 +2349,21 @@ static int init_conns(struct rtrs_clt_path *clt_path) return 0; destroy: - while (cid--) { - struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]); + /* Make sure we do the cleanup in the order they are created */ + for (i = 0; i <= cid; i++) { + struct rtrs_clt_con *con; - stop_cm(con); + if (!clt_path->s.con[i]) + break; - mutex_lock(&con->con_mutex); - destroy_con_cq_qp(con); - mutex_unlock(&con->con_mutex); - destroy_cm(con); + con = to_clt_con(clt_path->s.con[i]); + if (con->c.cm_id) { + stop_cm(con); + mutex_lock(&con->con_mutex); + destroy_con_cq_qp(con); + mutex_unlock(&con->con_mutex); + destroy_cm(con); + } destroy_con(con); } /* diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c index 4bf9d868cc52..3696f367ff51 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs.c @@ -37,8 +37,10 @@ struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask, goto err; iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir); - if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) + if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) { + kfree(iu->buf); goto err; + } iu->cqe.done = done; iu->size = size; diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c index 577c75c83e25..bb3c6072fc82 100644 --- a/drivers/input/touchscreen/sun4i-ts.c +++ b/drivers/input/touchscreen/sun4i-ts.c @@ -22,7 +22,7 @@ * in the kernel). So this driver offers straight forward, reliable single * touch functionality only. * - * s.a. A20 User Manual "1.15 TP" (Documentation/arm/sunxi.rst) + * s.a. 
A20 User Manual "1.15 TP" (Documentation/arch/arm/sunxi.rst) * (looks like the description in the A20 User Manual v1.3 is better * than the one in the A10 User Manual v.1.5) */ diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 4d800601e8ec..2b12b583ef4b 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -152,6 +152,7 @@ config IOMMU_DMA select IOMMU_IOVA select IRQ_MSI_IOMMU select NEED_SG_DMA_LENGTH + select NEED_SG_DMA_FLAGS if SWIOTLB # Shared Virtual Addressing config IOMMU_SVA diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 2ddbda3a4374..ab8aa8f77cc4 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -986,8 +986,13 @@ union irte_ga_hi { }; struct irte_ga { - union irte_ga_lo lo; - union irte_ga_hi hi; + union { + struct { + union irte_ga_lo lo; + union irte_ga_hi hi; + }; + u128 irte; + }; }; struct irq_2_irte { diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index dc1ec6849775..9ea40960978b 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2078,10 +2078,6 @@ static struct protection_domain *protection_domain_alloc(unsigned int type) int mode = DEFAULT_PGTABLE_LEVEL; int ret; - domain = kzalloc(sizeof(*domain), GFP_KERNEL); - if (!domain) - return NULL; - /* * Force IOMMU v1 page table when iommu=pt and * when allocating domain for pass-through devices. @@ -2097,6 +2093,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type) return NULL; } + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) + return NULL; + switch (pgtable) { case AMD_IOMMU_V1: ret = protection_domain_init_v1(domain, mode); @@ -3023,10 +3023,10 @@ out: static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, struct irte_ga *irte, struct amd_ir_data *data) { - bool ret; struct irq_remap_table *table; - unsigned long flags; struct irte_ga *entry; + unsigned long flags; + u128 old; table = get_irq_table(iommu, devid); if (!table) @@ -3037,16 +3037,14 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, entry = (struct irte_ga *)table->table; entry = &entry[index]; - ret = cmpxchg_double(&entry->lo.val, &entry->hi.val, - entry->lo.val, entry->hi.val, - irte->lo.val, irte->hi.val); /* * We use cmpxchg16 to atomically update the 128-bit IRTE, * and it cannot be updated by the hardware or other processors * behind us, so the return value of cmpxchg16 should be the * same as the old value. 
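The struct irte_ga change above wraps the existing lo/hi halves in an anonymous union with a single u128 member, so modify_irte_ga() can update the whole 128-bit entry with one try_cmpxchg128() instead of the removed cmpxchg_double(). The same layout trick can be sketched in plain C with compiler builtins; this is only an illustration of the pattern (x86-64 needs -mcx16 for a native 16-byte cmpxchg), not the kernel code itself:

#include <stdbool.h>
#include <stdint.h>

/* Two 64-bit halves aliased with one 128-bit word, as in struct irte_ga. */
struct entry128 {
        union {
                struct {
                        uint64_t lo;
                        uint64_t hi;
                };
                unsigned __int128 whole;  /* 16-byte aligned by its type */
        };
};

/* Replace the whole entry atomically; false means it changed under us. */
static bool entry128_update(struct entry128 *e, struct entry128 val)
{
        unsigned __int128 old = e->whole;

        return __atomic_compare_exchange_n(&e->whole, &old, val.whole,
                                           false, __ATOMIC_SEQ_CST,
                                           __ATOMIC_SEQ_CST);
}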
*/ - WARN_ON(!ret); + old = entry->irte; + WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte)); if (data) data->ref = entry; diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 7a9f0b0bddbd..e86ae462cade 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -520,9 +520,38 @@ static bool dev_is_untrusted(struct device *dev) return dev_is_pci(dev) && to_pci_dev(dev)->untrusted; } -static bool dev_use_swiotlb(struct device *dev) +static bool dev_use_swiotlb(struct device *dev, size_t size, + enum dma_data_direction dir) { - return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev); + return IS_ENABLED(CONFIG_SWIOTLB) && + (dev_is_untrusted(dev) || + dma_kmalloc_needs_bounce(dev, size, dir)); +} + +static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + struct scatterlist *s; + int i; + + if (!IS_ENABLED(CONFIG_SWIOTLB)) + return false; + + if (dev_is_untrusted(dev)) + return true; + + /* + * If kmalloc() buffers are not DMA-safe for this device and + * direction, check the individual lengths in the sg list. If any + * element is deemed unsafe, use the swiotlb for bouncing. + */ + if (!dma_kmalloc_safe(dev, dir)) { + for_each_sg(sg, s, nents, i) + if (!dma_kmalloc_size_aligned(s->length)) + return true; + } + + return false; } /** @@ -922,7 +951,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev, { phys_addr_t phys; - if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev)) + if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir)) return; phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); @@ -938,7 +967,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev, { phys_addr_t phys; - if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev)) + if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir)) return; phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); @@ -956,7 +985,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg; int i; - if (dev_use_swiotlb(dev)) + if (sg_dma_is_swiotlb(sgl)) for_each_sg(sgl, sg, nelems, i) iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg), sg->length, dir); @@ -972,7 +1001,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg; int i; - if (dev_use_swiotlb(dev)) + if (sg_dma_is_swiotlb(sgl)) for_each_sg(sgl, sg, nelems, i) iommu_dma_sync_single_for_device(dev, sg_dma_address(sg), @@ -998,7 +1027,8 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, * If both the physical buffer start address and size are * page aligned, we don't need to use a bounce page. 
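dev_use_sg_swiotlb() above encodes the new bounce policy: untrusted devices always bounce, and otherwise a scatterlist is bounced only if kmalloc() buffers are not DMA-safe for this device/direction and some element is too short to be cacheline-isolated. A condensed restatement of that predicate, taking dma_kmalloc_safe() and dma_kmalloc_size_aligned() as given from the DMA core:

static bool sg_needs_bounce(struct device *dev, struct scatterlist *sgl,
                            int nents, enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        if (!IS_ENABLED(CONFIG_SWIOTLB))
                return false;

        /* Untrusted devices bounce unconditionally. */
        if (dev_is_untrusted(dev))
                return true;

        /* Coherent device or safe direction: kmalloc buffers are fine. */
        if (dma_kmalloc_safe(dev, dir))
                return false;

        /* A short element may share its cacheline with unrelated data. */
        for_each_sg(sgl, s, nents, i)
                if (!dma_kmalloc_size_aligned(s->length))
                        return true;

        return false;
}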
*/ - if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) { + if (dev_use_swiotlb(dev, size, dir) && + iova_offset(iovad, phys | size)) { void *padding_start; size_t padding_size, aligned_size; @@ -1080,7 +1110,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents, sg_dma_address(s) = DMA_MAPPING_ERROR; sg_dma_len(s) = 0; - if (sg_is_dma_bus_address(s)) { + if (sg_dma_is_bus_address(s)) { if (i > 0) cur = sg_next(cur); @@ -1136,7 +1166,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents) int i; for_each_sg(sg, s, nents, i) { - if (sg_is_dma_bus_address(s)) { + if (sg_dma_is_bus_address(s)) { sg_dma_unmark_bus_address(s); } else { if (sg_dma_address(s) != DMA_MAPPING_ERROR) @@ -1166,6 +1196,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg, struct scatterlist *s; int i; + sg_dma_mark_swiotlb(sg); + for_each_sg(sg, s, nents, i) { sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s), s->offset, s->length, dir, attrs); @@ -1210,7 +1242,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, goto out; } - if (dev_use_swiotlb(dev)) + if (dev_use_sg_swiotlb(dev, sg, nents, dir)) return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs); if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) @@ -1315,7 +1347,7 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, struct scatterlist *tmp; int i; - if (dev_use_swiotlb(dev)) { + if (sg_dma_is_swiotlb(sg)) { iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs); return; } @@ -1329,7 +1361,7 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, * just have to be determined. */ for_each_sg(sg, tmp, nents, i) { - if (sg_is_dma_bus_address(tmp)) { + if (sg_dma_is_bus_address(tmp)) { sg_dma_unmark_bus_address(tmp); continue; } @@ -1343,7 +1375,7 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, nents -= i; for_each_sg(tmp, tmp, nents, i) { - if (sg_is_dma_bus_address(tmp)) { + if (sg_dma_is_bus_address(tmp)) { sg_dma_unmark_bus_address(tmp); continue; } diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index a1b987335b31..08f56326e2f8 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -175,18 +175,14 @@ static int modify_irte(struct irq_2_iommu *irq_iommu, irte = &iommu->ir_table->base[index]; if ((irte->pst == 1) || (irte_modified->pst == 1)) { - bool ret; - - ret = cmpxchg_double(&irte->low, &irte->high, - irte->low, irte->high, - irte_modified->low, irte_modified->high); /* * We use cmpxchg16 to atomically update the 128-bit IRTE, * and it cannot be updated by the hardware or other processors * behind us, so the return value of cmpxchg16 should be the * same as the old value. 
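Also notable above: iommu_dma_map_sg_swiotlb() now calls sg_dma_mark_swiotlb() once at map time, and the sync/unmap paths test sg_dma_is_swiotlb() on the list instead of re-deriving the bounce decision. A skeleton of that map/unmap split, with hypothetical my_* names and the actual bounce bookkeeping elided:

static int my_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                     enum dma_data_direction dir)
{
        /* Record once that this mapping went through the bounce buffer. */
        sg_dma_mark_swiotlb(sgl);

        /* ... bounce-map each element ... */
        return nents;
}

static void my_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
                        enum dma_data_direction dir)
{
        if (sg_dma_is_swiotlb(sgl)) {
                /* ... tear down the bounce buffers ... */
                return;
        }

        /* ... normal IOMMU unmap path ... */
}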
*/ - WARN_ON(!ret); + u128 old = irte->irte; + WARN_ON(!try_cmpxchg128(&irte->irte, &old, irte_modified->irte)); } else { WRITE_ONCE(irte->low, irte_modified->low); WRITE_ONCE(irte->high, irte_modified->high); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index f1dcfa3f1a1b..eb620552967b 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2567,7 +2567,7 @@ ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, len = 0; } - if (sg_is_dma_bus_address(sg)) + if (sg_dma_is_bus_address(sg)) goto next; if (len) { diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c index 3c47846cc5ef..412ca96be128 100644 --- a/drivers/iommu/iommufd/pages.c +++ b/drivers/iommu/iommufd/pages.c @@ -786,7 +786,7 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user, user->locked = 1; } rc = pin_user_pages_remote(pages->source_mm, uptr, npages, - user->gup_flags, user->upages, NULL, + user->gup_flags, user->upages, &user->locked); } if (rc <= 0) { @@ -1799,7 +1799,7 @@ static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index, rc = pin_user_pages_remote( pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE), 1, (flags & IOMMUFD_ACCESS_RW_WRITE) ? FOLL_WRITE : 0, &page, - NULL, NULL); + NULL); mmap_read_unlock(pages->source_mm); if (rc != 1) { if (WARN_ON(rc >= 0)) diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c index 77ebe7e47e0e..e731e0784f7e 100644 --- a/drivers/irqchip/irq-clps711x.c +++ b/drivers/irqchip/irq-clps711x.c @@ -212,12 +212,6 @@ out_kfree: return err; } -void __init clps711x_intc_init(phys_addr_t base, resource_size_t size) -{ - BUG_ON(_clps711x_intc_init(NULL, base, size)); -} - -#ifdef CONFIG_IRQCHIP static int __init clps711x_intc_init_dt(struct device_node *np, struct device_node *parent) { @@ -231,4 +225,3 @@ static int __init clps711x_intc_init_dt(struct device_node *np, return _clps711x_intc_init(np, res.start, resource_size(&res)); } IRQCHIP_DECLARE(clps711x, "cirrus,ep7209-intc", clps711x_intc_init_dt); -#endif diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c index 46a3aa60e50e..359efc1d1be7 100644 --- a/drivers/irqchip/irq-ftintc010.c +++ b/drivers/irqchip/irq-ftintc010.c @@ -125,7 +125,7 @@ static struct irq_chip ft010_irq_chip = { /* Local static for the IRQ entry call */ static struct ft010_irq_data firq; -asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs) +static asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs) { struct ft010_irq_data *f = &firq; int irq; @@ -162,7 +162,7 @@ static const struct irq_domain_ops ft010_irqdomain_ops = { .xlate = irq_domain_xlate_onetwocell, }; -int __init ft010_of_init_irq(struct device_node *node, +static int __init ft010_of_init_irq(struct device_node *node, struct device_node *parent) { struct ft010_irq_data *f = &firq; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 0ec2b1e1df75..1994541eaef8 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -3585,6 +3585,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, irqd = irq_get_irq_data(virq + i); irqd_set_single_target(irqd); irqd_set_affinity_on_activate(irqd); + irqd_set_resend_when_in_progress(irqd); pr_debug("ID:%d pID:%d vID:%d\n", (int)(hwirq + i - its_dev->event_map.lpi_base), (int)(hwirq + i), virq + i); @@ -4523,6 +4524,7 @@ static int its_vpe_irq_domain_alloc(struct 
irq_domain *domain, unsigned int virq irq_domain_set_hwirq_and_chip(domain, virq + i, i, irqchip, vm->vpes[i]); set_bit(i, bitmap); + irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i)); } if (err) { diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index a605aa79435a..0c6c1af9a5b7 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -40,6 +40,7 @@ #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) #define FLAGS_WORKAROUND_MTK_GICR_SAVE (1ULL << 2) +#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 3) #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) @@ -656,10 +657,16 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) return 0; } -static u64 gic_mpidr_to_affinity(unsigned long mpidr) +static u64 gic_cpu_to_affinity(int cpu) { + u64 mpidr = cpu_logical_map(cpu); u64 aff; + /* ASR8601 needs to have its affinities shifted down... */ + if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001)) + mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) | + (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8)); + aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | @@ -914,7 +921,7 @@ static void __init gic_dist_init(void) * Set all global interrupts to the boot CPU only. ARE must be * enabled. */ - affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); + affinity = gic_cpu_to_affinity(smp_processor_id()); for (i = 32; i < GIC_LINE_NR; i++) gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); @@ -963,7 +970,7 @@ static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) { - unsigned long mpidr = cpu_logical_map(smp_processor_id()); + unsigned long mpidr; u64 typer; u32 aff; @@ -971,6 +978,8 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) * Convert affinity to a 32bit value that can be matched to * GICR_TYPER bits [63:32]. 
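gic_cpu_to_affinity() above funnels every MPIDR-to-affinity conversion through one helper, so the ASR8601 erratum (affinity fields reported one level too high) is corrected in a single place. A standalone sketch of the bit manipulation, with the quirk reduced to a bool and the MPIDR field layout written out explicitly:

#include <stdbool.h>
#include <stdint.h>

/* Aff0..Aff2 live in bits 7:0, 15:8, 23:16; Aff3 in bits 39:32. */
#define AFF_SHIFT(l)    ((l) == 3 ? 32 : (l) * 8)
#define AFF_LEVEL(m, l) (((m) >> AFF_SHIFT(l)) & 0xffULL)

static uint64_t cpu_to_affinity(uint64_t mpidr, bool asr8601_quirk)
{
        /* Shift the reported affinities down one level for the erratum. */
        if (asr8601_quirk)
                mpidr = AFF_LEVEL(mpidr, 1) | (AFF_LEVEL(mpidr, 2) << 8);

        return (AFF_LEVEL(mpidr, 3) << 32) |
               (AFF_LEVEL(mpidr, 2) << 16) |
               (AFF_LEVEL(mpidr, 1) << 8) |
                AFF_LEVEL(mpidr, 0);
}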
*/ + mpidr = gic_cpu_to_affinity(smp_processor_id()); + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | @@ -1084,7 +1093,7 @@ static inline bool gic_dist_security_disabled(void) static void gic_cpu_sys_reg_init(void) { int i, cpu = smp_processor_id(); - u64 mpidr = cpu_logical_map(cpu); + u64 mpidr = gic_cpu_to_affinity(cpu); u64 need_rss = MPIDR_RS(mpidr); bool group0; u32 pribits; @@ -1183,11 +1192,11 @@ static void gic_cpu_sys_reg_init(void) for_each_online_cpu(i) { bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu); - need_rss |= MPIDR_RS(cpu_logical_map(i)); + need_rss |= MPIDR_RS(gic_cpu_to_affinity(i)); if (need_rss && (!have_rss)) pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", cpu, (unsigned long)mpidr, - i, (unsigned long)cpu_logical_map(i)); + i, (unsigned long)gic_cpu_to_affinity(i)); } /** @@ -1263,9 +1272,11 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, unsigned long cluster_id) { int next_cpu, cpu = *base_cpu; - unsigned long mpidr = cpu_logical_map(cpu); + unsigned long mpidr; u16 tlist = 0; + mpidr = gic_cpu_to_affinity(cpu); + while (cpu < nr_cpu_ids) { tlist |= 1 << (mpidr & 0xf); @@ -1274,7 +1285,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, goto out; cpu = next_cpu; - mpidr = cpu_logical_map(cpu); + mpidr = gic_cpu_to_affinity(cpu); if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { cpu--; @@ -1319,7 +1330,7 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) dsb(ishst); for_each_cpu(cpu, mask) { - u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); + u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu)); u16 tlist; tlist = gic_compute_target_list(&cpu, mask, cluster_id); @@ -1377,7 +1388,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, offset = convert_offset_index(d, GICD_IROUTER, &index); reg = gic_dist_base(d) + offset + (index * 8); - val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); + val = gic_cpu_to_affinity(cpu); gic_write_irouter(val, reg); @@ -1796,6 +1807,15 @@ static bool gic_enable_quirk_nvidia_t241(void *data) return true; } +static bool gic_enable_quirk_asr8601(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001; + + return true; +} + static const struct gic_quirk gic_quirks[] = { { .desc = "GICv3: Qualcomm MSM8996 broken firmware", @@ -1803,6 +1823,11 @@ static const struct gic_quirk gic_quirks[] = { .init = gic_enable_quirk_msm8996, }, { + .desc = "GICv3: ASR erratum 8601001", + .compatible = "asr,asr8601-gic-v3", + .init = gic_enable_quirk_asr8601, + }, + { .desc = "GICv3: Mediatek Chromebook GICR save problem", .property = "mediatek,broken-save-restore-fw", .init = gic_enable_quirk_mtk_gicr, diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c index 5f47d8ee4ae3..b9dcc8e78c75 100644 --- a/drivers/irqchip/irq-jcore-aic.c +++ b/drivers/irqchip/irq-jcore-aic.c @@ -68,6 +68,7 @@ static int __init aic_irq_of_init(struct device_node *node, unsigned min_irq = JCORE_AIC2_MIN_HWIRQ; unsigned dom_sz = JCORE_AIC_MAX_HWIRQ+1; struct irq_domain *domain; + int ret; pr_info("Initializing J-Core AIC\n"); @@ -100,6 +101,12 @@ static int __init aic_irq_of_init(struct device_node *node, jcore_aic.irq_unmask = noop; jcore_aic.name = "AIC"; + ret = irq_alloc_descs(-1, min_irq, dom_sz - min_irq, + of_node_to_nid(node)); + + if (ret < 0) + return 
ret; + domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq, &jcore_aic_irqdomain_ops, &jcore_aic); diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 71ef19f77a5a..92d8aa28bdf5 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -36,6 +36,7 @@ static int nr_pics; struct eiointc_priv { u32 node; + u32 vec_count; nodemask_t node_map; cpumask_t cpuspan_map; struct fwnode_handle *domain_handle; @@ -153,18 +154,18 @@ static int eiointc_router_init(unsigned int cpu) if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) { eiointc_enable(); - for (i = 0; i < VEC_COUNT / 32; i++) { + for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) { data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2))); iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4); } - for (i = 0; i < VEC_COUNT / 32 / 4; i++) { + for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) { bit = BIT(1 + index); /* Route to IP[1 + index] */ data = bit | (bit << 8) | (bit << 16) | (bit << 24); iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4); } - for (i = 0; i < VEC_COUNT / 4; i++) { + for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) { /* Route to Node-0 Core-0 */ if (index == 0) bit = BIT(cpu_logical_map(0)); @@ -175,7 +176,7 @@ static int eiointc_router_init(unsigned int cpu) iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4); } - for (i = 0; i < VEC_COUNT / 32; i++) { + for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) { data = 0xffffffff; iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4); iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4); @@ -195,7 +196,7 @@ static void eiointc_irq_dispatch(struct irq_desc *desc) chained_irq_enter(chip, desc); - for (i = 0; i < VEC_REG_COUNT; i++) { + for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) { pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3)); iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3)); while (pending) { @@ -310,11 +311,11 @@ static void eiointc_resume(void) eiointc_router_init(0); for (i = 0; i < nr_pics; i++) { - for (j = 0; j < VEC_COUNT; j++) { + for (j = 0; j < eiointc_priv[0]->vec_count; j++) { desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j); if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) { raw_spin_lock(&desc->lock); - irq_data = &desc->irq_data; + irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc)); eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0); raw_spin_unlock(&desc->lock); } @@ -375,11 +376,47 @@ static int __init acpi_cascade_irqdomain_init(void) return 0; } +static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, + u64 node_map) +{ + int i; + + node_map = node_map ? 
node_map : -1ULL; + for_each_possible_cpu(i) { + if (node_map & (1ULL << (cpu_to_eio_node(i)))) { + node_set(cpu_to_eio_node(i), priv->node_map); + cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, + cpumask_of(i)); + } + } + + priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, + priv->vec_count, + &eiointc_domain_ops, + priv); + if (!priv->eiointc_domain) { + pr_err("loongson-extioi: cannot add IRQ domain\n"); + return -ENOMEM; + } + + eiointc_priv[nr_pics++] = priv; + eiointc_router_init(0); + irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv); + + if (nr_pics == 1) { + register_syscore_ops(&eiointc_syscore_ops); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING, + "irqchip/loongarch/intc:starting", + eiointc_router_init, NULL); + } + + return 0; +} + int __init eiointc_acpi_init(struct irq_domain *parent, struct acpi_madt_eio_pic *acpi_eiointc) { - int i, ret, parent_irq; - unsigned long node_map; + int parent_irq, ret; struct eiointc_priv *priv; int node; @@ -394,37 +431,14 @@ int __init eiointc_acpi_init(struct irq_domain *parent, goto out_free_priv; } + priv->vec_count = VEC_COUNT; priv->node = acpi_eiointc->node; - node_map = acpi_eiointc->node_map ? : -1ULL; - - for_each_possible_cpu(i) { - if (node_map & (1ULL << cpu_to_eio_node(i))) { - node_set(cpu_to_eio_node(i), priv->node_map); - cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i)); - } - } - - /* Setup IRQ domain */ - priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT, - &eiointc_domain_ops, priv); - if (!priv->eiointc_domain) { - pr_err("loongson-eiointc: cannot add IRQ domain\n"); - goto out_free_handle; - } - - eiointc_priv[nr_pics++] = priv; - - eiointc_router_init(0); parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade); - irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv); - if (nr_pics == 1) { - register_syscore_ops(&eiointc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING, - "irqchip/loongarch/intc:starting", - eiointc_router_init, NULL); - } + ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map); + if (ret < 0) + goto out_free_handle; if (cpu_has_flatmode) node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE); @@ -432,7 +446,10 @@ int __init eiointc_acpi_init(struct irq_domain *parent, node = acpi_eiointc->node; acpi_set_vec_parent(node, priv->eiointc_domain, pch_group); acpi_set_vec_parent(node, priv->eiointc_domain, msi_group); + ret = acpi_cascade_irqdomain_init(); + if (ret < 0) + goto out_free_handle; return ret; @@ -444,3 +461,49 @@ out_free_priv: return -ENOMEM; } + +static int __init eiointc_of_init(struct device_node *of_node, + struct device_node *parent) +{ + int parent_irq, ret; + struct eiointc_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + parent_irq = irq_of_parse_and_map(of_node, 0); + if (parent_irq <= 0) { + ret = -ENODEV; + goto out_free_priv; + } + + ret = irq_set_handler_data(parent_irq, priv); + if (ret < 0) + goto out_free_priv; + + /* + * In particular, the number of devices supported by the LS2K0500 + * extended I/O interrupt vector is 128. 
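The eiointc_init() factoring above is the classic shape for supporting both ACPI and devicetree probing: the two firmware entry points only discover their parameters (parent IRQ, vector count) and then share one bring-up routine. Reduced to a skeleton with hypothetical ctrl_* names:

struct ctrl_priv {
        unsigned int vec_count; /* differs per SoC variant */
};

/* Everything that does not care whether ACPI or DT described us. */
static int ctrl_init_common(struct ctrl_priv *priv, int parent_irq)
{
        /* size the irq domain by priv->vec_count, hook the dispatcher... */
        return 0;
}

static int ctrl_probe_acpi(struct ctrl_priv *priv, int parent_irq)
{
        priv->vec_count = 256;  /* fixed for the ACPI-described parts */
        return ctrl_init_common(priv, parent_irq);
}

static int ctrl_probe_of(struct ctrl_priv *priv, int parent_irq,
                         bool small_variant)
{
        /* e.g. chosen via of_device_is_compatible() in the real driver */
        priv->vec_count = small_variant ? 128 : 256;
        return ctrl_init_common(priv, parent_irq);
}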
+ */ + if (of_device_is_compatible(of_node, "loongson,ls2k0500-eiointc")) + priv->vec_count = 128; + else + priv->vec_count = VEC_COUNT; + + priv->node = 0; + priv->domain_handle = of_node_to_fwnode(of_node); + + ret = eiointc_init(priv, parent_irq, 0); + if (ret < 0) + goto out_free_priv; + + return 0; + +out_free_priv: + kfree(priv); + return ret; +} + +IRQCHIP_DECLARE(loongson_ls2k0500_eiointc, "loongson,ls2k0500-eiointc", eiointc_of_init); +IRQCHIP_DECLARE(loongson_ls2k2000_eiointc, "loongson,ls2k2000-eiointc", eiointc_of_init); diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index 8d00a9ad5b00..e4b33aed1c97 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -32,6 +32,10 @@ #define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04) #define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08) #define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c) +/* + * LIOINTC_REG_INTC_POL register is only valid for Loongson-2K series, and + * Loongson-3 series behave as noops. + */ #define LIOINTC_REG_INTC_POL (LIOINTC_INTC_CHIP_START + 0x10) #define LIOINTC_REG_INTC_EDGE (LIOINTC_INTC_CHIP_START + 0x14) @@ -116,19 +120,19 @@ static int liointc_set_type(struct irq_data *data, unsigned int type) switch (type) { case IRQ_TYPE_LEVEL_HIGH: liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false); - liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true); + liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false); break; case IRQ_TYPE_LEVEL_LOW: liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false); - liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false); + liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true); break; case IRQ_TYPE_EDGE_RISING: liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true); - liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true); + liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false); break; case IRQ_TYPE_EDGE_FALLING: liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true); - liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false); + liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true); break; default: irq_gc_unlock_irqrestore(gc, flags); @@ -291,6 +295,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision, ct->chip.irq_mask = irq_gc_mask_disable_reg; ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; ct->chip.irq_set_type = liointc_set_type; + ct->chip.flags = IRQCHIP_SKIP_SET_WAKE; gc->mask_cache = 0; priv->gc = gc; diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index e5fe4d50be05..93a71f66efeb 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -164,7 +164,7 @@ static int pch_pic_domain_translate(struct irq_domain *d, if (fwspec->param_count < 2) return -EINVAL; - *hwirq = fwspec->param[0] + priv->ht_vec_base; + *hwirq = fwspec->param[0]; *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; } else { if (fwspec->param_count < 1) @@ -196,7 +196,7 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.param_count = 1; - parent_fwspec.param[0] = hwirq; + parent_fwspec.param[0] = hwirq + priv->ht_vec_base; err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec); if (err) @@ -401,14 +401,12 @@ static int __init acpi_cascade_irqdomain_init(void) int __init pch_pic_acpi_init(struct irq_domain *parent, struct acpi_madt_bio_pic *acpi_pchpic) { - int ret, vec_base; + int 
ret; struct fwnode_handle *domain_handle; if (find_pch_pic(acpi_pchpic->gsi_base) >= 0) return 0; - vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ; - domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address); if (!domain_handle) { pr_err("Unable to allocate domain handle\n"); @@ -416,7 +414,7 @@ int __init pch_pic_acpi_init(struct irq_domain *parent, } ret = pch_pic_init(acpi_pchpic->address, acpi_pchpic->size, - vec_base, parent, domain_handle, acpi_pchpic->gsi_base); + 0, parent, domain_handle, acpi_pchpic->gsi_base); if (ret < 0) { irq_domain_free_fwnode(domain_handle); diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 83455ca72439..25cf4f80e767 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c @@ -244,132 +244,6 @@ static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs) generic_handle_domain_irq(icu_data[0].domain, hwirq); } -/* MMP (ARMv5) */ -void __init icu_init_irq(void) -{ - int irq; - - max_icu_nr = 1; - mmp_icu_base = ioremap(0xd4282000, 0x1000); - icu_data[0].conf_enable = mmp_conf.conf_enable; - icu_data[0].conf_disable = mmp_conf.conf_disable; - icu_data[0].conf_mask = mmp_conf.conf_mask; - icu_data[0].nr_irqs = 64; - icu_data[0].virq_base = 0; - icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0, - &irq_domain_simple_ops, - &icu_data[0]); - for (irq = 0; irq < 64; irq++) { - icu_mask_irq(irq_get_irq_data(irq)); - irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); - } - irq_set_default_host(icu_data[0].domain); - set_handle_irq(mmp_handle_irq); -} - -/* MMP2 (ARMv7) */ -void __init mmp2_init_icu(void) -{ - int irq, end; - - max_icu_nr = 8; - mmp_icu_base = ioremap(0xd4282000, 0x1000); - icu_data[0].conf_enable = mmp2_conf.conf_enable; - icu_data[0].conf_disable = mmp2_conf.conf_disable; - icu_data[0].conf_mask = mmp2_conf.conf_mask; - icu_data[0].nr_irqs = 64; - icu_data[0].virq_base = 0; - icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0, - &irq_domain_simple_ops, - &icu_data[0]); - icu_data[1].reg_status = mmp_icu_base + 0x150; - icu_data[1].reg_mask = mmp_icu_base + 0x168; - icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base + - icu_data[0].nr_irqs; - icu_data[1].clr_mfp_hwirq = 1; /* offset to IRQ_MMP2_PMIC_BASE */ - icu_data[1].nr_irqs = 2; - icu_data[1].cascade_irq = 4; - icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs; - icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs, - icu_data[1].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[1]); - icu_data[2].reg_status = mmp_icu_base + 0x154; - icu_data[2].reg_mask = mmp_icu_base + 0x16c; - icu_data[2].nr_irqs = 2; - icu_data[2].cascade_irq = 5; - icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs; - icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs, - icu_data[2].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[2]); - icu_data[3].reg_status = mmp_icu_base + 0x180; - icu_data[3].reg_mask = mmp_icu_base + 0x17c; - icu_data[3].nr_irqs = 3; - icu_data[3].cascade_irq = 9; - icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs; - icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs, - icu_data[3].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[3]); - icu_data[4].reg_status = mmp_icu_base + 0x158; - icu_data[4].reg_mask = mmp_icu_base + 0x170; - icu_data[4].nr_irqs = 5; - icu_data[4].cascade_irq = 17; - icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs; - icu_data[4].domain = 
irq_domain_add_legacy(NULL, icu_data[4].nr_irqs, - icu_data[4].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[4]); - icu_data[5].reg_status = mmp_icu_base + 0x15c; - icu_data[5].reg_mask = mmp_icu_base + 0x174; - icu_data[5].nr_irqs = 15; - icu_data[5].cascade_irq = 35; - icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs; - icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs, - icu_data[5].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[5]); - icu_data[6].reg_status = mmp_icu_base + 0x160; - icu_data[6].reg_mask = mmp_icu_base + 0x178; - icu_data[6].nr_irqs = 2; - icu_data[6].cascade_irq = 51; - icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs; - icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs, - icu_data[6].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[6]); - icu_data[7].reg_status = mmp_icu_base + 0x188; - icu_data[7].reg_mask = mmp_icu_base + 0x184; - icu_data[7].nr_irqs = 2; - icu_data[7].cascade_irq = 55; - icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs; - icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs, - icu_data[7].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[7]); - end = icu_data[7].virq_base + icu_data[7].nr_irqs; - for (irq = 0; irq < end; irq++) { - icu_mask_irq(irq_get_irq_data(irq)); - if (irq == icu_data[1].cascade_irq || - irq == icu_data[2].cascade_irq || - irq == icu_data[3].cascade_irq || - irq == icu_data[4].cascade_irq || - irq == icu_data[5].cascade_irq || - irq == icu_data[6].cascade_irq || - irq == icu_data[7].cascade_irq) { - irq_set_chip(irq, &icu_irq_chip); - irq_set_chained_handler(irq, icu_mux_irq_demux); - } else { - irq_set_chip_and_handler(irq, &icu_irq_chip, - handle_level_irq); - } - } - irq_set_default_host(icu_data[0].domain); - set_handle_irq(mmp2_handle_irq); -} - -#ifdef CONFIG_OF static int __init mmp_init_bases(struct device_node *node) { int ret, nr_irqs, irq, i = 0; @@ -548,4 +422,3 @@ err: return -EINVAL; } IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init); -#endif diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index 55cb6b5a686e..be9680645545 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c @@ -201,6 +201,7 @@ static int __init icoll_of_init(struct device_node *np, stmp_reset_block(icoll_priv.ctrl); icoll_add_domain(np, ICOLL_NUM_IRQS); + set_handle_irq(icoll_handle_irq); return 0; } diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 6a3f7498ea8e..b5fa76ce5046 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -173,6 +173,16 @@ static struct irq_chip stm32_exti_h_chip_direct; #define EXTI_INVALID_IRQ U8_MAX #define STM32MP1_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp1_exti_banks) * IRQS_PER_BANK) +/* + * Use some intentionally tricky logic here to initialize the whole array to + * EXTI_INVALID_IRQ, but then override certain fields, requiring us to indicate + * that we "know" that there are overrides in this structure, and we'll need to + * disable that warning from W=1 builds. + */ +__diag_push(); +__diag_ignore_all("-Woverride-init", + "logic to initialize all and then override some is OK"); + static const u8 stm32mp1_desc_irq[] = { /* default value */ [0 ... 
(STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ, @@ -208,6 +218,7 @@ static const u8 stm32mp1_desc_irq[] = { [31] = 53, [32] = 82, [33] = 83, + [46] = 151, [47] = 93, [48] = 138, [50] = 139, @@ -266,6 +277,8 @@ static const u8 stm32mp13_desc_irq[] = { [70] = 98, }; +__diag_pop(); + static const struct stm32_exti_drv_data stm32mp1_drv_data = { .exti_banks = stm32mp1_exti_banks, .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks), diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index aebb7ef10e63..5a79bb3c272f 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -275,7 +275,7 @@ struct bcache_device { int (*cache_miss)(struct btree *b, struct search *s, struct bio *bio, unsigned int sectors); - int (*ioctl)(struct bcache_device *d, fmode_t mode, + int (*ioctl)(struct bcache_device *d, blk_mode_t mode, unsigned int cmd, unsigned long arg); }; @@ -1004,11 +1004,11 @@ extern struct workqueue_struct *bch_flush_wq; extern struct mutex bch_register_lock; extern struct list_head bch_cache_sets; -extern struct kobj_type bch_cached_dev_ktype; -extern struct kobj_type bch_flash_dev_ktype; -extern struct kobj_type bch_cache_set_ktype; -extern struct kobj_type bch_cache_set_internal_ktype; -extern struct kobj_type bch_cache_ktype; +extern const struct kobj_type bch_cached_dev_ktype; +extern const struct kobj_type bch_flash_dev_ktype; +extern const struct kobj_type bch_cache_set_ktype; +extern const struct kobj_type bch_cache_set_internal_ktype; +extern const struct kobj_type bch_cache_ktype; void bch_cached_dev_release(struct kobject *kobj); void bch_flash_dev_release(struct kobject *kobj); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 147c493a989a..fd121a61f17c 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -559,6 +559,27 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) } } +#define cmp_int(l, r) ((l > r) - (l < r)) + +#ifdef CONFIG_PROVE_LOCKING +static int btree_lock_cmp_fn(const struct lockdep_map *_a, + const struct lockdep_map *_b) +{ + const struct btree *a = container_of(_a, struct btree, lock.dep_map); + const struct btree *b = container_of(_b, struct btree, lock.dep_map); + + return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key); +} + +static void btree_lock_print_fn(const struct lockdep_map *map) +{ + const struct btree *b = container_of(map, struct btree, lock.dep_map); + + printk(KERN_CONT " l=%u %llu:%llu", b->level, + KEY_INODE(&b->key), KEY_OFFSET(&b->key)); +} +#endif + static struct btree *mca_bucket_alloc(struct cache_set *c, struct bkey *k, gfp_t gfp) { @@ -572,7 +593,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c, return NULL; init_rwsem(&b->lock); - lockdep_set_novalidate_class(&b->lock); + lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn); mutex_init(&b->write_lock); lockdep_set_novalidate_class(&b->write_lock); INIT_LIST_HEAD(&b->list); @@ -885,7 +906,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, * cannibalize_bucket() will take. This means every time we unlock the root of * the btree, we need to release this lock if we have it held. 
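The mca_bucket_alloc() change above is notable: instead of exempting btree node locks from validation with lockdep_set_novalidate_class(), it hands lockdep an explicit ordering via lock_set_cmp_fn(), here "higher tree level first, then by key" to match root-to-leaf traversal. The comparator is an ordinary three-way function; a sketch of the same ordering outside the bcache types (using the GNU ?: shorthand, as the original does):

/* Three-way compare, as the cmp_int() helper in the patch. */
#define cmp_int(l, r) (((l) > (r)) - ((l) < (r)))

struct node {
        int level;                 /* root has the largest level */
        unsigned long long key;
};

/*
 * Negative when a must be locked before b: higher levels order first
 * (hence the negation), ties are broken by key. Lockdep can then report
 * any acquisition that violates root-to-leaf order instead of being
 * told to ignore these locks entirely.
 */
static int node_lock_cmp(const struct node *a, const struct node *b)
{
        return -cmp_int(a->level, b->level) ?: cmp_int(a->key, b->key);
}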
*/ -static void bch_cannibalize_unlock(struct cache_set *c) +void bch_cannibalize_unlock(struct cache_set *c) { spin_lock(&c->btree_cannibalize_lock); if (c->btree_cache_alloc_lock == current) { @@ -1090,10 +1111,12 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, struct btree *parent) { BKEY_PADDED(key) k; - struct btree *b = ERR_PTR(-EAGAIN); + struct btree *b; mutex_lock(&c->bucket_lock); retry: + /* return ERR_PTR(-EAGAIN) when it fails */ + b = ERR_PTR(-EAGAIN); if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait)) goto err; @@ -1138,7 +1161,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b, { struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); - if (!IS_ERR_OR_NULL(n)) { + if (!IS_ERR(n)) { mutex_lock(&n->write_lock); bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); bkey_copy_key(&n->key, &b->key); @@ -1340,7 +1363,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, memset(new_nodes, 0, sizeof(new_nodes)); closure_init_stack(&cl); - while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) + while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b)) keys += r[nodes++].keys; blocks = btree_default_blocks(b->c) * 2 / 3; @@ -1352,7 +1375,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, for (i = 0; i < nodes; i++) { new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); - if (IS_ERR_OR_NULL(new_nodes[i])) + if (IS_ERR(new_nodes[i])) goto out_nocoalesce; } @@ -1487,7 +1510,7 @@ out_nocoalesce: bch_keylist_free(&keylist); for (i = 0; i < nodes; i++) - if (!IS_ERR_OR_NULL(new_nodes[i])) { + if (!IS_ERR(new_nodes[i])) { btree_node_free(new_nodes[i]); rw_unlock(true, new_nodes[i]); } @@ -1669,7 +1692,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op, if (should_rewrite) { n = btree_node_alloc_replacement(b, NULL); - if (!IS_ERR_OR_NULL(n)) { + if (!IS_ERR(n)) { bch_btree_node_write_sync(n); bch_btree_set_root(n); @@ -1968,6 +1991,15 @@ static int bch_btree_check_thread(void *arg) c->gc_stats.nodes++; bch_btree_op_init(&op, 0); ret = bcache_btree(check_recurse, p, c->root, &op); + /* + * The op may be added to cache_set's btree_cache_wait + * in mca_cannibalize(); make sure it is removed from + * the list and that btree_cache_alloc_lock is released + * before the op memory is freed, otherwise the + * btree_cache_wait list will be corrupted. + */ + bch_cannibalize_unlock(c); + finish_wait(&c->btree_cache_wait, &(&op)->wait); if (ret) goto out; } diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 1b5fdbc0d83e..45d64b54115a 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -247,8 +247,8 @@ static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level) static inline void rw_lock(bool w, struct btree *b, int level) { - w ? down_write_nested(&b->lock, level + 1) - : down_read_nested(&b->lock, level + 1); + w ?
down_write(&b->lock) + : down_read(&b->lock); if (w) b->seq++; } @@ -282,6 +282,7 @@ void bch_initial_gc_finish(struct cache_set *c); void bch_moving_gc(struct cache_set *c); int bch_btree_check(struct cache_set *c); void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k); +void bch_cannibalize_unlock(struct cache_set *c); static inline void wake_up_gc(struct cache_set *c) { diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 67a2e29e0b40..a9b1f3896249 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1228,7 +1228,7 @@ void cached_dev_submit_bio(struct bio *bio) detached_dev_do_request(d, bio, orig_bdev, start_time); } -static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, +static int cached_dev_ioctl(struct bcache_device *d, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct cached_dev *dc = container_of(d, struct cached_dev, disk); @@ -1318,7 +1318,7 @@ void flash_dev_submit_bio(struct bio *bio) continue_at(cl, search_free, NULL); } -static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode, +static int flash_dev_ioctl(struct bcache_device *d, blk_mode_t mode, unsigned int cmd, unsigned long arg) { return -ENOTTY; diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h index bd3afc856d53..21b445f8af15 100644 --- a/drivers/md/bcache/stats.h +++ b/drivers/md/bcache/stats.h @@ -18,7 +18,6 @@ struct cache_stats { unsigned long cache_misses; unsigned long cache_bypass_hits; unsigned long cache_bypass_misses; - unsigned long cache_readaheads; unsigned long cache_miss_collisions; unsigned long sectors_bypassed; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 7e9d19fd21dd..e2a803683105 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -732,9 +732,9 @@ out: /* Bcache device */ -static int open_dev(struct block_device *b, fmode_t mode) +static int open_dev(struct gendisk *disk, blk_mode_t mode) { - struct bcache_device *d = b->bd_disk->private_data; + struct bcache_device *d = disk->private_data; if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) return -ENXIO; @@ -743,14 +743,14 @@ static int open_dev(struct block_device *b, fmode_t mode) return 0; } -static void release_dev(struct gendisk *b, fmode_t mode) +static void release_dev(struct gendisk *b) { struct bcache_device *d = b->private_data; closure_put(&d->cl); } -static int ioctl_dev(struct block_device *b, fmode_t mode, +static int ioctl_dev(struct block_device *b, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct bcache_device *d = b->bd_disk->private_data; @@ -1369,7 +1369,7 @@ static void cached_dev_free(struct closure *cl) put_page(virt_to_page(dc->sb_disk)); if (!IS_ERR_OR_NULL(dc->bdev)) - blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(dc->bdev, bcache_kobj); wake_up(&unregister_wait); @@ -1723,7 +1723,7 @@ static void cache_set_flush(struct closure *cl) if (!IS_ERR_OR_NULL(c->gc_thread)) kthread_stop(c->gc_thread); - if (!IS_ERR_OR_NULL(c->root)) + if (!IS_ERR(c->root)) list_add(&c->root->list, &c->btree_cache); /* @@ -2087,7 +2087,7 @@ static int run_cache_set(struct cache_set *c) err = "cannot allocate new btree root"; c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); - if (IS_ERR_OR_NULL(c->root)) + if (IS_ERR(c->root)) goto err; mutex_lock(&c->root->write_lock); @@ -2218,7 +2218,7 @@ void bch_cache_release(struct kobject *kobj) put_page(virt_to_page(ca->sb_disk)); if (!IS_ERR_OR_NULL(ca->bdev)) - blkdev_put(ca->bdev, 
FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(ca->bdev, bcache_kobj); kfree(ca); module_put(THIS_MODULE); @@ -2359,7 +2359,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk, * call blkdev_put() to bdev in bch_cache_release(). So we * explicitly call blkdev_put() here. */ - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(bdev, bcache_kobj); if (ret == -ENOMEM) err = "cache_alloc(): -ENOMEM"; else if (ret == -EPERM) @@ -2461,7 +2461,7 @@ static void register_bdev_worker(struct work_struct *work) if (!dc) { fail = true; put_page(virt_to_page(args->sb_disk)); - blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(args->bdev, bcache_kobj); goto out; } @@ -2491,7 +2491,7 @@ static void register_cache_worker(struct work_struct *work) if (!ca) { fail = true; put_page(virt_to_page(args->sb_disk)); - blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(args->bdev, bcache_kobj); goto out; } @@ -2558,9 +2558,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, ret = -EINVAL; err = "failed to open device"; - bdev = blkdev_get_by_path(strim(path), - FMODE_READ|FMODE_WRITE|FMODE_EXCL, - sb); + bdev = blkdev_get_by_path(strim(path), BLK_OPEN_READ | BLK_OPEN_WRITE, + bcache_kobj, NULL); if (IS_ERR(bdev)) { if (bdev == ERR_PTR(-EBUSY)) { dev_t dev; @@ -2648,7 +2647,7 @@ async_done: out_put_sb_page: put_page(virt_to_page(sb_disk)); out_blkdev_put: - blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(bdev, register_bcache); out_free_sb: kfree(sb); out_free_path: diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index c6f677059214..0e2c1880f60b 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -1111,26 +1111,25 @@ SHOW(__bch_cache) vfree(p); - ret = scnprintf(buf, PAGE_SIZE, - "Unused: %zu%%\n" - "Clean: %zu%%\n" - "Dirty: %zu%%\n" - "Metadata: %zu%%\n" - "Average: %llu\n" - "Sectors per Q: %zu\n" - "Quantiles: [", - unused * 100 / (size_t) ca->sb.nbuckets, - available * 100 / (size_t) ca->sb.nbuckets, - dirty * 100 / (size_t) ca->sb.nbuckets, - meta * 100 / (size_t) ca->sb.nbuckets, sum, - n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1)); + ret = sysfs_emit(buf, + "Unused: %zu%%\n" + "Clean: %zu%%\n" + "Dirty: %zu%%\n" + "Metadata: %zu%%\n" + "Average: %llu\n" + "Sectors per Q: %zu\n" + "Quantiles: [", + unused * 100 / (size_t) ca->sb.nbuckets, + available * 100 / (size_t) ca->sb.nbuckets, + dirty * 100 / (size_t) ca->sb.nbuckets, + meta * 100 / (size_t) ca->sb.nbuckets, sum, + n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1)); for (i = 0; i < ARRAY_SIZE(q); i++) - ret += scnprintf(buf + ret, PAGE_SIZE - ret, - "%u ", q[i]); + ret += sysfs_emit_at(buf, ret, "%u ", q[i]); ret--; - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n"); + ret += sysfs_emit_at(buf, ret, "]\n"); return ret; } diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h index a2ff6447b699..65b8bd975ab1 100644 --- a/drivers/md/bcache/sysfs.h +++ b/drivers/md/bcache/sysfs.h @@ -3,7 +3,7 @@ #define _BCACHE_SYSFS_H_ #define KTYPE(type) \ -struct kobj_type type ## _ktype = { \ +const struct kobj_type type ## _ktype = { \ .release = type ## _release, \ .sysfs_ops = &((const struct sysfs_ops) { \ .show = type ## _show, \ diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index d4a5fc0650bb..24c049067f61 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -890,6 +890,16 @@ static int 
bch_root_node_dirty_init(struct cache_set *c, if (ret < 0) pr_warn("sectors dirty init failed, ret=%d!\n", ret); + /* + * The op may be added to cache_set's btree_cache_wait + * in mca_cannibalize(); make sure it is removed from + * the list and that btree_cache_alloc_lock is released + * before the op memory is freed, otherwise the + * btree_cache_wait list will be corrupted. + */ + bch_cannibalize_unlock(c); + finish_wait(&c->btree_cache_wait, &(&op.op)->wait); + return ret; } diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 9e0c69958587..acffed750e3e 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -1828,7 +1828,7 @@ int dm_cache_metadata_abort(struct dm_cache_metadata *cmd) * Replacement block manager (new_bm) is created and old_bm destroyed outside of * cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of * shrinker associated with the block manager's bufio client vs cmd root_lock). - * - must take shrinker_mutex without holding cmd->root_lock + * - must take shrinker_rwsem without holding cmd->root_lock */ new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT, CACHE_MAX_CONCURRENT_LOCKS); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 872896218550..911f73f7ebba 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2051,8 +2051,8 @@ static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as, if (!at_least_one_arg(as, error)) return -EINVAL; - r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &ca->metadata_dev); + r = dm_get_device(ca->ti, dm_shift_arg(as), + BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->metadata_dev); if (r) { *error = "Error opening metadata device"; return r; } @@ -2074,8 +2074,8 @@ static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as, if (!at_least_one_arg(as, error)) return -EINVAL; - r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &ca->cache_dev); + r = dm_get_device(ca->ti, dm_shift_arg(as), + BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->cache_dev); if (r) { *error = "Error opening cache device"; return r; @@ -2093,8 +2093,8 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as, if (!at_least_one_arg(as, error)) return -EINVAL; - r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &ca->origin_dev); + r = dm_get_device(ca->ti, dm_shift_arg(as), + BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->origin_dev); if (r) { *error = "Error opening origin device"; return r; diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index f467cdb5a022..94b2fc33f64b 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -1683,8 +1683,8 @@ static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char * int r; sector_t metadata_dev_size; - r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &clone->metadata_dev); + r = dm_get_device(clone->ti, dm_shift_arg(as), + BLK_OPEN_READ | BLK_OPEN_WRITE, &clone->metadata_dev); if (r) { *error = "Error opening metadata device"; return r; } @@ -1703,8 +1703,8 @@ static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **err int r; sector_t dest_dev_size; - r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &clone->dest_dev); + r = dm_get_device(clone->ti, dm_shift_arg(as), + BLK_OPEN_READ | BLK_OPEN_WRITE, &clone->dest_dev); if (r) { *error = "Error opening
destination device"; return r; @@ -1725,7 +1725,7 @@ static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **e int r; sector_t source_dev_size; - r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ, + r = dm_get_device(clone->ti, dm_shift_arg(as), BLK_OPEN_READ, &clone->source_dev); if (r) { *error = "Error opening source device"; diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index aecab0c0720f..ce913ad91a52 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -207,11 +207,10 @@ struct dm_table { unsigned integrity_added:1; /* - * Indicates the rw permissions for the new logical - * device. This should be a combination of FMODE_READ - * and FMODE_WRITE. + * Indicates the rw permissions for the new logical device. This + * should be a combination of BLK_OPEN_READ and BLK_OPEN_WRITE. */ - fmode_t mode; + blk_mode_t mode; /* a list of devices used by this table */ struct list_head devices; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 8b47b913ee83..15424bfea7ee 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1693,8 +1693,7 @@ retry: len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size; - bio_add_page(clone, page, len, 0); - + __bio_add_page(clone, page, len, 0); remaining_size -= len; } @@ -3256,7 +3255,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->per_bio_data_size = ti->per_io_data_size = ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size, - ARCH_KMALLOC_MINALIGN); + ARCH_DMA_MINALIGN); ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc); if (ret) { diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 0d70914217ee..6acfa5bf97a4 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1482,14 +1482,16 @@ static int era_ctr(struct dm_target *ti, unsigned int argc, char **argv) era->ti = ti; - r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev); + r = dm_get_device(ti, argv[0], BLK_OPEN_READ | BLK_OPEN_WRITE, + &era->metadata_dev); if (r) { ti->error = "Error opening metadata device"; era_destroy(era); return -EINVAL; } - r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev); + r = dm_get_device(ti, argv[1], BLK_OPEN_READ | BLK_OPEN_WRITE, + &era->origin_dev); if (r) { ti->error = "Error opening data device"; era_destroy(era); diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c index d369457dbed0..2a71bcdba92d 100644 --- a/drivers/md/dm-init.c +++ b/drivers/md/dm-init.c @@ -293,8 +293,10 @@ static int __init dm_init_init(void) for (i = 0; i < ARRAY_SIZE(waitfor); i++) { if (waitfor[i]) { + dev_t dev; + DMINFO("waiting for device %s ...", waitfor[i]); - while (!dm_get_dev_t(waitfor[i])) + while (early_lookup_bdev(waitfor[i], &dev)) fsleep(5000); } } diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 31838b13ea54..63ec502fcb12 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -4268,10 +4268,10 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv } /* - * If this workqueue were percpu, it would cause bio reordering + * If this workqueue weren't ordered, it would cause bio reordering * and reduced performance. 
*/ - ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + ic->wait_wq = alloc_ordered_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM); if (!ic->wait_wq) { ti->error = "Cannot allocate workqueue"; r = -ENOMEM; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index cc77cf3d4109..6d301019e5e3 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -861,7 +861,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param) table = dm_get_inactive_table(md, &srcu_idx); if (table) { - if (!(dm_table_get_mode(table) & FMODE_WRITE)) + if (!(dm_table_get_mode(table) & BLK_OPEN_WRITE)) param->flags |= DM_READONLY_FLAG; param->target_count = table->num_targets; } @@ -1168,13 +1168,10 @@ static int do_resume(struct dm_ioctl *param) /* Do we need to load a new map ? */ if (new_map) { sector_t old_size, new_size; - int srcu_idx; /* Suspend if it isn't already suspended */ - old_map = dm_get_live_table(md, &srcu_idx); - if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map) + if (param->flags & DM_SKIP_LOCKFS_FLAG) suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; - dm_put_live_table(md, srcu_idx); if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; if (!dm_suspended_md(md)) @@ -1192,7 +1189,7 @@ static int do_resume(struct dm_ioctl *param) if (old_size && new_size && old_size != new_size) need_resize_uevent = true; - if (dm_table_get_mode(new_map) & FMODE_WRITE) + if (dm_table_get_mode(new_map) & BLK_OPEN_WRITE) set_disk_ro(dm_disk(md), 0); else set_disk_ro(dm_disk(md), 1); @@ -1381,12 +1378,12 @@ static int dev_arm_poll(struct file *filp, struct dm_ioctl *param, size_t param_ return 0; } -static inline fmode_t get_mode(struct dm_ioctl *param) +static inline blk_mode_t get_mode(struct dm_ioctl *param) { - fmode_t mode = FMODE_READ | FMODE_WRITE; + blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE; if (param->flags & DM_READONLY_FLAG) - mode = FMODE_READ; + mode = BLK_OPEN_READ; return mode; } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index c8821fcb8299..8846bf510a35 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3750,11 +3750,11 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv, * canceling read-auto mode */ mddev->ro = 0; - if (!mddev->suspended && mddev->sync_thread) + if (!mddev->suspended) md_wakeup_thread(mddev->sync_thread); } set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - if (!mddev->suspended && mddev->thread) + if (!mddev->suspended) md_wakeup_thread(mddev->thread); return 0; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 9c49f53760d0..bf7a574499a3 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1241,9 +1241,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) int i; int r = -EINVAL; char *origin_path, *cow_path; - dev_t origin_dev, cow_dev; unsigned int args_used, num_flush_bios = 1; - fmode_t origin_mode = FMODE_READ; + blk_mode_t origin_mode = BLK_OPEN_READ; if (argc < 4) { ti->error = "requires 4 or more arguments"; @@ -1253,7 +1252,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (dm_target_is_snapshot_merge(ti)) { num_flush_bios = 2; - origin_mode = FMODE_WRITE; + origin_mode = BLK_OPEN_WRITE; } s = kzalloc(sizeof(*s), GFP_KERNEL); @@ -1279,24 +1278,21 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->error = "Cannot get origin device"; goto bad_origin; } - origin_dev = s->origin->bdev->bd_dev; cow_path = 
argv[0]; argv++; argc--; - cow_dev = dm_get_dev_t(cow_path); - if (cow_dev && cow_dev == origin_dev) { - ti->error = "COW device cannot be the same as origin device"; - r = -EINVAL; - goto bad_cow; - } - r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow); if (r) { ti->error = "Cannot get COW device"; goto bad_cow; } + if (s->cow->bdev && s->cow->bdev == s->origin->bdev) { + ti->error = "COW device cannot be the same as origin device"; + r = -EINVAL; + goto bad_store; + } r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store); if (r) { diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 1398f1d6e83e..7d208b2b1a19 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -126,7 +126,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num) return 0; } -int dm_table_create(struct dm_table **result, fmode_t mode, +int dm_table_create(struct dm_table **result, blk_mode_t mode, unsigned int num_targets, struct mapped_device *md) { struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); @@ -304,7 +304,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, * device and not to touch the existing bdev field in case * it is accessed concurrently. */ -static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, +static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode, struct mapped_device *md) { int r; @@ -324,23 +324,13 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, } /* - * Convert the path to a device - */ -dev_t dm_get_dev_t(const char *path) -{ - dev_t dev; - - if (lookup_bdev(path, &dev)) - dev = name_to_dev_t(path); - return dev; -} -EXPORT_SYMBOL_GPL(dm_get_dev_t); - -/* * Add a device to the list, or just increment the usage count if * it's already present. + * + * Note: the __ref annotation is because this function can call the __init + * marked early_lookup_bdev when called during early boot from dm-init.c.
*/ -int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, +int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode, struct dm_dev **result) { int r; @@ -358,9 +348,13 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, if (MAJOR(dev) != major || MINOR(dev) != minor) return -EOVERFLOW; } else { - dev = dm_get_dev_t(path); - if (!dev) - return -ENODEV; + r = lookup_bdev(path, &dev); +#ifndef MODULE + if (r && system_state < SYSTEM_RUNNING) + r = early_lookup_bdev(path, &dev); +#endif + if (r) + return r; } if (dev == disk_devt(t->md->disk)) return -EINVAL; @@ -668,7 +662,8 @@ int dm_table_add_target(struct dm_table *t, const char *type, t->singleton = true; } - if (dm_target_always_writeable(ti->type) && !(t->mode & FMODE_WRITE)) { + if (dm_target_always_writeable(ti->type) && + !(t->mode & BLK_OPEN_WRITE)) { ti->error = "target type may not be included in a read-only table"; goto bad; } @@ -2039,7 +2034,7 @@ struct list_head *dm_table_get_devices(struct dm_table *t) return &t->devices; } -fmode_t dm_table_get_mode(struct dm_table *t) +blk_mode_t dm_table_get_mode(struct dm_table *t) { return t->mode; } diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 9f5cb52c5763..9dd0409848ab 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1756,13 +1756,15 @@ int dm_thin_remove_range(struct dm_thin_device *td, int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) { - int r; + int r = -EINVAL; uint32_t ref_count; down_read(&pmd->root_lock); - r = dm_sm_get_count(pmd->data_sm, b, &ref_count); - if (!r) - *result = (ref_count > 1); + if (!pmd->fail_io) { + r = dm_sm_get_count(pmd->data_sm, b, &ref_count); + if (!r) + *result = (ref_count > 1); + } up_read(&pmd->root_lock); return r; @@ -1770,10 +1772,11 @@ int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *re int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e) { - int r = 0; + int r = -EINVAL; pmd_write_lock(pmd); - r = dm_sm_inc_blocks(pmd->data_sm, b, e); + if (!pmd->fail_io) + r = dm_sm_inc_blocks(pmd->data_sm, b, e); pmd_write_unlock(pmd); return r; @@ -1781,10 +1784,11 @@ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_ int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e) { - int r = 0; + int r = -EINVAL; pmd_write_lock(pmd); - r = dm_sm_dec_blocks(pmd->data_sm, b, e); + if (!pmd->fail_io) + r = dm_sm_dec_blocks(pmd->data_sm, b, e); pmd_write_unlock(pmd); return r; @@ -1887,7 +1891,7 @@ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd) * Replacement block manager (new_bm) is created and old_bm destroyed outside of * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of * shrinker associated with the block manager's bufio client vs pmd root_lock). 
- * - must take shrinker_mutex without holding pmd->root_lock + * - must take shrinker_rwsem without holding pmd->root_lock */ new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT, THIN_MAX_CONCURRENT_LOCKS); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 2b13c949bd72..f1d0dcb9db22 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -401,8 +401,7 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da sector_t s = block_to_sectors(tc->pool, data_b); sector_t len = block_to_sectors(tc->pool, data_e - data_b); - return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT, - &op->bio); + return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio); } static void end_discard(struct discard_op *op, int r) @@ -3301,7 +3300,7 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv) unsigned long block_size; dm_block_t low_water_blocks; struct dm_dev *metadata_dev; - fmode_t metadata_mode; + blk_mode_t metadata_mode; /* * FIXME Remove validation from scope of lock. @@ -3334,7 +3333,8 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (r) goto out_unlock; - metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE); + metadata_mode = BLK_OPEN_READ | + ((pf.mode == PM_READ_ONLY) ? 0 : BLK_OPEN_WRITE); r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev); if (r) { ti->error = "Error opening metadata block device"; @@ -3342,7 +3342,7 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv) } warn_if_metadata_device_too_big(metadata_dev->bdev); - r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev); + r = dm_get_device(ti, argv[1], BLK_OPEN_READ | BLK_OPEN_WRITE, &data_dev); if (r) { ti->error = "Error getting data device"; goto out_metadata; @@ -4223,7 +4223,7 @@ static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_origin_dev; } - r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev); + r = dm_get_device(ti, argv[2], BLK_OPEN_READ, &origin_dev); if (r) { ti->error = "Error opening origin device"; goto bad_origin_dev; diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index a9ee2faa75a2..3ef9f018da60 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -607,7 +607,7 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, (*argc)--; if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) { - r = dm_get_device(ti, arg_value, FMODE_READ, &v->fec->dev); + r = dm_get_device(ti, arg_value, BLK_OPEN_READ, &v->fec->dev); if (r) { ti->error = "FEC device lookup failed"; return r; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index e35c16e06d06..26adcfea0302 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1196,7 +1196,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (r) goto bad; - if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) { + if ((dm_table_get_mode(ti->table) & ~BLK_OPEN_READ)) { ti->error = "Device must be readonly"; r = -EINVAL; goto bad; @@ -1225,13 +1225,13 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv) } v->version = num; - r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev); + r = dm_get_device(ti, argv[1], BLK_OPEN_READ, &v->data_dev); if (r) { ti->error = "Data device lookup failed"; goto bad; } - r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev); + r 
= dm_get_device(ti, argv[2], BLK_OPEN_READ, &v->hash_dev); if (r) { ti->error = "Hash device lookup failed"; goto bad; diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 8f0896a6990b..9d3cca8e3dc9 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -577,7 +577,7 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, bio->bi_iter.bi_sector = dmz_blk2sect(block); bio->bi_private = mblk; bio->bi_end_io = dmz_mblock_bio_end_io; - bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); + __bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); submit_bio(bio); return mblk; @@ -728,7 +728,7 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, bio->bi_iter.bi_sector = dmz_blk2sect(block); bio->bi_private = mblk; bio->bi_end_io = dmz_mblock_bio_end_io; - bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); + __bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); submit_bio(bio); return 0; @@ -752,7 +752,7 @@ static int dmz_rdwr_block(struct dmz_dev *dev, enum req_op op, bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO, GFP_NOIO); bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0); + __bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0); ret = submit_bio_wait(bio); bio_put(bio); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3b694ba3a106..fe2d4750d9c7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -207,7 +207,7 @@ static int __init local_init(void) if (r) return r; - deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); + deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0); if (!deferred_remove_workqueue) { r = -ENOMEM; goto out_uevent_exit; @@ -310,13 +310,13 @@ int dm_deleting_md(struct mapped_device *md) return test_bit(DMF_DELETING, &md->flags); } -static int dm_blk_open(struct block_device *bdev, fmode_t mode) +static int dm_blk_open(struct gendisk *disk, blk_mode_t mode) { struct mapped_device *md; spin_lock(&_minor_lock); - md = bdev->bd_disk->private_data; + md = disk->private_data; if (!md) goto out; @@ -334,7 +334,7 @@ out: return md ? 0 : -ENXIO; } -static void dm_blk_close(struct gendisk *disk, fmode_t mode) +static void dm_blk_close(struct gendisk *disk) { struct mapped_device *md; @@ -448,7 +448,7 @@ static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) dm_put_live_table(md, srcu_idx); } -static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, +static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct mapped_device *md = bdev->bd_disk->private_data; @@ -734,7 +734,7 @@ static char *_dm_claim_ptr = "I belong to device-mapper"; * Open a table device so we can use it as a map destination. 
*/ static struct table_device *open_table_device(struct mapped_device *md, - dev_t dev, fmode_t mode) + dev_t dev, blk_mode_t mode) { struct table_device *td; struct block_device *bdev; @@ -746,7 +746,7 @@ static struct table_device *open_table_device(struct mapped_device *md, return ERR_PTR(-ENOMEM); refcount_set(&td->count, 1); - bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, _dm_claim_ptr); + bdev = blkdev_get_by_dev(dev, mode, _dm_claim_ptr, NULL); if (IS_ERR(bdev)) { r = PTR_ERR(bdev); goto out_free_td; @@ -771,7 +771,7 @@ static struct table_device *open_table_device(struct mapped_device *md, return td; out_blkdev_put: - blkdev_put(bdev, mode | FMODE_EXCL); + blkdev_put(bdev, _dm_claim_ptr); out_free_td: kfree(td); return ERR_PTR(r); @@ -784,14 +784,14 @@ static void close_table_device(struct table_device *td, struct mapped_device *md { if (md->disk->slave_dir) bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); - blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); + blkdev_put(td->dm_dev.bdev, _dm_claim_ptr); put_dax(td->dm_dev.dax_dev); list_del(&td->list); kfree(td); } static struct table_device *find_table_device(struct list_head *l, dev_t dev, - fmode_t mode) + blk_mode_t mode) { struct table_device *td; @@ -802,7 +802,7 @@ static struct table_device *find_table_device(struct list_head *l, dev_t dev, return NULL; } -int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, +int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode, struct dm_dev **result) { struct table_device *td; @@ -1172,7 +1172,8 @@ static inline sector_t max_io_len_target_boundary(struct dm_target *ti, } static sector_t __max_io_len(struct dm_target *ti, sector_t sector, - unsigned int max_granularity) + unsigned int max_granularity, + unsigned int max_sectors) { sector_t target_offset = dm_target_offset(ti, sector); sector_t len = max_io_len_target_boundary(ti, target_offset); @@ -1186,13 +1187,13 @@ static sector_t __max_io_len(struct dm_target *ti, sector_t sector, if (!max_granularity) return len; return min_t(sector_t, len, - min(queue_max_sectors(ti->table->md->queue), + min(max_sectors ? 
: queue_max_sectors(ti->table->md->queue), blk_chunk_sectors_left(target_offset, max_granularity))); } static inline sector_t max_io_len(struct dm_target *ti, sector_t sector) { - return __max_io_len(ti, sector, ti->max_io_len); + return __max_io_len(ti, sector, ti->max_io_len, 0); } int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) @@ -1581,12 +1582,13 @@ static void __send_empty_flush(struct clone_info *ci) static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, unsigned int num_bios, - unsigned int max_granularity) + unsigned int max_granularity, + unsigned int max_sectors) { unsigned int len, bios; len = min_t(sector_t, ci->sector_count, - __max_io_len(ti, ci->sector, max_granularity)); + __max_io_len(ti, ci->sector, max_granularity, max_sectors)); atomic_add(num_bios, &ci->io->io_count); bios = __send_duplicate_bios(ci, ti, num_bios, &len); @@ -1623,23 +1625,27 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci, { unsigned int num_bios = 0; unsigned int max_granularity = 0; + unsigned int max_sectors = 0; struct queue_limits *limits = dm_get_queue_limits(ti->table->md); switch (bio_op(ci->bio)) { case REQ_OP_DISCARD: num_bios = ti->num_discard_bios; + max_sectors = limits->max_discard_sectors; if (ti->max_discard_granularity) - max_granularity = limits->max_discard_sectors; + max_granularity = max_sectors; break; case REQ_OP_SECURE_ERASE: num_bios = ti->num_secure_erase_bios; + max_sectors = limits->max_secure_erase_sectors; if (ti->max_secure_erase_granularity) - max_granularity = limits->max_secure_erase_sectors; + max_granularity = max_sectors; break; case REQ_OP_WRITE_ZEROES: num_bios = ti->num_write_zeroes_bios; + max_sectors = limits->max_write_zeroes_sectors; if (ti->max_write_zeroes_granularity) - max_granularity = limits->max_write_zeroes_sectors; + max_granularity = max_sectors; break; default: break; @@ -1654,7 +1660,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci, if (unlikely(!num_bios)) return BLK_STS_NOTSUPP; - __send_changing_extent_only(ci, ti, num_bios, max_granularity); + __send_changing_extent_only(ci, ti, num_bios, + max_granularity, max_sectors); return BLK_STS_OK; } @@ -2808,6 +2815,10 @@ retry: } map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); + if (!map) { + /* avoid deadlock with fs/namespace.c:do_mount() */ + suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; + } r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); if (r) diff --git a/drivers/md/dm.h b/drivers/md/dm.h index a856e0aee73b..63d9010d8e61 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -203,7 +203,7 @@ int dm_open_count(struct mapped_device *md); int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred); int dm_cancel_deferred_remove(struct mapped_device *md); int dm_request_based(struct mapped_device *md); -int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, +int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode, struct dm_dev **result); void dm_put_table_device(struct mapped_device *md, struct dm_dev *d); diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c index 91836e6de326..6eaa0eab40f9 100644 --- a/drivers/md/md-autodetect.c +++ b/drivers/md/md-autodetect.c @@ -147,7 +147,8 @@ static void __init md_setup_drive(struct md_setup_args *args) if (p) *p++ = 0; - dev = name_to_dev_t(devname); + if (early_lookup_bdev(devname, &dev)) + dev = 0; if (strncmp(devname, "/dev/", 5) 
== 0) devname += 5; snprintf(comp_name, 63, "/dev/%s", devname); diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index bc8d7565171d..1ff712889a3b 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -54,14 +54,7 @@ __acquires(bitmap->lock) { unsigned char *mappage; - if (page >= bitmap->pages) { - /* This can happen if bitmap_start_sync goes beyond - * End-of-device while looking for a whole page. - * It is harmless. - */ - return -EINVAL; - } - + WARN_ON_ONCE(page >= bitmap->pages); if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */ return 0; @@ -1023,7 +1016,6 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block) return set; } - /* this gets called when the md device is ready to unplug its underlying * (slave) device queues -- before we let any writes go down, we need to * sync the dirty pages of the bitmap file to disk */ @@ -1033,8 +1025,7 @@ void md_bitmap_unplug(struct bitmap *bitmap) int dirty, need_write; int writing = 0; - if (!bitmap || !bitmap->storage.filemap || - test_bit(BITMAP_STALE, &bitmap->flags)) + if (!md_bitmap_enabled(bitmap)) return; /* look at each page to see if there are any set bits that need to be @@ -1063,6 +1054,35 @@ void md_bitmap_unplug(struct bitmap *bitmap) } EXPORT_SYMBOL(md_bitmap_unplug); +struct bitmap_unplug_work { + struct work_struct work; + struct bitmap *bitmap; + struct completion *done; +}; + +static void md_bitmap_unplug_fn(struct work_struct *work) +{ + struct bitmap_unplug_work *unplug_work = + container_of(work, struct bitmap_unplug_work, work); + + md_bitmap_unplug(unplug_work->bitmap); + complete(unplug_work->done); +} + +void md_bitmap_unplug_async(struct bitmap *bitmap) +{ + DECLARE_COMPLETION_ONSTACK(done); + struct bitmap_unplug_work unplug_work; + + INIT_WORK_ONSTACK(&unplug_work.work, md_bitmap_unplug_fn); + unplug_work.bitmap = bitmap; + unplug_work.done = &done; + + queue_work(md_bitmap_wq, &unplug_work.work); + wait_for_completion(&done); +} +EXPORT_SYMBOL(md_bitmap_unplug_async); + static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); /* * bitmap_init_from_disk -- called at bitmap_create time to initialize * the in-memory bitmap from the on-disk bitmap -- also, sets up the @@ -1241,11 +1261,28 @@ static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap, sector_t offset, sector_t *blocks, int create); +static void mddev_set_timeout(struct mddev *mddev, unsigned long timeout, + bool force) +{ + struct md_thread *thread; + + rcu_read_lock(); + thread = rcu_dereference(mddev->thread); + + if (!thread) + goto out; + + if (force || thread->timeout < MAX_SCHEDULE_TIMEOUT) + thread->timeout = timeout; + +out: + rcu_read_unlock(); +} + /* * bitmap daemon -- periodically wakes up to clean bits and flush pages * out to disk */ - void md_bitmap_daemon_work(struct mddev *mddev) { struct bitmap *bitmap; @@ -1269,7 +1306,7 @@ void md_bitmap_daemon_work(struct mddev *mddev) bitmap->daemon_lastrun = jiffies; if (bitmap->allclean) { - mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; + mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true); goto done; } bitmap->allclean = 1; @@ -1366,8 +1403,7 @@ void md_bitmap_daemon_work(struct mddev *mddev) done: if (bitmap->allclean == 0) - mddev->thread->timeout = - mddev->bitmap_info.daemon_sleep; + mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true); mutex_unlock(&mddev->bitmap_info.mutex); } @@ -1387,6 +1423,14 @@ __acquires(bitmap->lock) sector_t csize; int err; + if (page 
>= bitmap->pages) { + /* + * This can happen if bitmap_start_sync goes beyond + * End-of-device while looking for a whole page, or if the + * user wrote a huge number to sysfs bitmap_set_bits. + */ + return NULL; + } err = md_bitmap_checkpage(bitmap, page, create, 0); if (bitmap->bp[page].hijacked || @@ -1820,8 +1864,7 @@ void md_bitmap_destroy(struct mddev *mddev) mddev->bitmap = NULL; /* disconnect from the md device */ spin_unlock(&mddev->lock); mutex_unlock(&mddev->bitmap_info.mutex); - if (mddev->thread) - mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; + mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true); md_bitmap_free(bitmap); } @@ -1964,7 +2007,7 @@ int md_bitmap_load(struct mddev *mddev) /* Kick recovery in case any bits were set */ set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery); - mddev->thread->timeout = mddev->bitmap_info.daemon_sleep; + mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true); md_wakeup_thread(mddev->thread); md_bitmap_update_sb(bitmap); @@ -2469,17 +2512,11 @@ timeout_store(struct mddev *mddev, const char *buf, size_t len) timeout = MAX_SCHEDULE_TIMEOUT-1; if (timeout < 1) timeout = 1; + mddev->bitmap_info.daemon_sleep = timeout; - if (mddev->thread) { - /* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then - * the bitmap is all clean and we don't need to - * adjust the timeout right now - */ - if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) { - mddev->thread->timeout = timeout; - md_wakeup_thread(mddev->thread); - } - } + mddev_set_timeout(mddev, timeout, false); + md_wakeup_thread(mddev->thread); + return len; } diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h index cfd7395de8fd..8a3788c9bfef 100644 --- a/drivers/md/md-bitmap.h +++ b/drivers/md/md-bitmap.h @@ -264,6 +264,7 @@ void md_bitmap_sync_with_cluster(struct mddev *mddev, sector_t new_lo, sector_t new_hi); void md_bitmap_unplug(struct bitmap *bitmap); +void md_bitmap_unplug_async(struct bitmap *bitmap); void md_bitmap_daemon_work(struct mddev *mddev); int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, @@ -273,6 +274,13 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot, sector_t *lo, sector_t *hi, bool clear_bits); void md_bitmap_free(struct bitmap *bitmap); void md_bitmap_wait_behind_writes(struct mddev *mddev); + +static inline bool md_bitmap_enabled(struct bitmap *bitmap) +{ + return bitmap && bitmap->storage.filemap && + !test_bit(BITMAP_STALE, &bitmap->flags); +} + #endif #endif diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 10e0c5381d01..3d9fd74233df 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -75,14 +75,14 @@ struct md_cluster_info { sector_t suspend_hi; int suspend_from; /* the slot which broadcast suspend_lo/hi */ - struct md_thread *recovery_thread; + struct md_thread __rcu *recovery_thread; unsigned long recovery_map; /* communication lock resources */ struct dlm_lock_resource *ack_lockres; struct dlm_lock_resource *message_lockres; struct dlm_lock_resource *token_lockres; struct dlm_lock_resource *no_new_dev_lockres; - struct md_thread *recv_thread; + struct md_thread __rcu *recv_thread; struct completion newdisk_completion; wait_queue_head_t wait; unsigned long state; @@ -362,8 +362,8 @@ static void __recover_slot(struct mddev *mddev, int slot) set_bit(slot, &cinfo->recovery_map); if (!cinfo->recovery_thread) { - cinfo->recovery_thread = md_register_thread(recover_bitmaps, - mddev, "recover"); + rcu_assign_pointer(cinfo->recovery_thread, + md_register_thread(recover_bitmaps, mddev, "recover")); if
(!cinfo->recovery_thread) { pr_warn("md-cluster: Could not create recovery thread\n"); return; @@ -526,11 +526,15 @@ static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg) static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg) { int got_lock = 0; + struct md_thread *thread; struct md_cluster_info *cinfo = mddev->cluster_info; mddev->good_device_nr = le32_to_cpu(msg->raid_slot); dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR); + + /* daemon thread must exist */ + thread = rcu_dereference_protected(mddev->thread, true); + wait_event(thread->wqueue, (got_lock = mddev_trylock(mddev)) || test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state)); md_reload_sb(mddev, mddev->good_device_nr); @@ -889,7 +893,8 @@ static int join(struct mddev *mddev, int nodes) } /* Initiate the communication resources */ ret = -ENOMEM; - cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv"); + rcu_assign_pointer(cinfo->recv_thread, + md_register_thread(recv_daemon, mddev, "cluster_recv")); if (!cinfo->recv_thread) { pr_err("md-cluster: cannot allocate memory for recv_thread!\n"); goto err; } diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index 66edf5e72bd6..92c45be203d7 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c @@ -400,8 +400,8 @@ static int multipath_run (struct mddev *mddev) if (ret) goto out_free_conf; - mddev->thread = md_register_thread(multipathd, mddev, - "multipath"); + rcu_assign_pointer(mddev->thread, + md_register_thread(multipathd, mddev, "multipath")); if (!mddev->thread) goto out_free_conf; diff --git a/drivers/md/md.c b/drivers/md/md.c index 8e344b4b3444..cf3733c90c47 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -70,11 +70,7 @@ #include "md-bitmap.h" #include "md-cluster.h" -/* pers_list is a list of registered personalities protected - * by pers_lock. - * pers_lock does extra service to protect accesses to - * mddev->thread when the mutex cannot be held. - */ +/* pers_list is a list of registered personalities protected by pers_lock. */ static LIST_HEAD(pers_list); static DEFINE_SPINLOCK(pers_lock); @@ -87,23 +83,13 @@ static struct module *md_cluster_mod; static DECLARE_WAIT_QUEUE_HEAD(resync_wait); static struct workqueue_struct *md_wq; static struct workqueue_struct *md_misc_wq; -static struct workqueue_struct *md_rdev_misc_wq; +struct workqueue_struct *md_bitmap_wq; static int remove_and_add_spares(struct mddev *mddev, struct md_rdev *this); static void mddev_detach(struct mddev *mddev); - -enum md_ro_state { - MD_RDWR, - MD_RDONLY, - MD_AUTO_READ, - MD_MAX_STATE -}; - -static bool md_is_rdwr(struct mddev *mddev) -{ - return (mddev->ro == MD_RDWR); -} +static void export_rdev(struct md_rdev *rdev, struct mddev *mddev); +static void md_wakeup_thread_directly(struct md_thread __rcu *thread); /* * Default number of read corrections we'll attempt on an rdev @@ -360,10 +346,6 @@ EXPORT_SYMBOL_GPL(md_new_event); static LIST_HEAD(all_mddevs); static DEFINE_SPINLOCK(all_mddevs_lock); -static bool is_md_suspended(struct mddev *mddev) -{ - return percpu_ref_is_dying(&mddev->active_io); -} /* Rather than calling directly into the personality make_request function, * IO requests come here first so that we can check if the device is * being suspended pending a reconfiguration.
@@ -457,13 +439,19 @@ static void md_submit_bio(struct bio *bio) */ void mddev_suspend(struct mddev *mddev) { - WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk); - lockdep_assert_held(&mddev->reconfig_mutex); + struct md_thread *thread = rcu_dereference_protected(mddev->thread, + lockdep_is_held(&mddev->reconfig_mutex)); + + WARN_ON_ONCE(thread && current == thread->tsk); if (mddev->suspended++) return; wake_up(&mddev->sb_wait); set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags); percpu_ref_kill(&mddev->active_io); + + if (mddev->pers->prepare_suspend) + mddev->pers->prepare_suspend(mddev); + wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io)); mddev->pers->quiesce(mddev, 1); clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags); @@ -655,9 +643,11 @@ void mddev_init(struct mddev *mddev) { mutex_init(&mddev->open_mutex); mutex_init(&mddev->reconfig_mutex); + mutex_init(&mddev->delete_mutex); mutex_init(&mddev->bitmap_info.mutex); INIT_LIST_HEAD(&mddev->disks); INIT_LIST_HEAD(&mddev->all_mddevs); + INIT_LIST_HEAD(&mddev->deleting); timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0); atomic_set(&mddev->active, 1); atomic_set(&mddev->openers, 0); @@ -759,6 +749,24 @@ static void mddev_free(struct mddev *mddev) static const struct attribute_group md_redundancy_group; +static void md_free_rdev(struct mddev *mddev) +{ + struct md_rdev *rdev; + struct md_rdev *tmp; + + mutex_lock(&mddev->delete_mutex); + if (list_empty(&mddev->deleting)) + goto out; + + list_for_each_entry_safe(rdev, tmp, &mddev->deleting, same_set) { + list_del_init(&rdev->same_set); + kobject_del(&rdev->kobj); + export_rdev(rdev, mddev); + } +out: + mutex_unlock(&mddev->delete_mutex); +} + void mddev_unlock(struct mddev *mddev) { if (mddev->to_remove) { @@ -800,13 +808,10 @@ void mddev_unlock(struct mddev *mddev) } else mutex_unlock(&mddev->reconfig_mutex); - /* As we've dropped the mutex we need a spinlock to - * make sure the thread doesn't disappear - */ - spin_lock(&pers_lock); + md_free_rdev(mddev); + md_wakeup_thread(mddev->thread); wake_up(&mddev->sb_wait); - spin_unlock(&pers_lock); } EXPORT_SYMBOL_GPL(mddev_unlock); @@ -938,7 +943,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, atomic_inc(&rdev->nr_pending); bio->bi_iter.bi_sector = sector; - bio_add_page(bio, page, size, 0); + __bio_add_page(bio, page, size, 0); bio->bi_private = rdev; bio->bi_end_io = super_written; @@ -979,7 +984,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, bio.bi_iter.bi_sector = sector + rdev->new_data_offset; else bio.bi_iter.bi_sector = sector + rdev->data_offset; - bio_add_page(&bio, page, size, 0); + __bio_add_page(&bio, page, size, 0); submit_bio_wait(&bio); @@ -2440,16 +2445,12 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) return err; } -static void rdev_delayed_delete(struct work_struct *ws) -{ - struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); - kobject_del(&rdev->kobj); - kobject_put(&rdev->kobj); -} - void md_autodetect_dev(dev_t dev); -static void export_rdev(struct md_rdev *rdev) +/* just for claiming the bdev */ +static struct md_rdev claim_rdev; + +static void export_rdev(struct md_rdev *rdev, struct mddev *mddev) { pr_debug("md: export_rdev(%pg)\n", rdev->bdev); md_rdev_clear(rdev); @@ -2457,13 +2458,15 @@ static void export_rdev(struct md_rdev *rdev) if (test_bit(AutoDetected, &rdev->flags)) md_autodetect_dev(rdev->bdev->bd_dev); #endif - blkdev_put(rdev->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + 
blkdev_put(rdev->bdev, mddev->major_version == -2 ? &claim_rdev : rdev); rdev->bdev = NULL; kobject_put(&rdev->kobj); } static void md_kick_rdev_from_array(struct md_rdev *rdev) { + struct mddev *mddev = rdev->mddev; + bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); list_del_rcu(&rdev->same_set); pr_debug("md: unbind<%pg>\n", rdev->bdev); @@ -2477,15 +2480,17 @@ static void md_kick_rdev_from_array(struct md_rdev *rdev) rdev->sysfs_unack_badblocks = NULL; rdev->sysfs_badblocks = NULL; rdev->badblocks.count = 0; - /* We need to delay this, otherwise we can deadlock when - * writing to 'remove' to "dev/state". We also need - * to delay it due to rcu usage. - */ + synchronize_rcu(); - INIT_WORK(&rdev->del_work, rdev_delayed_delete); - kobject_get(&rdev->kobj); - queue_work(md_rdev_misc_wq, &rdev->del_work); - export_rdev(rdev); + + /* + * kobject_del() will wait for all in-progress sysfs writers to be done; + * those writers hold reconfig_mutex, hence kobject_del() can't be called + * under reconfig_mutex and is delayed to mddev_unlock(). + */ + mutex_lock(&mddev->delete_mutex); + list_add(&rdev->same_set, &mddev->deleting); + mutex_unlock(&mddev->delete_mutex); } static void export_array(struct mddev *mddev) @@ -3553,6 +3558,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, { struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); + struct kernfs_node *kn = NULL; ssize_t rv; struct mddev *mddev = rdev->mddev; @@ -3560,6 +3566,10 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, return -EIO; if (!capable(CAP_SYS_ADMIN)) return -EACCES; + + if (entry->store == state_store && cmd_match(page, "remove")) + kn = sysfs_break_active_protection(kobj, attr); + rv = mddev ? mddev_lock(mddev) : -ENODEV; if (!rv) { if (rdev->mddev == NULL) @@ -3568,6 +3578,10 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, rv = entry->store(rdev, page, length); mddev_unlock(mddev); } + + if (kn) + sysfs_unbreak_active_protection(kn); + return rv; } @@ -3612,6 +3626,7 @@ int md_rdev_init(struct md_rdev *rdev) return badblocks_init(&rdev->badblocks, 0); } EXPORT_SYMBOL_GPL(md_rdev_init); + /* * Import a device. If 'super_format' >= 0, then sanity check the superblock * @@ -3624,7 +3639,6 @@ EXPORT_SYMBOL_GPL(md_rdev_init); */ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) { - static struct md_rdev claim_rdev; /* just for claiming the bdev */ struct md_rdev *rdev; sector_t size; int err; @@ -3640,9 +3654,8 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe if (err) goto out_clear_rdev; - rdev->bdev = blkdev_get_by_dev(newdev, - FMODE_READ | FMODE_WRITE | FMODE_EXCL, - super_format == -2 ? &claim_rdev : rdev); + rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE, + super_format == -2 ? &claim_rdev : rdev, NULL); if (IS_ERR(rdev->bdev)) { pr_warn("md: could not open device unknown-block(%u,%u).\n", MAJOR(newdev), MINOR(newdev)); @@ -3679,7 +3692,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe return rdev; out_blkdev_put: - blkdev_put(rdev->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(rdev->bdev, super_format == -2 ?
&claim_rdev : rdev); out_clear_rdev: md_rdev_clear(rdev); out_free_rdev: @@ -3794,8 +3807,9 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) static ssize_t safe_delay_show(struct mddev *mddev, char *page) { - int msec = (mddev->safemode_delay*1000)/HZ; - return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); + unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ; + + return sprintf(page, "%u.%03u\n", msec/1000, msec%1000); } static ssize_t safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) @@ -3807,7 +3821,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) return -EINVAL; } - if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) + if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ) return -EINVAL; if (msec == 0) mddev->safemode_delay = 0; @@ -4477,6 +4491,8 @@ max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len rv = kstrtouint(buf, 10, &n); if (rv < 0) return rv; + if (n > INT_MAX) + return -EINVAL; atomic_set(&mddev->max_corr_read_errors, n); return len; } @@ -4491,20 +4507,6 @@ null_show(struct mddev *mddev, char *page) return -EINVAL; } -/* need to ensure rdev_delayed_delete() has completed */ -static void flush_rdev_wq(struct mddev *mddev) -{ - struct md_rdev *rdev; - - rcu_read_lock(); - rdev_for_each_rcu(rdev, mddev) - if (work_pending(&rdev->del_work)) { - flush_workqueue(md_rdev_misc_wq); - break; - } - rcu_read_unlock(); -} - static ssize_t new_dev_store(struct mddev *mddev, const char *buf, size_t len) { @@ -4532,7 +4534,6 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len) minor != MINOR(dev)) return -EOVERFLOW; - flush_rdev_wq(mddev); err = mddev_lock(mddev); if (err) return err; @@ -4560,7 +4561,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len) err = bind_rdev_to_array(rdev, mddev); out: if (err) - export_rdev(rdev); + export_rdev(rdev, mddev); mddev_unlock(mddev); if (!err) md_new_event(); @@ -4804,11 +4805,21 @@ action_store(struct mddev *mddev, const char *page, size_t len) return -EINVAL; err = mddev_lock(mddev); if (!err) { - if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { err = -EBUSY; - else { + } else if (mddev->reshape_position == MaxSector || + mddev->pers->check_reshape == NULL || + mddev->pers->check_reshape(mddev)) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); err = mddev->pers->start_reshape(mddev); + } else { + /* + * If reshape is still in progress, and + * md_check_recovery() can continue to reshape, + * don't restart reshape because data can be + * corrupted for raid456. + */ + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } mddev_unlock(mddev); } @@ -5592,7 +5603,6 @@ struct mddev *md_alloc(dev_t dev, char *name) * removed (mddev_delayed_delete). 
*/ flush_workqueue(md_misc_wq); - flush_workqueue(md_rdev_misc_wq); mutex_lock(&disks_mutex); mddev = mddev_alloc(dev); @@ -6269,10 +6279,12 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) } if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) set_bit(MD_RECOVERY_INTR, &mddev->recovery); - if (mddev->sync_thread) - /* Thread might be blocked waiting for metadata update - * which will now never happen */ - wake_up_process(mddev->sync_thread->tsk); + + /* + * Thread might be blocked waiting for metadata update which will now + * never happen + */ + md_wakeup_thread_directly(mddev->sync_thread); if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) return -EBUSY; @@ -6333,10 +6345,12 @@ static int do_md_stop(struct mddev *mddev, int mode, } if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) set_bit(MD_RECOVERY_INTR, &mddev->recovery); - if (mddev->sync_thread) - /* Thread might be blocked waiting for metadata update - * which will now never happen */ - wake_up_process(mddev->sync_thread->tsk); + + /* + * Thread might be blocked waiting for metadata update which will now + * never happen + */ + md_wakeup_thread_directly(mddev->sync_thread); mddev_unlock(mddev); wait_event(resync_wait, (mddev->sync_thread == NULL && @@ -6498,7 +6512,7 @@ static void autorun_devices(int part) rdev_for_each_list(rdev, tmp, &candidates) { list_del_init(&rdev->same_set); if (bind_rdev_to_array(rdev, mddev)) - export_rdev(rdev); + export_rdev(rdev, mddev); } autorun_array(mddev); mddev_unlock(mddev); @@ -6508,7 +6522,7 @@ */ rdev_for_each_list(rdev, tmp, &candidates) { list_del_init(&rdev->same_set); - export_rdev(rdev); + export_rdev(rdev, mddev); } mddev_put(mddev); } @@ -6696,13 +6710,13 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) pr_warn("md: %pg has different UUID to %pg\n", rdev->bdev, rdev0->bdev); - export_rdev(rdev); + export_rdev(rdev, mddev); return -EINVAL; } } err = bind_rdev_to_array(rdev, mddev); if (err) - export_rdev(rdev); + export_rdev(rdev, mddev); return err; } @@ -6733,7 +6747,6 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) if (info->state & (1<<MD_DISK_SYNC) && info->raid_disk < mddev->raid_disks) { rdev->raid_disk = info->raid_disk; - set_bit(In_sync, &rdev->flags); clear_bit(Bitmap_sync, &rdev->flags); } else rdev->raid_disk = -1; @@ -6746,7 +6759,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) /* This was a hot-add request, but events don't * match, so reject it.
*/ - export_rdev(rdev); + export_rdev(rdev, mddev); return -EINVAL; } @@ -6772,7 +6785,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) } } if (has_journal || mddev->bitmap) { - export_rdev(rdev); + export_rdev(rdev, mddev); return -EBUSY; } set_bit(Journal, &rdev->flags); @@ -6787,7 +6800,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) /* --add initiated by this node */ err = md_cluster_ops->add_new_disk(mddev, rdev); if (err) { - export_rdev(rdev); + export_rdev(rdev, mddev); return err; } } @@ -6797,7 +6810,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) err = bind_rdev_to_array(rdev, mddev); if (err) - export_rdev(rdev); + export_rdev(rdev, mddev); if (mddev_is_clustered(mddev)) { if (info->state & (1 << MD_DISK_CANDIDATE)) { @@ -6860,7 +6873,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) err = bind_rdev_to_array(rdev, mddev); if (err) { - export_rdev(rdev); + export_rdev(rdev, mddev); return err; } } @@ -6985,7 +6998,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) return 0; abort_export: - export_rdev(rdev); + export_rdev(rdev, mddev); return err; } @@ -7486,7 +7499,7 @@ static int __md_set_array_info(struct mddev *mddev, void __user *argp) return err; } -static int md_ioctl(struct block_device *bdev, fmode_t mode, +static int md_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { int err = 0; @@ -7555,9 +7568,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, } - if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK) - flush_rdev_wq(mddev); - if (cmd == HOT_REMOVE_DISK) /* need to ensure recovery thread has run */ wait_event_interruptible_timeout(mddev->sb_wait, @@ -7718,7 +7728,7 @@ out: return err; } #ifdef CONFIG_COMPAT -static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, +static int md_compat_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { switch (cmd) { @@ -7767,13 +7777,13 @@ out_unlock: return err; } -static int md_open(struct block_device *bdev, fmode_t mode) +static int md_open(struct gendisk *disk, blk_mode_t mode) { struct mddev *mddev; int err; spin_lock(&all_mddevs_lock); - mddev = mddev_get(bdev->bd_disk->private_data); + mddev = mddev_get(disk->private_data); spin_unlock(&all_mddevs_lock); if (!mddev) return -ENODEV; @@ -7789,7 +7799,7 @@ static int md_open(struct block_device *bdev, fmode_t mode) atomic_inc(&mddev->openers); mutex_unlock(&mddev->open_mutex); - bdev_check_media_change(bdev); + disk_check_media_change(disk); return 0; out_unlock: @@ -7799,7 +7809,7 @@ out: return err; } -static void md_release(struct gendisk *disk, fmode_t mode) +static void md_release(struct gendisk *disk) { struct mddev *mddev = disk->private_data; @@ -7886,13 +7896,29 @@ static int md_thread(void *arg) return 0; } -void md_wakeup_thread(struct md_thread *thread) +static void md_wakeup_thread_directly(struct md_thread __rcu *thread) { - if (thread) { - pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); - set_bit(THREAD_WAKEUP, &thread->flags); - wake_up(&thread->wqueue); + struct md_thread *t; + + rcu_read_lock(); + t = rcu_dereference(thread); + if (t) + wake_up_process(t->tsk); + rcu_read_unlock(); +} + +void md_wakeup_thread(struct md_thread __rcu *thread) +{ + struct md_thread *t; + + rcu_read_lock(); + t = rcu_dereference(thread); + if (t) { + pr_debug("md: waking up MD thread %s.\n", t->tsk->comm); + set_bit(THREAD_WAKEUP, &t->flags); + 
wake_up(&t->wqueue); } + rcu_read_unlock(); } EXPORT_SYMBOL(md_wakeup_thread); @@ -7922,22 +7948,15 @@ struct md_thread *md_register_thread(void (*run) (struct md_thread *), } EXPORT_SYMBOL(md_register_thread); -void md_unregister_thread(struct md_thread **threadp) +void md_unregister_thread(struct md_thread __rcu **threadp) { - struct md_thread *thread; + struct md_thread *thread = rcu_dereference_protected(*threadp, true); - /* - * Locking ensures that mddev_unlock does not wake_up a - * non-existent thread - */ - spin_lock(&pers_lock); - thread = *threadp; - if (!thread) { - spin_unlock(&pers_lock); + if (!thread) return; - } - *threadp = NULL; - spin_unlock(&pers_lock); + + rcu_assign_pointer(*threadp, NULL); + synchronize_rcu(); pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); kthread_stop(thread->tsk); @@ -9100,6 +9119,7 @@ void md_do_sync(struct md_thread *thread) spin_unlock(&mddev->lock); wake_up(&resync_wait); + wake_up(&mddev->sb_wait); md_wakeup_thread(mddev->thread); return; } @@ -9202,9 +9222,8 @@ static void md_start_sync(struct work_struct *ws) { struct mddev *mddev = container_of(ws, struct mddev, del_work); - mddev->sync_thread = md_register_thread(md_do_sync, - mddev, - "resync"); + rcu_assign_pointer(mddev->sync_thread, + md_register_thread(md_do_sync, mddev, "resync")); if (!mddev->sync_thread) { pr_warn("%s: could not start resync thread...\n", mdname(mddev)); @@ -9619,9 +9638,10 @@ static int __init md_init(void) if (!md_misc_wq) goto err_misc_wq; - md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0); - if (!md_rdev_misc_wq) - goto err_rdev_misc_wq; + md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND, + 0); + if (!md_bitmap_wq) + goto err_bitmap_wq; ret = __register_blkdev(MD_MAJOR, "md", md_probe); if (ret < 0) @@ -9641,8 +9661,8 @@ static int __init md_init(void) err_mdp: unregister_blkdev(MD_MAJOR, "md"); err_md: - destroy_workqueue(md_rdev_misc_wq); -err_rdev_misc_wq: + destroy_workqueue(md_bitmap_wq); +err_bitmap_wq: destroy_workqueue(md_misc_wq); err_misc_wq: destroy_workqueue(md_wq); @@ -9938,8 +9958,8 @@ static __exit void md_exit(void) } spin_unlock(&all_mddevs_lock); - destroy_workqueue(md_rdev_misc_wq); destroy_workqueue(md_misc_wq); + destroy_workqueue(md_bitmap_wq); destroy_workqueue(md_wq); } diff --git a/drivers/md/md.h b/drivers/md/md.h index fd8f260ed5f8..bfd2306bc750 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -122,8 +122,6 @@ struct md_rdev { struct serial_in_rdev *serial; /* used for raid1 io serialization */ - struct work_struct del_work; /* used for delayed sysfs removal */ - struct kernfs_node *sysfs_state; /* handle for 'state' * sysfs entry */ /* handle for 'unacknowledged_bad_blocks' sysfs dentry */ @@ -367,8 +365,8 @@ struct mddev { int new_chunk_sectors; int reshape_backwards; - struct md_thread *thread; /* management thread */ - struct md_thread *sync_thread; /* doing resync or reconstruct */ + struct md_thread __rcu *thread; /* management thread */ + struct md_thread __rcu *sync_thread; /* doing resync or reconstruct */ /* 'last_sync_action' is initialized to "none". It is set when a * sync operation (i.e "data-check", "requested-resync", "resync", @@ -531,6 +529,14 @@ struct mddev { unsigned int good_device_nr; /* good device num within cluster raid */ unsigned int noio_flag; /* for memalloc scope API */ + /* + * Temporarily store rdev that will be finally removed when + * reconfig_mutex is unlocked. 
+ */ + struct list_head deleting; + /* Protect the deleting list */ + struct mutex delete_mutex; + bool has_superblocks:1; bool fail_last_dev:1; bool serialize_policy:1; @@ -555,6 +561,23 @@ enum recovery_flags { MD_RESYNCING_REMOTE, /* remote node is running resync thread */ }; +enum md_ro_state { + MD_RDWR, + MD_RDONLY, + MD_AUTO_READ, + MD_MAX_STATE +}; + +static inline bool md_is_rdwr(struct mddev *mddev) +{ + return (mddev->ro == MD_RDWR); +} + +static inline bool is_md_suspended(struct mddev *mddev) +{ + return percpu_ref_is_dying(&mddev->active_io); +} + static inline int __must_check mddev_lock(struct mddev *mddev) { return mutex_lock_interruptible(&mddev->reconfig_mutex); @@ -614,6 +637,7 @@ struct md_personality int (*start_reshape) (struct mddev *mddev); void (*finish_reshape) (struct mddev *mddev); void (*update_reshape_pos) (struct mddev *mddev); + void (*prepare_suspend) (struct mddev *mddev); /* quiesce suspends or resumes internal processing. * 1 - stop new actions and wait for action io to complete * 0 - return to normal behaviour @@ -734,8 +758,8 @@ extern struct md_thread *md_register_thread( void (*run)(struct md_thread *thread), struct mddev *mddev, const char *name); -extern void md_unregister_thread(struct md_thread **threadp); -extern void md_wakeup_thread(struct md_thread *thread); +extern void md_unregister_thread(struct md_thread __rcu **threadp); +extern void md_wakeup_thread(struct md_thread __rcu *thread); extern void md_check_recovery(struct mddev *mddev); extern void md_reap_sync_thread(struct mddev *mddev); extern int mddev_init_writes_pending(struct mddev *mddev); @@ -828,6 +852,7 @@ struct mdu_array_info_s; struct mdu_disk_info_s; extern int mdp_major; +extern struct workqueue_struct *md_bitmap_wq; void md_autostart_arrays(int part); int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info); int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info); diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index e61f6cad4e08..169ebe296f2d 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -21,6 +21,7 @@ #define IO_MADE_GOOD ((struct bio *)2) #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) +#define MAX_PLUG_BIO 32 /* for managing resync I/O pages */ struct resync_pages { @@ -31,6 +32,7 @@ struct resync_pages { struct raid1_plug_cb { struct blk_plug_cb cb; struct bio_list pending; + unsigned int count; }; static void rbio_pool_free(void *rbio, void *data) @@ -101,11 +103,73 @@ static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp, struct page *page = resync_fetch_page(rp, idx); int len = min_t(int, size, PAGE_SIZE); - /* - * won't fail because the vec table is big - * enough to hold all these pages - */ - bio_add_page(bio, page, len, 0); + if (WARN_ON(!bio_add_page(bio, page, len, 0))) { + bio->bi_status = BLK_STS_RESOURCE; + bio_endio(bio); + return; + } + size -= len; } while (idx++ < RESYNC_PAGES && size > 0); } + + +static inline void raid1_submit_write(struct bio *bio) +{ + struct md_rdev *rdev = (struct md_rdev *)bio->bi_bdev; + + bio->bi_next = NULL; + bio_set_dev(bio, rdev->bdev); + if (test_bit(Faulty, &rdev->flags)) + bio_io_error(bio); + else if (unlikely(bio_op(bio) == REQ_OP_DISCARD && + !bdev_max_discard_sectors(bio->bi_bdev))) + /* Just ignore it */ + bio_endio(bio); + else + submit_bio_noacct(bio); +} + +static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio, + blk_plug_cb_fn unplug, int copies) +{ + struct raid1_plug_cb *plug = NULL; + struct 
blk_plug_cb *cb; + + /* + * If bitmap is not enabled, it's safe to submit the io directly, and + * this gives optimal performance. + */ + if (!md_bitmap_enabled(mddev->bitmap)) { + raid1_submit_write(bio); + return true; + } + + cb = blk_check_plugged(unplug, mddev, sizeof(*plug)); + if (!cb) + return false; + + plug = container_of(cb, struct raid1_plug_cb, cb); + bio_list_add(&plug->pending, bio); + if (++plug->count / MAX_PLUG_BIO >= copies) { + list_del(&cb->list); + cb->callback(cb, false); + } + + return true; +} + +/* + * current->bio_list will be set under submit_bio() context; in this case bitmap + * io will be added to the list and will wait for the current io submission to + * finish, while the current io submission must wait for the bitmap io to be + * done. To avoid such a deadlock, submit bitmap io asynchronously. + */ +static inline void raid1_prepare_flush_writes(struct bitmap *bitmap) +{ + if (current->bio_list) + md_bitmap_unplug_async(bitmap); + else + md_bitmap_unplug(bitmap); +} diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 68a9e2d9985b..dd25832eb045 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -794,22 +794,13 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect static void flush_bio_list(struct r1conf *conf, struct bio *bio) { /* flush any pending bitmap writes to disk before proceeding w/ I/O */ - md_bitmap_unplug(conf->mddev->bitmap); + raid1_prepare_flush_writes(conf->mddev->bitmap); wake_up(&conf->wait_barrier); while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; - struct md_rdev *rdev = (void *)bio->bi_bdev; - bio->bi_next = NULL; - bio_set_dev(bio, rdev->bdev); - if (test_bit(Faulty, &rdev->flags)) { - bio_io_error(bio); - } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !bdev_max_discard_sectors(bio->bi_bdev))) - /* Just ignore it */ - bio_endio(bio); - else - submit_bio_noacct(bio); + + raid1_submit_write(bio); bio = next; cond_resched(); } @@ -1147,7 +1138,10 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, if (unlikely(!page)) goto free_pages; - bio_add_page(behind_bio, page, len, 0); + if (!bio_add_page(behind_bio, page, len, 0)) { + put_page(page); + goto free_pages; + } size -= len; i++; @@ -1175,7 +1169,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) struct r1conf *conf = mddev->private; struct bio *bio; - if (from_schedule || current->bio_list) { + if (from_schedule) { spin_lock_irq(&conf->device_lock); bio_list_merge(&conf->pending_bio_list, &plug->pending); spin_unlock_irq(&conf->device_lock); @@ -1343,8 +1337,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, struct bitmap *bitmap = mddev->bitmap; unsigned long flags; struct md_rdev *blocked_rdev; - struct blk_plug_cb *cb; - struct raid1_plug_cb *plug = NULL; int first_clone; int max_sectors; bool write_behind = false; @@ -1573,15 +1565,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, r1_bio->sector); /* flush_pending_writes() needs access to the rdev so...*/ mbio->bi_bdev = (void *)rdev; - - cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); - if (cb) - plug = container_of(cb, struct raid1_plug_cb, cb); - else - plug = NULL; - if (plug) { - bio_list_add(&plug->pending, mbio); - } else { + if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug, disks)) { spin_lock_irqsave(&conf->device_lock, flags); bio_list_add(&conf->pending_bio_list, mbio); spin_unlock_irqrestore(&conf->device_lock, flags); @@ -2914,7 +2898,7 @@ static
sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, * won't fail because the vec table is big * enough to hold all these pages */ - bio_add_page(bio, page, len, 0); + __bio_add_page(bio, page, len, 0); } } nr_sectors += len>>9; @@ -3084,7 +3068,8 @@ static struct r1conf *setup_conf(struct mddev *mddev) } err = -ENOMEM; - conf->thread = md_register_thread(raid1d, mddev, "raid1"); + rcu_assign_pointer(conf->thread, + md_register_thread(raid1d, mddev, "raid1")); if (!conf->thread) goto abort; @@ -3177,8 +3162,8 @@ static int raid1_run(struct mddev *mddev) /* * Ok, everything is just fine now */ - mddev->thread = conf->thread; - conf->thread = NULL; + rcu_assign_pointer(mddev->thread, conf->thread); + rcu_assign_pointer(conf->thread, NULL); mddev->private = conf; set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index ebb6788820e7..468f189da7a0 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -130,7 +130,7 @@ struct r1conf { /* When taking over an array from a different personality, we store * the new thread here until we fully activate the array. */ - struct md_thread *thread; + struct md_thread __rcu *thread; /* Keep track of cluster resync window to send to other * nodes. diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 4fcfcb350d2b..d0de8c9fb3cf 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -779,8 +779,16 @@ static struct md_rdev *read_balance(struct r10conf *conf, disk = r10_bio->devs[slot].devnum; rdev = rcu_dereference(conf->mirrors[disk].replacement); if (rdev == NULL || test_bit(Faulty, &rdev->flags) || - r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) + r10_bio->devs[slot].addr + sectors > + rdev->recovery_offset) { + /* + * Read replacement first to prevent reading both rdev + * and replacement as NULL while the replacement + * replaces the rdev. + */ + smp_mb(); rdev = rcu_dereference(conf->mirrors[disk].rdev); + } if (rdev == NULL || test_bit(Faulty, &rdev->flags)) continue; @@ -902,25 +910,15 @@ static void flush_pending_writes(struct r10conf *conf) __set_current_state(TASK_RUNNING); blk_start_plug(&plug); - /* flush any pending bitmap writes to disk * before proceeding w/ I/O */ - md_bitmap_unplug(conf->mddev->bitmap); + raid1_prepare_flush_writes(conf->mddev->bitmap); wake_up(&conf->wait_barrier); while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; - struct md_rdev *rdev = (void*)bio->bi_bdev; - bio->bi_next = NULL; - bio_set_dev(bio, rdev->bdev); - if (test_bit(Faulty, &rdev->flags)) { - bio_io_error(bio); - } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !bdev_max_discard_sectors(bio->bi_bdev))) - /* Just ignore it */ - bio_endio(bio); - else - submit_bio_noacct(bio); + + raid1_submit_write(bio); bio = next; + cond_resched(); } blk_finish_plug(&plug); } else @@ -982,6 +980,7 @@ static void lower_barrier(struct r10conf *conf) static bool stop_waiting_barrier(struct r10conf *conf) { struct bio_list *bio_list = current->bio_list; + struct md_thread *thread; /* barrier is dropped */ if (!conf->barrier) return false; @@ -997,12 +996,14 @@ static bool stop_waiting_barrier(struct r10conf *conf) (!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1]))) return true; + /* daemon thread must exist while handling io */ + thread = rcu_dereference_protected(conf->mddev->thread, true); /* * move on if io is issued from raid10d(), nr_pending is not released * from the original io (see handle_read_error()). All raise_barrier calls are * blocked until this io is done.
*/ - if (conf->mddev->thread->tsk == current) { + if (thread->tsk == current) { WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0); return true; } @@ -1113,7 +1114,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) struct r10conf *conf = mddev->private; struct bio *bio; - if (from_schedule || current->bio_list) { + if (from_schedule) { spin_lock_irq(&conf->device_lock); bio_list_merge(&conf->pending_bio_list, &plug->pending); spin_unlock_irq(&conf->device_lock); @@ -1125,23 +1126,15 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) /* we aren't scheduling, so we can do the write-out directly. */ bio = bio_list_get(&plug->pending); - md_bitmap_unplug(mddev->bitmap); + raid1_prepare_flush_writes(mddev->bitmap); wake_up(&conf->wait_barrier); while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; - struct md_rdev *rdev = (void*)bio->bi_bdev; - bio->bi_next = NULL; - bio_set_dev(bio, rdev->bdev); - if (test_bit(Faulty, &rdev->flags)) { - bio_io_error(bio); - } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !bdev_max_discard_sectors(bio->bi_bdev))) - /* Just ignore it */ - bio_endio(bio); - else - submit_bio_noacct(bio); + + raid1_submit_write(bio); bio = next; + cond_resched(); } kfree(plug); } @@ -1282,8 +1275,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC; const blk_opf_t do_fua = bio->bi_opf & REQ_FUA; unsigned long flags; - struct blk_plug_cb *cb; - struct raid1_plug_cb *plug = NULL; struct r10conf *conf = mddev->private; struct md_rdev *rdev; int devnum = r10_bio->devs[n_copy].devnum; @@ -1323,14 +1314,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, atomic_inc(&r10_bio->remaining); - cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); - if (cb) - plug = container_of(cb, struct raid1_plug_cb, cb); - else - plug = NULL; - if (plug) { - bio_list_add(&plug->pending, mbio); - } else { + if (!raid1_add_bio_to_plug(mddev, mbio, raid10_unplug, conf->copies)) { spin_lock_irqsave(&conf->device_lock, flags); bio_list_add(&conf->pending_bio_list, mbio); spin_unlock_irqrestore(&conf->device_lock, flags); @@ -1479,9 +1463,15 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, for (i = 0; i < conf->copies; i++) { int d = r10_bio->devs[i].devnum; - struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); - struct md_rdev *rrdev = rcu_dereference( - conf->mirrors[d].replacement); + struct md_rdev *rdev, *rrdev; + + rrdev = rcu_dereference(conf->mirrors[d].replacement); + /* + * Read replacement first to prevent reading both rdev and + * replacement as NULL while the replacement replaces the rdev.
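+ * This pairs with the smp_mb() in raid10_remove_disk(): the writer
+ * promotes the replacement with p->rdev = p->replacement, an smp_mb(),
+ * then p->replacement = NULL, so a reader that loads replacement first
+ * may see both pointers equal, but never both NULL.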
+ */ + smp_mb(); + rdev = rcu_dereference(conf->mirrors[d].rdev); if (rdev == rrdev) rrdev = NULL; if (rdev && (test_bit(Faulty, &rdev->flags))) @@ -2148,9 +2138,10 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) { struct r10conf *conf = mddev->private; int err = -EEXIST; - int mirror; + int mirror, repl_slot = -1; int first = 0; int last = conf->geo.raid_disks - 1; + struct raid10_info *p; if (mddev->recovery_cp < MaxSector) /* only hot-add to in-sync arrays, as recovery is @@ -2173,23 +2164,14 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) else mirror = first; for ( ; mirror <= last ; mirror++) { - struct raid10_info *p = &conf->mirrors[mirror]; + p = &conf->mirrors[mirror]; if (p->recovery_disabled == mddev->recovery_disabled) continue; if (p->rdev) { - if (!test_bit(WantReplacement, &p->rdev->flags) || - p->replacement != NULL) - continue; - clear_bit(In_sync, &rdev->flags); - set_bit(Replacement, &rdev->flags); - rdev->raid_disk = mirror; - err = 0; - if (mddev->gendisk) - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); - conf->fullsync = 1; - rcu_assign_pointer(p->replacement, rdev); - break; + if (test_bit(WantReplacement, &p->rdev->flags) && + p->replacement == NULL && repl_slot < 0) + repl_slot = mirror; + continue; } if (mddev->gendisk) @@ -2206,6 +2188,19 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) break; } + if (err && repl_slot >= 0) { + p = &conf->mirrors[repl_slot]; + clear_bit(In_sync, &rdev->flags); + set_bit(Replacement, &rdev->flags); + rdev->raid_disk = repl_slot; + err = 0; + if (mddev->gendisk) + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); + conf->fullsync = 1; + rcu_assign_pointer(p->replacement, rdev); + } + print_conf(conf); return err; } @@ -3303,6 +3298,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, int chunks_skipped = 0; sector_t chunk_mask = conf->geo.chunk_mask; int page_idx = 0; + int error_disk = -1; /* * Allow skipping a full rebuild for incremental assembly @@ -3386,8 +3382,21 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, return reshape_request(mddev, sector_nr, skipped); if (chunks_skipped >= conf->geo.raid_disks) { - /* if there has been nothing to do on any drive, - * then there is nothing to do at all.. + pr_err("md/raid10:%s: %s fails\n", mdname(mddev), + test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? "resync" : "recovery"); + if (error_disk >= 0 && + !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { + /* + * recovery failed; set mirrors.recovery_disabled so the + * device won't be added there again. + */ + conf->mirrors[error_disk].recovery_disabled = + mddev->recovery_disabled; + return 0; + } + /* + * if there has been nothing to do on any drive, + * then there is nothing to do at all.
*/ *skipped = 1; return (max_sector - sector_nr) + sectors_skipped; @@ -3437,8 +3446,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, sector_t sect; int must_sync; int any_working; - int need_recover = 0; - int need_replace = 0; struct raid10_info *mirror = &conf->mirrors[i]; struct md_rdev *mrdev, *mreplace; @@ -3446,15 +3453,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, mrdev = rcu_dereference(mirror->rdev); mreplace = rcu_dereference(mirror->replacement); - if (mrdev != NULL && - !test_bit(Faulty, &mrdev->flags) && - !test_bit(In_sync, &mrdev->flags)) - need_recover = 1; - if (mreplace != NULL && - !test_bit(Faulty, &mreplace->flags)) - need_replace = 1; + if (mrdev && (test_bit(Faulty, &mrdev->flags) || + test_bit(In_sync, &mrdev->flags))) + mrdev = NULL; + if (mreplace && test_bit(Faulty, &mreplace->flags)) + mreplace = NULL; - if (!need_recover && !need_replace) { + if (!mrdev && !mreplace) { rcu_read_unlock(); continue; } @@ -3470,8 +3475,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, rcu_read_unlock(); continue; } - if (mreplace && test_bit(Faulty, &mreplace->flags)) - mreplace = NULL; /* Unless we are doing a full sync, or a replacement * we only need to recover the block if it is set in * the bitmap @@ -3490,7 +3493,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, rcu_read_unlock(); continue; } - atomic_inc(&mrdev->nr_pending); + if (mrdev) + atomic_inc(&mrdev->nr_pending); if (mreplace) atomic_inc(&mreplace->nr_pending); rcu_read_unlock(); @@ -3577,7 +3581,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, r10_bio->devs[1].devnum = i; r10_bio->devs[1].addr = to_addr; - if (need_recover) { + if (mrdev) { bio = r10_bio->devs[1].bio; bio->bi_next = biolist; biolist = bio; @@ -3594,11 +3598,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bio = r10_bio->devs[1].repl_bio; if (bio) bio->bi_end_io = NULL; - /* Note: if need_replace, then bio + /* Note: if replace is not NULL, then bio * cannot be NULL as r10buf_pool_alloc will * have allocated it. 
*/ - if (!need_replace) + if (!mreplace) break; bio->bi_next = biolist; biolist = bio; @@ -3622,7 +3626,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, for (k = 0; k < conf->copies; k++) if (r10_bio->devs[k].devnum == i) break; - if (!test_bit(In_sync, + if (mrdev && !test_bit(In_sync, &mrdev->flags) && !rdev_set_badblocks( mrdev, @@ -3643,17 +3647,21 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, mdname(mddev)); mirror->recovery_disabled = mddev->recovery_disabled; + } else { + error_disk = i; } put_buf(r10_bio); if (rb2) atomic_dec(&rb2->remaining); r10_bio = rb2; - rdev_dec_pending(mrdev, mddev); + if (mrdev) + rdev_dec_pending(mrdev, mddev); if (mreplace) rdev_dec_pending(mreplace, mddev); break; } - rdev_dec_pending(mrdev, mddev); + if (mrdev) + rdev_dec_pending(mrdev, mddev); if (mreplace) rdev_dec_pending(mreplace, mddev); if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { @@ -3819,11 +3827,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, for (bio= biolist ; bio ; bio=bio->bi_next) { struct resync_pages *rp = get_resync_pages(bio); page = resync_fetch_page(rp, page_idx); - /* - * won't fail because the vec table is big enough - * to hold all these pages - */ - bio_add_page(bio, page, len, 0); + if (WARN_ON(!bio_add_page(bio, page, len, 0))) { + bio->bi_status = BLK_STS_RESOURCE; + bio_endio(bio); + goto giveup; + } } nr_sectors += len>>9; sector_nr += len>>9; @@ -4107,7 +4115,8 @@ static struct r10conf *setup_conf(struct mddev *mddev) atomic_set(&conf->nr_pending, 0); err = -ENOMEM; - conf->thread = md_register_thread(raid10d, mddev, "raid10"); + rcu_assign_pointer(conf->thread, + md_register_thread(raid10d, mddev, "raid10")); if (!conf->thread) goto out; @@ -4152,8 +4161,8 @@ static int raid10_run(struct mddev *mddev) if (!conf) goto out; - mddev->thread = conf->thread; - conf->thread = NULL; + rcu_assign_pointer(mddev->thread, conf->thread); + rcu_assign_pointer(conf->thread, NULL); if (mddev_is_clustered(conf->mddev)) { int fc, fo; @@ -4296,8 +4305,8 @@ static int raid10_run(struct mddev *mddev) clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); - mddev->sync_thread = md_register_thread(md_do_sync, mddev, - "reshape"); + rcu_assign_pointer(mddev->sync_thread, + md_register_thread(md_do_sync, mddev, "reshape")); if (!mddev->sync_thread) goto out_free_conf; } @@ -4698,8 +4707,8 @@ out: set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); - mddev->sync_thread = md_register_thread(md_do_sync, mddev, - "reshape"); + rcu_assign_pointer(mddev->sync_thread, + md_register_thread(md_do_sync, mddev, "reshape")); if (!mddev->sync_thread) { ret = -EAGAIN; goto abort; @@ -4997,11 +5006,11 @@ read_more: if (len > PAGE_SIZE) len = PAGE_SIZE; for (bio = blist; bio ; bio = bio->bi_next) { - /* - * won't fail because the vec table is big enough - * to hold all these pages - */ - bio_add_page(bio, page, len, 0); + if (WARN_ON(!bio_add_page(bio, page, len, 0))) { + bio->bi_status = BLK_STS_RESOURCE; + bio_endio(bio); + return sectors_done; + } } sector_nr += len >> 9; nr_sectors += len >> 9; diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 8c072ce0bc54..63e48b11b552 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -100,7 +100,7 @@ struct r10conf { /* When taking over an array from a different personality, we store * the new thread here until we 
fully activate the array. */ - struct md_thread *thread; + struct md_thread __rcu *thread; /* * Keep track of cluster resync window to send to other nodes. diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 46182b955aef..47ba7d9e81e1 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -120,7 +120,7 @@ struct r5l_log { struct bio_set bs; mempool_t meta_pool; - struct md_thread *reclaim_thread; + struct md_thread __rcu *reclaim_thread; unsigned long reclaim_target; /* number of space that need to be * reclaimed. if it's 0, reclaim spaces * used by io_units which are in @@ -792,7 +792,7 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) io->current_bio = r5l_bio_alloc(log); io->current_bio->bi_end_io = r5l_log_endio; io->current_bio->bi_private = io; - bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0); + __bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0); r5_reserve_log_entry(log, io); @@ -1576,17 +1576,18 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space) void r5l_quiesce(struct r5l_log *log, int quiesce) { - struct mddev *mddev; + struct mddev *mddev = log->rdev->mddev; + struct md_thread *thread = rcu_dereference_protected( + log->reclaim_thread, lockdep_is_held(&mddev->reconfig_mutex)); if (quiesce) { /* make sure r5l_write_super_and_discard_space exits */ - mddev = log->rdev->mddev; wake_up(&mddev->sb_wait); - kthread_park(log->reclaim_thread->tsk); + kthread_park(thread->tsk); r5l_wake_reclaim(log, MaxSector); r5l_do_reclaim(log); } else - kthread_unpark(log->reclaim_thread->tsk); + kthread_unpark(thread->tsk); } bool r5l_log_disk_error(struct r5conf *conf) @@ -3063,6 +3064,7 @@ void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev) int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) { struct r5l_log *log; + struct md_thread *thread; int ret; pr_debug("md/raid:%s: using device %pg as journal\n", @@ -3121,11 +3123,13 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) spin_lock_init(&log->tree_lock); INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN); - log->reclaim_thread = md_register_thread(r5l_reclaim_thread, - log->rdev->mddev, "reclaim"); - if (!log->reclaim_thread) + thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev, + "reclaim"); + if (!thread) goto reclaim_thread; - log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL; + + thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL; + rcu_assign_pointer(log->reclaim_thread, thread); init_waitqueue_head(&log->iounit_wait); diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index e495939bb3e0..eaea57aee602 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -465,7 +465,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) bio->bi_end_io = ppl_log_endio; bio->bi_iter.bi_sector = log->next_io_sector; - bio_add_page(bio, io->header_page, PAGE_SIZE, 0); + __bio_add_page(bio, io->header_page, PAGE_SIZE, 0); pr_debug("%s: log->current_io_sector: %llu\n", __func__, (unsigned long long)log->next_io_sector); @@ -496,7 +496,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) prev->bi_opf, GFP_NOIO, &ppl_conf->bs); bio->bi_iter.bi_sector = bio_end_sector(prev); - bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); + __bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); bio_chain(bio, prev); ppl_submit_iounit_bio(io, prev); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9ea285fbc4a6..85b3004594e0 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2433,7 
+2433,7 @@ static int grow_stripes(struct r5conf *conf, int num) conf->active_name = 0; sc = kmem_cache_create(conf->cache_name[conf->active_name], - sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), + struct_size_t(struct stripe_head, dev, devs), 0, 0, NULL); if (!sc) return 1; @@ -2559,7 +2559,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) /* Step 1 */ sc = kmem_cache_create(conf->cache_name[1-conf->active_name], - sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), + struct_size_t(struct stripe_head, dev, newsize), 0, 0, NULL); if (!sc) return -ENOMEM; @@ -5966,6 +5966,19 @@ out: return ret; } +static bool reshape_inprogress(struct mddev *mddev) +{ + return test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && + test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && + !test_bit(MD_RECOVERY_DONE, &mddev->recovery) && + !test_bit(MD_RECOVERY_INTR, &mddev->recovery); +} + +static bool reshape_disabled(struct mddev *mddev) +{ + return is_md_suspended(mddev) || !md_is_rdwr(mddev); +} + static enum stripe_result make_stripe_request(struct mddev *mddev, struct r5conf *conf, struct stripe_request_ctx *ctx, sector_t logical_sector, struct bio *bi) @@ -5997,7 +6010,8 @@ static enum stripe_result make_stripe_request(struct mddev *mddev, if (ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) { spin_unlock_irq(&conf->device_lock); - return STRIPE_SCHEDULE_AND_RETRY; + ret = STRIPE_SCHEDULE_AND_RETRY; + goto out; } } spin_unlock_irq(&conf->device_lock); @@ -6076,6 +6090,15 @@ static enum stripe_result make_stripe_request(struct mddev *mddev, out_release: raid5_release_stripe(sh); +out: + if (ret == STRIPE_SCHEDULE_AND_RETRY && !reshape_inprogress(mddev) && + reshape_disabled(mddev)) { + bi->bi_status = BLK_STS_IOERR; + ret = STRIPE_FAIL; + pr_err("md/raid456:%s: io failed across reshape position while reshape can't make progress.\n", + mdname(mddev)); + } + return ret; } @@ -7708,7 +7731,8 @@ static struct r5conf *setup_conf(struct mddev *mddev) } sprintf(pers_name, "raid%d", mddev->new_level); - conf->thread = md_register_thread(raid5d, mddev, pers_name); + rcu_assign_pointer(conf->thread, + md_register_thread(raid5d, mddev, pers_name)); if (!conf->thread) { pr_warn("md/raid:%s: couldn't allocate thread.\n", mdname(mddev)); @@ -7931,8 +7955,8 @@ static int raid5_run(struct mddev *mddev) } conf->min_offset_diff = min_offset_diff; - mddev->thread = conf->thread; - conf->thread = NULL; + rcu_assign_pointer(mddev->thread, conf->thread); + rcu_assign_pointer(conf->thread, NULL); mddev->private = conf; for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; @@ -8029,8 +8053,8 @@ static int raid5_run(struct mddev *mddev) clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); - mddev->sync_thread = md_register_thread(md_do_sync, mddev, - "reshape"); + rcu_assign_pointer(mddev->sync_thread, + md_register_thread(md_do_sync, mddev, "reshape")); if (!mddev->sync_thread) goto abort; } @@ -8377,6 +8401,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) p = conf->disks + disk; tmp = rdev_mdlock_deref(mddev, p->rdev); if (test_bit(WantReplacement, &tmp->flags) && + mddev->reshape_position == MaxSector && p->replacement == NULL) { clear_bit(In_sync, &rdev->flags); set_bit(Replacement, &rdev->flags); @@ -8500,6 +8525,7 @@ static int raid5_start_reshape(struct mddev *mddev) struct r5conf *conf = mddev->private; struct md_rdev *rdev; int spares = 0; + int i; 
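On the struct_size_t() conversions above: the helper computes the allocation size of a struct with a trailing flexible-array member from the bare type name, with overflow checking, replacing the old sizeof(struct ...) + (n - 1) * sizeof(elem) arithmetic that the one-element dev[1] array forced. A minimal sketch of the pattern; the my_* names are hypothetical:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_dev {
	u64 sector;
};

struct my_head {
	int nr;
	struct my_dev dev[];	/* flexible array member, sized at allocation */
};

static struct my_head *my_head_alloc(int devs, gfp_t gfp)
{
	/*
	 * struct_size_t(T, member, n) == sizeof(struct T) + n * sizeof(member[0]),
	 * saturating to SIZE_MAX on overflow so the allocation fails cleanly.
	 */
	struct my_head *h = kzalloc(struct_size_t(struct my_head, dev, devs), gfp);

	if (h)
		h->nr = devs;
	return h;
}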
unsigned long flags; if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) @@ -8511,6 +8537,13 @@ static int raid5_start_reshape(struct mddev *mddev) if (has_failed(conf)) return -EINVAL; + /* raid5 can't handle concurrent reshape and recovery */ + if (mddev->recovery_cp < MaxSector) + return -EBUSY; + for (i = 0; i < conf->raid_disks; i++) + if (rdev_mdlock_deref(mddev, conf->disks[i].replacement)) + return -EBUSY; + rdev_for_each(rdev, mddev) { if (!test_bit(In_sync, &rdev->flags) && !test_bit(Faulty, &rdev->flags)) @@ -8607,8 +8640,8 @@ static int raid5_start_reshape(struct mddev *mddev) clear_bit(MD_RECOVERY_DONE, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); - mddev->sync_thread = md_register_thread(md_do_sync, mddev, - "reshape"); + rcu_assign_pointer(mddev->sync_thread, + md_register_thread(md_do_sync, mddev, "reshape")); if (!mddev->sync_thread) { mddev->recovery = 0; spin_lock_irq(&conf->device_lock); @@ -9043,6 +9076,22 @@ static int raid5_start(struct mddev *mddev) return r5l_start(conf->log); } +static void raid5_prepare_suspend(struct mddev *mddev) +{ + struct r5conf *conf = mddev->private; + + wait_event(mddev->sb_wait, !reshape_inprogress(mddev) || + percpu_ref_is_zero(&mddev->active_io)); + if (percpu_ref_is_zero(&mddev->active_io)) + return; + + /* + * Reshape is not in progress and the array is suspended; io that is + * waiting for reshape can never be done. + */ + wake_up(&conf->wait_for_overlap); +} + static struct md_personality raid6_personality = { .name = "raid6", @@ -9063,6 +9112,7 @@ static struct md_personality raid6_personality = .check_reshape = raid6_check_reshape, .start_reshape = raid5_start_reshape, .finish_reshape = raid5_finish_reshape, + .prepare_suspend = raid5_prepare_suspend, .quiesce = raid5_quiesce, .takeover = raid6_takeover, .change_consistency_policy = raid5_change_consistency_policy, @@ -9087,6 +9137,7 @@ static struct md_personality raid5_personality = .check_reshape = raid5_check_reshape, .start_reshape = raid5_start_reshape, .finish_reshape = raid5_finish_reshape, + .prepare_suspend = raid5_prepare_suspend, .quiesce = raid5_quiesce, .takeover = raid5_takeover, .change_consistency_policy = raid5_change_consistency_policy, @@ -9112,6 +9163,7 @@ static struct md_personality raid4_personality = .check_reshape = raid5_check_reshape, .start_reshape = raid5_start_reshape, .finish_reshape = raid5_finish_reshape, + .prepare_suspend = raid5_prepare_suspend, .quiesce = raid5_quiesce, .takeover = raid4_takeover, .change_consistency_policy = raid5_change_consistency_policy, diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index e873938a6125..97a795979a35 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -268,7 +268,7 @@ struct stripe_head { unsigned long flags; u32 log_checksum; unsigned short write_hint; - } dev[1]; /* allocated with extra space depending of RAID geometry */ + } dev[]; /* allocated depending on RAID geometry ("disks" member) */ }; /* stripe_head_state - collects and tracks the dynamic state of a stripe_head @@ -679,7 +679,7 @@ struct r5conf { /* When taking over an array from a different personality, we store * the new thread here until we fully activate the array.
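 * (The __rcu annotation added below means the pointer may only be
 * read via rcu_dereference() and written via rcu_assign_pointer();
 * sparse warns on plain accesses.)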
*/ - struct md_thread *thread; + struct md_thread __rcu *thread; struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; struct r5worker_group *worker_groups; int group_cnt; diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index bc6950a5740f..9293b058ab99 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -817,26 +817,15 @@ static void dvb_frontend_stop(struct dvb_frontend *fe) dev_dbg(fe->dvb->device, "%s:\n", __func__); - mutex_lock(&fe->remove_mutex); - if (fe->exit != DVB_FE_DEVICE_REMOVED) fe->exit = DVB_FE_NORMAL_EXIT; mb(); - if (!fepriv->thread) { - mutex_unlock(&fe->remove_mutex); + if (!fepriv->thread) return; - } kthread_stop(fepriv->thread); - mutex_unlock(&fe->remove_mutex); - - if (fepriv->dvbdev->users < -1) { - wait_event(fepriv->dvbdev->wait_queue, - fepriv->dvbdev->users == -1); - } - sema_init(&fepriv->sem, 1); fepriv->state = FESTATE_IDLE; @@ -2780,13 +2769,9 @@ static int dvb_frontend_open(struct inode *inode, struct file *file) struct dvb_adapter *adapter = fe->dvb; int ret; - mutex_lock(&fe->remove_mutex); - dev_dbg(fe->dvb->device, "%s:\n", __func__); - if (fe->exit == DVB_FE_DEVICE_REMOVED) { - ret = -ENODEV; - goto err_remove_mutex; - } + if (fe->exit == DVB_FE_DEVICE_REMOVED) + return -ENODEV; if (adapter->mfe_shared == 2) { mutex_lock(&adapter->mfe_lock); @@ -2794,8 +2779,7 @@ static int dvb_frontend_open(struct inode *inode, struct file *file) if (adapter->mfe_dvbdev && !adapter->mfe_dvbdev->writers) { mutex_unlock(&adapter->mfe_lock); - ret = -EBUSY; - goto err_remove_mutex; + return -EBUSY; } adapter->mfe_dvbdev = dvbdev; } @@ -2818,10 +2802,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file) while (mferetry-- && (mfedev->users != -1 || mfepriv->thread)) { if (msleep_interruptible(500)) { - if (signal_pending(current)) { - ret = -EINTR; - goto err_remove_mutex; - } + if (signal_pending(current)) + return -EINTR; } } @@ -2833,8 +2815,7 @@ static int dvb_frontend_open(struct inode *inode, struct file *file) if (mfedev->users != -1 || mfepriv->thread) { mutex_unlock(&adapter->mfe_lock); - ret = -EBUSY; - goto err_remove_mutex; + return -EBUSY; } adapter->mfe_dvbdev = dvbdev; } @@ -2893,8 +2874,6 @@ static int dvb_frontend_open(struct inode *inode, struct file *file) if (adapter->mfe_shared) mutex_unlock(&adapter->mfe_lock); - - mutex_unlock(&fe->remove_mutex); return ret; err3: @@ -2916,9 +2895,6 @@ err1: err0: if (adapter->mfe_shared) mutex_unlock(&adapter->mfe_lock); - -err_remove_mutex: - mutex_unlock(&fe->remove_mutex); return ret; } @@ -2929,8 +2905,6 @@ static int dvb_frontend_release(struct inode *inode, struct file *file) struct dvb_frontend_private *fepriv = fe->frontend_priv; int ret; - mutex_lock(&fe->remove_mutex); - dev_dbg(fe->dvb->device, "%s:\n", __func__); if ((file->f_flags & O_ACCMODE) != O_RDONLY) { @@ -2952,18 +2926,10 @@ static int dvb_frontend_release(struct inode *inode, struct file *file) } mutex_unlock(&fe->dvb->mdev_lock); #endif + if (fe->exit != DVB_FE_NO_EXIT) + wake_up(&dvbdev->wait_queue); if (fe->ops.ts_bus_ctrl) fe->ops.ts_bus_ctrl(fe, 0); - - if (fe->exit != DVB_FE_NO_EXIT) { - mutex_unlock(&fe->remove_mutex); - wake_up(&dvbdev->wait_queue); - } else { - mutex_unlock(&fe->remove_mutex); - } - - } else { - mutex_unlock(&fe->remove_mutex); } dvb_frontend_put(fe); @@ -3064,7 +3030,6 @@ int dvb_register_frontend(struct dvb_adapter *dvb, fepriv = fe->frontend_priv; kref_init(&fe->refcount); - 
mutex_init(&fe->remove_mutex); /* * After initialization, there need to be two references: one diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c index de23627a119a..43d85a54268b 100644 --- a/drivers/media/platform/amphion/vpu_core.c +++ b/drivers/media/platform/amphion/vpu_core.c @@ -254,7 +254,7 @@ static int vpu_core_register(struct device *dev, struct vpu_core *core) if (vpu_core_is_exist(vpu, core)) return 0; - core->workqueue = alloc_workqueue("vpu", WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + core->workqueue = alloc_ordered_workqueue("vpu", WQ_MEM_RECLAIM); if (!core->workqueue) { dev_err(core->dev, "fail to alloc workqueue\n"); return -ENOMEM; diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c index 6773b885597c..a48edb445eea 100644 --- a/drivers/media/platform/amphion/vpu_v4l2.c +++ b/drivers/media/platform/amphion/vpu_v4l2.c @@ -740,7 +740,7 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst) inst->fh.ctrl_handler = &inst->ctrl_handler; file->private_data = &inst->fh; inst->state = VPU_CODEC_STATE_DEINIT; - inst->workqueue = alloc_workqueue("vpu_inst", WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + inst->workqueue = alloc_ordered_workqueue("vpu_inst", WQ_MEM_RECLAIM); if (inst->workqueue) { INIT_WORK(&inst->msg_work, vpu_inst_run_work); ret = kfifo_init(&inst->msg_fifo, diff --git a/drivers/media/platform/chips-media/coda-common.c b/drivers/media/platform/chips-media/coda-common.c index d013ea5d9d3d..ac9a642ae76f 100644 --- a/drivers/media/platform/chips-media/coda-common.c +++ b/drivers/media/platform/chips-media/coda-common.c @@ -3268,7 +3268,7 @@ static int coda_probe(struct platform_device *pdev) &dev->iram.blob); } - dev->workqueue = alloc_workqueue("coda", WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + dev->workqueue = alloc_ordered_workqueue("coda", WQ_MEM_RECLAIM); if (!dev->workqueue) { dev_err(&pdev->dev, "unable to alloc workqueue\n"); ret = -ENOMEM; diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 53001532e8e3..405b89ea1054 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -180,7 +180,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, data, size, dma->nr_pages); err = pin_user_pages(data & PAGE_MASK, dma->nr_pages, gup_flags, - dma->pages, NULL); + dma->pages); if (err != dma->nr_pages) { dma->nr_pages = (err >= 0) ? err : 0; diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index 48821f4c2b21..3c95600ab2f7 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -309,7 +309,7 @@ static void lkdtm_OVERFLOW_UNSIGNED(void) struct array_bounds_flex_array { int one; int two; - char data[1]; + char data[]; }; struct array_bounds { @@ -341,7 +341,7 @@ static void lkdtm_ARRAY_BOUNDS(void) * For the uninstrumented flex array member, also touch 1 byte * beyond to verify it is correctly uninstrumented. */ - for (i = 0; i < sizeof(not_checked->data) + 1; i++) + for (i = 0; i < 2; i++) not_checked->data[i] = 'A'; pr_info("Array access beyond bounds ...\n"); @@ -487,6 +487,7 @@ static void lkdtm_UNSET_SMEP(void) * the cr4 writing instruction. 
*/ insn = (unsigned char *)native_write_cr4; + OPTIMIZER_HIDE_VAR(insn); for (i = 0; i < MOV_CR4_DEPTH; i++) { /* mov %rdi, %cr4 */ if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7) diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index b836936e9747..629edb6486de 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -185,7 +185,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma, #else *pageshift = PAGE_SHIFT; #endif - if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0) + if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page) <= 0) return -EFAULT; *paddr = page_to_phys(page); put_page(page); @@ -228,7 +228,7 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, goto err; #ifdef CONFIG_X86_64 if (unlikely(pmd_large(*pmdp))) - pte = *(pte_t *) pmdp; + pte = ptep_get((pte_t *)pmdp); else #endif pte = *pte_offset_kernel(pmdp, vaddr); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index d920c4178389..6d01025f1fcf 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -358,15 +358,15 @@ static const struct attribute_group *mmc_disk_attr_groups[] = { NULL, }; -static int mmc_blk_open(struct block_device *bdev, fmode_t mode) +static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode) { - struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); + struct mmc_blk_data *md = mmc_blk_get(disk); int ret = -ENXIO; mutex_lock(&block_mutex); if (md) { ret = 0; - if ((mode & FMODE_WRITE) && md->read_only) { + if ((mode & BLK_OPEN_WRITE) && md->read_only) { mmc_blk_put(md); ret = -EROFS; } @@ -376,7 +376,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode) return ret; } -static void mmc_blk_release(struct gendisk *disk, fmode_t mode) +static void mmc_blk_release(struct gendisk *disk) { struct mmc_blk_data *md = disk->private_data; @@ -757,7 +757,7 @@ static int mmc_blk_check_blkdev(struct block_device *bdev) return 0; } -static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, +static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct mmc_blk_data *md; @@ -794,7 +794,7 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, } #ifdef CONFIG_COMPAT -static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, +static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 8648f7e63ca1..eea208856ce0 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1403,8 +1403,8 @@ static int bcm2835_probe(struct platform_device *pdev) host->max_clk = clk_get_rate(clk); host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto err; } diff --git a/drivers/mmc/host/litex_mmc.c b/drivers/mmc/host/litex_mmc.c index 39c6707fdfdb..9af6b0902efe 100644 --- a/drivers/mmc/host/litex_mmc.c +++ b/drivers/mmc/host/litex_mmc.c @@ -649,6 +649,7 @@ static struct platform_driver litex_mmc_driver = { .driver = { .name = "litex-mmc", .of_match_table = litex_match, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; module_platform_driver(litex_mmc_driver); diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index b8514d9d5e73..ee9a25b900ae 100644 --- 
a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -991,11 +991,8 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) if (data && !cmd->error) data->bytes_xfered = data->blksz * data->blocks; - if (meson_mmc_bounce_buf_read(data) || - meson_mmc_get_next_command(cmd)) - ret = IRQ_WAKE_THREAD; - else - ret = IRQ_HANDLED; + + return IRQ_WAKE_THREAD; } out: @@ -1007,9 +1004,6 @@ out: writel(start, host->regs + SD_EMMC_START); } - if (ret == IRQ_HANDLED) - meson_mmc_request_done(host->mmc, cmd->mrq); - return ret; } @@ -1192,8 +1186,8 @@ static int meson_mmc_probe(struct platform_device *pdev) return PTR_ERR(host->regs); host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) - return -EINVAL; + if (host->irq < 0) + return host->irq; cd_irq = platform_get_irq_optional(pdev, 1); mmc_gpio_set_cd_irq(mmc, cd_irq); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index f2b2e8b0574e..696cbef3ff7d 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1735,7 +1735,8 @@ static void mmci_set_max_busy_timeout(struct mmc_host *mmc) return; if (host->variant->busy_timeout && mmc->actual_clock) - max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC); + max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock, + MSEC_PER_SEC); mmc->max_busy_timeout = max_busy_timeout; } diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index edade0e54a0c..9785ec91654f 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -2680,7 +2680,7 @@ static int msdc_drv_probe(struct platform_device *pdev) host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { - ret = -EINVAL; + ret = host->irq; goto host_free; } diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index 629efbe639c4..b4f6a0a2fcb5 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -704,7 +704,7 @@ static int mvsd_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); if (irq < 0) - return -ENXIO; + return irq; mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); if (!mmc) { diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index ce78edfb402b..86454f1182bb 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -1343,7 +1343,7 @@ static int mmc_omap_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) - return -ENXIO; + return irq; host->virt_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(host->virt_base)) diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 517dde777413..1e0f2d7774bd 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1791,9 +1791,11 @@ static int omap_hsmmc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - irq = platform_get_irq(pdev, 0); - if (res == NULL || irq < 0) + if (!res) return -ENXIO; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c index 6f9d31a886ba..1bf22b08b373 100644 --- a/drivers/mmc/host/owl-mmc.c +++ b/drivers/mmc/host/owl-mmc.c @@ -637,7 +637,7 @@ static int owl_mmc_probe(struct platform_device *pdev) owl_host->irq = platform_get_irq(pdev, 0); if (owl_host->irq < 0) { - ret = -EINVAL; + ret = owl_host->irq; goto err_release_channel; } diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 
8f0e639236b1..edf2e6c14dc6 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -829,7 +829,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) host->ops = &sdhci_acpi_ops_dflt; host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { - err = -EINVAL; + err = host->irq; goto err_free; } diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 8ac81d57a3df..1877d583fe8c 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2479,6 +2479,9 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev, msm_host->ddr_config = DDR_CONFIG_POR_VAL; of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config); + + if (of_device_is_compatible(node, "qcom,msm8916-sdhci")) + host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA; } static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host) diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index d463e2fd5b1a..c79035727b20 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -65,8 +65,8 @@ static int sdhci_probe(struct platform_device *pdev) host->hw_name = "sdhci"; host->ops = &sdhci_pltfm_ops; host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto err_host; } host->quirks = SDHCI_QUIRK_BROKEN_ADMA; diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 0fd4c9d644dd..5cf53348372a 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1400,7 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) irq[0] = platform_get_irq(pdev, 0); irq[1] = platform_get_irq_optional(pdev, 1); if (irq[0] < 0) - return -ENXIO; + return irq[0]; reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg)) diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 3db9f32d6a7b..69dcb8805e05 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1350,8 +1350,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, return ret; host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto error_disable_mmc; } diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index 2f59917b105e..2e17903658fc 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1757,8 +1757,10 @@ static int usdhi6_probe(struct platform_device *pdev) irq_cd = platform_get_irq_byname(pdev, "card detect"); irq_sd = platform_get_irq_byname(pdev, "data"); irq_sdio = platform_get_irq_byname(pdev, "SDIO"); - if (irq_sd < 0 || irq_sdio < 0) - return -ENODEV; + if (irq_sd < 0) + return irq_sd; + if (irq_sdio < 0) + return irq_sdio; mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev); if (!mmc) diff --git a/drivers/most/configfs.c b/drivers/most/configfs.c index 27b0c923597f..36d8c917f65f 100644 --- a/drivers/most/configfs.c +++ b/drivers/most/configfs.c @@ -204,7 +204,7 @@ static ssize_t mdev_link_device_store(struct config_item *item, { struct mdev_link *mdev_link = to_mdev_link(item); - strlcpy(mdev_link->device, page, sizeof(mdev_link->device)); + strscpy(mdev_link->device, page, sizeof(mdev_link->device)); strim(mdev_link->device); return count; } @@ -219,7 +219,7 @@ static ssize_t mdev_link_channel_store(struct config_item *item, { struct mdev_link *mdev_link = to_mdev_link(item); - strlcpy(mdev_link->channel, page, 
sizeof(mdev_link->channel)); + strscpy(mdev_link->channel, page, sizeof(mdev_link->channel)); strim(mdev_link->channel); return count; } @@ -234,7 +234,7 @@ static ssize_t mdev_link_comp_store(struct config_item *item, { struct mdev_link *mdev_link = to_mdev_link(item); - strlcpy(mdev_link->comp, page, sizeof(mdev_link->comp)); + strscpy(mdev_link->comp, page, sizeof(mdev_link->comp)); strim(mdev_link->comp); return count; } @@ -250,7 +250,7 @@ static ssize_t mdev_link_comp_params_store(struct config_item *item, { struct mdev_link *mdev_link = to_mdev_link(item); - strlcpy(mdev_link->comp_params, page, sizeof(mdev_link->comp_params)); + strscpy(mdev_link->comp_params, page, sizeof(mdev_link->comp_params)); strim(mdev_link->comp_params); return count; } diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index 4cd37ec45762..be106dc20ff3 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -209,40 +209,34 @@ static void block2mtd_free_device(struct block2mtd_dev *dev) if (dev->blkdev) { invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 0, -1); - blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(dev->blkdev, NULL); } kfree(dev); } - -static struct block2mtd_dev *add_device(char *devname, int erase_size, - char *label, int timeout) +/* + * This function is marked __ref because it calls the __init marked + * early_lookup_bdev when called from the early boot code. + */ +static struct block_device __ref *mdtblock_early_get_bdev(const char *devname, + blk_mode_t mode, int timeout, struct block2mtd_dev *dev) { + struct block_device *bdev = ERR_PTR(-ENODEV); #ifndef MODULE int i; -#endif - const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; - struct block_device *bdev; - struct block2mtd_dev *dev; - char *name; - if (!devname) - return NULL; - - dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL); - if (!dev) - return NULL; - - /* Get a handle on the device */ - bdev = blkdev_get_by_path(devname, mode, dev); + /* + * We can't use early_lookup_bdev from a running system. + */ + if (system_state >= SYSTEM_RUNNING) + return bdev; -#ifndef MODULE /* * We might not have the root device mounted at this point. * Try to resolve the device name by other means. 
*/ - for (i = 0; IS_ERR(bdev) && i <= timeout; i++) { + for (i = 0; i <= timeout; i++) { dev_t devt; if (i) @@ -254,13 +248,35 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size, msleep(1000); wait_for_device_probe(); - devt = name_to_dev_t(devname); - if (!devt) - continue; - bdev = blkdev_get_by_dev(devt, mode, dev); + if (!early_lookup_bdev(devname, &devt)) { + bdev = blkdev_get_by_dev(devt, mode, dev, NULL); + if (!IS_ERR(bdev)) + break; + } } #endif + return bdev; +} +static struct block2mtd_dev *add_device(char *devname, int erase_size, + char *label, int timeout) +{ + const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE; + struct block_device *bdev; + struct block2mtd_dev *dev; + char *name; + + if (!devname) + return NULL; + + dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL); + if (!dev) + return NULL; + + /* Get a handle on the device */ + bdev = blkdev_get_by_path(devname, mode, dev, NULL); + if (IS_ERR(bdev)) + bdev = mdtblock_early_get_bdev(devname, mode, timeout, dev); if (IS_ERR(bdev)) { pr_err("error: cannot open device %s\n", devname); goto err_free_block2mtd; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 60b222799871..ff18636e0889 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -182,9 +182,9 @@ static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_OK; } -static int blktrans_open(struct block_device *bdev, fmode_t mode) +static int blktrans_open(struct gendisk *disk, blk_mode_t mode) { - struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data; + struct mtd_blktrans_dev *dev = disk->private_data; int ret = 0; kref_get(&dev->ref); @@ -208,7 +208,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) ret = __get_mtd_device(dev->mtd); if (ret) goto error_release; - dev->file_mode = mode; + dev->writable = mode & BLK_OPEN_WRITE; unlock: dev->open++; @@ -225,7 +225,7 @@ error_put: return ret; } -static void blktrans_release(struct gendisk *disk, fmode_t mode) +static void blktrans_release(struct gendisk *disk) { struct mtd_blktrans_dev *dev = disk->private_data; diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index a0a1194dc1d9..fa476fb4dffb 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c @@ -294,7 +294,7 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd) * It was the last usage. Free the cache, but only sync if * opened for writing. */ - if (mbd->file_mode & FMODE_WRITE) + if (mbd->writable) mtd_sync(mbd->mtd); vfree(mtdblk->cache_data); } diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index 3711d7f74600..437c5b83ffe5 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -227,9 +227,9 @@ static blk_status_t ubiblock_read(struct request *req) return BLK_STS_OK; } -static int ubiblock_open(struct block_device *bdev, fmode_t mode) +static int ubiblock_open(struct gendisk *disk, blk_mode_t mode) { - struct ubiblock *dev = bdev->bd_disk->private_data; + struct ubiblock *dev = disk->private_data; int ret; mutex_lock(&dev->dev_mutex); @@ -246,11 +246,10 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode) * It's just a paranoid check, as write requests will get rejected * in any case. 
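The open/release conversions in this series (mmc_block, mtd_blkdevs, ubiblock) all track the same block-layer API change: ->open() now takes the gendisk plus a blk_mode_t instead of a block_device plus fmode_t, and ->release() loses its mode argument. A minimal sketch of the resulting shape for a read-only, refcounted driver; the my_* names are hypothetical:

#include <linux/blkdev.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/slab.h>

struct my_dev {
	struct kref ref;
};

static void my_dev_free(struct kref *ref)
{
	kfree(container_of(ref, struct my_dev, ref));
}

static int my_open(struct gendisk *disk, blk_mode_t mode)
{
	struct my_dev *dev = disk->private_data;

	if (mode & BLK_OPEN_WRITE)	/* device is read-only */
		return -EROFS;
	kref_get(&dev->ref);
	return 0;
}

static void my_release(struct gendisk *disk)
{
	struct my_dev *dev = disk->private_data;

	kref_put(&dev->ref, my_dev_free);
}

static const struct block_device_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= my_open,
	.release	= my_release,
};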
*/ - if (mode & FMODE_WRITE) { + if (mode & BLK_OPEN_WRITE) { ret = -EROFS; goto out_unlock; } - dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY); if (IS_ERR(dev->desc)) { dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d", @@ -270,7 +269,7 @@ out_unlock: return ret; } -static void ubiblock_release(struct gendisk *gd, fmode_t mode) +static void ubiblock_release(struct gendisk *gd) { struct ubiblock *dev = gd->private_data; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 9bc54e1348cb..7e773c4ba046 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -399,6 +399,20 @@ static void mt7530_pll_setup(struct mt7530_priv *priv) core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); } +/* If port 6 is available as a CPU port, always prefer that as the default, + * otherwise don't care. + */ +static struct dsa_port * +mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds) +{ + struct dsa_port *cpu_dp = dsa_to_port(ds, 6); + + if (dsa_port_is_cpu(cpu_dp)) + return cpu_dp; + + return NULL; +} + /* Setup port 6 interface mode and TRGMII TX circuit */ static int mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) @@ -985,6 +999,18 @@ unlock_exit: mutex_unlock(&priv->reg_mutex); } +static void +mt753x_trap_frames(struct mt7530_priv *priv) +{ + /* Trap BPDUs to the CPU port(s) */ + mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK, + MT753X_BPDU_CPU_ONLY); + + /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */ + mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK, + MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY)); +} + static int mt753x_cpu_port_enable(struct dsa_switch *ds, int port) { @@ -1007,9 +1033,16 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port) UNU_FFP(BIT(port))); /* Set CPU port number */ - if (priv->id == ID_MT7621) + if (priv->id == ID_MT7530 || priv->id == ID_MT7621) mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port)); + /* Add the CPU port to the CPU port bitmap for MT7531 and the switch on + * the MT7988 SoC. Trapped frames will be forwarded to the CPU port that + * is affine to the inbound user port. + */ + if (priv->id == ID_MT7531 || priv->id == ID_MT7988) + mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port))); + /* CPU port gets connected to all user ports of * the switch. 
*/ @@ -2255,6 +2288,8 @@ mt7530_setup(struct dsa_switch *ds) priv->p6_interface = PHY_INTERFACE_MODE_NA; + mt753x_trap_frames(priv); + /* Enable and reset MIB counters */ mt7530_mib_reset(ds); @@ -2352,17 +2387,9 @@ static int mt7531_setup_common(struct dsa_switch *ds) { struct mt7530_priv *priv = ds->priv; - struct dsa_port *cpu_dp; int ret, i; - /* BPDU to CPU port */ - dsa_switch_for_each_cpu_port(cpu_dp, ds) { - mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK, - BIT(cpu_dp->index)); - break; - } - mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK, - MT753X_BPDU_CPU_ONLY); + mt753x_trap_frames(priv); /* Enable and reset MIB counters */ mt7530_mib_reset(ds); @@ -3085,6 +3112,7 @@ static int mt7988_setup(struct dsa_switch *ds) const struct dsa_switch_ops mt7530_switch_ops = { .get_tag_protocol = mtk_get_tag_protocol, .setup = mt753x_setup, + .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port, .get_strings = mt7530_get_strings, .get_ethtool_stats = mt7530_get_ethtool_stats, .get_sset_count = mt7530_get_sset_count, diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 5084f48a8869..08045b035e6a 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -54,6 +54,7 @@ enum mt753x_id { #define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK) #define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16) #define MT7531_CPU_PMAP_MASK GENMASK(7, 0) +#define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x) #define MT753X_MIRROR_REG(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \ MT7531_CFC : MT7530_MFC) @@ -66,6 +67,11 @@ enum mt753x_id { #define MT753X_BPC 0x24 #define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0) +/* Register for :03 and :0E MAC DA frame control */ +#define MT753X_RGAC2 0x2c +#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16) +#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x) + enum mt753x_bpdu_port_fw { MT753X_BPDU_FOLLOW_MFC, MT753X_BPDU_CPU_EXCLUDE = 4, diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index cfb3faeaa5bf..d172a3e9736c 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1263,7 +1263,7 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port) /* Consider the standard Ethernet overhead of 8 octets preamble+SFD, * 4 octets FCS, 12 octets IFG. 
*/ - needed_bit_time_ps = (maxlen + 24) * picos_per_byte; + needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte; dev_dbg(ocelot->dev, "port %d: max frame size %d needs %llu ps at speed %d\n", diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 637d162bbcfa..1e7a6f1d4223 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -14294,11 +14294,16 @@ static void bnx2x_io_resume(struct pci_dev *pdev) bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK; - if (netif_running(dev)) - bnx2x_nic_load(bp, LOAD_NORMAL); + if (netif_running(dev)) { + if (bnx2x_nic_load(bp, LOAD_NORMAL)) { + netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n"); + goto done; + } + } netif_device_attach(dev); +done: rtnl_unlock(); } diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 7eb2ddbe9bad..a317feb8decb 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1126,8 +1126,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) } poll: - lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | - WQ_MEM_RECLAIM, 1); + lmac->check_link = alloc_ordered_workqueue("check_link", WQ_MEM_RECLAIM); if (!lmac->check_link) return -ENOMEM; INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 7e408bcc88de..0defd519ba62 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1135,8 +1135,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? 
VLAN_ETH_HLEN : ETH_HLEN; if (skb->len <= 60 && - (lancer_chip(adapter) || skb_vlan_tag_present(skb)) && - is_ipv4_pkt(skb)) { + (lancer_chip(adapter) || BE3_chip(adapter) || + skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) { ip = (struct iphdr *)ip_hdr(skb); pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index b1871e6c4006..00e50bd30189 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -54,6 +54,9 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode) case DPMAC_ETH_IF_XFI: *if_mode = PHY_INTERFACE_MODE_10GBASER; break; + case DPMAC_ETH_IF_CAUI: + *if_mode = PHY_INTERFACE_MODE_25GBASER; + break; default: return -EINVAL; } @@ -79,6 +82,8 @@ static enum dpmac_eth_if dpmac_eth_if_mode(phy_interface_t if_mode) return DPMAC_ETH_IF_XFI; case PHY_INTERFACE_MODE_1000BASEX: return DPMAC_ETH_IF_1000BASEX; + case PHY_INTERFACE_MODE_25GBASER: + return DPMAC_ETH_IF_CAUI; default: return DPMAC_ETH_IF_MII; } @@ -418,7 +423,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac) mac->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD | - MAC_10000FD; + MAC_10000FD | MAC_25000FD; dpaa2_mac_set_supported_interfaces(mac); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 83c27bbbc6ed..126007ab70f6 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -181,8 +181,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) int bw_sum = 0; u8 bw; - prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1); - prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2); + prio_top = tc_nums - 1; + prio_next = tc_nums - 2; /* Support highest prio and second prio tc in cbs mode */ if (tc != prio_top && tc != prio_next) diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 9abaff1f2aff..39d0fe76a38f 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -525,7 +525,7 @@ void iavf_set_ethtool_ops(struct net_device *netdev); void iavf_update_stats(struct iavf_adapter *adapter); void iavf_reset_interrupt_capability(struct iavf_adapter *adapter); int iavf_init_interrupt_scheme(struct iavf_adapter *adapter); -void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask); +void iavf_irq_enable_queues(struct iavf_adapter *adapter); void iavf_free_all_tx_resources(struct iavf_adapter *adapter); void iavf_free_all_rx_resources(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 2de4baff4c20..4a66873882d1 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -359,21 +359,18 @@ static void iavf_irq_disable(struct iavf_adapter *adapter) } /** - * iavf_irq_enable_queues - Enable interrupt for specified queues + * iavf_irq_enable_queues - Enable interrupt for all queues * @adapter: board private structure - * @mask: bitmap of queues to enable **/ -void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) +void iavf_irq_enable_queues(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; int i; for (i = 1; i < adapter->num_msix_vectors; i++) { - if (mask & BIT(i - 1)) 
{ - wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), - IAVF_VFINT_DYN_CTLN1_INTENA_MASK | - IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); - } + wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), + IAVF_VFINT_DYN_CTLN1_INTENA_MASK | + IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); } } @@ -387,7 +384,7 @@ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) struct iavf_hw *hw = &adapter->hw; iavf_misc_irq_enable(adapter); - iavf_irq_enable_queues(adapter, ~0); + iavf_irq_enable_queues(adapter); if (flush) iavf_flush(hw); diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h index bf793332fc9d..a19e88898a0b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_register.h +++ b/drivers/net/ethernet/intel/iavf/iavf_register.h @@ -40,7 +40,7 @@ #define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT) #define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 #define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) -#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...63 */ /* Reset: VFR */ #define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0 #define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT) #define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h index 37eadb3d27a8..41acfe26df1c 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.h +++ b/drivers/net/ethernet/intel/ice/ice_ddp.h @@ -185,7 +185,7 @@ struct ice_buf_hdr { #define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) \ ((ICE_PKG_BUF_SIZE - \ - struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) / \ + struct_size_t(struct ice_buf_hdr, section_entry, 1) - (hd_sz)) / \ (ent_sz)) /* ice package section IDs */ @@ -297,7 +297,7 @@ struct ice_label_section { }; #define ICE_MAX_LABELS_IN_BUF \ - ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_label_section *)0, \ + ICE_MAX_ENTRIES_IN_BUF(struct_size_t(struct ice_label_section, \ label, 1) - \ sizeof(struct ice_label), \ sizeof(struct ice_label)) @@ -352,7 +352,7 @@ struct ice_boost_tcam_section { }; #define ICE_MAX_BST_TCAMS_IN_BUF \ - ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_boost_tcam_section *)0, \ + ICE_MAX_ENTRIES_IN_BUF(struct_size_t(struct ice_boost_tcam_section, \ tcam, 1) - \ sizeof(struct ice_boost_tcam_entry), \ sizeof(struct ice_boost_tcam_entry)) @@ -372,8 +372,7 @@ struct ice_marker_ptype_tcam_section { }; #define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \ - ICE_MAX_ENTRIES_IN_BUF( \ - struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, \ + ICE_MAX_ENTRIES_IN_BUF(struct_size_t(struct ice_marker_ptype_tcam_section, tcam, \ 1) - \ sizeof(struct ice_marker_ptype_tcam_entry), \ sizeof(struct ice_marker_ptype_tcam_entry)) diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index bd0ed155e11b..75c9de675f20 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -96,12 +96,7 @@ static void ice_gnss_read(struct kthread_work *work) int err = 0; pf = gnss->back; - if (!pf) { - err = -EFAULT; - goto exit; - } - - if (!test_bit(ICE_FLAG_GNSS, pf->flags)) + if (!pf || !test_bit(ICE_FLAG_GNSS, pf->flags)) return; hw = &pf->hw; @@ -159,7 +154,6 @@ free_buf: free_page((unsigned long)buf); requeue: kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, delay); -exit: if 
(err) dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err); } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a1f7c8edc22f..42c318ceff61 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4802,9 +4802,13 @@ err_init_pf: static void ice_deinit_dev(struct ice_pf *pf) { ice_free_irq_msix_misc(pf); - ice_clear_interrupt_scheme(pf); ice_deinit_pf(pf); ice_deinit_hw(&pf->hw); + + /* Service task is already stopped, so call reset directly. */ + ice_reset(&pf->hw, ICE_RESET_PFR); + pci_wait_for_pending_transaction(pf->pdev); + ice_clear_interrupt_scheme(pf); } static void ice_init_features(struct ice_pf *pf) @@ -5094,10 +5098,6 @@ int ice_load(struct ice_pf *pf) struct ice_vsi *vsi; int err; - err = ice_reset(&pf->hw, ICE_RESET_PFR); - if (err) - return err; - err = ice_init_dev(pf); if (err) return err; @@ -5354,12 +5354,6 @@ static void ice_remove(struct pci_dev *pdev) ice_setup_mc_magic_wake(pf); ice_set_wake(pf); - /* Issue a PFR as part of the prescribed driver unload flow. Do not - * do it via ice_schedule_reset() since there is no need to rebuild - * and the service task is already stopped. - */ - ice_reset(&pf->hw, ICE_RESET_PFR); - pci_wait_for_pending_transaction(pdev); pci_disable_device(pdev); } @@ -7056,6 +7050,10 @@ int ice_down(struct ice_vsi *vsi) ice_for_each_txq(vsi, i) ice_clean_tx_ring(vsi->tx_rings[i]); + if (ice_is_xdp_ena_vsi(vsi)) + ice_for_each_xdp_txq(vsi, i) + ice_clean_tx_ring(vsi->xdp_rings[i]); + ice_for_each_rxq(vsi, i) ice_clean_rx_ring(vsi->rx_rings[i]); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7d60da1b7bf4..319ed601eaa1 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -822,6 +822,8 @@ static int igb_set_eeprom(struct net_device *netdev, */ ret_val = hw->nvm.ops.read(hw, last_word, 1, &eeprom_buff[last_word - first_word]); + if (ret_val) + goto out; } /* Device's eeprom is always little-endian, word addressable */ @@ -841,6 +843,7 @@ static int igb_set_eeprom(struct net_device *netdev, hw->nvm.ops.update(hw); igb_set_fw_version(adapter); +out: kfree(eeprom_buff); return ret_val; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 58872a4c2540..bb3db387d49c 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6947,6 +6947,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) struct e1000_hw *hw = &adapter->hw; struct ptp_clock_event event; struct timespec64 ts; + unsigned long flags; if (pin < 0 || pin >= IGB_N_SDP) return; @@ -6954,9 +6955,12 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i354 || hw->mac.type == e1000_i350) { - s64 ns = rd32(auxstmpl); + u64 ns = rd32(auxstmpl); - ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32; + ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32; + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); ts = ns_to_timespec64(ns); } else { ts.tv_nsec = rd32(auxstmpl); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 1c4676882082..fa764190f270 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c 
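The igb_extts() hunk above fixes two bugs at once: the latched SDP event timestamp is a raw 40-bit SYSTIM cycle count (so it must not be treated as signed nanoseconds), and it has to be converted through the driver's timecounter, under tmreg_lock, before being handed to the PTP core. A minimal hedged sketch of that conversion pattern follows; the register values are passed in directly since the MMIO plumbing (rd32(auxstmpl)/rd32(auxstmph) in igb) is device-specific, and the helper name is illustrative, not from the driver.

#include <linux/spinlock.h>
#include <linux/timecounter.h>

/* Sketch only: 'tc' and 'lock' mirror adapter->tc and adapter->tmreg_lock. */
static u64 extts_cycles_to_ns(struct timecounter *tc, spinlock_t *lock,
			      u32 lo, u32 hi)
{
	unsigned long flags;
	u64 cycles, ns;

	cycles = ((u64)(hi & 0xff) << 32) | lo;	/* 40-bit SYSTIM latch */

	/* timecounter state is shared with the PTP clock ops */
	spin_lock_irqsave(lock, flags);
	ns = timecounter_cyc2time(tc, cycles);
	spin_unlock_irqrestore(lock, flags);

	return ns;
}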
@@ -254,6 +254,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) /* reset BQL for queue */ netdev_tx_reset_queue(txring_txq(tx_ring)); + /* Zero out the buffer ring */ + memset(tx_ring->tx_buffer_info, 0, + sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -267,7 +274,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) */ void igc_free_tx_resources(struct igc_ring *tx_ring) { - igc_clean_tx_ring(tx_ring); + igc_disable_tx_ring(tx_ring); vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; @@ -6723,6 +6730,9 @@ static void igc_remove(struct pci_dev *pdev) igc_ptp_stop(adapter); + pci_disable_ptm(pdev); + pci_clear_master(pdev); + set_bit(__IGC_DOWN, &adapter->state); del_timer_sync(&adapter->watchdog_timer); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index e1853da280f9..43eb6e871351 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -981,6 +981,9 @@ int octep_device_setup(struct octep_device *oct) oct->mmio[i].hw_addr = ioremap(pci_resource_start(oct->pdev, i * 2), pci_resource_len(oct->pdev, i * 2)); + if (!oct->mmio[i].hw_addr) + goto unmap_prev; + oct->mmio[i].mapped = 1; } @@ -1015,7 +1018,9 @@ int octep_device_setup(struct octep_device *oct) return 0; unsupported_dev: - for (i = 0; i < OCTEP_MMIO_REGIONS; i++) + i = OCTEP_MMIO_REGIONS; +unmap_prev: + while (i--) iounmap(oct->mmio[i].hw_addr); kfree(oct->conf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 9f673bda9dbd..0069e60afa3b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -3044,9 +3044,8 @@ static int rvu_flr_init(struct rvu *rvu) cfg | BIT_ULL(22)); } - rvu->flr_wq = alloc_workqueue("rvu_afpf_flr", - WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, - 1); + rvu->flr_wq = alloc_ordered_workqueue("rvu_afpf_flr", + WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!rvu->flr_wq) return -ENOMEM; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 4ad707e758b9..f01d057ad025 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -1878,7 +1878,8 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, free_cnt = rvu_rsrc_free_count(&txsch->schq); } - if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC) + if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC || + req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC) return NIX_AF_ERR_TLX_ALLOC_FAIL; /* If contiguous queues are needed, check for availability */ @@ -4080,10 +4081,6 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) { - /* CN10k supports 72KB FIFO size and max packet size of 64k */ - if (rvu->hw->lbk_bufsize == 0x12000) - return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16; - return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c index 51209119f0f2..9f11c1e40737 100644 --- 
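The octep_device_setup() fix above is the classic partial-init unwind: check each ioremap() result, and on failure walk back down from the failing index so only the regions that were actually mapped get iounmap()'d. A self-contained sketch of the pattern, with illustrative names standing in for the driver's own (NR_REGIONS plays the role of OCTEP_MMIO_REGIONS):

#include <linux/errno.h>
#include <linux/io.h>

#define NR_REGIONS 3	/* illustrative stand-in for OCTEP_MMIO_REGIONS */

static int map_regions(void __iomem **bar, const phys_addr_t *base,
		       const size_t *len)
{
	int i;

	for (i = 0; i < NR_REGIONS; i++) {
		bar[i] = ioremap(base[i], len[i]);
		if (!bar[i])
			goto unmap_prev;
	}
	return 0;

unmap_prev:
	while (i--)	/* i is the failed index; unmap i-1 .. 0 only */
		iounmap(bar[i]);
	return -ENOMEM;
}

Counting down with while (i--) is what lets the success path reuse the same loop bound: when i == NR_REGIONS it unmaps everything, and on early failure it skips the entry that was never mapped.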
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c @@ -1164,10 +1164,8 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i { struct npc_exact_table *table; u16 *cnt, old_cnt; - bool promisc; table = rvu->hw->table; - promisc = table->promisc_mode[drop_mcam_idx]; cnt = &table->cnt_cmd_rules[drop_mcam_idx]; old_cnt = *cnt; @@ -1179,16 +1177,13 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i *enable_or_disable_cam = false; - if (promisc) - goto done; - - /* If all rules are deleted and not already in promisc mode; disable cam */ + /* If all rules are deleted, disable cam */ if (!*cnt && val < 0) { *enable_or_disable_cam = true; goto done; } - /* If rule got added and not already in promisc mode; enable cam */ + /* If rule got added, enable cam */ if (!old_cnt && val > 0) { *enable_or_disable_cam = true; goto done; @@ -1443,7 +1438,6 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) u32 drop_mcam_idx; bool *promisc; bool rc; - u32 cnt; table = rvu->hw->table; @@ -1466,17 +1460,8 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) return LMAC_AF_ERR_INVALID_PARAM; } *promisc = false; - cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL); mutex_unlock(&table->lock); - /* If no dmac filter entries configured, disable drop rule */ - if (!cnt) - rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false); - else - rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc); - - dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n", - __func__, cgx_id, lmac_id, cnt); return 0; } @@ -1494,7 +1479,6 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) u32 drop_mcam_idx; bool *promisc; bool rc; - u32 cnt; table = rvu->hw->table; @@ -1517,17 +1501,8 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) return LMAC_AF_ERR_INVALID_PARAM; } *promisc = true; - cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL); mutex_unlock(&table->lock); - /* If no dmac filter entries configured, disable drop rule */ - if (!cnt) - rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false); - else - rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc); - - dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n", - __func__, cgx_id, lmac_id, cnt); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 18284ad75157..74c49795dc82 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -271,8 +271,7 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) { int vf; - pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq", - WQ_UNBOUND | WQ_HIGHPRI, 1); + pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI); if (!pf->flr_wq) return -ENOMEM; @@ -593,9 +592,8 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) if (!pf->mbox_pfvf) return -ENOMEM; - pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox", - WQ_UNBOUND | WQ_HIGHPRI | - WQ_MEM_RECLAIM, 1); + pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox", + WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!pf->mbox_pfvf_wq) return -ENOMEM; @@ -1063,9 +1061,8 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf) int err; mbox->pfvf = pf; - pf->mbox_wq = 
alloc_workqueue("otx2_pfaf_mailbox", - WQ_UNBOUND | WQ_HIGHPRI | - WQ_MEM_RECLAIM, 1); + pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox", + WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!pf->mbox_wq) return -ENOMEM; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 53366dbfbf27..7baed6bb3b72 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -297,9 +297,8 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf) int err; mbox->pfvf = vf; - vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox", - WQ_UNBOUND | WQ_HIGHPRI | - WQ_MEM_RECLAIM, 1); + vf->mbox_wq = alloc_ordered_workqueue("otx2_vfaf_mailbox", + WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!vf->mbox_wq) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 9c94807097cb..5ce28ff7685f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -732,7 +732,8 @@ static void mlx5e_rx_compute_wqe_bulk_params(struct mlx5e_params *params, static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, - struct mlx5e_rq_frags_info *info) + struct mlx5e_rq_frags_info *info, + u32 *xdp_frag_size) { u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu); int frag_size_max = DEFAULT_FRAG_SIZE; @@ -845,6 +846,8 @@ out: info->log_num_frags = order_base_2(info->num_frags); + *xdp_frag_size = info->num_frags > 1 && params->xdp_prog ? PAGE_SIZE : 0; + return 0; } @@ -989,7 +992,8 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev, } default: /* MLX5_WQ_TYPE_CYCLIC */ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); - err = mlx5e_build_rq_frags_info(mdev, params, xsk, ¶m->frags_info); + err = mlx5e_build_rq_frags_info(mdev, params, xsk, ¶m->frags_info, + ¶m->xdp_frag_size); if (err) return err; ndsegs = param->frags_info.num_frags; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index a5d20f6d6d9c..6800949dafbc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -24,6 +24,7 @@ struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; struct mlx5_wq_param wq; struct mlx5e_rq_frags_info frags_info; + u32 xdp_frag_size; }; struct mlx5e_sq_param { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index ead38ef69483..a254e728ac95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -2021,6 +2021,8 @@ void mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr) { + if (!attr->ct_attr.ft) /* no ct action, return */ + return; if (!attr->ct_attr.nf_ft) /* means only ct clear action, and not ct_clear,ct() */ return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index ed279f450976..36826b582484 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -86,7 +86,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c, if (err) return err; - return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0); + return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, 
rq_xdp_ix, c->napi.napi_id); } static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 55b38544422f..891d39b4bfd4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -61,16 +61,19 @@ static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work) struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry; struct xfrm_state *x = sa_entry->x; - spin_lock(&x->lock); + if (sa_entry->attrs.drop) + return; + + spin_lock_bh(&x->lock); xfrm_state_check_expire(x); if (x->km.state == XFRM_STATE_EXPIRED) { sa_entry->attrs.drop = true; - mlx5e_accel_ipsec_fs_modify(sa_entry); - } - spin_unlock(&x->lock); + spin_unlock_bh(&x->lock); - if (sa_entry->attrs.drop) + mlx5e_accel_ipsec_fs_modify(sa_entry); return; + } + spin_unlock_bh(&x->lock); queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork, MLX5_IPSEC_RESCHED); @@ -1040,11 +1043,17 @@ err_fs: return err; } -static void mlx5e_xfrm_free_policy(struct xfrm_policy *x) +static void mlx5e_xfrm_del_policy(struct xfrm_policy *x) { struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x); mlx5e_accel_ipsec_fs_del_pol(pol_entry); +} + +static void mlx5e_xfrm_free_policy(struct xfrm_policy *x) +{ + struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x); + kfree(pol_entry); } @@ -1065,6 +1074,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = { .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft, .xdo_dev_policy_add = mlx5e_xfrm_add_policy, + .xdo_dev_policy_delete = mlx5e_xfrm_del_policy, .xdo_dev_policy_free = mlx5e_xfrm_free_policy, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index df90e19066bc..a3554bde3e07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -305,7 +305,17 @@ static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry, } mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); + + /* It is safe to execute the modify below unlocked since the only flows + * that could affect this HW object are create, destroy and this work. + * + * Creation flow can't co-exist with this modify work, the destruction + * flow would cancel this work, and this work is a single entity that + * can't conflict with itself. 
+ */ + spin_unlock_bh(&sa_entry->x->lock); mlx5_accel_esp_modify_xfrm(sa_entry, &attrs); + spin_lock_bh(&sa_entry->x->lock); data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET; @@ -431,7 +441,7 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work) aso = sa_entry->ipsec->aso; attrs = &sa_entry->attrs; - spin_lock(&sa_entry->x->lock); + spin_lock_bh(&sa_entry->x->lock); ret = mlx5e_ipsec_aso_query(sa_entry, NULL); if (ret) goto unlock; @@ -447,7 +457,7 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work) mlx5e_ipsec_handle_limits(sa_entry); unlock: - spin_unlock(&sa_entry->x->lock); + spin_unlock_bh(&sa_entry->x->lock); kfree(work); } @@ -596,7 +606,8 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry, do { ret = mlx5_aso_poll_cq(aso->aso, false); if (ret) - usleep_range(2, 10); + /* We are in atomic context */ + udelay(10); } while (ret && time_is_after_jiffies(expires)); spin_unlock_bh(&aso->lock); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a7c526ee5024..a5bdf78955d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -641,7 +641,7 @@ static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq) } static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params, - struct mlx5e_rq *rq) + u32 xdp_frag_size, struct mlx5e_rq *rq) { struct mlx5_core_dev *mdev = c->mdev; int err; @@ -665,7 +665,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param if (err) return err; - return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id); + return __xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id, + xdp_frag_size); } static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, @@ -2240,7 +2241,7 @@ static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param { int err; - err = mlx5e_init_rxq_rq(c, params, &c->rq); + err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 8a5a8703f0a3..b9b1da751a3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1439,6 +1439,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, mlx5e_hairpin_flow_del(priv, flow); free_flow_post_acts(flow); + mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr); kvfree(attr->parse_attr); kfree(flow->attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 144e59480686..ec83e6483d1a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -511,10 +511,11 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, struct mlx5_flow_rule *dst; void *in_flow_context, *vlan; void *in_match_value; + int reformat_id = 0; unsigned int inlen; int dst_cnt_size; + u32 *in, action; void *in_dests; - u32 *in; int err; if (mlx5_set_extended_dest(dev, fte, &extended_dest)) @@ -553,22 +554,42 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, extended_destination, extended_dest); - if (extended_dest) { - u32 action; - action = fte->action.action & - ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; - MLX5_SET(flow_context, 
in_flow_context, action, action); - } else { - MLX5_SET(flow_context, in_flow_context, action, - fte->action.action); - if (fte->action.pkt_reformat) - MLX5_SET(flow_context, in_flow_context, packet_reformat_id, - fte->action.pkt_reformat->id); + action = fte->action.action; + if (extended_dest) + action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + + MLX5_SET(flow_context, in_flow_context, action, action); + + if (!extended_dest && fte->action.pkt_reformat) { + struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat; + + if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { + reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat); + if (reformat_id < 0) { + mlx5_core_err(dev, + "Unsupported SW-owned pkt_reformat type (%d) in FW-owned table\n", + pkt_reformat->reformat_type); + err = reformat_id; + goto err_out; + } + } else { + reformat_id = fte->action.pkt_reformat->id; + } } - if (fte->action.modify_hdr) + + MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id); + + if (fte->action.modify_hdr) { + if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { + mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n"); + err = -EOPNOTSUPP; + goto err_out; + } + MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->action.modify_hdr->id); + } MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type, fte->action.crypto.type); @@ -885,6 +906,8 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id); + pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_FW; + kfree(in); return err; } @@ -969,6 +992,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id); + modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_FW; kfree(in); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index f137a0611b77..b043190e50a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -54,8 +54,14 @@ struct mlx5_flow_definer { u32 id; }; +enum mlx5_flow_resource_owner { + MLX5_FLOW_RESOURCE_OWNER_FW, + MLX5_FLOW_RESOURCE_OWNER_SW, +}; + struct mlx5_modify_hdr { enum mlx5_flow_namespace_type ns_type; + enum mlx5_flow_resource_owner owner; union { struct mlx5_fs_dr_action action; u32 id; @@ -65,6 +71,7 @@ struct mlx5_modify_hdr { struct mlx5_pkt_reformat { enum mlx5_flow_namespace_type ns_type; int reformat_type; /* from mlx5_ifc */ + enum mlx5_flow_resource_owner owner; union { struct mlx5_fs_dr_action action; u32 id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 1d879374acaa..229520405d4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -276,18 +276,6 @@ static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) return pci_num_vf(dev->pdev) ? true : false; } -static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) -{ - /* LACP owner conditions: - * 1) Function is physical. - * 2) LAG is supported by FW. - * 3) LAG is managed by driver (currently the only option). 
- */ - return MLX5_CAP_GEN(dev, vport_group_manager) && - (MLX5_CAP_GEN(dev, num_lag_ports) > 1) && - MLX5_CAP_GEN(dev, lag_master); -} - int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev); static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 843da89a9035..98412bd5a696 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -126,14 +126,22 @@ out: return ret; } -static void irq_release(struct mlx5_irq *irq) +/* mlx5_system_free_irq - Free an IRQ + * @irq: IRQ to free + * + * Free the IRQ and other resources such as rmap from the system. + * BUT doesn't free or remove reference from mlx5. + * This function is very important for the shutdown flow, where we need to + * clean up system resources but keep mlx5 objects alive, + * see mlx5_irq_table_free_irqs(). + */ +static void mlx5_system_free_irq(struct mlx5_irq *irq) { struct mlx5_irq_pool *pool = irq->pool; #ifdef CONFIG_RFS_ACCEL struct cpu_rmap *rmap; #endif - xa_erase(&pool->irqs, irq->pool_index); /* free_irq requires that affinity_hint and rmap will be cleared before * calling it. To satisfy this requirement, we call * irq_cpu_rmap_remove() to remove the notifier @@ -145,10 +153,18 @@ static void irq_release(struct mlx5_irq *irq) irq_cpu_rmap_remove(rmap, irq->map.virq); #endif - free_cpumask_var(irq->mask); free_irq(irq->map.virq, &irq->nh); if (irq->map.index && pci_msix_can_alloc_dyn(pool->dev->pdev)) pci_msix_free_irq(pool->dev->pdev, irq->map); +} + +static void irq_release(struct mlx5_irq *irq) +{ + struct mlx5_irq_pool *pool = irq->pool; + + xa_erase(&pool->irqs, irq->pool_index); + mlx5_system_free_irq(irq); + free_cpumask_var(irq->mask); kfree(irq); } @@ -565,15 +581,21 @@ void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs) int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs, struct mlx5_irq **irqs, struct cpu_rmap **rmap) { + struct mlx5_irq_table *table = mlx5_irq_table_get(dev); + struct mlx5_irq_pool *pool = table->pcif_pool; struct irq_affinity_desc af_desc; struct mlx5_irq *irq; + int offset = 1; int i; + if (!pool->xa_num_irqs.max) + offset = 0; + af_desc.is_managed = false; for (i = 0; i < nirqs; i++) { cpumask_clear(&af_desc.mask); cpumask_set_cpu(cpus[i], &af_desc.mask); - irq = mlx5_irq_request(dev, i + 1, &af_desc, rmap); + irq = mlx5_irq_request(dev, i + offset, &af_desc, rmap); if (IS_ERR(irq)) break; irqs[i] = irq; @@ -699,7 +721,8 @@ static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool) unsigned long index; xa_for_each(&pool->irqs, index, irq) - free_irq(irq->map.virq, &irq->nh); + mlx5_system_free_irq(irq); + } static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 0eb9a8d7f282..0f783e7906cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -1421,9 +1421,13 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, } case DR_ACTION_TYP_TNL_L3_TO_L2: { - u8 hw_actions[DR_ACTION_CACHE_LINE_SIZE] = {}; + u8 *hw_actions; int ret; + hw_actions = kzalloc(DR_ACTION_CACHE_LINE_SIZE, GFP_KERNEL); + if (!hw_actions) + return -ENOMEM; + ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx, data, data_sz, hw_actions, @@ 
-1431,6 +1435,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, &action->rewrite->num_of_actions); if (ret) { mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n"); + kfree(hw_actions); return ret; } @@ -1440,6 +1445,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, ret = mlx5dr_ste_alloc_modify_hdr(action); if (ret) { mlx5dr_dbg(dmn, "Failed preparing reformat data\n"); + kfree(hw_actions); return ret; } return 0; @@ -2129,6 +2135,11 @@ mlx5dr_action_create_aso(struct mlx5dr_domain *dmn, u32 obj_id, return action; } +u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action) +{ + return action->reformat->id; +} + int mlx5dr_action_destroy(struct mlx5dr_action *action) { if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 984653756779..cc215beb7436 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -331,8 +331,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, } if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { - bool is_decap = fte->action.pkt_reformat->reformat_type == - MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2; + bool is_decap; + + if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) { + err = -EINVAL; + mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n"); + goto free_actions; + } + + is_decap = fte->action.pkt_reformat->reformat_type == + MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2; if (is_decap) actions[num_actions++] = @@ -661,6 +669,7 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns return -EINVAL; } + pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW; pkt_reformat->action.dr_action = action; return 0; @@ -691,6 +700,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns, return -EINVAL; } + modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW; modify_hdr->action.dr_action = action; return 0; @@ -816,6 +826,19 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns, return steering_caps; } +int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat) +{ + switch (pkt_reformat->reformat_type) { + case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: + case MLX5_REFORMAT_TYPE_L2_TO_NVGRE: + case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL: + case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL: + case MLX5_REFORMAT_TYPE_INSERT_HDR: + return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->action.dr_action); + } + return -EOPNOTSUPP; +} + bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev) { return mlx5dr_is_supported(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h index d168622063d5..99a3b2eff6b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h @@ -38,6 +38,8 @@ struct mlx5_fs_dr_table { bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev); +int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat); + const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void); #else @@ -47,6 +49,11 @@ static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void) return NULL; } +static inline u32 mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat) +{ + return 0; +} + static inline bool 
mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev) { return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index 9afd268a2573..d1c04f43d86d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -150,6 +150,8 @@ mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn, int mlx5dr_action_destroy(struct mlx5dr_action *action); +u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action); + int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id, u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask, u32 *definer_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/thermal.c b/drivers/net/ethernet/mellanox/mlx5/core/thermal.c index e47fa6fb836f..20bb5eb266c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/thermal.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/thermal.c @@ -45,7 +45,7 @@ static int mlx5_thermal_get_mtmp_temp(struct mlx5_core_dev *mdev, u32 id, int *p static int mlx5_thermal_get_temp(struct thermal_zone_device *tzdev, int *p_temp) { - struct mlx5_thermal *thermal = tzdev->devdata; + struct mlx5_thermal *thermal = thermal_zone_device_priv(tzdev); struct mlx5_core_dev *mdev = thermal->mdev; int err; @@ -81,12 +81,13 @@ int mlx5_thermal_init(struct mlx5_core_dev *mdev) return -ENOMEM; thermal->mdev = mdev; - thermal->tzdev = thermal_zone_device_register(data, - MLX5_THERMAL_NUM_TRIPS, - MLX5_THERMAL_TRIP_MASK, - thermal, - &mlx5_thermal_ops, - NULL, 0, MLX5_THERMAL_POLL_INT_MSEC); + thermal->tzdev = thermal_zone_device_register_with_trips(data, + NULL, + MLX5_THERMAL_NUM_TRIPS, + MLX5_THERMAL_TRIP_MASK, + thermal, + &mlx5_thermal_ops, + NULL, 0, MLX5_THERMAL_POLL_INT_MSEC); if (IS_ERR(thermal->tzdev)) { dev_err(mdev->device, "Failed to register thermal zone device (%s) %ld\n", data, PTR_ERR(thermal->tzdev)); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index c865a4be05ee..4a1b94e5a8ea 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -582,8 +582,7 @@ qcaspi_spi_thread(void *data) while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); if ((qca->intr_req == qca->intr_svc) && - (qca->txr.skb[qca->txr.head] == NULL) && - (qca->sync == QCASPI_SYNC_READY)) + !qca->txr.skb[qca->txr.head]) schedule(); set_current_state(TASK_RUNNING); diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index aace87139cea..fa6d6202b129 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -347,17 +347,6 @@ out: return -ENOMEM; } -static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv) -{ - struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; - - gq->ring_size = TS_RING_SIZE; - gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev, - sizeof(struct rswitch_ts_desc) * - (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); - return !gq->ts_ring ? 
-ENOMEM : 0; -} - static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr) { desc->dptrl = cpu_to_le32(lower_32_bits(addr)); @@ -533,6 +522,28 @@ static void rswitch_gwca_linkfix_free(struct rswitch_private *priv) gwca->linkfix_table = NULL; } +static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv) +{ + struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; + struct rswitch_ts_desc *desc; + + gq->ring_size = TS_RING_SIZE; + gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev, + sizeof(struct rswitch_ts_desc) * + (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); + + if (!gq->ts_ring) + return -ENOMEM; + + rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE); + desc = &gq->ts_ring[gq->ring_size]; + desc->desc.die_dt = DT_LINKFIX; + rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); + INIT_LIST_HEAD(&priv->gwca.ts_info_list); + + return 0; +} + static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv) { struct rswitch_gwca_queue *gq; @@ -1780,9 +1791,6 @@ static int rswitch_init(struct rswitch_private *priv) if (err < 0) goto err_ts_queue_alloc; - rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE); - INIT_LIST_HEAD(&priv->gwca.ts_info_list); - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { err = rswitch_device_alloc(priv, i); if (err < 0) { diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index d30459dbfe8f..b63e47af6365 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -2950,7 +2950,7 @@ static u32 efx_ef10_extract_event_ts(efx_qword_t *event) return tstamp; } -static void +static int efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) { struct efx_nic *efx = channel->efx; @@ -2958,13 +2958,14 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) unsigned int tx_ev_desc_ptr; unsigned int tx_ev_q_label; unsigned int tx_ev_type; + int work_done; u64 ts_part; if (unlikely(READ_ONCE(efx->reset_pending))) - return; + return 0; if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) - return; + return 0; /* Get the transmit queue */ tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); @@ -2973,8 +2974,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) if (!tx_queue->timestamping) { /* Transmit completion */ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); - efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); - return; + return efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); } /* Transmit timestamps are only available for 8XXX series. They result @@ -3000,6 +3000,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) * fields in the event. 
*/ tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1); + work_done = 0; switch (tx_ev_type) { case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION: @@ -3016,6 +3017,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) tx_queue->completed_timestamp_major = ts_part; efx_xmit_done_single(tx_queue); + work_done = 1; break; default: @@ -3026,6 +3028,8 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) EFX_QWORD_VAL(*event)); break; } + + return work_done; } static void @@ -3081,13 +3085,16 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, } } +#define EFX_NAPI_MAX_TX 512 + static int efx_ef10_ev_process(struct efx_channel *channel, int quota) { struct efx_nic *efx = channel->efx; efx_qword_t event, *p_event; unsigned int read_ptr; - int ev_code; + int spent_tx = 0; int spent = 0; + int ev_code; if (quota <= 0) return spent; @@ -3126,7 +3133,11 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota) } break; case ESE_DZ_EV_CODE_TX_EV: - efx_ef10_handle_tx_event(channel, &event); + spent_tx += efx_ef10_handle_tx_event(channel, &event); + if (spent_tx >= EFX_NAPI_MAX_TX) { + spent = quota; + goto out; + } break; case ESE_DZ_EV_CODE_DRIVER_EV: efx_ef10_handle_driver_event(channel, &event); diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 4dc643b0d2db..7adde9639c8a 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -253,6 +253,8 @@ static void ef100_ev_read_ack(struct efx_channel *channel) efx_reg(channel->efx, ER_GZ_EVQ_INT_PRIME)); } +#define EFX_NAPI_MAX_TX 512 + static int ef100_ev_process(struct efx_channel *channel, int quota) { struct efx_nic *efx = channel->efx; @@ -260,6 +262,7 @@ static int ef100_ev_process(struct efx_channel *channel, int quota) bool evq_phase, old_evq_phase; unsigned int read_ptr; efx_qword_t *p_event; + int spent_tx = 0; int spent = 0; bool ev_phase; int ev_type; @@ -295,7 +298,9 @@ static int ef100_ev_process(struct efx_channel *channel, int quota) efx_mcdi_process_event(channel, p_event); break; case ESE_GZ_EF100_EV_TX_COMPLETION: - ef100_ev_tx(channel, p_event); + spent_tx += ef100_ev_tx(channel, p_event); + if (spent_tx >= EFX_NAPI_MAX_TX) + spent = quota; break; case ESE_GZ_EF100_EV_DRIVER: netif_info(efx, drv, efx->net_dev, diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c index 29ffaf35559d..849e5555bd12 100644 --- a/drivers/net/ethernet/sfc/ef100_tx.c +++ b/drivers/net/ethernet/sfc/ef100_tx.c @@ -346,7 +346,7 @@ void ef100_tx_write(struct efx_tx_queue *tx_queue) ef100_tx_push_buffers(tx_queue); } -void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event) +int ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event) { unsigned int tx_done = EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC); @@ -357,7 +357,7 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event) unsigned int tx_index = (tx_queue->read_count + tx_done - 1) & tx_queue->ptr_mask; - efx_xmit_done(tx_queue, tx_index); + return efx_xmit_done(tx_queue, tx_index); } /* Add a socket buffer to a TX queue diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h index e9e11540fcde..d9a0819c5a72 100644 --- a/drivers/net/ethernet/sfc/ef100_tx.h +++ b/drivers/net/ethernet/sfc/ef100_tx.h @@ -20,7 +20,7 @@ void ef100_tx_init(struct efx_tx_queue *tx_queue); void ef100_tx_write(struct efx_tx_queue *tx_queue); 
unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx); -void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event); +int ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event); netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb, diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index fcea3ea809d7..41b33a75333c 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -301,6 +301,7 @@ int efx_probe_interrupts(struct efx_nic *efx) efx->tx_channel_offset = 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; rc = pci_enable_msi(efx->pci_dev); if (rc == 0) { efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; @@ -322,6 +323,7 @@ int efx_probe_interrupts(struct efx_nic *efx) efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; efx->legacy_irq = efx->pci_dev->irq; } diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c index 06ed74994e36..1776f7f8a7a9 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.c +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -302,6 +302,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx) efx->tx_channel_offset = 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; rc = pci_enable_msi(efx->pci_dev); if (rc == 0) { efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; @@ -323,6 +324,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx) efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; efx->legacy_irq = efx->pci_dev->irq; } diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index 67e789b96c43..755aa92bf823 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -249,7 +249,7 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue) } } -void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) +int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned int fill_level, pkts_compl = 0, bytes_compl = 0; unsigned int efv_pkts_compl = 0; @@ -279,6 +279,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) } efx_xmit_done_check_empty(tx_queue); + + return pkts_compl + efv_pkts_compl; } /* Remove buffers put into a tx_queue for the current packet. 
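The common thread in the sfc hunks above: efx_xmit_done() and ef100_ev_tx() now report how many packets a TX event completed, so the event-processing loop can cap TX completion work per NAPI poll at EFX_NAPI_MAX_TX and claim the whole quota once the cap is hit, which makes NAPI reschedule the poll instead of letting one busy TX queue starve the channel. A compilable sketch of just the budgeting logic, with a stubbed event source standing in for the hardware event queue:

#define NAPI_MAX_TX 512	/* mirrors EFX_NAPI_MAX_TX in the hunks above */

/* Stub: packets completed by the next TX event, or -1 when none pending. */
static int next_tx_event(void)
{
	static int pending = 2048;	/* pretend 2048 completions are queued */

	if (!pending)
		return -1;
	pending -= 64;
	return 64;
}

static int ev_process(int quota)
{
	int spent = 0, spent_tx = 0, done;

	while (spent < quota && (done = next_tx_event()) >= 0) {
		spent_tx += done;
		if (spent_tx >= NAPI_MAX_TX) {
			spent = quota;	/* claim full budget so NAPI repolls */
			break;
		}
		spent++;
	}
	return spent;
}

Returning the full quota is the idiomatic way to tell the NAPI core "there is more work": the poll is left on the schedule list rather than re-armed via interrupt.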
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h index d87aecbc7bf1..1e9f42938aac 100644 --- a/drivers/net/ethernet/sfc/tx_common.h +++ b/drivers/net/ethernet/sfc/tx_common.h @@ -28,7 +28,7 @@ static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) } void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue); -void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); +int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, unsigned int insert_count); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 52cab9de05f2..87510951f4e8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3873,7 +3873,6 @@ irq_error: stmmac_hw_teardown(dev); init_error: - free_dma_desc_resources(priv, &priv->dma_conf); phylink_disconnect_phy(priv->phylink); init_phy_error: pm_runtime_put(priv->device); @@ -3891,6 +3890,9 @@ static int stmmac_open(struct net_device *dev) return PTR_ERR(dma_conf); ret = __stmmac_open(dev, dma_conf); + if (ret) + free_dma_desc_resources(priv, dma_conf); + kfree(dma_conf); return ret; } @@ -5633,12 +5635,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) stmmac_release(dev); ret = __stmmac_open(dev, dma_conf); - kfree(dma_conf); if (ret) { + free_dma_desc_resources(priv, dma_conf); + kfree(dma_conf); netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); return ret; } + kfree(dma_conf); + stmmac_set_rx_mode(dev); } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 11cbcd9e2c72..bebcfd5e6b57 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2068,7 +2068,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) /* Initialize the Serdes PHY for the port */ ret = am65_cpsw_init_serdes_phy(dev, port_np, port); if (ret) - return ret; + goto of_node_put; port->slave.mac_only = of_property_read_bool(port_np, "ti,mac-only"); diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index f9972b8140f9..a03490ba2e5b 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1348,3 +1348,5 @@ module_spi_driver(adf7242_driver); MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>"); MODULE_DESCRIPTION("ADF7242 IEEE802.15.4 Transceiver Driver"); MODULE_LICENSE("GPL"); + +MODULE_FIRMWARE(FIRMWARE); diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index 8445c2189d11..31cba9aa7636 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -685,7 +685,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info) static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info) { struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1]; - struct hwsim_edge_info *einfo; + struct hwsim_edge_info *einfo, *einfo_old; struct hwsim_phy *phy_v0; struct hwsim_edge *e; u32 v0, v1; @@ -723,8 +723,10 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info) list_for_each_entry_rcu(e, &phy_v0->edges, list) { if (e->endpoint->idx == v1) { einfo->lqi = lqi; - rcu_assign_pointer(e->info, einfo); + einfo_old = rcu_replace_pointer(e->info, einfo, + 
lockdep_is_held(&hwsim_phys_lock)); rcu_read_unlock(); + kfree_rcu(einfo_old, rcu); mutex_unlock(&hwsim_phys_lock); return 0; } diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c index 71712ea25403..d5b05e803219 100644 --- a/drivers/net/ipvlan/ipvlan_l3s.c +++ b/drivers/net/ipvlan/ipvlan_l3s.c @@ -102,6 +102,10 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb, skb->dev = addr->master->dev; skb->skb_iif = skb->dev->ifindex; +#if IS_ENABLED(CONFIG_IPV6) + if (addr->atype == IPVL_IPV6) + IP6CB(skb)->iif = skb->dev->ifindex; +#endif len = skb->len + ETH_HLEN; ipvlan_count_rx(addr->master, len, true, false); out: diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 3427993f94f7..984dfa5d6c11 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3997,17 +3997,15 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) return -ENOMEM; secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); - if (!secy->tx_sc.stats) { - free_percpu(macsec->stats); + if (!secy->tx_sc.stats) return -ENOMEM; - } secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); - if (!secy->tx_sc.md_dst) { - free_percpu(secy->tx_sc.stats); - free_percpu(macsec->stats); + if (!secy->tx_sc.md_dst) + /* macsec and secy percpu stats will be freed when unregistering + * net_device in macsec_free_netdev() + */ return -ENOMEM; - } if (sci == MACSEC_UNDEF_SCI) sci = dev_to_sci(dev, MACSEC_PORT_ES); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 76f5a2402fb0..e397e7d642d9 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -936,7 +936,7 @@ static int dp83867_phy_reset(struct phy_device *phydev) { int err; - err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART); + err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET); if (err < 0) return err; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 389f33a12534..8b3618d3da4a 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -1287,7 +1287,7 @@ EXPORT_SYMBOL_GPL(mdiobus_modify_changed); * @mask: bit mask of bits to clear * @set: bit mask of bits to set */ -int mdiobus_c45_modify_changed(struct mii_bus *bus, int devad, int addr, +int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad, u32 regnum, u16 mask, u16 set) { int err; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 17d0d0555a79..53598210be6c 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3021,6 +3021,15 @@ static int phy_led_blink_set(struct led_classdev *led_cdev, return err; } +static void phy_leds_unregister(struct phy_device *phydev) +{ + struct phy_led *phyled; + + list_for_each_entry(phyled, &phydev->leds, list) { + led_classdev_unregister(&phyled->led_cdev); + } +} + static int of_phy_led(struct phy_device *phydev, struct device_node *led) { @@ -3054,7 +3063,7 @@ static int of_phy_led(struct phy_device *phydev, init_data.fwnode = of_fwnode_handle(led); init_data.devname_mandatory = true; - err = devm_led_classdev_register_ext(dev, cdev, &init_data); + err = led_classdev_register_ext(dev, cdev, &init_data); if (err) return err; @@ -3083,6 +3092,7 @@ static int of_phy_leds(struct phy_device *phydev) err = of_phy_led(phydev, led); if (err) { of_node_put(led); + phy_leds_unregister(phydev); return err; } } @@ -3305,6 +3315,9 @@ static int phy_remove(struct device *dev) cancel_delayed_work_sync(&phydev->state_queue); + if 
(IS_ENABLED(CONFIG_PHYLIB_LEDS)) + phy_leds_unregister(phydev); + phydev->state = PHY_DOWN; sfp_bus_del_upstream(phydev->sfp_bus); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index b4831110003c..5efdeb59f4b2 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -188,6 +188,7 @@ static int phylink_interface_max_speed(phy_interface_t interface) case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_QSGMII: + case PHY_INTERFACE_MODE_QUSGMII: case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_GMII: return SPEED_1000; @@ -204,7 +205,6 @@ static int phylink_interface_max_speed(phy_interface_t interface) case PHY_INTERFACE_MODE_10GBASER: case PHY_INTERFACE_MODE_10GKR: case PHY_INTERFACE_MODE_USXGMII: - case PHY_INTERFACE_MODE_QUSGMII: return SPEED_10000; case PHY_INTERFACE_MODE_25GBASER: @@ -3299,6 +3299,41 @@ void phylink_decode_usxgmii_word(struct phylink_link_state *state, EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word); /** + * phylink_decode_usgmii_word() - decode the USGMII word from a MAC PCS + * @state: a pointer to a struct phylink_link_state. + * @lpa: a 16 bit value which stores the USGMII auto-negotiation word + * + * Helper for MAC PCS supporting the USGMII protocol and the auto-negotiation + * code word. Decode the USGMII code word and populate the corresponding fields + * (speed, duplex) into the phylink_link_state structure. The structure for this + * word is the same as the USXGMII word, except it only supports speeds up to + * 1Gbps. + */ +static void phylink_decode_usgmii_word(struct phylink_link_state *state, + uint16_t lpa) +{ + switch (lpa & MDIO_USXGMII_SPD_MASK) { + case MDIO_USXGMII_10: + state->speed = SPEED_10; + break; + case MDIO_USXGMII_100: + state->speed = SPEED_100; + break; + case MDIO_USXGMII_1000: + state->speed = SPEED_1000; + break; + default: + state->link = false; + return; + } + + if (lpa & MDIO_USXGMII_FULL_DUPLEX) + state->duplex = DUPLEX_FULL; + else + state->duplex = DUPLEX_HALF; +} + +/** * phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers * @state: a pointer to a &struct phylink_link_state. 
* @bmsr: The value of the %MII_BMSR register @@ -3335,9 +3370,11 @@ void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state, case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: - case PHY_INTERFACE_MODE_QUSGMII: phylink_decode_sgmii_word(state, lpa); break; + case PHY_INTERFACE_MODE_QUSGMII: + phylink_decode_usgmii_word(state, lpa); + break; default: state->link = false; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index f1865d047971..2e7c7b0cdc54 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1220,7 +1220,9 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x05c6, 0x9080, 8)}, {QMI_FIXED_INTF(0x05c6, 0x9083, 3)}, {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, + {QMI_QUIRK_SET_DTR(0x05c6, 0x9091, 2)}, /* Compal RXM-G1 */ {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */ + {QMI_QUIRK_SET_DTR(0x05c6, 0x90db, 2)}, /* Compal RXM-G1 */ {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index d62a904d2e42..56326f38fe8a 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -384,6 +384,9 @@ static int lapbeth_new_device(struct net_device *dev) ASSERT_RTNL(); + if (dev->type != ARPHRD_ETHER) + return -EINVAL; + ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN, lapbeth_setup); if (!ndev) diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index 038c5903c0dc..52c1a3de8da6 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -1082,8 +1082,7 @@ int ath10k_qmi_init(struct ath10k *ar, u32 msa_size) if (ret) goto err; - qmi->event_wq = alloc_workqueue("ath10k_qmi_driver_event", - WQ_UNBOUND, 1); + qmi->event_wq = alloc_ordered_workqueue("ath10k_qmi_driver_event", 0); if (!qmi->event_wq) { ath10k_err(ar, "failed to allocate workqueue\n"); ret = -EFAULT; diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index ab923e24b0a9..26b252e62909 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -3256,8 +3256,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab) return ret; } - ab->qmi.event_wq = alloc_workqueue("ath11k_qmi_driver_event", - WQ_UNBOUND, 1); + ab->qmi.event_wq = alloc_ordered_workqueue("ath11k_qmi_driver_event", 0); if (!ab->qmi.event_wq) { ath11k_err(ab, "failed to allocate workqueue\n"); return -EFAULT; diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c index 03ba245fbee9..0a7892b1a8f8 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.c +++ b/drivers/net/wireless/ath/ath12k/qmi.c @@ -3056,8 +3056,7 @@ int ath12k_qmi_init_service(struct ath12k_base *ab) return ret; } - ab->qmi.event_wq = alloc_workqueue("ath12k_qmi_driver_event", - WQ_UNBOUND, 1); + ab->qmi.event_wq = alloc_ordered_workqueue("ath12k_qmi_driver_event", 0); if (!ab->qmi.event_wq) { ath12k_err(ab, "failed to allocate workqueue\n"); return -EFAULT; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 23266d0c9ce4..9a20468345e4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -2692,7 +2692,7 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, lq_sta = mvm_sta; - spin_lock(&lq_sta->pers.lock); + 
spin_lock_bh(&lq_sta->pers.lock); iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags, info->band, &info->control.rates[0]); info->control.rates[0].count = 1; @@ -2707,7 +2707,7 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band, &txrc->reported_rate); } - spin_unlock(&lq_sta->pers.lock); + spin_unlock_bh(&lq_sta->pers.lock); } static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, @@ -3264,11 +3264,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* If it's locked we are in middle of init flow * just wait for next tx status to update the lq_sta data */ - if (!spin_trylock(&mvmsta->deflink.lq_sta.rs_drv.pers.lock)) + if (!spin_trylock_bh(&mvmsta->deflink.lq_sta.rs_drv.pers.lock)) return; __iwl_mvm_rs_tx_status(mvm, sta, tid, info, ndp); - spin_unlock(&mvmsta->deflink.lq_sta.rs_drv.pers.lock); + spin_unlock_bh(&mvmsta->deflink.lq_sta.rs_drv.pers.lock); } #ifdef CONFIG_MAC80211_DEBUGFS @@ -4117,9 +4117,9 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, } else { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - spin_lock(&mvmsta->deflink.lq_sta.rs_drv.pers.lock); + spin_lock_bh(&mvmsta->deflink.lq_sta.rs_drv.pers.lock); rs_drv_rate_init(mvm, sta, band); - spin_unlock(&mvmsta->deflink.lq_sta.rs_drv.pers.lock); + spin_unlock_bh(&mvmsta->deflink.lq_sta.rs_drv.pers.lock); } } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index dba112394838..79115eb1c285 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -548,6 +548,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690s_name), diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index b281850fbf7a..59e14b398295 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3576,7 +3576,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, init_waitqueue_head(&trans_pcie->imr_waitq); trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", - WQ_HIGHPRI | WQ_UNBOUND, 1); + WQ_HIGHPRI | WQ_UNBOUND, 0); if (!trans_pcie->rba.alloc_wq) { ret = -ENOMEM; goto out_free_trans; diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index bcd564dc3554..5337ee4b6f10 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3127,7 +3127,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | - WQ_UNBOUND, 1, name); + WQ_UNBOUND, 0, name); if (!priv->dfs_cac_workqueue) { mwifiex_dbg(adapter, ERROR, "cannot alloc DFS CAC queue\n"); ret = -ENOMEM; @@ -3138,7 +3138,7 @@ struct 
wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, priv->dfs_chan_sw_workqueue = alloc_workqueue("MWIFIEX_DFS_CHSW%s", WQ_HIGHPRI | WQ_UNBOUND | - WQ_MEM_RECLAIM, 1, name); + WQ_MEM_RECLAIM, 0, name); if (!priv->dfs_chan_sw_workqueue) { mwifiex_dbg(adapter, ERROR, "cannot alloc DFS channel sw queue\n"); ret = -ENOMEM; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index ea22a08e6c08..1cd9d20cca16 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1547,7 +1547,7 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter) adapter->workqueue = alloc_workqueue("MWIFIEX_WORK_QUEUE", - WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!adapter->workqueue) goto err_kmalloc; @@ -1557,7 +1557,7 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter) adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE", WQ_HIGHPRI | WQ_MEM_RECLAIM | - WQ_UNBOUND, 1); + WQ_UNBOUND, 0); if (!adapter->rx_workqueue) goto err_kmalloc; INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue); @@ -1702,7 +1702,7 @@ mwifiex_add_card(void *card, struct completion *fw_done, adapter->workqueue = alloc_workqueue("MWIFIEX_WORK_QUEUE", - WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!adapter->workqueue) goto err_kmalloc; @@ -1712,7 +1712,7 @@ mwifiex_add_card(void *card, struct completion *fw_done, adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE", WQ_HIGHPRI | WQ_MEM_RECLAIM | - WQ_UNBOUND, 1); + WQ_UNBOUND, 0); if (!adapter->rx_workqueue) goto err_kmalloc; diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c index d6b166fc5c0e..bff46f7ca59f 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c @@ -626,14 +626,12 @@ static void mux_dl_adb_decode(struct iosm_mux *ipc_mux, if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) goto adb_decode_err; - if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) - - sizeof(struct mux_adth_dg))) + if (le16_to_cpu(adth->table_length) < sizeof(struct mux_adth)) goto adb_decode_err; /* Calculate the number of datagrams. */ nr_of_dg = (le16_to_cpu(adth->table_length) - - sizeof(struct mux_adth) + - sizeof(struct mux_adth_dg)) / + sizeof(struct mux_adth)) / sizeof(struct mux_adth_dg); /* Is the datagram table empty ? */ @@ -649,7 +647,7 @@ static void mux_dl_adb_decode(struct iosm_mux *ipc_mux, } /* New aggregated datagram table. 
*/ - dg = &adth->dg; + dg = adth->dg; if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id, nr_of_dg) < 0) goto adb_decode_err; @@ -849,7 +847,7 @@ static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux, adth->if_id = i; adth->table_length = cpu_to_le16(adth_dg_size); adth_dg_size -= offsetof(struct mux_adth, dg); - memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size); + memcpy(adth->dg, ul_adb->dg[i], adth_dg_size); ul_adb->if_cnt++; } @@ -1426,14 +1424,13 @@ static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux, if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) { nr_of_dg = (le16_to_cpu(adth->table_length) - - sizeof(struct mux_adth) + - sizeof(struct mux_adth_dg)) / + sizeof(struct mux_adth)) / sizeof(struct mux_adth_dg); if (nr_of_dg <= 0) return payload_size; - dg = &adth->dg; + dg = adth->dg; for (i = 0; i < nr_of_dg; i++, dg++) { if (le32_to_cpu(dg->datagram_index) < diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h index 5d4e3b89542c..f8df88f816c4 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h @@ -161,7 +161,7 @@ struct mux_adth { u8 opt_ipv4v6; __le32 next_table_index; __le32 reserved2; - struct mux_adth_dg dg; + struct mux_adth_dg dg[]; }; /** diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c index aec3a18d44bd..7162bf38a8c9 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -1293,9 +1293,9 @@ int t7xx_cldma_init(struct cldma_ctrl *md_ctrl) for (i = 0; i < CLDMA_TXQ_NUM; i++) { md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); md_ctrl->txq[i].worker = - alloc_workqueue("md_hif%d_tx%d_worker", - WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI), - 1, md_ctrl->hif_id, i); + alloc_ordered_workqueue("md_hif%d_tx%d_worker", + WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI), + md_ctrl->hif_id, i); if (!md_ctrl->txq[i].worker) goto err_workqueue; @@ -1306,9 +1306,10 @@ int t7xx_cldma_init(struct cldma_ctrl *md_ctrl) md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done); - md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker", - WQ_UNBOUND | WQ_MEM_RECLAIM, - 1, md_ctrl->hif_id, i); + md_ctrl->rxq[i].worker = + alloc_ordered_workqueue("md_hif%d_rx%d_worker", + WQ_MEM_RECLAIM, + md_ctrl->hif_id, i); if (!md_ctrl->rxq[i].worker) goto err_workqueue; } diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c index 46514208d4f9..8dab025a088a 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c @@ -618,8 +618,9 @@ int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq) return ret; } - txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM | - (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index); + txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker", + WQ_MEM_RECLAIM | (txq->index ? 
0 : WQ_HIGHPRI), + txq->index); if (!txq->worker) return -ENOMEM; diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c index f12f903a9dd1..da3e2dce8e70 100644 --- a/drivers/nfc/fdp/fdp.c +++ b/drivers/nfc/fdp/fdp.c @@ -762,3 +762,6 @@ EXPORT_SYMBOL(fdp_nci_remove); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("NFC NCI driver for Intel Fields Peak NFC controller"); MODULE_AUTHOR("Robert Dolca <robert.dolca@intel.com>"); + +MODULE_FIRMWARE(FDP_OTP_PATCH_NAME); +MODULE_FIRMWARE(FDP_RAM_PATCH_NAME); diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c index f70ba58dbc55..ab0f32b901c8 100644 --- a/drivers/nubus/nubus.c +++ b/drivers/nubus/nubus.c @@ -32,6 +32,13 @@ /* Globals */ +/* The "nubus.populate_procfs" parameter makes slot resources available in + * procfs. It's deprecated and disabled by default because procfs is no longer + * thought to be suitable for that and some board ROMs make it too expensive. + */ +bool nubus_populate_procfs; +module_param_named(populate_procfs, nubus_populate_procfs, bool, 0); + LIST_HEAD(nubus_func_rsrcs); /* Meaning of "bytelanes": @@ -572,9 +579,9 @@ nubus_get_functional_resource(struct nubus_board *board, int slot, nubus_proc_add_rsrc(dir.procdir, &ent); break; default: - /* Local/Private resources have their own - function */ - nubus_get_private_resource(fres, dir.procdir, &ent); + if (nubus_populate_procfs) + nubus_get_private_resource(fres, dir.procdir, + &ent); } } diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c index 1fd667852271..e7a347db708c 100644 --- a/drivers/nubus/proc.c +++ b/drivers/nubus/proc.c @@ -55,7 +55,7 @@ struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board) { char name[2]; - if (!proc_bus_nubus_dir) + if (!proc_bus_nubus_dir || !nubus_populate_procfs) return NULL; snprintf(name, sizeof(name), "%x", board->slot); return proc_mkdir(name, proc_bus_nubus_dir); @@ -72,9 +72,10 @@ struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir, char name[9]; int lanes = board->lanes; - if (!procdir) + if (!procdir || !nubus_populate_procfs) return NULL; snprintf(name, sizeof(name), "%x", ent->type); + remove_proc_subtree(name, procdir); return proc_mkdir_data(name, 0555, procdir, (void *)lanes); } @@ -137,6 +138,18 @@ static int nubus_proc_rsrc_show(struct seq_file *m, void *v) return 0; } +static int nubus_rsrc_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, nubus_proc_rsrc_show, inode); +} + +static const struct proc_ops nubus_rsrc_proc_ops = { + .proc_open = nubus_rsrc_proc_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir, const struct nubus_dirent *ent, unsigned int size) @@ -144,7 +157,7 @@ void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir, char name[9]; struct nubus_proc_pde_data *pded; - if (!procdir) + if (!procdir || !nubus_populate_procfs) return; snprintf(name, sizeof(name), "%x", ent->type); @@ -152,8 +165,9 @@ void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir, pded = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size); else pded = NULL; - proc_create_single_data(name, S_IFREG | 0444, procdir, - nubus_proc_rsrc_show, pded); + remove_proc_subtree(name, procdir); + proc_create_data(name, S_IFREG | 0444, procdir, + &nubus_rsrc_proc_ops, pded); } void nubus_proc_add_rsrc(struct proc_dir_entry *procdir, @@ -162,13 +176,14 @@ void nubus_proc_add_rsrc(struct proc_dir_entry *procdir, char name[9]; unsigned char *data = (unsigned 
char *)ent->data; - if (!procdir) + if (!procdir || !nubus_populate_procfs) return; snprintf(name, sizeof(name), "%x", ent->type); - proc_create_single_data(name, S_IFREG | 0444, procdir, - nubus_proc_rsrc_show, - nubus_proc_alloc_pde_data(data, 0)); + remove_proc_subtree(name, procdir); + proc_create_data(name, S_IFREG | 0444, procdir, + &nubus_rsrc_proc_ops, + nubus_proc_alloc_pde_data(data, 0)); } /* diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index e27202d22c7d..d3fc5063e4be 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_NVME_FC) += nvme-fc.o obj-$(CONFIG_NVME_TCP) += nvme-tcp.o obj-$(CONFIG_NVME_APPLE) += nvme-apple.o -nvme-core-y += core.o ioctl.o +nvme-core-y += core.o ioctl.o sysfs.o nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o nvme-core-$(CONFIG_TRACING) += trace.o nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index ea16a0aba679..daf5d144a8ea 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -30,18 +30,18 @@ struct nvme_dhchap_queue_context { u32 s2; u16 transaction; u8 status; + u8 dhgroup_id; u8 hash_id; size_t hash_len; - u8 dhgroup_id; u8 c1[64]; u8 c2[64]; u8 response[64]; u8 *host_response; u8 *ctrl_key; - int ctrl_key_len; u8 *host_key; - int host_key_len; u8 *sess_key; + int ctrl_key_len; + int host_key_len; int sess_key_len; }; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 3ec38e2b9173..fdfcf2781c85 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -237,7 +237,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_delete_ctrl); -static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) +void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) { /* * Keep a reference until nvme_do_delete_ctrl() complete, @@ -1635,12 +1635,12 @@ static void nvme_ns_release(struct nvme_ns *ns) nvme_put_ns(ns); } -static int nvme_open(struct block_device *bdev, fmode_t mode) +static int nvme_open(struct gendisk *disk, blk_mode_t mode) { - return nvme_ns_open(bdev->bd_disk->private_data); + return nvme_ns_open(disk->private_data); } -static void nvme_release(struct gendisk *disk, fmode_t mode) +static void nvme_release(struct gendisk *disk) { nvme_ns_release(disk->private_data); } @@ -1879,7 +1879,7 @@ static void nvme_update_disk_info(struct gendisk *disk, struct nvme_ns *ns, struct nvme_id_ns *id) { sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); - unsigned short bs = 1 << ns->lba_shift; + u32 bs = 1U << ns->lba_shift; u32 atomic_bs, phys_bs, io_opt = 0; /* @@ -2300,7 +2300,7 @@ static int nvme_report_zones(struct gendisk *disk, sector_t sector, #define nvme_report_zones NULL #endif /* CONFIG_BLK_DEV_ZONED */ -static const struct block_device_operations nvme_bdev_ops = { +const struct block_device_operations nvme_bdev_ops = { .owner = THIS_MODULE, .ioctl = nvme_ioctl, .compat_ioctl = blkdev_compat_ptr_ioctl, @@ -2835,75 +2835,6 @@ static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) return NULL; } -#define SUBSYS_ATTR_RO(_name, _mode, _show) \ - struct device_attribute subsys_attr_##_name = \ - __ATTR(_name, _mode, _show, NULL) - -static ssize_t nvme_subsys_show_nqn(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct nvme_subsystem *subsys = - container_of(dev, struct nvme_subsystem, dev); - - return sysfs_emit(buf, "%s\n", subsys->subnqn); -} -static SUBSYS_ATTR_RO(subsysnqn, 
S_IRUGO, nvme_subsys_show_nqn); - -static ssize_t nvme_subsys_show_type(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct nvme_subsystem *subsys = - container_of(dev, struct nvme_subsystem, dev); - - switch (subsys->subtype) { - case NVME_NQN_DISC: - return sysfs_emit(buf, "discovery\n"); - case NVME_NQN_NVME: - return sysfs_emit(buf, "nvm\n"); - default: - return sysfs_emit(buf, "reserved\n"); - } -} -static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type); - -#define nvme_subsys_show_str_function(field) \ -static ssize_t subsys_##field##_show(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct nvme_subsystem *subsys = \ - container_of(dev, struct nvme_subsystem, dev); \ - return sysfs_emit(buf, "%.*s\n", \ - (int)sizeof(subsys->field), subsys->field); \ -} \ -static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); - -nvme_subsys_show_str_function(model); -nvme_subsys_show_str_function(serial); -nvme_subsys_show_str_function(firmware_rev); - -static struct attribute *nvme_subsys_attrs[] = { - &subsys_attr_model.attr, - &subsys_attr_serial.attr, - &subsys_attr_firmware_rev.attr, - &subsys_attr_subsysnqn.attr, - &subsys_attr_subsystype.attr, -#ifdef CONFIG_NVME_MULTIPATH - &subsys_attr_iopolicy.attr, -#endif - NULL, -}; - -static const struct attribute_group nvme_subsys_attrs_group = { - .attrs = nvme_subsys_attrs, -}; - -static const struct attribute_group *nvme_subsys_attrs_groups[] = { - &nvme_subsys_attrs_group, - NULL, -}; - static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) { return ctrl->opts && ctrl->opts->discovery_nqn; @@ -3108,7 +3039,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) ctrl->max_zeroes_sectors = 0; if (ctrl->subsys->subtype != NVME_NQN_NVME || - nvme_ctrl_limited_cns(ctrl)) + nvme_ctrl_limited_cns(ctrl) || + test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags)) return 0; id = kzalloc(sizeof(*id), GFP_KERNEL); @@ -3130,6 +3062,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl); free_data: + if (ret > 0) + set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags); kfree(id); return ret; } @@ -3437,586 +3371,6 @@ static const struct file_operations nvme_dev_fops = { .uring_cmd = nvme_dev_uring_cmd, }; -static ssize_t nvme_sysfs_reset(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - int ret; - - ret = nvme_reset_ctrl_sync(ctrl); - if (ret < 0) - return ret; - return count; -} -static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); - -static ssize_t nvme_sysfs_rescan(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - nvme_queue_scan(ctrl); - return count; -} -static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); - -static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) -{ - struct gendisk *disk = dev_to_disk(dev); - - if (disk->fops == &nvme_bdev_ops) - return nvme_get_ns_from_dev(dev)->head; - else - return disk->private_data; -} - -static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct nvme_ns_head *head = dev_to_ns_head(dev); - struct nvme_ns_ids *ids = &head->ids; - struct nvme_subsystem *subsys = head->subsys; - int serial_len = sizeof(subsys->serial); - int model_len = sizeof(subsys->model); - - if (!uuid_is_null(&ids->uuid)) - 
return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid); - - if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) - return sysfs_emit(buf, "eui.%16phN\n", ids->nguid); - - if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) - return sysfs_emit(buf, "eui.%8phN\n", ids->eui64); - - while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || - subsys->serial[serial_len - 1] == '\0')) - serial_len--; - while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || - subsys->model[model_len - 1] == '\0')) - model_len--; - - return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, - serial_len, subsys->serial, model_len, subsys->model, - head->ns_id); -} -static DEVICE_ATTR_RO(wwid); - -static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); -} -static DEVICE_ATTR_RO(nguid); - -static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; - - /* For backward compatibility expose the NGUID to userspace if - * we have no UUID set - */ - if (uuid_is_null(&ids->uuid)) { - dev_warn_ratelimited(dev, - "No UUID available providing old NGUID\n"); - return sysfs_emit(buf, "%pU\n", ids->nguid); - } - return sysfs_emit(buf, "%pU\n", &ids->uuid); -} -static DEVICE_ATTR_RO(uuid); - -static ssize_t eui_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); -} -static DEVICE_ATTR_RO(eui); - -static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id); -} -static DEVICE_ATTR_RO(nsid); - -static struct attribute *nvme_ns_id_attrs[] = { - &dev_attr_wwid.attr, - &dev_attr_uuid.attr, - &dev_attr_nguid.attr, - &dev_attr_eui.attr, - &dev_attr_nsid.attr, -#ifdef CONFIG_NVME_MULTIPATH - &dev_attr_ana_grpid.attr, - &dev_attr_ana_state.attr, -#endif - NULL, -}; - -static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, - struct attribute *a, int n) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; - - if (a == &dev_attr_uuid.attr) { - if (uuid_is_null(&ids->uuid) && - !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) - return 0; - } - if (a == &dev_attr_nguid.attr) { - if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) - return 0; - } - if (a == &dev_attr_eui.attr) { - if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) - return 0; - } -#ifdef CONFIG_NVME_MULTIPATH - if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { - if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */ - return 0; - if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) - return 0; - } -#endif - return a->mode; -} - -static const struct attribute_group nvme_ns_id_attr_group = { - .attrs = nvme_ns_id_attrs, - .is_visible = nvme_ns_id_attrs_are_visible, -}; - -const struct attribute_group *nvme_ns_id_attr_groups[] = { - &nvme_ns_id_attr_group, - NULL, -}; - -#define nvme_show_str_function(field) \ -static ssize_t field##_show(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ - return sysfs_emit(buf, "%.*s\n", \ - (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ -} \ -static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); - -nvme_show_str_function(model); 
-nvme_show_str_function(serial); -nvme_show_str_function(firmware_rev); - -#define nvme_show_int_function(field) \ -static ssize_t field##_show(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ - return sysfs_emit(buf, "%d\n", ctrl->field); \ -} \ -static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); - -nvme_show_int_function(cntlid); -nvme_show_int_function(numa_node); -nvme_show_int_function(queue_count); -nvme_show_int_function(sqsize); -nvme_show_int_function(kato); - -static ssize_t nvme_sysfs_delete(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags)) - return -EBUSY; - - if (device_remove_file_self(dev, attr)) - nvme_delete_ctrl_sync(ctrl); - return count; -} -static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); - -static ssize_t nvme_sysfs_show_transport(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%s\n", ctrl->ops->name); -} -static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); - -static ssize_t nvme_sysfs_show_state(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - static const char *const state_name[] = { - [NVME_CTRL_NEW] = "new", - [NVME_CTRL_LIVE] = "live", - [NVME_CTRL_RESETTING] = "resetting", - [NVME_CTRL_CONNECTING] = "connecting", - [NVME_CTRL_DELETING] = "deleting", - [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)", - [NVME_CTRL_DEAD] = "dead", - }; - - if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && - state_name[ctrl->state]) - return sysfs_emit(buf, "%s\n", state_name[ctrl->state]); - - return sysfs_emit(buf, "unknown state\n"); -} - -static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); - -static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn); -} -static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); - -static ssize_t nvme_sysfs_show_hostnqn(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn); -} -static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL); - -static ssize_t nvme_sysfs_show_hostid(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id); -} -static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL); - -static ssize_t nvme_sysfs_show_address(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); -} -static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); - -static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - struct nvmf_ctrl_options *opts = ctrl->opts; - - if (ctrl->opts->max_reconnects == -1) - return sysfs_emit(buf, "off\n"); - return sysfs_emit(buf, "%d\n", - opts->max_reconnects * opts->reconnect_delay); -} - -static ssize_t 
nvme_ctrl_loss_tmo_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - struct nvmf_ctrl_options *opts = ctrl->opts; - int ctrl_loss_tmo, err; - - err = kstrtoint(buf, 10, &ctrl_loss_tmo); - if (err) - return -EINVAL; - - if (ctrl_loss_tmo < 0) - opts->max_reconnects = -1; - else - opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, - opts->reconnect_delay); - return count; -} -static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR, - nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store); - -static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - if (ctrl->opts->reconnect_delay == -1) - return sysfs_emit(buf, "off\n"); - return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay); -} - -static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - unsigned int v; - int err; - - err = kstrtou32(buf, 10, &v); - if (err) - return err; - - ctrl->opts->reconnect_delay = v; - return count; -} -static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, - nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store); - -static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - if (ctrl->opts->fast_io_fail_tmo == -1) - return sysfs_emit(buf, "off\n"); - return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo); -} - -static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - struct nvmf_ctrl_options *opts = ctrl->opts; - int fast_io_fail_tmo, err; - - err = kstrtoint(buf, 10, &fast_io_fail_tmo); - if (err) - return -EINVAL; - - if (fast_io_fail_tmo < 0) - opts->fast_io_fail_tmo = -1; - else - opts->fast_io_fail_tmo = fast_io_fail_tmo; - return count; -} -static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, - nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store); - -static ssize_t cntrltype_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - static const char * const type[] = { - [NVME_CTRL_IO] = "io\n", - [NVME_CTRL_DISC] = "discovery\n", - [NVME_CTRL_ADMIN] = "admin\n", - }; - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype]) - return sysfs_emit(buf, "reserved\n"); - - return sysfs_emit(buf, type[ctrl->cntrltype]); -} -static DEVICE_ATTR_RO(cntrltype); - -static ssize_t dctype_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - static const char * const type[] = { - [NVME_DCTYPE_NOT_REPORTED] = "none\n", - [NVME_DCTYPE_DDC] = "ddc\n", - [NVME_DCTYPE_CDC] = "cdc\n", - }; - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype]) - return sysfs_emit(buf, "reserved\n"); - - return sysfs_emit(buf, type[ctrl->dctype]); -} -static DEVICE_ATTR_RO(dctype); - -#ifdef CONFIG_NVME_AUTH -static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - struct nvmf_ctrl_options *opts = ctrl->opts; - - if (!opts->dhchap_secret) - return sysfs_emit(buf, "none\n"); - return sysfs_emit(buf, "%s\n", 
opts->dhchap_secret); -} - -static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - struct nvmf_ctrl_options *opts = ctrl->opts; - char *dhchap_secret; - - if (!ctrl->opts->dhchap_secret) - return -EINVAL; - if (count < 7) - return -EINVAL; - if (memcmp(buf, "DHHC-1:", 7)) - return -EINVAL; - - dhchap_secret = kzalloc(count + 1, GFP_KERNEL); - if (!dhchap_secret) - return -ENOMEM; - memcpy(dhchap_secret, buf, count); - nvme_auth_stop(ctrl); - if (strcmp(dhchap_secret, opts->dhchap_secret)) { - struct nvme_dhchap_key *key, *host_key; - int ret; - - ret = nvme_auth_generate_key(dhchap_secret, &key); - if (ret) - return ret; - kfree(opts->dhchap_secret); - opts->dhchap_secret = dhchap_secret; - host_key = ctrl->host_key; - mutex_lock(&ctrl->dhchap_auth_mutex); - ctrl->host_key = key; - mutex_unlock(&ctrl->dhchap_auth_mutex); - nvme_auth_free_key(host_key); - } - /* Start re-authentication */ - dev_info(ctrl->device, "re-authenticating controller\n"); - queue_work(nvme_wq, &ctrl->dhchap_auth_work); - - return count; -} -static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR, - nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store); - -static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - struct nvmf_ctrl_options *opts = ctrl->opts; - - if (!opts->dhchap_ctrl_secret) - return sysfs_emit(buf, "none\n"); - return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret); -} - -static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - struct nvmf_ctrl_options *opts = ctrl->opts; - char *dhchap_secret; - - if (!ctrl->opts->dhchap_ctrl_secret) - return -EINVAL; - if (count < 7) - return -EINVAL; - if (memcmp(buf, "DHHC-1:", 7)) - return -EINVAL; - - dhchap_secret = kzalloc(count + 1, GFP_KERNEL); - if (!dhchap_secret) - return -ENOMEM; - memcpy(dhchap_secret, buf, count); - nvme_auth_stop(ctrl); - if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) { - struct nvme_dhchap_key *key, *ctrl_key; - int ret; - - ret = nvme_auth_generate_key(dhchap_secret, &key); - if (ret) - return ret; - kfree(opts->dhchap_ctrl_secret); - opts->dhchap_ctrl_secret = dhchap_secret; - ctrl_key = ctrl->ctrl_key; - mutex_lock(&ctrl->dhchap_auth_mutex); - ctrl->ctrl_key = key; - mutex_unlock(&ctrl->dhchap_auth_mutex); - nvme_auth_free_key(ctrl_key); - } - /* Start re-authentication */ - dev_info(ctrl->device, "re-authenticating controller\n"); - queue_work(nvme_wq, &ctrl->dhchap_auth_work); - - return count; -} -static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR, - nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store); -#endif - -static struct attribute *nvme_dev_attrs[] = { - &dev_attr_reset_controller.attr, - &dev_attr_rescan_controller.attr, - &dev_attr_model.attr, - &dev_attr_serial.attr, - &dev_attr_firmware_rev.attr, - &dev_attr_cntlid.attr, - &dev_attr_delete_controller.attr, - &dev_attr_transport.attr, - &dev_attr_subsysnqn.attr, - &dev_attr_address.attr, - &dev_attr_state.attr, - &dev_attr_numa_node.attr, - &dev_attr_queue_count.attr, - &dev_attr_sqsize.attr, - &dev_attr_hostnqn.attr, - &dev_attr_hostid.attr, - &dev_attr_ctrl_loss_tmo.attr, - &dev_attr_reconnect_delay.attr, - &dev_attr_fast_io_fail_tmo.attr, - &dev_attr_kato.attr, - 
&dev_attr_cntrltype.attr, - &dev_attr_dctype.attr, -#ifdef CONFIG_NVME_AUTH - &dev_attr_dhchap_secret.attr, - &dev_attr_dhchap_ctrl_secret.attr, -#endif - NULL -}; - -static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, - struct attribute *a, int n) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) - return 0; - if (a == &dev_attr_address.attr && !ctrl->ops->get_address) - return 0; - if (a == &dev_attr_hostnqn.attr && !ctrl->opts) - return 0; - if (a == &dev_attr_hostid.attr && !ctrl->opts) - return 0; - if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts) - return 0; - if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts) - return 0; - if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts) - return 0; -#ifdef CONFIG_NVME_AUTH - if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts) - return 0; - if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts) - return 0; -#endif - - return a->mode; -} - -const struct attribute_group nvme_dev_attrs_group = { - .attrs = nvme_dev_attrs, - .is_visible = nvme_dev_attrs_are_visible, -}; -EXPORT_SYMBOL_GPL(nvme_dev_attrs_group); - -static const struct attribute_group *nvme_dev_attr_groups[] = { - &nvme_dev_attrs_group, - NULL, -}; - static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, unsigned nsid) { @@ -4256,7 +3610,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) goto out_put_ns_head; } - if (!multipath && !list_empty(&head->list)) { + if (!multipath) { dev_warn(ctrl->device, "Found shared namespace %d, but multipathing not supported.\n", info->nsid); @@ -4357,7 +3711,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) * instance as shared namespaces will show up as multiple block * devices. */ - if (ns->head->disk) { + if (nvme_ns_head_multipath(ns->head)) { sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, ctrl->instance, ns->head->instance); disk->flags |= GENHD_FL_HIDDEN; @@ -5243,6 +4597,8 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, return 0; out_free_cdev: + nvme_fault_inject_fini(&ctrl->fault_inject); + dev_pm_qos_hide_latency_tolerance(ctrl->device); cdev_device_del(&ctrl->cdev, ctrl->device); out_free_name: nvme_put_ctrl(ctrl); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 0069ebff85df..8175d49f2909 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -21,35 +21,60 @@ static DEFINE_MUTEX(nvmf_hosts_mutex); static struct nvmf_host *nvmf_default_host; -static struct nvmf_host *__nvmf_host_find(const char *hostnqn) +static struct nvmf_host *nvmf_host_alloc(const char *hostnqn, uuid_t *id) { struct nvmf_host *host; - list_for_each_entry(host, &nvmf_hosts, list) { - if (!strcmp(host->nqn, hostnqn)) - return host; - } + host = kmalloc(sizeof(*host), GFP_KERNEL); + if (!host) + return NULL; - return NULL; + kref_init(&host->ref); + uuid_copy(&host->id, id); + strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE); + + return host; } -static struct nvmf_host *nvmf_host_add(const char *hostnqn) +static struct nvmf_host *nvmf_host_add(const char *hostnqn, uuid_t *id) { struct nvmf_host *host; mutex_lock(&nvmf_hosts_mutex); - host = __nvmf_host_find(hostnqn); - if (host) { - kref_get(&host->ref); - goto out_unlock; + + /* + * We have defined a host as how it is perceived by the target. 
+ * Therefore, we don't allow different Host NQNs with the same Host ID. + * Similarly, we do not allow the usage of the same Host NQN with + * different Host IDs. This'll maintain unambiguous host identification. + */ + list_for_each_entry(host, &nvmf_hosts, list) { + bool same_hostnqn = !strcmp(host->nqn, hostnqn); + bool same_hostid = uuid_equal(&host->id, id); + + if (same_hostnqn && same_hostid) { + kref_get(&host->ref); + goto out_unlock; + } + if (same_hostnqn) { + pr_err("found same hostnqn %s but different hostid %pUb\n", + hostnqn, id); + host = ERR_PTR(-EINVAL); + goto out_unlock; + } + if (same_hostid) { + pr_err("found same hostid %pUb but different hostnqn %s\n", + id, hostnqn); + host = ERR_PTR(-EINVAL); + goto out_unlock; + } } - host = kmalloc(sizeof(*host), GFP_KERNEL); - if (!host) + host = nvmf_host_alloc(hostnqn, id); + if (!host) { + host = ERR_PTR(-ENOMEM); goto out_unlock; - - kref_init(&host->ref); - strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE); + } list_add_tail(&host->list, &nvmf_hosts); out_unlock: @@ -60,16 +85,17 @@ out_unlock: static struct nvmf_host *nvmf_host_default(void) { struct nvmf_host *host; + char nqn[NVMF_NQN_SIZE]; + uuid_t id; - host = kmalloc(sizeof(*host), GFP_KERNEL); + uuid_gen(&id); + snprintf(nqn, NVMF_NQN_SIZE, + "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id); + + host = nvmf_host_alloc(nqn, &id); if (!host) return NULL; - kref_init(&host->ref); - uuid_gen(&host->id); - snprintf(host->nqn, NVMF_NQN_SIZE, - "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id); - mutex_lock(&nvmf_hosts_mutex); list_add_tail(&host->list, &nvmf_hosts); mutex_unlock(&nvmf_hosts_mutex); @@ -349,6 +375,45 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, } } +static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl, + u16 cntlid) +{ + struct nvmf_connect_data *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return NULL; + + uuid_copy(&data->hostid, &ctrl->opts->host->id); + data->cntlid = cpu_to_le16(cntlid); + strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); + strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); + + return data; +} + +static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid, + struct nvme_command *cmd) +{ + cmd->connect.opcode = nvme_fabrics_command; + cmd->connect.fctype = nvme_fabrics_type_connect; + cmd->connect.qid = cpu_to_le16(qid); + + if (qid) { + cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize); + } else { + cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); + + /* + * set keep-alive timeout in seconds granularity (ms * 1000) + */ + cmd->connect.kato = cpu_to_le32(ctrl->kato * 1000); + } + + if (ctrl->opts->disable_sqflow) + cmd->connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW; +} + /** * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect" * API function. 
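The host-identity policy spelled out in the nvmf_host_add() comment above reduces to a 2x2 decision over (same NQN, same ID): a full match reuses the existing host, exactly one matching field is rejected as ambiguous, and no match keeps scanning the list. A minimal standalone C sketch of that matrix follows; struct host and the string comparisons are simplified stand-ins for the driver's struct nvmf_host and uuid_equal(), not its actual types:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct nvmf_host: an NQN plus a host ID. */
struct host {
	const char *nqn;
	const char *id;
};

/*
 * Mirror of the matching policy in nvmf_host_add(): reuse an entry only
 * when both fields match, reject when exactly one of them matches.
 * Returns 1 = reuse, -1 = conflict (-EINVAL in the driver), 0 = no match.
 */
static int host_match(const struct host *h, const char *nqn, const char *id)
{
	bool same_nqn = !strcmp(h->nqn, nqn);
	bool same_id = !strcmp(h->id, id);

	if (same_nqn && same_id)
		return 1;	/* identical host: take another reference */
	if (same_nqn || same_id)
		return -1;	/* ambiguous identity: refuse to add it */
	return 0;		/* unrelated host: keep searching the list */
}

int main(void)
{
	struct host known = {
		"nqn.2014-08.org.nvmexpress:uuid:1111", "1111"
	};

	printf("%d\n", host_match(&known, known.nqn, known.id));	/* 1 */
	printf("%d\n", host_match(&known, known.nqn, "2222"));		/* -1 */
	printf("%d\n", host_match(&known,
			"nqn.2014-08.org.nvmexpress:uuid:2222", known.id)); /* -1 */
	printf("%d\n", host_match(&known,
			"nqn.2014-08.org.nvmexpress:uuid:2222", "2222"));   /* 0 */
	return 0;
}

Refusing the one-field matches instead of silently rebinding keeps a target's view of a host stable when userspace supplies conflicting hostnqn/hostid options for different controllers.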
@@ -377,28 +442,12 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl) int ret; u32 result; - cmd.connect.opcode = nvme_fabrics_command; - cmd.connect.fctype = nvme_fabrics_type_connect; - cmd.connect.qid = 0; - cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); - - /* - * Set keep-alive timeout in seconds granularity (ms * 1000) - */ - cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000); - - if (ctrl->opts->disable_sqflow) - cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW; + nvmf_connect_cmd_prep(ctrl, 0, &cmd); - data = kzalloc(sizeof(*data), GFP_KERNEL); + data = nvmf_connect_data_prep(ctrl, 0xffff); if (!data) return -ENOMEM; - uuid_copy(&data->hostid, &ctrl->opts->host->id); - data->cntlid = cpu_to_le16(0xffff); - strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); - strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); - ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, data, sizeof(*data), NVME_QID_ANY, 1, BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); @@ -468,23 +517,12 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid) int ret; u32 result; - cmd.connect.opcode = nvme_fabrics_command; - cmd.connect.fctype = nvme_fabrics_type_connect; - cmd.connect.qid = cpu_to_le16(qid); - cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize); + nvmf_connect_cmd_prep(ctrl, qid, &cmd); - if (ctrl->opts->disable_sqflow) - cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW; - - data = kzalloc(sizeof(*data), GFP_KERNEL); + data = nvmf_connect_data_prep(ctrl, ctrl->cntlid); if (!data) return -ENOMEM; - uuid_copy(&data->hostid, &ctrl->opts->host->id); - data->cntlid = cpu_to_le16(ctrl->cntlid); - strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); - strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); - ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res, data, sizeof(*data), qid, 1, BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); @@ -621,6 +659,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, size_t nqnlen = 0; int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO; uuid_t hostid; + char hostnqn[NVMF_NQN_SIZE]; /* Set defaults */ opts->queue_size = NVMF_DEF_QUEUE_SIZE; @@ -637,7 +676,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, if (!options) return -ENOMEM; - uuid_gen(&hostid); + /* use default host if not given by user space */ + uuid_copy(&hostid, &nvmf_default_host->id); + strscpy(hostnqn, nvmf_default_host->nqn, NVMF_NQN_SIZE); while ((p = strsep(&o, ",\n")) != NULL) { if (!*p) @@ -783,12 +824,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -EINVAL; goto out; } - opts->host = nvmf_host_add(p); + strscpy(hostnqn, p, NVMF_NQN_SIZE); kfree(p); - if (!opts->host) { - ret = -ENOMEM; - goto out; - } break; case NVMF_OPT_RECONNECT_DELAY: if (match_int(args, &token)) { @@ -945,18 +982,94 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, opts->fast_io_fail_tmo, ctrl_loss_tmo); } - if (!opts->host) { - kref_get(&nvmf_default_host->ref); - opts->host = nvmf_default_host; + opts->host = nvmf_host_add(hostnqn, &hostid); + if (IS_ERR(opts->host)) { + ret = PTR_ERR(opts->host); + opts->host = NULL; + goto out; } - uuid_copy(&opts->host->id, &hostid); - out: kfree(options); return ret; } +void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues, + u32 io_queues[HCTX_MAX_TYPES]) +{ + if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { + /* + * separate read/write queues + * hand out dedicated default queues only after we have + * sufficient read queues. 
+ */ + io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; + nr_io_queues -= io_queues[HCTX_TYPE_READ]; + io_queues[HCTX_TYPE_DEFAULT] = + min(opts->nr_write_queues, nr_io_queues); + nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT]; + } else { + /* + * shared read/write queues + * either no write queues were requested, or we don't have + * sufficient queue count to have dedicated default queues. + */ + io_queues[HCTX_TYPE_DEFAULT] = + min(opts->nr_io_queues, nr_io_queues); + nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT]; + } + + if (opts->nr_poll_queues && nr_io_queues) { + /* map dedicated poll queues only if we have queues left */ + io_queues[HCTX_TYPE_POLL] = + min(opts->nr_poll_queues, nr_io_queues); + } +} +EXPORT_SYMBOL_GPL(nvmf_set_io_queues); + +void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl, + u32 io_queues[HCTX_MAX_TYPES]) +{ + struct nvmf_ctrl_options *opts = ctrl->opts; + + if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) { + /* separate read/write queues */ + set->map[HCTX_TYPE_DEFAULT].nr_queues = + io_queues[HCTX_TYPE_DEFAULT]; + set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; + set->map[HCTX_TYPE_READ].nr_queues = + io_queues[HCTX_TYPE_READ]; + set->map[HCTX_TYPE_READ].queue_offset = + io_queues[HCTX_TYPE_DEFAULT]; + } else { + /* shared read/write queues */ + set->map[HCTX_TYPE_DEFAULT].nr_queues = + io_queues[HCTX_TYPE_DEFAULT]; + set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; + set->map[HCTX_TYPE_READ].nr_queues = + io_queues[HCTX_TYPE_DEFAULT]; + set->map[HCTX_TYPE_READ].queue_offset = 0; + } + + blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); + blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); + if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) { + /* map dedicated poll queues only if we have queues left */ + set->map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL]; + set->map[HCTX_TYPE_POLL].queue_offset = + io_queues[HCTX_TYPE_DEFAULT] + + io_queues[HCTX_TYPE_READ]; + blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); + } + + dev_info(ctrl->device, + "mapped %d/%d/%d default/read/poll queues.\n", + io_queues[HCTX_TYPE_DEFAULT], + io_queues[HCTX_TYPE_READ], + io_queues[HCTX_TYPE_POLL]); +} +EXPORT_SYMBOL_GPL(nvmf_map_queues); + static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts, unsigned int required_opts) { diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index dcac3df8a5f7..82e7a27ffbde 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -77,6 +77,9 @@ enum { * with the parsing opts enum. * @mask: Used by the fabrics library to parse through sysfs options * on adding a NVMe controller. + * @max_reconnects: maximum number of allowed reconnect attempts before removing + * the controller, (-1) means reconnect forever, zero means remove + * immediately; * @transport: Holds the fabric transport "technology name" (for a lack of * better description) that will be used by an NVMe controller * being added. @@ -96,9 +99,6 @@ enum { * @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN. * @kato: Keep-alive timeout. * @host: Virtual NVMe host, contains the NQN and Host ID. 
- * @max_reconnects: maximum number of allowed reconnect attempts before removing - * the controller, (-1) means reconnect forever, zero means remove - * immediately; * @dhchap_secret: DH-HMAC-CHAP secret * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional * authentication @@ -112,6 +112,7 @@ enum { */ struct nvmf_ctrl_options { unsigned mask; + int max_reconnects; char *transport; char *subsysnqn; char *traddr; @@ -125,7 +126,6 @@ struct nvmf_ctrl_options { bool duplicate_connect; unsigned int kato; struct nvmf_host *host; - int max_reconnects; char *dhchap_secret; char *dhchap_ctrl_secret; bool disable_sqflow; @@ -181,7 +181,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl, ctrl->state == NVME_CTRL_DEAD || strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) || strcmp(opts->host->nqn, ctrl->opts->host->nqn) || - memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t))) + !uuid_equal(&opts->host->id, &ctrl->opts->host->id)) return false; return true; @@ -203,6 +203,13 @@ static inline void nvmf_complete_timed_out_request(struct request *rq) } } +static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts) +{ + return min(opts->nr_io_queues, num_online_cpus()) + + min(opts->nr_write_queues, num_online_cpus()) + + min(opts->nr_poll_queues, num_online_cpus()); +} + int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val); int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val); int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val); @@ -215,5 +222,9 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size); bool nvmf_should_reconnect(struct nvme_ctrl *ctrl); bool nvmf_ip_options_match(struct nvme_ctrl *ctrl, struct nvmf_ctrl_options *opts); +void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues, + u32 io_queues[HCTX_MAX_TYPES]); +void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl, + u32 io_queues[HCTX_MAX_TYPES]); #endif /* _NVME_FABRICS_H */ diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 2ed75923507d..691f2df574ce 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -2917,8 +2917,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set, &nvme_fc_mq_ops, 1, - struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, - ctrl->lport->ops->fcprqst_priv_sz)); + struct_size_t(struct nvme_fcp_op_w_sgl, priv, + ctrl->lport->ops->fcprqst_priv_sz)); if (ret) return ret; @@ -3536,8 +3536,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, &nvme_fc_admin_mq_ops, - struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, - ctrl->lport->ops->fcprqst_priv_sz)); + struct_size_t(struct nvme_fcp_op_w_sgl, priv, + ctrl->lport->ops->fcprqst_priv_sz)); if (ret) goto fail_ctrl; diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index f15e7330b75a..2130ad65b58c 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -14,7 +14,7 @@ enum { }; static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c, - unsigned int flags, fmode_t mode) + unsigned int flags, bool open_for_write) { u32 effects; @@ -80,7 +80,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c, * writing. 
*/ if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) - return mode & FMODE_WRITE; + return open_for_write; return true; } @@ -337,7 +337,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct nvme_passthru_cmd __user *ucmd, unsigned int flags, - fmode_t mode) + bool open_for_write) { struct nvme_passthru_cmd cmd; struct nvme_command c; @@ -365,7 +365,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, c.common.cdw14 = cpu_to_le32(cmd.cdw14); c.common.cdw15 = cpu_to_le32(cmd.cdw15); - if (!nvme_cmd_allowed(ns, &c, 0, mode)) + if (!nvme_cmd_allowed(ns, &c, 0, open_for_write)) return -EACCES; if (cmd.timeout_ms) @@ -385,7 +385,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags, - fmode_t mode) + bool open_for_write) { struct nvme_passthru_cmd64 cmd; struct nvme_command c; @@ -412,7 +412,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, c.common.cdw14 = cpu_to_le32(cmd.cdw14); c.common.cdw15 = cpu_to_le32(cmd.cdw15); - if (!nvme_cmd_allowed(ns, &c, flags, mode)) + if (!nvme_cmd_allowed(ns, &c, flags, open_for_write)) return -EACCES; if (cmd.timeout_ms) @@ -521,7 +521,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, if (cookie != NULL && blk_rq_is_poll(req)) nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED); else - io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb); + io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); return RQ_END_IO_FREE; } @@ -543,7 +543,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req, if (cookie != NULL && blk_rq_is_poll(req)) nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED); else - io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb); + io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb); return RQ_END_IO_NONE; } @@ -583,7 +583,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14)); c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15)); - if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode)) + if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE)) return -EACCES; d.metadata = READ_ONCE(cmd->metadata); @@ -649,13 +649,13 @@ static bool is_ctrl_ioctl(unsigned int cmd) } static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd, - void __user *argp, fmode_t mode) + void __user *argp, bool open_for_write) { switch (cmd) { case NVME_IOCTL_ADMIN_CMD: - return nvme_user_cmd(ctrl, NULL, argp, 0, mode); + return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write); case NVME_IOCTL_ADMIN64_CMD: - return nvme_user_cmd64(ctrl, NULL, argp, 0, mode); + return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write); default: return sed_ioctl(ctrl->opal_dev, cmd, argp); } @@ -680,14 +680,14 @@ struct nvme_user_io32 { #endif /* COMPAT_FOR_U64_ALIGNMENT */ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd, - void __user *argp, unsigned int flags, fmode_t mode) + void __user *argp, unsigned int flags, bool open_for_write) { switch (cmd) { case NVME_IOCTL_ID: force_successful_syscall_return(); return ns->head->ns_id; case NVME_IOCTL_IO_CMD: - return nvme_user_cmd(ns->ctrl, ns, argp, flags, mode); + return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write); /* * struct nvme_user_io can have 
different padding on some 32-bit ABIs. * Just accept the compat version as all fields that are used are the @@ -702,16 +702,18 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd, flags |= NVME_IOCTL_VEC; fallthrough; case NVME_IOCTL_IO64_CMD: - return nvme_user_cmd64(ns->ctrl, ns, argp, flags, mode); + return nvme_user_cmd64(ns->ctrl, ns, argp, flags, + open_for_write); default: return -ENOTTY; } } -int nvme_ioctl(struct block_device *bdev, fmode_t mode, +int nvme_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct nvme_ns *ns = bdev->bd_disk->private_data; + bool open_for_write = mode & BLK_OPEN_WRITE; void __user *argp = (void __user *)arg; unsigned int flags = 0; @@ -719,19 +721,20 @@ int nvme_ioctl(struct block_device *bdev, fmode_t mode, flags |= NVME_IOCTL_PARTITION; if (is_ctrl_ioctl(cmd)) - return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode); - return nvme_ns_ioctl(ns, cmd, argp, flags, mode); + return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); + return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write); } long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct nvme_ns *ns = container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev); + bool open_for_write = file->f_mode & FMODE_WRITE; void __user *argp = (void __user *)arg; if (is_ctrl_ioctl(cmd)) - return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, file->f_mode); - return nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode); + return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); + return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write); } static int nvme_uring_cmd_checks(unsigned int issue_flags) @@ -800,7 +803,7 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd, #ifdef CONFIG_NVME_MULTIPATH static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp, struct nvme_ns_head *head, int srcu_idx, - fmode_t mode) + bool open_for_write) __releases(&head->srcu) { struct nvme_ctrl *ctrl = ns->ctrl; @@ -808,16 +811,17 @@ static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, nvme_get_ctrl(ns->ctrl); srcu_read_unlock(&head->srcu, srcu_idx); - ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode); + ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); nvme_put_ctrl(ctrl); return ret; } -int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode, +int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct nvme_ns_head *head = bdev->bd_disk->private_data; + bool open_for_write = mode & BLK_OPEN_WRITE; void __user *argp = (void __user *)arg; struct nvme_ns *ns; int srcu_idx, ret = -EWOULDBLOCK; @@ -838,9 +842,9 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode, */ if (is_ctrl_ioctl(cmd)) return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx, - mode); + open_for_write); - ret = nvme_ns_ioctl(ns, cmd, argp, flags, mode); + ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write); out_unlock: srcu_read_unlock(&head->srcu, srcu_idx); return ret; @@ -849,6 +853,7 @@ out_unlock: long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { + bool open_for_write = file->f_mode & FMODE_WRITE; struct cdev *cdev = file_inode(file)->i_cdev; struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev); @@ -863,9 +868,9 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd, if (is_ctrl_ioctl(cmd)) return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx, - 
file->f_mode); + open_for_write); - ret = nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode); + ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write); out_unlock: srcu_read_unlock(&head->srcu, srcu_idx); return ret; @@ -940,7 +945,7 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) } static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp, - fmode_t mode) + bool open_for_write) { struct nvme_ns *ns; int ret; @@ -964,7 +969,7 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp, kref_get(&ns->kref); up_read(&ctrl->namespaces_rwsem); - ret = nvme_user_cmd(ctrl, ns, argp, 0, mode); + ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write); nvme_put_ns(ns); return ret; @@ -976,16 +981,17 @@ out_unlock: long nvme_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { + bool open_for_write = file->f_mode & FMODE_WRITE; struct nvme_ctrl *ctrl = file->private_data; void __user *argp = (void __user *)arg; switch (cmd) { case NVME_IOCTL_ADMIN_CMD: - return nvme_user_cmd(ctrl, NULL, argp, 0, file->f_mode); + return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write); case NVME_IOCTL_ADMIN64_CMD: - return nvme_user_cmd64(ctrl, NULL, argp, 0, file->f_mode); + return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write); case NVME_IOCTL_IO_CMD: - return nvme_dev_user_cmd(ctrl, argp, file->f_mode); + return nvme_dev_user_cmd(ctrl, argp, open_for_write); case NVME_IOCTL_RESET: if (!capable(CAP_SYS_ADMIN)) return -EACCES; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 2bc159a318ff..98001eebd275 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -402,14 +402,14 @@ static void nvme_ns_head_submit_bio(struct bio *bio) srcu_read_unlock(&head->srcu, srcu_idx); } -static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode) +static int nvme_ns_head_open(struct gendisk *disk, blk_mode_t mode) { - if (!nvme_tryget_ns_head(bdev->bd_disk->private_data)) + if (!nvme_tryget_ns_head(disk->private_data)) return -ENXIO; return 0; } -static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode) +static void nvme_ns_head_release(struct gendisk *disk) { nvme_put_ns_head(disk->private_data); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 8657811f8b88..9a98c14c552a 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -247,12 +247,13 @@ enum nvme_ctrl_flags { NVME_CTRL_ADMIN_Q_STOPPED = 1, NVME_CTRL_STARTED_ONCE = 2, NVME_CTRL_STOPPED = 3, + NVME_CTRL_SKIP_ID_CNS_CS = 4, }; struct nvme_ctrl { bool comp_seen; - enum nvme_ctrl_state state; bool identified; + enum nvme_ctrl_state state; spinlock_t lock; struct mutex scan_lock; const struct nvme_ctrl_ops *ops; @@ -284,8 +285,8 @@ struct nvme_ctrl { char name[12]; u16 cntlid; - u32 ctrl_config; u16 mtfa; + u32 ctrl_config; u32 queue_count; u64 cap; @@ -359,10 +360,10 @@ struct nvme_ctrl { bool apst_enabled; /* PCIe only: */ + u16 hmmaxd; u32 hmpre; u32 hmmin; u32 hmminds; - u16 hmmaxd; /* Fabrics only */ u32 ioccsz; @@ -842,10 +843,10 @@ void nvme_put_ns_head(struct nvme_ns_head *head); int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, const struct file_operations *fops, struct module *owner); void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device); -int nvme_ioctl(struct block_device *bdev, fmode_t mode, +int nvme_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg); long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, 
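
The conversions above all reduce to one pattern: each entry point collapses its open-mode word into a single boolean before any dispatch, so nvme_cmd_allowed() no longer needs to know whether the request arrived through a block node (blk_mode_t / BLK_OPEN_WRITE) or a character node (fmode_t / FMODE_WRITE). A condensed, compilable sketch of that shape; the flag values below are stand-ins for illustration, not the kernel's definitions:

#include <stdbool.h>
#include <errno.h>

#define BLK_OPEN_WRITE_STANDIN	(1u << 1)	/* stand-in for BLK_OPEN_WRITE */
#define FMODE_WRITE_STANDIN	(1u << 1)	/* stand-in for FMODE_WRITE */

/* Commands that may alter media require a write open; reads always pass. */
static bool cmd_allowed(bool open_for_write, bool cmd_writes_media)
{
	return cmd_writes_media ? open_for_write : true;
}

/* Block-node path: derive the boolean once from the blk_mode_t word. */
static int block_ioctl_sketch(unsigned int blk_mode, bool cmd_writes_media)
{
	bool open_for_write = blk_mode & BLK_OPEN_WRITE_STANDIN;

	return cmd_allowed(open_for_write, cmd_writes_media) ? 0 : -EACCES;
}

/* Char-node path: the same boolean, derived from file->f_mode instead. */
static int chardev_ioctl_sketch(unsigned int f_mode, bool cmd_writes_media)
{
	bool open_for_write = f_mode & FMODE_WRITE_STANDIN;

	return cmd_allowed(open_for_write, cmd_writes_media) ? 0 : -EACCES;
}
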
unsigned long arg); -int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode, +int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg); long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg); @@ -866,7 +867,11 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[]; extern const struct pr_ops nvme_pr_ops; extern const struct block_device_operations nvme_ns_head_ops; extern const struct attribute_group nvme_dev_attrs_group; +extern const struct attribute_group *nvme_subsys_attrs_groups[]; +extern const struct attribute_group *nvme_dev_attr_groups[]; +extern const struct block_device_operations nvme_bdev_ops; +void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl); struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); #ifdef CONFIG_NVME_MULTIPATH static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 492f319ebdf3..48c60f7fda0b 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -420,10 +420,9 @@ static int nvme_pci_init_request(struct blk_mq_tag_set *set, struct request *req, unsigned int hctx_idx, unsigned int numa_node) { - struct nvme_dev *dev = to_nvme_dev(set->driver_data); struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - nvme_req(req)->ctrl = &dev->ctrl; + nvme_req(req)->ctrl = set->driver_data; nvme_req(req)->cmd = &iod->cmd; return 0; } diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 0eb79696fb73..d433b2ec07a6 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -501,7 +501,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) } ibdev = queue->device->dev; - /* +1 for ib_stop_cq */ + /* +1 for ib_drain_qp */ queue->cq_size = cq_factor * queue->queue_size + 1; ret = nvme_rdma_create_cq(ibdev, queue); @@ -713,18 +713,10 @@ out_stop_queues: static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) { struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; - struct ib_device *ibdev = ctrl->device->dev; - unsigned int nr_io_queues, nr_default_queues; - unsigned int nr_read_queues, nr_poll_queues; + unsigned int nr_io_queues; int i, ret; - nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors, - min(opts->nr_io_queues, num_online_cpus())); - nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors, - min(opts->nr_write_queues, num_online_cpus())); - nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus()); - nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues; - + nr_io_queues = nvmf_nr_io_queues(opts); ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); if (ret) return ret; @@ -739,34 +731,7 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); - if (opts->nr_write_queues && nr_read_queues < nr_io_queues) { - /* - * separate read/write queues - * hand out dedicated default queues only after we have - * sufficient read queues. - */ - ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues; - nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; - ctrl->io_queues[HCTX_TYPE_DEFAULT] = - min(nr_default_queues, nr_io_queues); - nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; - } else { - /* - * shared read/write queues - * either no write queues were requested, or we don't have - * sufficient queue count to have dedicated default queues. 
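
The distribution logic being deleted here (and its near-identical twin removed from the TCP driver further down) is what this series hoists into the shared nvmf_nr_io_queues()/nvmf_set_io_queues() helpers. The policy itself is small: dedicated read queues are funded first, default (write) queues next, and poll queues only from whatever remains. A standalone sketch of that split, mirroring the removed code rather than the new helper's actual source:

#include <stdio.h>

enum { Q_DEFAULT, Q_READ, Q_POLL, Q_CLASSES };

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static void set_io_queues_sketch(unsigned int nr_read, unsigned int nr_write,
				 unsigned int nr_poll, unsigned int nr_io_queues,
				 unsigned int io_queues[Q_CLASSES])
{
	if (nr_write && nr_read < nr_io_queues) {
		/* Separate read/write queues: satisfy the read set first. */
		io_queues[Q_READ] = nr_read;
		nr_io_queues -= io_queues[Q_READ];
		io_queues[Q_DEFAULT] = min_u(nr_write, nr_io_queues);
		nr_io_queues -= io_queues[Q_DEFAULT];
	} else {
		/* Shared read/write queues. */
		io_queues[Q_DEFAULT] = min_u(nr_read, nr_io_queues);
		nr_io_queues -= io_queues[Q_DEFAULT];
	}
	/* Poll queues are mapped only if anything is left over. */
	if (nr_poll && nr_io_queues)
		io_queues[Q_POLL] = min_u(nr_poll, nr_io_queues);
}

int main(void)
{
	unsigned int q[Q_CLASSES] = { 0 };

	set_io_queues_sketch(2, 2, 1, 5, q);
	printf("default=%u read=%u poll=%u\n", q[Q_DEFAULT], q[Q_READ], q[Q_POLL]);
	return 0;
}
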
- */ - ctrl->io_queues[HCTX_TYPE_DEFAULT] = - min(nr_read_queues, nr_io_queues); - nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; - } - - if (opts->nr_poll_queues && nr_io_queues) { - /* map dedicated poll queues only if we have queues left */ - ctrl->io_queues[HCTX_TYPE_POLL] = - min(nr_poll_queues, nr_io_queues); - } - + nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues); for (i = 1; i < ctrl->ctrl.queue_count; i++) { ret = nvme_rdma_alloc_queue(ctrl, i, ctrl->ctrl.sqsize + 1); @@ -2138,44 +2103,8 @@ static void nvme_rdma_complete_rq(struct request *rq) static void nvme_rdma_map_queues(struct blk_mq_tag_set *set) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data); - struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; - if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) { - /* separate read/write queues */ - set->map[HCTX_TYPE_DEFAULT].nr_queues = - ctrl->io_queues[HCTX_TYPE_DEFAULT]; - set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; - set->map[HCTX_TYPE_READ].nr_queues = - ctrl->io_queues[HCTX_TYPE_READ]; - set->map[HCTX_TYPE_READ].queue_offset = - ctrl->io_queues[HCTX_TYPE_DEFAULT]; - } else { - /* shared read/write queues */ - set->map[HCTX_TYPE_DEFAULT].nr_queues = - ctrl->io_queues[HCTX_TYPE_DEFAULT]; - set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; - set->map[HCTX_TYPE_READ].nr_queues = - ctrl->io_queues[HCTX_TYPE_DEFAULT]; - set->map[HCTX_TYPE_READ].queue_offset = 0; - } - blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); - blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); - - if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) { - /* map dedicated poll queues only if we have queues left */ - set->map[HCTX_TYPE_POLL].nr_queues = - ctrl->io_queues[HCTX_TYPE_POLL]; - set->map[HCTX_TYPE_POLL].queue_offset = - ctrl->io_queues[HCTX_TYPE_DEFAULT] + - ctrl->io_queues[HCTX_TYPE_READ]; - blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); - } - - dev_info(ctrl->ctrl.device, - "mapped %d/%d/%d default/read/poll queues.\n", - ctrl->io_queues[HCTX_TYPE_DEFAULT], - ctrl->io_queues[HCTX_TYPE_READ], - ctrl->io_queues[HCTX_TYPE_POLL]); + nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues); } static const struct blk_mq_ops nvme_rdma_mq_ops = { diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c new file mode 100644 index 000000000000..45e91811f905 --- /dev/null +++ b/drivers/nvme/host/sysfs.c @@ -0,0 +1,668 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sysfs interface for the NVMe core driver. + * + * Copyright (c) 2011-2014, Intel Corporation. 
+ */ + +#include <linux/nvme-auth.h> + +#include "nvme.h" +#include "fabrics.h" + +static ssize_t nvme_sysfs_reset(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + int ret; + + ret = nvme_reset_ctrl_sync(ctrl); + if (ret < 0) + return ret; + return count; +} +static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); + +static ssize_t nvme_sysfs_rescan(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + nvme_queue_scan(ctrl); + return count; +} +static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); + +static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + + if (disk->fops == &nvme_bdev_ops) + return nvme_get_ns_from_dev(dev)->head; + else + return disk->private_data; +} + +static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nvme_ns_head *head = dev_to_ns_head(dev); + struct nvme_ns_ids *ids = &head->ids; + struct nvme_subsystem *subsys = head->subsys; + int serial_len = sizeof(subsys->serial); + int model_len = sizeof(subsys->model); + + if (!uuid_is_null(&ids->uuid)) + return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid); + + if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) + return sysfs_emit(buf, "eui.%16phN\n", ids->nguid); + + if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) + return sysfs_emit(buf, "eui.%8phN\n", ids->eui64); + + while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || + subsys->serial[serial_len - 1] == '\0')) + serial_len--; + while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || + subsys->model[model_len - 1] == '\0')) + model_len--; + + return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, + serial_len, subsys->serial, model_len, subsys->model, + head->ns_id); +} +static DEVICE_ATTR_RO(wwid); + +static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); +} +static DEVICE_ATTR_RO(nguid); + +static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; + + /* For backward compatibility expose the NGUID to userspace if + * we have no UUID set + */ + if (uuid_is_null(&ids->uuid)) { + dev_warn_ratelimited(dev, + "No UUID available providing old NGUID\n"); + return sysfs_emit(buf, "%pU\n", ids->nguid); + } + return sysfs_emit(buf, "%pU\n", &ids->uuid); +} +static DEVICE_ATTR_RO(uuid); + +static ssize_t eui_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); +} +static DEVICE_ATTR_RO(eui); + +static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id); +} +static DEVICE_ATTR_RO(nsid); + +static struct attribute *nvme_ns_id_attrs[] = { + &dev_attr_wwid.attr, + &dev_attr_uuid.attr, + &dev_attr_nguid.attr, + &dev_attr_eui.attr, + &dev_attr_nsid.attr, +#ifdef CONFIG_NVME_MULTIPATH + &dev_attr_ana_grpid.attr, + &dev_attr_ana_state.attr, +#endif + NULL, +}; + +static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvme_ns_ids *ids = 
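
For reference, wwid_show() above encodes a strict identifier preference: a namespace UUID wins, then the 128-bit NGUID, then the 64-bit EUI, and only a namespace with none of them gets the synthesized legacy nvme.<vid>-<serial>-<model>-<nsid> string (with trailing spaces and NULs trimmed from serial and model first). A small sketch of just that precedence decision, with simplified types:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative layout only; the kernel uses struct nvme_ns_ids. */
struct ns_ids_sketch {
	unsigned char uuid[16];
	unsigned char nguid[16];
	unsigned char eui64[8];
};

static bool all_zero(const unsigned char *p, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if (p[i])
			return false;
	return true;
}

/* Mirrors the fallback chain in wwid_show(). */
static const char *wwid_kind(const struct ns_ids_sketch *ids)
{
	if (!all_zero(ids->uuid, sizeof(ids->uuid)))
		return "uuid.<128-bit UUID>";
	if (!all_zero(ids->nguid, sizeof(ids->nguid)))
		return "eui.<128-bit NGUID>";
	if (!all_zero(ids->eui64, sizeof(ids->eui64)))
		return "eui.<64-bit EUI>";
	return "nvme.<vid>-<serial>-<model>-<nsid>";	/* legacy fallback */
}
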
&dev_to_ns_head(dev)->ids; + + if (a == &dev_attr_uuid.attr) { + if (uuid_is_null(&ids->uuid) && + !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) + return 0; + } + if (a == &dev_attr_nguid.attr) { + if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) + return 0; + } + if (a == &dev_attr_eui.attr) { + if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) + return 0; + } +#ifdef CONFIG_NVME_MULTIPATH + if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { + if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */ + return 0; + if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) + return 0; + } +#endif + return a->mode; +} + +static const struct attribute_group nvme_ns_id_attr_group = { + .attrs = nvme_ns_id_attrs, + .is_visible = nvme_ns_id_attrs_are_visible, +}; + +const struct attribute_group *nvme_ns_id_attr_groups[] = { + &nvme_ns_id_attr_group, + NULL, +}; + +#define nvme_show_str_function(field) \ +static ssize_t field##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ + return sysfs_emit(buf, "%.*s\n", \ + (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ +} \ +static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); + +nvme_show_str_function(model); +nvme_show_str_function(serial); +nvme_show_str_function(firmware_rev); + +#define nvme_show_int_function(field) \ +static ssize_t field##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ + return sysfs_emit(buf, "%d\n", ctrl->field); \ +} \ +static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); + +nvme_show_int_function(cntlid); +nvme_show_int_function(numa_node); +nvme_show_int_function(queue_count); +nvme_show_int_function(sqsize); +nvme_show_int_function(kato); + +static ssize_t nvme_sysfs_delete(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags)) + return -EBUSY; + + if (device_remove_file_self(dev, attr)) + nvme_delete_ctrl_sync(ctrl); + return count; +} +static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); + +static ssize_t nvme_sysfs_show_transport(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ctrl->ops->name); +} +static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); + +static ssize_t nvme_sysfs_show_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + static const char *const state_name[] = { + [NVME_CTRL_NEW] = "new", + [NVME_CTRL_LIVE] = "live", + [NVME_CTRL_RESETTING] = "resetting", + [NVME_CTRL_CONNECTING] = "connecting", + [NVME_CTRL_DELETING] = "deleting", + [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)", + [NVME_CTRL_DEAD] = "dead", + }; + + if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && + state_name[ctrl->state]) + return sysfs_emit(buf, "%s\n", state_name[ctrl->state]); + + return sysfs_emit(buf, "unknown state\n"); +} + +static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); + +static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn); +} +static DEVICE_ATTR(subsysnqn, S_IRUGO, 
nvme_sysfs_show_subsysnqn, NULL); + +static ssize_t nvme_sysfs_show_hostnqn(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn); +} +static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL); + +static ssize_t nvme_sysfs_show_hostid(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id); +} +static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL); + +static ssize_t nvme_sysfs_show_address(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); +} +static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); + +static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + + if (ctrl->opts->max_reconnects == -1) + return sysfs_emit(buf, "off\n"); + return sysfs_emit(buf, "%d\n", + opts->max_reconnects * opts->reconnect_delay); +} + +static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + int ctrl_loss_tmo, err; + + err = kstrtoint(buf, 10, &ctrl_loss_tmo); + if (err) + return -EINVAL; + + if (ctrl_loss_tmo < 0) + opts->max_reconnects = -1; + else + opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, + opts->reconnect_delay); + return count; +} +static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR, + nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store); + +static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (ctrl->opts->reconnect_delay == -1) + return sysfs_emit(buf, "off\n"); + return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay); +} + +static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + unsigned int v; + int err; + + err = kstrtou32(buf, 10, &v); + if (err) + return err; + + ctrl->opts->reconnect_delay = v; + return count; +} +static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, + nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store); + +static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (ctrl->opts->fast_io_fail_tmo == -1) + return sysfs_emit(buf, "off\n"); + return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo); +} + +static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + int fast_io_fail_tmo, err; + + err = kstrtoint(buf, 10, &fast_io_fail_tmo); + if (err) + return -EINVAL; + + if (fast_io_fail_tmo < 0) + opts->fast_io_fail_tmo = -1; + else + opts->fast_io_fail_tmo = fast_io_fail_tmo; + return count; +} +static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, + nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store); + +static 
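
The rounding in ctrl_loss_tmo deserves a worked example: the attribute is stored as a reconnect budget, max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, reconnect_delay), and read back as the product of the two, so a read can report more than was written. With reconnect_delay = 10 and a write of 25, the store yields max_reconnects = 3 and a subsequent read shows 30. A runnable sketch of the round trip (plain-C stand-in for the kernel's DIV_ROUND_UP):

#include <stdio.h>

#define DIV_ROUND_UP_SKETCH(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int reconnect_delay = 10;	/* seconds between reconnect attempts */
	int ctrl_loss_tmo = 25;		/* value written by the administrator */

	/* store: convert the timeout into a whole number of attempts */
	int max_reconnects = DIV_ROUND_UP_SKETCH(ctrl_loss_tmo, reconnect_delay);

	/* show: attempts * delay, i.e. the timeout rounded up to 30 here */
	printf("max_reconnects=%d effective ctrl_loss_tmo=%d\n",
	       max_reconnects, max_reconnects * reconnect_delay);
	return 0;
}
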
ssize_t cntrltype_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + static const char * const type[] = { + [NVME_CTRL_IO] = "io\n", + [NVME_CTRL_DISC] = "discovery\n", + [NVME_CTRL_ADMIN] = "admin\n", + }; + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype]) + return sysfs_emit(buf, "reserved\n"); + + return sysfs_emit(buf, type[ctrl->cntrltype]); +} +static DEVICE_ATTR_RO(cntrltype); + +static ssize_t dctype_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + static const char * const type[] = { + [NVME_DCTYPE_NOT_REPORTED] = "none\n", + [NVME_DCTYPE_DDC] = "ddc\n", + [NVME_DCTYPE_CDC] = "cdc\n", + }; + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype]) + return sysfs_emit(buf, "reserved\n"); + + return sysfs_emit(buf, type[ctrl->dctype]); +} +static DEVICE_ATTR_RO(dctype); + +#ifdef CONFIG_NVME_AUTH +static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + + if (!opts->dhchap_secret) + return sysfs_emit(buf, "none\n"); + return sysfs_emit(buf, "%s\n", opts->dhchap_secret); +} + +static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + char *dhchap_secret; + + if (!ctrl->opts->dhchap_secret) + return -EINVAL; + if (count < 7) + return -EINVAL; + if (memcmp(buf, "DHHC-1:", 7)) + return -EINVAL; + + dhchap_secret = kzalloc(count + 1, GFP_KERNEL); + if (!dhchap_secret) + return -ENOMEM; + memcpy(dhchap_secret, buf, count); + nvme_auth_stop(ctrl); + if (strcmp(dhchap_secret, opts->dhchap_secret)) { + struct nvme_dhchap_key *key, *host_key; + int ret; + + ret = nvme_auth_generate_key(dhchap_secret, &key); + if (ret) { + kfree(dhchap_secret); + return ret; + } + kfree(opts->dhchap_secret); + opts->dhchap_secret = dhchap_secret; + host_key = ctrl->host_key; + mutex_lock(&ctrl->dhchap_auth_mutex); + ctrl->host_key = key; + mutex_unlock(&ctrl->dhchap_auth_mutex); + nvme_auth_free_key(host_key); + } else + kfree(dhchap_secret); + /* Start re-authentication */ + dev_info(ctrl->device, "re-authenticating controller\n"); + queue_work(nvme_wq, &ctrl->dhchap_auth_work); + + return count; +} + +static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR, + nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store); + +static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + + if (!opts->dhchap_ctrl_secret) + return sysfs_emit(buf, "none\n"); + return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret); +} + +static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + char *dhchap_secret; + + if (!ctrl->opts->dhchap_ctrl_secret) + return -EINVAL; + if (count < 7) + return -EINVAL; + if (memcmp(buf, "DHHC-1:", 7)) + return -EINVAL; + + dhchap_secret = kzalloc(count + 1, GFP_KERNEL); + if (!dhchap_secret) + return -ENOMEM; + memcpy(dhchap_secret, buf, count); + nvme_auth_stop(ctrl); + if 
(strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) { + struct nvme_dhchap_key *key, *ctrl_key; + int ret; + + ret = nvme_auth_generate_key(dhchap_secret, &key); + if (ret) { + kfree(dhchap_secret); + return ret; + } + kfree(opts->dhchap_ctrl_secret); + opts->dhchap_ctrl_secret = dhchap_secret; + ctrl_key = ctrl->ctrl_key; + mutex_lock(&ctrl->dhchap_auth_mutex); + ctrl->ctrl_key = key; + mutex_unlock(&ctrl->dhchap_auth_mutex); + nvme_auth_free_key(ctrl_key); + } else + kfree(dhchap_secret); + /* Start re-authentication */ + dev_info(ctrl->device, "re-authenticating controller\n"); + queue_work(nvme_wq, &ctrl->dhchap_auth_work); + + return count; +} + +static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR, + nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store); +#endif + +static struct attribute *nvme_dev_attrs[] = { + &dev_attr_reset_controller.attr, + &dev_attr_rescan_controller.attr, + &dev_attr_model.attr, + &dev_attr_serial.attr, + &dev_attr_firmware_rev.attr, + &dev_attr_cntlid.attr, + &dev_attr_delete_controller.attr, + &dev_attr_transport.attr, + &dev_attr_subsysnqn.attr, + &dev_attr_address.attr, + &dev_attr_state.attr, + &dev_attr_numa_node.attr, + &dev_attr_queue_count.attr, + &dev_attr_sqsize.attr, + &dev_attr_hostnqn.attr, + &dev_attr_hostid.attr, + &dev_attr_ctrl_loss_tmo.attr, + &dev_attr_reconnect_delay.attr, + &dev_attr_fast_io_fail_tmo.attr, + &dev_attr_kato.attr, + &dev_attr_cntrltype.attr, + &dev_attr_dctype.attr, +#ifdef CONFIG_NVME_AUTH + &dev_attr_dhchap_secret.attr, + &dev_attr_dhchap_ctrl_secret.attr, +#endif + NULL +}; + +static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) + return 0; + if (a == &dev_attr_address.attr && !ctrl->ops->get_address) + return 0; + if (a == &dev_attr_hostnqn.attr && !ctrl->opts) + return 0; + if (a == &dev_attr_hostid.attr && !ctrl->opts) + return 0; + if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts) + return 0; + if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts) + return 0; + if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts) + return 0; +#ifdef CONFIG_NVME_AUTH + if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts) + return 0; + if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts) + return 0; +#endif + + return a->mode; +} + +const struct attribute_group nvme_dev_attrs_group = { + .attrs = nvme_dev_attrs, + .is_visible = nvme_dev_attrs_are_visible, +}; +EXPORT_SYMBOL_GPL(nvme_dev_attrs_group); + +const struct attribute_group *nvme_dev_attr_groups[] = { + &nvme_dev_attrs_group, + NULL, +}; + +#define SUBSYS_ATTR_RO(_name, _mode, _show) \ + struct device_attribute subsys_attr_##_name = \ + __ATTR(_name, _mode, _show, NULL) + +static ssize_t nvme_subsys_show_nqn(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_subsystem *subsys = + container_of(dev, struct nvme_subsystem, dev); + + return sysfs_emit(buf, "%s\n", subsys->subnqn); +} +static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); + +static ssize_t nvme_subsys_show_type(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_subsystem *subsys = + container_of(dev, struct nvme_subsystem, dev); + + switch (subsys->subtype) { + case NVME_NQN_DISC: + return sysfs_emit(buf, "discovery\n"); + case NVME_NQN_NVME: + return sysfs_emit(buf, "nvm\n"); + 
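
Both secret stores above share one re-key sequence: stop in-flight authentication, generate the new key before touching shared state, swap the key pointer under dhchap_auth_mutex, free the old key only after the swap, and finally queue re-authentication. A minimal shape of that pattern with pthreads standing in for the kernel mutex (hypothetical types, not the driver's code; the driver snapshots the old pointer slightly differently):

#include <pthread.h>
#include <stdlib.h>

struct key_sketch { int id; };	/* stand-in for struct nvme_dhchap_key */

static pthread_mutex_t auth_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct key_sketch *active_key;

static void rekey_sketch(struct key_sketch *new_key)
{
	struct key_sketch *old_key;

	/* Only the pointer swap itself needs the mutex. */
	pthread_mutex_lock(&auth_mutex);
	old_key = active_key;
	active_key = new_key;
	pthread_mutex_unlock(&auth_mutex);

	/* Free the old key strictly after the swap is visible, then
	 * re-authentication would be queued. */
	free(old_key);
}
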
default: + return sysfs_emit(buf, "reserved\n"); + } +} +static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type); + +#define nvme_subsys_show_str_function(field) \ +static ssize_t subsys_##field##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct nvme_subsystem *subsys = \ + container_of(dev, struct nvme_subsystem, dev); \ + return sysfs_emit(buf, "%.*s\n", \ + (int)sizeof(subsys->field), subsys->field); \ +} \ +static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); + +nvme_subsys_show_str_function(model); +nvme_subsys_show_str_function(serial); +nvme_subsys_show_str_function(firmware_rev); + +static struct attribute *nvme_subsys_attrs[] = { + &subsys_attr_model.attr, + &subsys_attr_serial.attr, + &subsys_attr_firmware_rev.attr, + &subsys_attr_subsysnqn.attr, + &subsys_attr_subsystype.attr, +#ifdef CONFIG_NVME_MULTIPATH + &subsys_attr_iopolicy.attr, +#endif + NULL, +}; + +static const struct attribute_group nvme_subsys_attrs_group = { + .attrs = nvme_subsys_attrs, +}; + +const struct attribute_group *nvme_subsys_attrs_groups[] = { + &nvme_subsys_attrs_group, + NULL, +}; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index bf0230442d57..260b3554d821 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1802,58 +1802,12 @@ out_free_queues: return ret; } -static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl) -{ - unsigned int nr_io_queues; - - nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus()); - nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus()); - nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus()); - - return nr_io_queues; -} - -static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl, - unsigned int nr_io_queues) -{ - struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); - struct nvmf_ctrl_options *opts = nctrl->opts; - - if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { - /* - * separate read/write queues - * hand out dedicated default queues only after we have - * sufficient read queues. - */ - ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; - nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; - ctrl->io_queues[HCTX_TYPE_DEFAULT] = - min(opts->nr_write_queues, nr_io_queues); - nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; - } else { - /* - * shared read/write queues - * either no write queues were requested, or we don't have - * sufficient queue count to have dedicated default queues. 
- */ - ctrl->io_queues[HCTX_TYPE_DEFAULT] = - min(opts->nr_io_queues, nr_io_queues); - nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; - } - - if (opts->nr_poll_queues && nr_io_queues) { - /* map dedicated poll queues only if we have queues left */ - ctrl->io_queues[HCTX_TYPE_POLL] = - min(opts->nr_poll_queues, nr_io_queues); - } -} - static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) { unsigned int nr_io_queues; int ret; - nr_io_queues = nvme_tcp_nr_io_queues(ctrl); + nr_io_queues = nvmf_nr_io_queues(ctrl->opts); ret = nvme_set_queue_count(ctrl, &nr_io_queues); if (ret) return ret; @@ -1868,8 +1822,8 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) dev_info(ctrl->device, "creating %d I/O queues.\n", nr_io_queues); - nvme_tcp_set_io_queues(ctrl, nr_io_queues); - + nvmf_set_io_queues(ctrl->opts, nr_io_queues, + to_tcp_ctrl(ctrl)->io_queues); return __nvme_tcp_alloc_io_queues(ctrl); } @@ -2449,44 +2403,8 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx, static void nvme_tcp_map_queues(struct blk_mq_tag_set *set) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data); - struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; - - if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) { - /* separate read/write queues */ - set->map[HCTX_TYPE_DEFAULT].nr_queues = - ctrl->io_queues[HCTX_TYPE_DEFAULT]; - set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; - set->map[HCTX_TYPE_READ].nr_queues = - ctrl->io_queues[HCTX_TYPE_READ]; - set->map[HCTX_TYPE_READ].queue_offset = - ctrl->io_queues[HCTX_TYPE_DEFAULT]; - } else { - /* shared read/write queues */ - set->map[HCTX_TYPE_DEFAULT].nr_queues = - ctrl->io_queues[HCTX_TYPE_DEFAULT]; - set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; - set->map[HCTX_TYPE_READ].nr_queues = - ctrl->io_queues[HCTX_TYPE_DEFAULT]; - set->map[HCTX_TYPE_READ].queue_offset = 0; - } - blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); - blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); - - if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) { - /* map dedicated poll queues only if we have queues left */ - set->map[HCTX_TYPE_POLL].nr_queues = - ctrl->io_queues[HCTX_TYPE_POLL]; - set->map[HCTX_TYPE_POLL].queue_offset = - ctrl->io_queues[HCTX_TYPE_DEFAULT] + - ctrl->io_queues[HCTX_TYPE_READ]; - blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); - } - - dev_info(ctrl->ctrl.device, - "mapped %d/%d/%d default/read/poll queues.\n", - ctrl->io_queues[HCTX_TYPE_DEFAULT], - ctrl->io_queues[HCTX_TYPE_READ], - ctrl->io_queues[HCTX_TYPE_POLL]); + + nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues); } static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c index 7970a7640e58..586458f765f1 100644 --- a/drivers/nvme/target/fabrics-cmd-auth.c +++ b/drivers/nvme/target/fabrics-cmd-auth.c @@ -295,13 +295,11 @@ void nvmet_execute_auth_send(struct nvmet_req *req) status = 0; } goto done_kfree; - break; case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2: req->sq->authenticated = true; pr_debug("%s: ctrl %d qid %d ctrl authenticated\n", __func__, ctrl->cntlid, req->sq->qid); goto done_kfree; - break; case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2: status = nvmet_auth_failure2(d); if (status) { @@ -312,7 +310,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req) status = 0; } goto done_kfree; - break; default: req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; @@ -320,7 +317,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req) 
NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; req->sq->authenticated = false; goto done_kfree; - break; } done_failure1: req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; @@ -483,15 +479,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req) status = NVME_SC_INTERNAL; break; } - if (status) { - req->sq->dhchap_status = status; - nvmet_auth_failure1(req, d, al); - pr_warn("ctrl %d qid %d: challenge status (%x)\n", - ctrl->cntlid, req->sq->qid, - req->sq->dhchap_status); - status = 0; - break; - } req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY; break; case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1: diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index e940a7d37a9d..c65a73433c05 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -645,8 +645,6 @@ fcloop_fcp_recv_work(struct work_struct *work) } if (ret) fcloop_call_host_done(fcpreq, tfcp_req, ret); - - return; } static void @@ -1168,7 +1166,8 @@ __wait_localport_unreg(struct fcloop_lport *lport) ret = nvme_fc_unregister_localport(lport->localport); - wait_for_completion(&lport->unreg_done); + if (!ret) + wait_for_completion(&lport->unreg_done); kfree(lport); diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index c2d6cea0236b..2733e0158585 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -51,7 +51,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) void nvmet_bdev_ns_disable(struct nvmet_ns *ns) { if (ns->bdev) { - blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ); + blkdev_put(ns->bdev, NULL); ns->bdev = NULL; } } @@ -85,7 +85,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns) return -ENOTBLK; ns->bdev = blkdev_get_by_path(ns->device_path, - FMODE_READ | FMODE_WRITE, NULL); + BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL); if (IS_ERR(ns->bdev)) { ret = PTR_ERR(ns->bdev); if (ret != -ENOTBLK) { diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index dc60a22646f7..6cf723bc664e 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -109,8 +109,8 @@ struct nvmet_sq { u32 sqhd; bool sqhd_disabled; #ifdef CONFIG_NVME_TARGET_AUTH - struct delayed_work auth_expired_work; bool authenticated; + struct delayed_work auth_expired_work; u16 dhchap_tid; u16 dhchap_status; int dhchap_step; diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 2e01960f1aeb..7feb643f1370 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -811,6 +811,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs) if (!fragment->target) { pr_err("symbols in overlay, but not in live tree\n"); ret = -EINVAL; + of_node_put(node); goto err_out; } diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 9309f2469b41..3c07d8d214b3 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -168,6 +168,7 @@ config PCI_P2PDMA # depends on 64BIT select GENERIC_ALLOCATOR + select NEED_SG_DMA_FLAGS help Enables drivers to do PCI peer-to-peer transactions to and from BARs that are exposed in other devices that are the part of diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index bc32662c6bb7..2d93d0c4f10d 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -489,7 +489,10 @@ struct hv_pcibus_device { struct fwnode_handle *fwnode; /* Protocol version negotiated with the host */ enum pci_protocol_version_t protocol_version; + + struct mutex state_lock; enum hv_pcibus_state
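
The io-cmd-bdev.c hunk above tracks a block-layer interface change made elsewhere in this series: opens now take BLK_OPEN_* flags plus a holder pointer and an optional holder-ops argument, and blkdev_put() is passed the holder rather than the open mode. Judging only from the calls visible in the hunk, the pairing looks like this (kernel-context sketch, error handling trimmed):

	struct block_device *bdev;

	bdev = blkdev_get_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
				  NULL /* holder */, NULL /* holder ops */);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... issue I/O against bdev ... */
	blkdev_put(bdev, NULL /* must match the holder used at open */);
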
state; + struct hv_device *hdev; resource_size_t low_mmio_space; resource_size_t high_mmio_space; @@ -545,19 +548,10 @@ struct hv_dr_state { struct hv_pcidev_description func[]; }; -enum hv_pcichild_state { - hv_pcichild_init = 0, - hv_pcichild_requirements, - hv_pcichild_resourced, - hv_pcichild_ejecting, - hv_pcichild_maximum -}; struct hv_pci_dev { /* List protected by pci_rescan_remove_lock */ struct list_head list_entry; refcount_t refs; - enum hv_pcichild_state state; struct pci_slot *pci_slot; struct hv_pcidev_description desc; bool reported_missing; @@ -635,6 +629,11 @@ static void hv_arch_irq_unmask(struct irq_data *data) pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); int_desc = data->chip_data; + if (!int_desc) { + dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n", + __func__, data->irq); + return; + } local_irq_save(flags); @@ -2004,12 +2003,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) hv_pci_onchannelcallback(hbus); spin_unlock_irqrestore(&channel->sched_lock, flags); - if (hpdev->state == hv_pcichild_ejecting) { - dev_err_once(&hbus->hdev->device, - "the device is being ejected\n"); - goto enable_tasklet; - } - udelay(100); } @@ -2615,6 +2608,8 @@ static void pci_devices_present_work(struct work_struct *work) if (!dr) return; + mutex_lock(&hbus->state_lock); + /* First, mark all existing children as reported missing. */ spin_lock_irqsave(&hbus->device_list_lock, flags); list_for_each_entry(hpdev, &hbus->children, list_entry) { @@ -2696,6 +2691,8 @@ static void pci_devices_present_work(struct work_struct *work) break; } + mutex_unlock(&hbus->state_lock); + kfree(dr); } @@ -2844,7 +2841,7 @@ static void hv_eject_device_work(struct work_struct *work) hpdev = container_of(work, struct hv_pci_dev, wrk); hbus = hpdev->hbus; - WARN_ON(hpdev->state != hv_pcichild_ejecting); + mutex_lock(&hbus->state_lock); /* * Ejection can come before or after the PCI bus has been set up, so @@ -2882,6 +2879,8 @@ static void hv_eject_device_work(struct work_struct *work) put_pcichild(hpdev); put_pcichild(hpdev); /* hpdev has been freed. Do not use it any more. */ + + mutex_unlock(&hbus->state_lock); } /** @@ -2902,7 +2901,6 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev) return; } - hpdev->state = hv_pcichild_ejecting; get_pcichild(hpdev); INIT_WORK(&hpdev->wrk, hv_eject_device_work); queue_work(hbus->wq, &hpdev->wrk); @@ -3331,8 +3329,10 @@ static int hv_pci_enter_d0(struct hv_device *hdev) struct pci_bus_d0_entry *d0_entry; struct hv_pci_compl comp_pkt; struct pci_packet *pkt; + bool retry = true; int ret; +enter_d0_retry: /* * Tell the host that the bus is ready to use, and moved into the * powered-on state. This includes telling the host which region @@ -3359,6 +3359,38 @@ static int hv_pci_enter_d0(struct hv_device *hdev) if (ret) goto exit; + /* + * In certain case (Kdump) the pci device of interest was + * not cleanly shut down and resource is still held on host + * side, the host could return invalid device status. + * We need to explicitly request host to release the resource + * and try to enter D0 again. + */ + if (comp_pkt.completion_status < 0 && retry) { + retry = false; + + dev_err(&hdev->device, "Retrying D0 Entry\n"); + + /* + * Hv_pci_bus_exit() calls hv_send_resources_released() + * to free up resources of its child devices.
+ * In the kdump kernel we need to set the + * wslot_res_allocated to 255 so it scans all child + * devices to release resources allocated in the + * normal kernel before panic happened. + */ + hbus->wslot_res_allocated = 255; + + ret = hv_pci_bus_exit(hdev, true); + + if (ret == 0) { + kfree(pkt); + goto enter_d0_retry; + } + dev_err(&hdev->device, + "Retrying D0 failed with ret %d\n", ret); + } + if (comp_pkt.completion_status < 0) { dev_err(&hdev->device, "PCI Pass-through VSP failed D0 Entry with status %x\n", @@ -3401,6 +3433,24 @@ static int hv_pci_query_relations(struct hv_device *hdev) if (!ret) ret = wait_for_response(hdev, &comp); + /* + * In the case of fast device addition/removal, it's possible that + * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we + * already got a PCI_BUS_RELATIONS* message from the host and the + * channel callback already scheduled a work to hbus->wq, which can be + * running pci_devices_present_work() -> survey_child_resources() -> + * complete(&hbus->survey_event), even after hv_pci_query_relations() + * exits and the stack variable 'comp' is no longer valid; as a result, + * a hang or a page fault may happen when the complete() calls + * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from + * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is + * -ENODEV, there can't be any more work item scheduled to hbus->wq + * after the flush_workqueue(): see vmbus_onoffer_rescind() -> + * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() -> + * channel->rescind = true. + */ + flush_workqueue(hbus->wq); + return ret; } @@ -3586,7 +3636,6 @@ static int hv_pci_probe(struct hv_device *hdev, struct hv_pcibus_device *hbus; u16 dom_req, dom; char *name; - bool enter_d0_retry = true; int ret; bridge = devm_pci_alloc_host_bridge(&hdev->device, 0); @@ -3598,6 +3647,7 @@ static int hv_pci_probe(struct hv_device *hdev, return -ENOMEM; hbus->bridge = bridge; + mutex_init(&hbus->state_lock); hbus->state = hv_pcibus_init; hbus->wslot_res_allocated = -1; @@ -3703,49 +3753,15 @@ static int hv_pci_probe(struct hv_device *hdev, if (ret) goto free_fwnode; -retry: ret = hv_pci_query_relations(hdev); if (ret) goto free_irq_domain; - ret = hv_pci_enter_d0(hdev); - /* - * In certain case (Kdump) the pci device of interest was - * not cleanly shut down and resource is still held on host - * side, the host could return invalid device status. - * We need to explicitly request host to release the resource - * and try to enter D0 again. - * Since the hv_pci_bus_exit() call releases structures - * of all its child devices, we need to start the retry from - * hv_pci_query_relations() call, requesting host to send - * the synchronous child device relations message before this - * information is needed in hv_send_resources_allocated() - * call later. - */ - if (ret == -EPROTO && enter_d0_retry) { - enter_d0_retry = false; - - dev_err(&hdev->device, "Retrying D0 Entry\n"); - - /* - * Hv_pci_bus_exit() calls hv_send_resources_released() - * to free up resources of its child devices. - * In the kdump kernel we need to set the - * wslot_res_allocated to 255 so it scans all child - * devices to release resources allocated in the - * normal kernel before panic happened. 
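
Taken together, the pci-hyperv changes do two things: the kdump retry now lives inside hv_pci_enter_d0() itself and runs with hbus->state_lock held (the same mutex now serializing device-presence and eject work), so a failed D0 entry no longer has to restart from hv_pci_query_relations(); and the new flush_workqueue() keeps a queued pci_devices_present_work() from completing against the on-stack 'comp' after the query path has returned. A compilable sketch of just the retry-once shape, with hypothetical stand-ins for the two host round-trips:

#include <stdbool.h>

/* Hypothetical stand-ins for the host round-trips in hv_pci_enter_d0(). */
static int tell_host_bus_is_ready(void) { return -1; }
static int release_host_resources(void) { return 0; }

static int enter_d0_sketch(void)
{
	bool retry = true;
	int status;

retry_once:
	status = tell_host_bus_is_ready();
	if (status < 0 && retry) {
		retry = false;
		/* Ask the host to drop stale resources, then try once more. */
		if (release_host_resources() == 0)
			goto retry_once;
	}
	return status;
}
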
- */ - hbus->wslot_res_allocated = 255; - ret = hv_pci_bus_exit(hdev, true); - - if (ret == 0) - goto retry; + mutex_lock(&hbus->state_lock); - dev_err(&hdev->device, - "Retrying D0 failed with ret %d\n", ret); - } + ret = hv_pci_enter_d0(hdev); if (ret) - goto free_irq_domain; + goto release_state_lock; ret = hv_pci_allocate_bridge_windows(hbus); if (ret) @@ -3763,12 +3779,15 @@ retry: if (ret) goto free_windows; + mutex_unlock(&hbus->state_lock); return 0; free_windows: hv_pci_free_bridge_windows(hbus); exit_d0: (void) hv_pci_bus_exit(hdev, true); +release_state_lock: + mutex_unlock(&hbus->state_lock); free_irq_domain: irq_domain_remove(hbus->irq_domain); free_fwnode: @@ -4018,20 +4037,26 @@ static int hv_pci_resume(struct hv_device *hdev) if (ret) goto out; + mutex_lock(&hbus->state_lock); + ret = hv_pci_enter_d0(hdev); if (ret) - goto out; + goto release_state_lock; ret = hv_send_resources_allocated(hdev); if (ret) - goto out; + goto release_state_lock; prepopulate_bars(hbus); hv_pci_restore_msi_state(hbus); hbus->state = hv_pcibus_installed; + mutex_unlock(&hbus->state_lock); return 0; + +release_state_lock: + mutex_unlock(&hbus->state_lock); out: vmbus_close(hdev->channel); return ret; diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 711f82400086..4c07d71cab0b 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -127,6 +127,14 @@ config FSL_IMX8_DDR_PMU can give information about memory throughput and other related events. +config FSL_IMX9_DDR_PMU + tristate "Freescale i.MX9 DDR perf monitor" + depends on ARCH_MXC + help + Provides support for the DDR performance monitor in i.MX9, which + can give information about memory throughput and other related + events. + config QCOM_L2_PMU bool "Qualcomm Technologies L2-cache PMU" depends on ARCH_QCOM && ARM64 && ACPI diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index dabc859540ce..5cfe8954c250 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o obj-$(CONFIG_ARM_PMUV3) += arm_pmuv3.o obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o +obj-$(CONFIG_FSL_IMX9_DDR_PMU) += fsl_imx9_ddr_perf.o obj-$(CONFIG_HISI_PMU) += hisilicon/ obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c index 8574c6e58c83..cd2de44b61b9 100644 --- a/drivers/perf/apple_m1_cpu_pmu.c +++ b/drivers/perf/apple_m1_cpu_pmu.c @@ -493,6 +493,17 @@ static int m1_pmu_map_event(struct perf_event *event) return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT); } +static int m2_pmu_map_event(struct perf_event *event) +{ + /* + * Same deal as the above, except that M2 has 64bit counters. + * Which, as far as we're concerned, actually means 63 bits. + * Yes, this is getting awkward. 
+ */ + event->hw.flags |= ARMPMU_EVT_63BIT; + return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT); +} + static void m1_pmu_reset(void *info) { int i; @@ -525,7 +536,7 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event, return 0; } -static int m1_pmu_init(struct arm_pmu *cpu_pmu) +static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags) { cpu_pmu->handle_irq = m1_pmu_handle_irq; cpu_pmu->enable = m1_pmu_enable_event; @@ -536,7 +547,14 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->clear_event_idx = m1_pmu_clear_event_idx; cpu_pmu->start = m1_pmu_start; cpu_pmu->stop = m1_pmu_stop; - cpu_pmu->map_event = m1_pmu_map_event; + + if (flags & ARMPMU_EVT_47BIT) + cpu_pmu->map_event = m1_pmu_map_event; + else if (flags & ARMPMU_EVT_63BIT) + cpu_pmu->map_event = m2_pmu_map_event; + else + return WARN_ON(-EINVAL); + cpu_pmu->reset = m1_pmu_reset; cpu_pmu->set_event_filter = m1_pmu_set_event_filter; @@ -550,25 +568,25 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu) static int m1_pmu_ice_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "apple_icestorm_pmu"; - return m1_pmu_init(cpu_pmu); + return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT); } static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "apple_firestorm_pmu"; - return m1_pmu_init(cpu_pmu); + return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT); } static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "apple_avalanche_pmu"; - return m1_pmu_init(cpu_pmu); + return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT); } static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "apple_blizzard_pmu"; - return m1_pmu_init(cpu_pmu); + return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT); } static const struct of_device_id m1_pmu_of_device_ids[] = { diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 03b1309875ae..998259f1d973 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -645,7 +645,7 @@ static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu) struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; DECLARE_BITMAP(mask, HW_CNTRS_MAX); - bitmap_zero(mask, cci_pmu->num_cntrs); + bitmap_zero(mask, HW_CNTRS_MAX); for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) { struct perf_event *event = cci_hw->events[i]; @@ -656,7 +656,7 @@ static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu) if (event->hw.state & PERF_HES_STOPPED) continue; if (event->hw.state & PERF_HES_ARCH) { - set_bit(i, mask); + __set_bit(i, mask); event->hw.state &= ~PERF_HES_ARCH; } } diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 47d359f72957..b8c15878bc86 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -44,8 +44,11 @@ #define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4) /* The CFG node has various info besides the discovery tree */ -#define CMN_CFGM_PERIPH_ID_2 0x0010 -#define CMN_CFGM_PID2_REVISION GENMASK(7, 4) +#define CMN_CFGM_PERIPH_ID_01 0x0008 +#define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0) +#define CMN_CFGM_PID1_PART_1 GENMASK_ULL(35, 32) +#define CMN_CFGM_PERIPH_ID_23 0x0010 +#define CMN_CFGM_PID2_REVISION GENMASK_ULL(7, 4) #define CMN_CFGM_INFO_GLOBAL 0x900 #define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63) @@ -186,6 +189,7 @@ #define CMN_WP_DOWN 2 +/* Internal values for encoding event support */ enum cmn_model { CMN600 = 1, CMN650 = 2, @@ -197,26 +201,34 @@ enum cmn_model { CMN_650ON = CMN650 | CMN700, }; +/* Actual part numbers and revision IDs defined by the hardware */ +enum cmn_part { + 
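
Two details in the arm-cci hunk above are easy to miss: sizing bitmap_zero() with the compile-time constant HW_CNTRS_MAX (instead of the runtime num_cntrs) matches the bitmap's declared size and lets the clear be resolved at build time, and __set_bit() is the non-atomic variant, which is safe because mask lives on the stack where no other context can see it. A stand-in showing what the non-atomic form amounts to (not the kernel's implementation):

#include <stdio.h>

#define BITS_PER_LONG_SKETCH	(8u * sizeof(unsigned long))

/* Plain read-modify-write: no atomicity, fine for thread-local bitmaps. */
static void set_bit_sketch(unsigned int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG_SKETCH] |= 1ul << (nr % BITS_PER_LONG_SKETCH);
}

int main(void)
{
	unsigned long mask[1] = { 0 };	/* like DECLARE_BITMAP(mask, HW_CNTRS_MAX) */

	set_bit_sketch(3, mask);
	printf("mask=%#lx\n", mask[0]);	/* prints 0x8 */
	return 0;
}
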
PART_CMN600 = 0x434, + PART_CMN650 = 0x436, + PART_CMN700 = 0x43c, + PART_CI700 = 0x43a, +}; + /* CMN-600 r0px shouldn't exist in silicon, thankfully */ enum cmn_revision { - CMN600_R1P0, - CMN600_R1P1, - CMN600_R1P2, - CMN600_R1P3, - CMN600_R2P0, - CMN600_R3P0, - CMN600_R3P1, - CMN650_R0P0 = 0, - CMN650_R1P0, - CMN650_R1P1, - CMN650_R2P0, - CMN650_R1P2, - CMN700_R0P0 = 0, - CMN700_R1P0, - CMN700_R2P0, - CI700_R0P0 = 0, - CI700_R1P0, - CI700_R2P0, + REV_CMN600_R1P0, + REV_CMN600_R1P1, + REV_CMN600_R1P2, + REV_CMN600_R1P3, + REV_CMN600_R2P0, + REV_CMN600_R3P0, + REV_CMN600_R3P1, + REV_CMN650_R0P0 = 0, + REV_CMN650_R1P0, + REV_CMN650_R1P1, + REV_CMN650_R2P0, + REV_CMN650_R1P2, + REV_CMN700_R0P0 = 0, + REV_CMN700_R1P0, + REV_CMN700_R2P0, + REV_CI700_R0P0 = 0, + REV_CI700_R1P0, + REV_CI700_R2P0, }; enum cmn_node_type { @@ -306,7 +318,7 @@ struct arm_cmn { unsigned int state; enum cmn_revision rev; - enum cmn_model model; + enum cmn_part part; u8 mesh_x; u8 mesh_y; u16 num_xps; @@ -394,19 +406,35 @@ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, return NULL; } +static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn) +{ + switch (cmn->part) { + case PART_CMN600: + return CMN600; + case PART_CMN650: + return CMN650; + case PART_CMN700: + return CMN700; + case PART_CI700: + return CI700; + default: + return 0; + }; +} + static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn, const struct arm_cmn_node *xp, int port) { int offset = CMN_MXP__CONNECT_INFO(port); if (port >= 2) { - if (cmn->model & (CMN600 | CMN650)) + if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650) return 0; /* * CI-700 may have extra ports, but still has the * mesh_port_connect_info registers in the way. */ - if (cmn->model == CI700) + if (cmn->part == PART_CI700) offset += CI700_CONNECT_INFO_P2_5_OFFSET; } @@ -640,7 +668,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, eattr = container_of(attr, typeof(*eattr), attr.attr); - if (!(eattr->model & cmn->model)) + if (!(eattr->model & arm_cmn_model(cmn))) return 0; type = eattr->type; @@ -658,7 +686,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3))) return 0; - if (chan == 4 && cmn->model == CMN600) + if (chan == 4 && cmn->part == PART_CMN600) return 0; if ((chan == 5 && cmn->rsp_vc_num < 2) || @@ -669,19 +697,19 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, } /* Revision-specific differences */ - if (cmn->model == CMN600) { - if (cmn->rev < CMN600_R1P3) { + if (cmn->part == PART_CMN600) { + if (cmn->rev < REV_CMN600_R1P3) { if (type == CMN_TYPE_CXRA && eventid > 0x10) return 0; } - if (cmn->rev < CMN600_R1P2) { + if (cmn->rev < REV_CMN600_R1P2) { if (type == CMN_TYPE_HNF && eventid == 0x1b) return 0; if (type == CMN_TYPE_CXRA || type == CMN_TYPE_CXHA) return 0; } - } else if (cmn->model == CMN650) { - if (cmn->rev < CMN650_R2P0 || cmn->rev == CMN650_R1P2) { + } else if (cmn->part == PART_CMN650) { + if (cmn->rev < REV_CMN650_R2P0 || cmn->rev == REV_CMN650_R1P2) { if (type == CMN_TYPE_HNF && eventid > 0x22) return 0; if (type == CMN_TYPE_SBSX && eventid == 0x17) @@ -689,8 +717,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, if (type == CMN_TYPE_RNI && eventid > 0x10) return 0; } - } else if (cmn->model == CMN700) { - if (cmn->rev < CMN700_R2P0) { + } else if (cmn->part == PART_CMN700) { + if (cmn->rev < REV_CMN700_R2P0) { if (type == CMN_TYPE_HNF && eventid > 0x2c) return 0; if (type == 
CMN_TYPE_CCHA && eventid > 0x74) @@ -698,7 +726,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, if (type == CMN_TYPE_CCLA && eventid > 0x27) return 0; } - if (cmn->rev < CMN700_R1P0) { + if (cmn->rev < REV_CMN700_R1P0) { if (type == CMN_TYPE_HNF && eventid > 0x2b) return 0; } @@ -1171,19 +1199,31 @@ static ssize_t arm_cmn_cpumask_show(struct device *dev, static struct device_attribute arm_cmn_cpumask_attr = __ATTR(cpumask, 0444, arm_cmn_cpumask_show, NULL); -static struct attribute *arm_cmn_cpumask_attrs[] = { +static ssize_t arm_cmn_identifier_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%03x%02x\n", cmn->part, cmn->rev); +} + +static struct device_attribute arm_cmn_identifier_attr = + __ATTR(identifier, 0444, arm_cmn_identifier_show, NULL); + +static struct attribute *arm_cmn_other_attrs[] = { &arm_cmn_cpumask_attr.attr, + &arm_cmn_identifier_attr.attr, NULL, }; -static const struct attribute_group arm_cmn_cpumask_attr_group = { - .attrs = arm_cmn_cpumask_attrs, +static const struct attribute_group arm_cmn_other_attrs_group = { + .attrs = arm_cmn_other_attrs, }; static const struct attribute_group *arm_cmn_attr_groups[] = { &arm_cmn_event_attrs_group, &arm_cmn_format_attrs_group, - &arm_cmn_cpumask_attr_group, + &arm_cmn_other_attrs_group, NULL }; @@ -1200,7 +1240,7 @@ static u32 arm_cmn_wp_config(struct perf_event *event) u32 grp = CMN_EVENT_WP_GRP(event); u32 exc = CMN_EVENT_WP_EXCLUSIVE(event); u32 combine = CMN_EVENT_WP_COMBINE(event); - bool is_cmn600 = to_cmn(event->pmu)->model == CMN600; + bool is_cmn600 = to_cmn(event->pmu)->part == PART_CMN600; config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) | FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) | @@ -1520,14 +1560,14 @@ done: return ret; } -static enum cmn_filter_select arm_cmn_filter_sel(enum cmn_model model, +static enum cmn_filter_select arm_cmn_filter_sel(const struct arm_cmn *cmn, enum cmn_node_type type, unsigned int eventid) { struct arm_cmn_event_attr *e; - int i; + enum cmn_model model = arm_cmn_model(cmn); - for (i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) { + for (int i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) { e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr); if (e->model & model && e->type == type && e->eventid == eventid) return e->fsel; @@ -1570,12 +1610,12 @@ static int arm_cmn_event_init(struct perf_event *event) /* ...but the DTM may depend on which port we're watching */ if (cmn->multi_dtm) hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2; - } else if (type == CMN_TYPE_XP && cmn->model == CMN700) { + } else if (type == CMN_TYPE_XP && cmn->part == PART_CMN700) { hw->wide_sel = true; } /* This is sufficiently annoying to recalculate, so cache it */ - hw->filter_sel = arm_cmn_filter_sel(cmn->model, type, eventid); + hw->filter_sel = arm_cmn_filter_sel(cmn, type, eventid); bynodeid = CMN_EVENT_BYNODEID(event); nodeid = CMN_EVENT_NODEID(event); @@ -1899,9 +1939,10 @@ static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int id if (dtc->irq < 0) return dtc->irq; - writel_relaxed(0, dtc->base + CMN_DT_PMCR); + writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL); + writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR); + writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR); writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR); - writel_relaxed(CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + 
CMN_DT_PMCR); return 0; } @@ -1961,7 +2002,7 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) dn->type = CMN_TYPE_CCLA; } - writel_relaxed(CMN_DT_DTC_CTL_DT_EN, cmn->dtc[0].base + CMN_DT_DTC_CTL); + arm_cmn_set_state(cmn, CMN_STATE_DISABLED); return 0; } @@ -2006,6 +2047,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) void __iomem *cfg_region; struct arm_cmn_node cfg, *dn; struct arm_cmn_dtm *dtm; + enum cmn_part part; u16 child_count, child_poff; u32 xp_offset[CMN_MAX_XPS]; u64 reg; @@ -2017,7 +2059,19 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) return -ENODEV; cfg_region = cmn->base + rgn_offset; - reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2); + + reg = readq_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_01); + part = FIELD_GET(CMN_CFGM_PID0_PART_0, reg); + part |= FIELD_GET(CMN_CFGM_PID1_PART_1, reg) << 8; + if (cmn->part && cmn->part != part) + dev_warn(cmn->dev, + "Firmware binding mismatch: expected part number 0x%x, found 0x%x\n", + cmn->part, part); + cmn->part = part; + if (!arm_cmn_model(cmn)) + dev_warn(cmn->dev, "Unknown part number: 0x%x\n", part); + + reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_23); cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg); reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL); @@ -2081,7 +2135,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) if (xp->id == (1 << 3)) cmn->mesh_x = xp->logid; - if (cmn->model == CMN600) + if (cmn->part == PART_CMN600) xp->dtc = 0xf; else xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO); @@ -2201,7 +2255,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) if (cmn->num_xps == 1) dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n"); - dev_dbg(cmn->dev, "model %d, periph_id_2 revision %d\n", cmn->model, cmn->rev); + dev_dbg(cmn->dev, "periph_id part 0x%03x revision %d\n", cmn->part, cmn->rev); reg = cmn->ports_used; dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n", cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), ®, @@ -2256,17 +2310,17 @@ static int arm_cmn_probe(struct platform_device *pdev) return -ENOMEM; cmn->dev = &pdev->dev; - cmn->model = (unsigned long)device_get_match_data(cmn->dev); + cmn->part = (unsigned long)device_get_match_data(cmn->dev); platform_set_drvdata(pdev, cmn); - if (cmn->model == CMN600 && has_acpi_companion(cmn->dev)) { + if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) { rootnode = arm_cmn600_acpi_probe(pdev, cmn); } else { rootnode = 0; cmn->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(cmn->base)) return PTR_ERR(cmn->base); - if (cmn->model == CMN600) + if (cmn->part == PART_CMN600) rootnode = arm_cmn600_of_probe(pdev->dev.of_node); } if (rootnode < 0) @@ -2335,10 +2389,10 @@ static int arm_cmn_remove(struct platform_device *pdev) #ifdef CONFIG_OF static const struct of_device_id arm_cmn_of_match[] = { - { .compatible = "arm,cmn-600", .data = (void *)CMN600 }, - { .compatible = "arm,cmn-650", .data = (void *)CMN650 }, - { .compatible = "arm,cmn-700", .data = (void *)CMN700 }, - { .compatible = "arm,ci-700", .data = (void *)CI700 }, + { .compatible = "arm,cmn-600", .data = (void *)PART_CMN600 }, + { .compatible = "arm,cmn-650" }, + { .compatible = "arm,cmn-700" }, + { .compatible = "arm,ci-700" }, {} }; MODULE_DEVICE_TABLE(of, arm_cmn_of_match); @@ -2346,9 +2400,9 @@ MODULE_DEVICE_TABLE(of, arm_cmn_of_match); #ifdef CONFIG_ACPI static const struct acpi_device_id 
arm_cmn_acpi_match[] = { - { "ARMHC600", CMN600 }, - { "ARMHC650", CMN650 }, - { "ARMHC700", CMN700 }, + { "ARMHC600", PART_CMN600 }, + { "ARMHC650" }, + { "ARMHC700" }, {} }; MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match); diff --git a/drivers/perf/arm_cspmu/Kconfig b/drivers/perf/arm_cspmu/Kconfig index 0b316fe69a45..25d25ded0983 100644 --- a/drivers/perf/arm_cspmu/Kconfig +++ b/drivers/perf/arm_cspmu/Kconfig @@ -4,8 +4,7 @@ config ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU tristate "ARM Coresight Architecture PMU" - depends on ARM64 && ACPI - depends on ACPI_APMT || COMPILE_TEST + depends on ARM64 || COMPILE_TEST help Provides support for performance monitoring unit (PMU) devices based on ARM CoreSight PMU architecture. Note that this PMU diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c index a3f1c410b417..e2b7827c4563 100644 --- a/drivers/perf/arm_cspmu/arm_cspmu.c +++ b/drivers/perf/arm_cspmu/arm_cspmu.c @@ -28,7 +28,6 @@ #include <linux/module.h> #include <linux/perf_event.h> #include <linux/platform_device.h> -#include <acpi/processor.h> #include "arm_cspmu.h" #include "nvidia_cspmu.h" @@ -101,10 +100,6 @@ #define ARM_CSPMU_ACTIVE_CPU_MASK 0x0 #define ARM_CSPMU_ASSOCIATED_CPU_MASK 0x1 -/* Check if field f in flags is set with value v */ -#define CHECK_APMT_FLAG(flags, f, v) \ - ((flags & (ACPI_APMT_FLAGS_ ## f)) == (ACPI_APMT_FLAGS_ ## f ## _ ## v)) - /* Check and use default if implementer doesn't provide attribute callback */ #define CHECK_DEFAULT_IMPL_OPS(ops, callback) \ do { \ @@ -122,6 +117,11 @@ static unsigned long arm_cspmu_cpuhp_state; +static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev) +{ + return *(struct acpi_apmt_node **)dev_get_platdata(dev); +} + /* * In CoreSight PMU architecture, all of the MMIO registers are 32-bit except * counter register. The counter register can be implemented as 32-bit or 64-bit @@ -156,12 +156,6 @@ static u64 read_reg64_hilohi(const void __iomem *addr, u32 max_poll_count) return val; } -/* Check if PMU supports 64-bit single copy atomic. */ -static inline bool supports_64bit_atomics(const struct arm_cspmu *cspmu) -{ - return CHECK_APMT_FLAG(cspmu->apmt_node->flags, ATOMIC, SUPP); -} - /* Check if cycle counter is supported. 
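 * (advertised by the PMCFGR.CC capability field; an illustrative note,
 * assuming the register layout described by the CoreSight PMU architecture)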
*/ static inline bool supports_cycle_counter(const struct arm_cspmu *cspmu) { @@ -189,10 +183,10 @@ static inline bool use_64b_counter_reg(const struct arm_cspmu *cspmu) ssize_t arm_cspmu_sysfs_event_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dev_ext_attribute *eattr = - container_of(attr, struct dev_ext_attribute, attr); - return sysfs_emit(buf, "event=0x%llx\n", - (unsigned long long)eattr->var); + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, typeof(*pmu_attr), attr); + return sysfs_emit(buf, "event=0x%llx\n", pmu_attr->id); } EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_event_show); @@ -320,7 +314,7 @@ static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu) static atomic_t pmu_idx[ACPI_APMT_NODE_TYPE_COUNT] = { 0 }; dev = cspmu->dev; - apmt_node = cspmu->apmt_node; + apmt_node = arm_cspmu_apmt_node(dev); pmu_type = apmt_node->type; if (pmu_type >= ACPI_APMT_NODE_TYPE_COUNT) { @@ -397,8 +391,8 @@ static const struct impl_match impl_match[] = { static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu) { int ret; - struct acpi_apmt_node *apmt_node = cspmu->apmt_node; struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops; + struct acpi_apmt_node *apmt_node = arm_cspmu_apmt_node(cspmu->dev); const struct impl_match *match = impl_match; /* @@ -720,7 +714,7 @@ static u64 arm_cspmu_read_counter(struct perf_event *event) offset = counter_offset(sizeof(u64), event->hw.idx); counter_addr = cspmu->base1 + offset; - return supports_64bit_atomics(cspmu) ? + return cspmu->has_atomic_dword ? readq(counter_addr) : read_reg64_hilohi(counter_addr, HILOHI_MAX_POLL); } @@ -911,24 +905,18 @@ static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev) { struct acpi_apmt_node *apmt_node; struct arm_cspmu *cspmu; - struct device *dev; - - dev = &pdev->dev; - apmt_node = *(struct acpi_apmt_node **)dev_get_platdata(dev); - if (!apmt_node) { - dev_err(dev, "failed to get APMT node\n"); - return NULL; - } + struct device *dev = &pdev->dev; cspmu = devm_kzalloc(dev, sizeof(*cspmu), GFP_KERNEL); if (!cspmu) return NULL; cspmu->dev = dev; - cspmu->apmt_node = apmt_node; - platform_set_drvdata(pdev, cspmu); + apmt_node = arm_cspmu_apmt_node(dev); + cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC; + return cspmu; } @@ -936,11 +924,9 @@ static int arm_cspmu_init_mmio(struct arm_cspmu *cspmu) { struct device *dev; struct platform_device *pdev; - struct acpi_apmt_node *apmt_node; dev = cspmu->dev; pdev = to_platform_device(dev); - apmt_node = cspmu->apmt_node; /* Base address for page 0. */ cspmu->base0 = devm_platform_ioremap_resource(pdev, 0); @@ -951,7 +937,7 @@ static int arm_cspmu_init_mmio(struct arm_cspmu *cspmu) /* Base address for page 1 if supported. Otherwise point to page 0. */ cspmu->base1 = cspmu->base0; - if (CHECK_APMT_FLAG(apmt_node->flags, DUAL_PAGE, SUPP)) { + if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) { cspmu->base1 = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(cspmu->base1)) { dev_err(dev, "ioremap failed for page-1 resource\n"); @@ -1048,19 +1034,14 @@ static int arm_cspmu_request_irq(struct arm_cspmu *cspmu) int irq, ret; struct device *dev; struct platform_device *pdev; - struct acpi_apmt_node *apmt_node; dev = cspmu->dev; pdev = to_platform_device(dev); - apmt_node = cspmu->apmt_node; /* Skip IRQ request if the PMU does not support overflow interrupt. 
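 * platform_get_irq_optional() reports such a missing interrupt as -ENXIO,
 * which is treated as success below.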
*/ - if (apmt_node->ovflw_irq == 0) - return 0; - - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq < 0) - return irq; + return irq == -ENXIO ? 0 : irq; ret = devm_request_irq(dev, irq, arm_cspmu_handle_irq, IRQF_NOBALANCING | IRQF_NO_THREAD, dev_name(dev), @@ -1075,6 +1056,9 @@ static int arm_cspmu_request_irq(struct arm_cspmu *cspmu) return 0; } +#if defined(CONFIG_ACPI) && defined(CONFIG_ARM64) +#include <acpi/processor.h> + static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid) { u32 acpi_uid; @@ -1099,15 +1083,13 @@ static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid) return -ENODEV; } -static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu) +static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu) { - struct device *dev; struct acpi_apmt_node *apmt_node; int affinity_flag; int cpu; - dev = cspmu->pmu.dev; - apmt_node = cspmu->apmt_node; + apmt_node = arm_cspmu_apmt_node(cspmu->dev); affinity_flag = apmt_node->flags & ACPI_APMT_FLAGS_AFFINITY; if (affinity_flag == ACPI_APMT_FLAGS_AFFINITY_PROC) { @@ -1129,12 +1111,23 @@ static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu) } if (cpumask_empty(&cspmu->associated_cpus)) { - dev_dbg(dev, "No cpu associated with the PMU\n"); + dev_dbg(cspmu->dev, "No cpu associated with the PMU\n"); return -ENODEV; } return 0; } +#else +static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu) +{ + return -ENODEV; +} +#endif + +static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu) +{ + return arm_cspmu_acpi_get_cpus(cspmu); +} static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu) { @@ -1220,6 +1213,12 @@ static int arm_cspmu_device_remove(struct platform_device *pdev) return 0; } +static const struct platform_device_id arm_cspmu_id[] = { + {DRVNAME, 0}, + { }, +}; +MODULE_DEVICE_TABLE(platform, arm_cspmu_id); + static struct platform_driver arm_cspmu_driver = { .driver = { .name = DRVNAME, @@ -1227,12 +1226,14 @@ static struct platform_driver arm_cspmu_driver = { }, .probe = arm_cspmu_device_probe, .remove = arm_cspmu_device_remove, + .id_table = arm_cspmu_id, }; static void arm_cspmu_set_active_cpu(int cpu, struct arm_cspmu *cspmu) { cpumask_set_cpu(cpu, &cspmu->active_cpu); - WARN_ON(irq_set_affinity(cspmu->irq, &cspmu->active_cpu)); + if (cspmu->irq) + WARN_ON(irq_set_affinity(cspmu->irq, &cspmu->active_cpu)); } static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node) diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h index 51323b175a4a..83df53d1c132 100644 --- a/drivers/perf/arm_cspmu/arm_cspmu.h +++ b/drivers/perf/arm_cspmu/arm_cspmu.h @@ -8,7 +8,6 @@ #ifndef __ARM_CSPMU_H__ #define __ARM_CSPMU_H__ -#include <linux/acpi.h> #include <linux/bitfield.h> #include <linux/cpumask.h> #include <linux/device.h> @@ -118,16 +117,16 @@ struct arm_cspmu_impl { struct arm_cspmu { struct pmu pmu; struct device *dev; - struct acpi_apmt_node *apmt_node; const char *name; const char *identifier; void __iomem *base0; void __iomem *base1; - int irq; cpumask_t associated_cpus; cpumask_t active_cpu; struct hlist_node cpuhp_node; + int irq; + bool has_atomic_dword; u32 pmcfgr; u32 num_logical_ctrs; u32 num_set_clr_reg; diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c index 5de06f9a4dd3..9d0f01c4455a 100644 --- a/drivers/perf/arm_dmc620_pmu.c +++ b/drivers/perf/arm_dmc620_pmu.c @@ -227,9 +227,31 @@ static const struct attribute_group dmc620_pmu_format_attr_group = { .attrs = dmc620_pmu_formats_attrs, }; 
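The DMC-620 PMU below gains the conventional uncore cpumask attribute. System-wide PMUs reject per-task events, so tooling has to open counters on the advertised CPU; a minimal userspace sketch of reading it (the instance name is hypothetical, not from this patch):

#include <stdio.h>

int main(void)
{
	/* Hypothetical instance name; the real one depends on the platform. */
	FILE *f = fopen("/sys/bus/event_source/devices/arm_dmc620_10008000/cpumask", "r");
	char buf[64];

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("open uncore events on CPU(s) %s", buf);
	fclose(f);
	return 0;
}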
+static ssize_t dmc620_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, + cpumask_of(dmc620_pmu->irq->cpu)); +} + +static struct device_attribute dmc620_pmu_cpumask_attr = + __ATTR(cpumask, 0444, dmc620_pmu_cpumask_show, NULL); + +static struct attribute *dmc620_pmu_cpumask_attrs[] = { + &dmc620_pmu_cpumask_attr.attr, + NULL, +}; + +static const struct attribute_group dmc620_pmu_cpumask_attr_group = { + .attrs = dmc620_pmu_cpumask_attrs, +}; + static const struct attribute_group *dmc620_pmu_attr_groups[] = { &dmc620_pmu_events_attr_group, &dmc620_pmu_format_attr_group, + &dmc620_pmu_cpumask_attr_group, NULL, }; diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 15bd1e34a88e..f6ccb2cd4dfc 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -109,6 +109,8 @@ static inline u64 arm_pmu_event_max_period(struct perf_event *event) { if (event->hw.flags & ARMPMU_EVT_64BIT) return GENMASK_ULL(63, 0); + else if (event->hw.flags & ARMPMU_EVT_63BIT) + return GENMASK_ULL(62, 0); else if (event->hw.flags & ARMPMU_EVT_47BIT) return GENMASK_ULL(46, 0); else @@ -687,6 +689,11 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu) return per_cpu(hw_events->irq, cpu); } +bool arm_pmu_irq_is_nmi(void) +{ + return has_nmi; +} + /* * PMU hardware loses all context when a CPU goes offline. * When a CPU is hotplugged back in, since some hardware registers are diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index c98e4039386d..08b3a1bf0ef6 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -22,6 +22,7 @@ #include <linux/platform_device.h> #include <linux/sched_clock.h> #include <linux/smp.h> +#include <linux/nmi.h> #include <asm/arm_pmuv3.h> @@ -677,9 +678,25 @@ static inline u32 armv8pmu_getreset_flags(void) return value; } +static void update_pmuserenr(u64 val) +{ + lockdep_assert_irqs_disabled(); + + /* + * The current PMUSERENR_EL0 value might be the value for the guest. + * If that's the case, have KVM keep track of the register value + * for the host EL0 so that KVM can restore it before returning to + * the host EL0. Otherwise, update the register now.
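+	 * (kvm_set_pmuserenr() returns true when KVM has taken over the
+	 * update, in which case the direct register write is skipped.)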
+ */ + if (kvm_set_pmuserenr(val)) + return; + + write_pmuserenr(val); +} + static void armv8pmu_disable_user_access(void) { - write_pmuserenr(0); + update_pmuserenr(0); } static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) @@ -695,8 +712,7 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) armv8pmu_write_evcntr(i, 0); } - write_pmuserenr(0); - write_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR); + update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR); } static void armv8pmu_enable_event(struct perf_event *event) @@ -1348,10 +1364,17 @@ static struct platform_driver armv8_pmu_driver = { static int __init armv8_pmu_driver_init(void) { + int ret; + if (acpi_disabled) - return platform_driver_register(&armv8_pmu_driver); + ret = platform_driver_register(&armv8_pmu_driver); else - return arm_pmu_acpi_probe(armv8_pmuv3_pmu_init); + ret = arm_pmu_acpi_probe(armv8_pmuv3_pmu_init); + + if (!ret) + lockup_detector_retry_init(); + + return ret; } device_initcall(armv8_pmu_driver_init) diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c new file mode 100644 index 000000000000..71d5b07e3aff --- /dev/null +++ b/drivers/perf/fsl_imx9_ddr_perf.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright 2023 NXP + +#include <linux/bitfield.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/perf_event.h> + +/* Performance monitor configuration */ +#define PMCFG1 0x00 +#define PMCFG1_RD_TRANS_FILT_EN BIT(31) +#define PMCFG1_WR_TRANS_FILT_EN BIT(30) +#define PMCFG1_RD_BT_FILT_EN BIT(29) +#define PMCFG1_ID_MASK GENMASK(17, 0) + +#define PMCFG2 0x04 +#define PMCFG2_ID GENMASK(17, 0) + +/* Global control register affects all counters and takes priority over local control registers */ +#define PMGC0 0x40 +/* Global control register bits */ +#define PMGC0_FAC BIT(31) +#define PMGC0_PMIE BIT(30) +#define PMGC0_FCECE BIT(29) + +/* + * 64bit counter0 exclusively dedicated to counting cycles + * 32bit counters monitor counter-specific events in addition to counting reference events + */ +#define PMLCA(n) (0x40 + 0x10 + (0x10 * n)) +#define PMLCB(n) (0x40 + 0x14 + (0x10 * n)) +#define PMC(n) (0x40 + 0x18 + (0x10 * n)) +/* Local control register bits */ +#define PMLCA_FC BIT(31) +#define PMLCA_CE BIT(26) +#define PMLCA_EVENT GENMASK(22, 16) + +#define NUM_COUNTERS 11 +#define CYCLES_COUNTER 0 + +#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu) + +#define DDR_PERF_DEV_NAME "imx9_ddr" +#define DDR_CPUHP_CB_NAME DDR_PERF_DEV_NAME "_perf_pmu" + +static DEFINE_IDA(ddr_ida); + +struct imx_ddr_devtype_data { + const char *identifier; /* system PMU identifier for userspace */ +}; + +struct ddr_pmu { + struct pmu pmu; + void __iomem *base; + unsigned int cpu; + struct hlist_node node; + struct device *dev; + struct perf_event *events[NUM_COUNTERS]; + int active_events; + enum cpuhp_state cpuhp_state; + const struct imx_ddr_devtype_data *devtype_data; + int irq; + int id; +}; + +static const struct imx_ddr_devtype_data imx93_devtype_data = { + .identifier = "imx93", +}; + +static const struct of_device_id imx_ddr_pmu_dt_ids[] = { + {.compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids); + +static ssize_t ddr_perf_identifier_show(struct device *dev, + struct 
device_attribute *attr, + char *page) +{ + struct ddr_pmu *pmu = dev_get_drvdata(dev); + + return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier); +} + +static struct device_attribute ddr_perf_identifier_attr = + __ATTR(identifier, 0444, ddr_perf_identifier_show, NULL); + +static struct attribute *ddr_perf_identifier_attrs[] = { + &ddr_perf_identifier_attr.attr, + NULL, +}; + +static struct attribute_group ddr_perf_identifier_attr_group = { + .attrs = ddr_perf_identifier_attrs, +}; + +static ssize_t ddr_perf_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ddr_pmu *pmu = dev_get_drvdata(dev); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu)); +} + +static struct device_attribute ddr_perf_cpumask_attr = + __ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL); + +static struct attribute *ddr_perf_cpumask_attrs[] = { + &ddr_perf_cpumask_attr.attr, + NULL, +}; + +static const struct attribute_group ddr_perf_cpumask_attr_group = { + .attrs = ddr_perf_cpumask_attrs, +}; + +static ssize_t ddr_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id); +} + +#define IMX9_DDR_PMU_EVENT_ATTR(_name, _id) \ + (&((struct perf_pmu_events_attr[]) { \ + { .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\ + .id = _id, } \ + })[0].attr.attr) + +static struct attribute *ddr_perf_events_attrs[] = { + /* counter0 cycles event */ + IMX9_DDR_PMU_EVENT_ATTR(cycles, 0), + + /* reference events for all normal counters, need assert DEBUG19[21] bit */ + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ddrc1_rmw_for_ecc, 12), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_rreorder, 13), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_wreorder, 14), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_0, 15), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_1, 16), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_2, 17), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_3, 18), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_4, 19), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_5, 22), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_6, 23), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_7, 24), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_8, 25), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_9, 26), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_10, 27), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_11, 28), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_12, 31), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_13, 59), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_15, 61), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_29, 63), + + /* counter1 specific events */ + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_0, 64), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_1, 65), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_2, 66), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_3, 67), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_4, 68), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_5, 69), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_6, 70), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_7, 71), + + /* counter2 specific events */ + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_0, 64), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_1, 65), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_2, 66), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_3, 67), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_4, 68), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_5, 69), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_6, 70), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_7, 71), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_empty, 72), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_trans_filt, 73), + + /* counter3 specific events */ + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_0, 64), 
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_1, 65), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_2, 66), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_3, 67), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_4, 68), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_5, 69), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_6, 70), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_7, 71), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_full, 72), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_trans_filt, 73), + + /* counter4 specific events */ + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_0, 64), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_1, 65), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_2, 66), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_3, 67), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_4, 68), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_5, 69), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_6, 70), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_7, 71), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2_rmw, 72), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt, 73), + + /* counter5 specific events */ + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_0, 64), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_1, 65), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_2, 66), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_3, 67), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_4, 68), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_5, 69), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_6, 70), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_7, 71), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq1, 72), + + /* counter6 specific events */ + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_end_0, 64), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2, 72), + + /* counter7 specific events */ + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_2_full, 64), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq0, 65), + + /* counter8 specific events */ + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_bias_switched, 64), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_4_full, 65), + + /* counter9 specific events */ + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq1, 65), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_3_4_full, 66), + + /* counter10 specific events */ + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_misc_mrk, 65), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq0, 66), + NULL, +}; + +static const struct attribute_group ddr_perf_events_attr_group = { + .name = "events", + .attrs = ddr_perf_events_attrs, +}; + +PMU_FORMAT_ATTR(event, "config:0-7"); +PMU_FORMAT_ATTR(counter, "config:8-15"); +PMU_FORMAT_ATTR(axi_id, "config1:0-17"); +PMU_FORMAT_ATTR(axi_mask, "config2:0-17"); + +static struct attribute *ddr_perf_format_attrs[] = { + &format_attr_event.attr, + &format_attr_counter.attr, + &format_attr_axi_id.attr, + &format_attr_axi_mask.attr, + NULL, +}; + +static const struct attribute_group ddr_perf_format_attr_group = { + .name = "format", + .attrs = ddr_perf_format_attrs, +}; + +static const struct attribute_group *attr_groups[] = { + &ddr_perf_identifier_attr_group, + &ddr_perf_cpumask_attr_group, + &ddr_perf_events_attr_group, + &ddr_perf_format_attr_group, + NULL, +}; + +static void ddr_perf_clear_counter(struct ddr_pmu *pmu, int counter) +{ + if (counter == CYCLES_COUNTER) { + writel(0, pmu->base + PMC(counter) + 0x4); + writel(0, pmu->base + PMC(counter)); + } else { + writel(0, pmu->base + PMC(counter)); + } +} + +static u64 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter) +{ + u32 val_lower, val_upper; + u64 val; + + if (counter != CYCLES_COUNTER) { + val = readl_relaxed(pmu->base + PMC(counter)); + goto out; 
+ } + + /* special handling for reading 64bit cycle counter */ + do { + val_upper = readl_relaxed(pmu->base + PMC(counter) + 0x4); + val_lower = readl_relaxed(pmu->base + PMC(counter)); + } while (val_upper != readl_relaxed(pmu->base + PMC(counter) + 0x4)); + + val = val_upper; + val = (val << 32); + val |= val_lower; +out: + return val; +} + +static void ddr_perf_counter_global_config(struct ddr_pmu *pmu, bool enable) +{ + u32 ctrl; + + ctrl = readl_relaxed(pmu->base + PMGC0); + + if (enable) { + /* + * The performance monitor must be reset before event counting + * sequences. The performance monitor can be reset by first freezing + * one or more counters and then clearing the freeze condition to + * allow the counters to count according to the settings in the + * performance monitor registers. Counters can be frozen individually + * by setting PMLCAn[FC] bits, or simultaneously by setting PMGC0[FAC]. + * Simply clearing these freeze bits will then allow the performance + * monitor to begin counting based on the register settings. + */ + ctrl |= PMGC0_FAC; + writel(ctrl, pmu->base + PMGC0); + + /* + * Freeze all counters disabled, interrupt enabled, and freeze + * counters on condition enabled. + */ + ctrl &= ~PMGC0_FAC; + ctrl |= PMGC0_PMIE | PMGC0_FCECE; + writel(ctrl, pmu->base + PMGC0); + } else { + ctrl |= PMGC0_FAC; + ctrl &= ~(PMGC0_PMIE | PMGC0_FCECE); + writel(ctrl, pmu->base + PMGC0); + } +} + +static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config, + int counter, bool enable) +{ + u32 ctrl_a; + + ctrl_a = readl_relaxed(pmu->base + PMLCA(counter)); + + if (enable) { + ctrl_a |= PMLCA_FC; + writel(ctrl_a, pmu->base + PMLCA(counter)); + + ddr_perf_clear_counter(pmu, counter); + + /* Freeze counter disabled, condition enabled, and program event.*/ + ctrl_a &= ~PMLCA_FC; + ctrl_a |= PMLCA_CE; + ctrl_a &= ~FIELD_PREP(PMLCA_EVENT, 0x7F); + ctrl_a |= FIELD_PREP(PMLCA_EVENT, (config & 0x000000FF)); + writel(ctrl_a, pmu->base + PMLCA(counter)); + } else { + /* Freeze counter. 
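+	 * Only the freeze bit is set here; the event selection in
+	 * PMLCA_EVENT is left untouched.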
*/ + ctrl_a |= PMLCA_FC; + writel(ctrl_a, pmu->base + PMLCA(counter)); + } +} + +static void ddr_perf_monitor_config(struct ddr_pmu *pmu, int cfg, int cfg1, int cfg2) +{ + u32 pmcfg1, pmcfg2; + int event, counter; + + event = cfg & 0x000000FF; + counter = (cfg & 0x0000FF00) >> 8; + + pmcfg1 = readl_relaxed(pmu->base + PMCFG1); + + if (counter == 2 && event == 73) + pmcfg1 |= PMCFG1_RD_TRANS_FILT_EN; + else if (counter == 2 && event != 73) + pmcfg1 &= ~PMCFG1_RD_TRANS_FILT_EN; + + if (counter == 3 && event == 73) + pmcfg1 |= PMCFG1_WR_TRANS_FILT_EN; + else if (counter == 3 && event != 73) + pmcfg1 &= ~PMCFG1_WR_TRANS_FILT_EN; + + if (counter == 4 && event == 73) + pmcfg1 |= PMCFG1_RD_BT_FILT_EN; + else if (counter == 4 && event != 73) + pmcfg1 &= ~PMCFG1_RD_BT_FILT_EN; + + pmcfg1 &= ~FIELD_PREP(PMCFG1_ID_MASK, 0x3FFFF); + pmcfg1 |= FIELD_PREP(PMCFG1_ID_MASK, cfg2); + writel(pmcfg1, pmu->base + PMCFG1); + + pmcfg2 = readl_relaxed(pmu->base + PMCFG2); + pmcfg2 &= ~FIELD_PREP(PMCFG2_ID, 0x3FFFF); + pmcfg2 |= FIELD_PREP(PMCFG2_ID, cfg1); + writel(pmcfg2, pmu->base + PMCFG2); +} + +static void ddr_perf_event_update(struct perf_event *event) +{ + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + u64 new_raw_count; + + new_raw_count = ddr_perf_read_counter(pmu, counter); + local64_add(new_raw_count, &event->count); + + /* clear counter's value every time */ + ddr_perf_clear_counter(pmu, counter); +} + +static int ddr_perf_event_init(struct perf_event *event) +{ + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + struct perf_event *sibling; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + if (event->cpu < 0) { + dev_warn(pmu->dev, "Can't provide per-task data!\n"); + return -EOPNOTSUPP; + } + + /* + * We must NOT create groups containing mixed PMUs, although software + * events are acceptable (for example to create a CCN group + * periodically read when a hrtimer aka cpu-clock leader triggers). 
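+	 * For instance, pairing with a software leader (PMU instance name
+	 * hypothetical): perf stat -a -e '{cpu-clock,imx9_ddr0/cycles/}'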
+ */ + if (event->group_leader->pmu != event->pmu && + !is_software_event(event->group_leader)) + return -EINVAL; + + for_each_sibling_event(sibling, event->group_leader) { + if (sibling->pmu != event->pmu && + !is_software_event(sibling)) + return -EINVAL; + } + + event->cpu = pmu->cpu; + hwc->idx = -1; + + return 0; +} + +static void ddr_perf_event_start(struct perf_event *event, int flags) +{ + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + + local64_set(&hwc->prev_count, 0); + + ddr_perf_counter_local_config(pmu, event->attr.config, counter, true); + hwc->state = 0; +} + +static int ddr_perf_event_add(struct perf_event *event, int flags) +{ + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int cfg = event->attr.config; + int cfg1 = event->attr.config1; + int cfg2 = event->attr.config2; + int counter; + + counter = (cfg & 0x0000FF00) >> 8; + + pmu->events[counter] = event; + pmu->active_events++; + hwc->idx = counter; + hwc->state |= PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + ddr_perf_event_start(event, flags); + + /* read trans, write trans, read beat */ + ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2); + + return 0; +} + +static void ddr_perf_event_stop(struct perf_event *event, int flags) +{ + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + + ddr_perf_counter_local_config(pmu, event->attr.config, counter, false); + ddr_perf_event_update(event); + + hwc->state |= PERF_HES_STOPPED; +} + +static void ddr_perf_event_del(struct perf_event *event, int flags) +{ + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + ddr_perf_event_stop(event, PERF_EF_UPDATE); + + pmu->active_events--; + hwc->idx = -1; +} + +static void ddr_perf_pmu_enable(struct pmu *pmu) +{ + struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu); + + ddr_perf_counter_global_config(ddr_pmu, true); +} + +static void ddr_perf_pmu_disable(struct pmu *pmu) +{ + struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu); + + ddr_perf_counter_global_config(ddr_pmu, false); +} + +static void ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base, + struct device *dev) +{ + *pmu = (struct ddr_pmu) { + .pmu = (struct pmu) { + .module = THIS_MODULE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .task_ctx_nr = perf_invalid_context, + .attr_groups = attr_groups, + .event_init = ddr_perf_event_init, + .add = ddr_perf_event_add, + .del = ddr_perf_event_del, + .start = ddr_perf_event_start, + .stop = ddr_perf_event_stop, + .read = ddr_perf_event_update, + .pmu_enable = ddr_perf_pmu_enable, + .pmu_disable = ddr_perf_pmu_disable, + }, + .base = base, + .dev = dev, + }; +} + +static irqreturn_t ddr_perf_irq_handler(int irq, void *p) +{ + struct ddr_pmu *pmu = (struct ddr_pmu *)p; + struct perf_event *event; + int i; + + /* + * Counters can generate an interrupt on an overflow when the MSB of a + * counter changes from 0 to 1. For the interrupt to be signalled, + * the conditions below must be satisfied: + * PMGC0[PMIE] = 1, PMGC0[FCECE] = 1, PMLCAn[CE] = 1 + * When an interrupt is signalled, PMGC0[FAC] is set by hardware and + * all of the registers are frozen. + * Software can clear the interrupt condition by resetting the performance + * monitor and clearing the most significant bit of the counter that + * generated the overflow.
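+	 * (Concretely: ddr_perf_event_update() zeroes each active counter,
+	 * and ddr_perf_counter_global_config(pmu, true) clears the
+	 * hardware-set PMGC0[FAC] and re-enables PMIE and FCECE.)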
+ */ + for (i = 0; i < NUM_COUNTERS; i++) { + if (!pmu->events[i]) + continue; + + event = pmu->events[i]; + + ddr_perf_event_update(event); + } + + ddr_perf_counter_global_config(pmu, true); + + return IRQ_HANDLED; +} + +static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node); + int target; + + if (cpu != pmu->cpu) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&pmu->pmu, cpu, target); + pmu->cpu = target; + + WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu))); + + return 0; +} + +static int ddr_perf_probe(struct platform_device *pdev) +{ + struct ddr_pmu *pmu; + void __iomem *base; + int ret, irq; + char *name; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL); + if (!pmu) + return -ENOMEM; + + ddr_perf_init(pmu, base, &pdev->dev); + + pmu->devtype_data = of_device_get_match_data(&pdev->dev); + + platform_set_drvdata(pdev, pmu); + + pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL); + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", pmu->id); + if (!name) { + ret = -ENOMEM; + goto format_string_err; + } + + pmu->cpu = raw_smp_processor_id(); + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DDR_CPUHP_CB_NAME, + NULL, ddr_perf_offline_cpu); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to add callbacks for multi state\n"); + goto cpuhp_state_err; + } + pmu->cpuhp_state = ret; + + /* Register the pmu instance for cpu hotplug */ + ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + goto cpuhp_instance_err; + } + + /* Request irq */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto ddr_perf_err; + } + + ret = devm_request_irq(&pdev->dev, irq, ddr_perf_irq_handler, + IRQF_NOBALANCING | IRQF_NO_THREAD, + DDR_CPUHP_CB_NAME, pmu); + if (ret < 0) { + dev_err(&pdev->dev, "Request irq failed: %d", ret); + goto ddr_perf_err; + } + + pmu->irq = irq; + ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)); + if (ret) { + dev_err(pmu->dev, "Failed to set interrupt affinity\n"); + goto ddr_perf_err; + } + + ret = perf_pmu_register(&pmu->pmu, name, -1); + if (ret) + goto ddr_perf_err; + + return 0; + +ddr_perf_err: + cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); +cpuhp_instance_err: + cpuhp_remove_multi_state(pmu->cpuhp_state); +cpuhp_state_err: +format_string_err: + ida_simple_remove(&ddr_ida, pmu->id); + dev_warn(&pdev->dev, "i.MX9 DDR Perf PMU failed (%d), disabled\n", ret); + return ret; +} + +static int ddr_perf_remove(struct platform_device *pdev) +{ + struct ddr_pmu *pmu = platform_get_drvdata(pdev); + + cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); + cpuhp_remove_multi_state(pmu->cpuhp_state); + + perf_pmu_unregister(&pmu->pmu); + + ida_simple_remove(&ddr_ida, pmu->id); + + return 0; +} + +static struct platform_driver imx_ddr_pmu_driver = { + .driver = { + .name = "imx9-ddr-pmu", + .of_match_table = imx_ddr_pmu_dt_ids, + .suppress_bind_attrs = true, + }, + .probe = ddr_perf_probe, + .remove = ddr_perf_remove, +}; +module_platform_driver(imx_ddr_pmu_driver); + +MODULE_AUTHOR("Xu Yang <xu.yang_2@nxp.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DDRC PerfMon for i.MX9 SoCs"); diff --git 
a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile index 4d2c9abe3372..48dcc8381ea7 100644 --- a/drivers/perf/hisilicon/Makefile +++ b/drivers/perf/hisilicon/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \ hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \ - hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o + hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o hisi_uncore_uc_pmu.o obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c index 6fee0b6e163b..e10fc7cb9493 100644 --- a/drivers/perf/hisilicon/hisi_pcie_pmu.c +++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c @@ -683,7 +683,7 @@ static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) pcie_pmu->on_cpu = -1; /* Choose a new CPU from all online cpus. */ - target = cpumask_first(cpu_online_mask); + target = cpumask_any_but(cpu_online_mask, cpu); if (target >= nr_cpu_ids) { pci_err(pcie_pmu->pdev, "There is no CPU to set\n"); return 0; diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c index 71b6687d6696..d941e746b424 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c @@ -22,9 +22,15 @@ #define PA_TT_CTRL 0x1c08 #define PA_TGTID_CTRL 0x1c14 #define PA_SRCID_CTRL 0x1c18 + +/* H32 PA interrupt registers */ #define PA_INT_MASK 0x1c70 #define PA_INT_STATUS 0x1c78 #define PA_INT_CLEAR 0x1c7c + +#define H60PA_INT_STATUS 0x1c70 +#define H60PA_INT_MASK 0x1c74 + #define PA_EVENT_TYPE0 0x1c80 #define PA_PMU_VERSION 0x1cf0 #define PA_EVENT_CNT0_L 0x1d00 @@ -46,6 +52,12 @@ HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22); HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33); HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44); +struct hisi_pa_pmu_int_regs { + u32 mask_offset; + u32 clear_offset; + u32 status_offset; +}; + static void hisi_pa_pmu_enable_tracetag(struct perf_event *event) { struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu); @@ -219,40 +231,40 @@ static void hisi_pa_pmu_disable_counter(struct hisi_pmu *pa_pmu, static void hisi_pa_pmu_enable_counter_int(struct hisi_pmu *pa_pmu, struct hw_perf_event *hwc) { + struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private; u32 val; /* Write 0 to enable interrupt */ - val = readl(pa_pmu->base + PA_INT_MASK); + val = readl(pa_pmu->base + regs->mask_offset); val &= ~(1 << hwc->idx); - writel(val, pa_pmu->base + PA_INT_MASK); + writel(val, pa_pmu->base + regs->mask_offset); } static void hisi_pa_pmu_disable_counter_int(struct hisi_pmu *pa_pmu, struct hw_perf_event *hwc) { + struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private; u32 val; /* Write 1 to mask interrupt */ - val = readl(pa_pmu->base + PA_INT_MASK); + val = readl(pa_pmu->base + regs->mask_offset); val |= 1 << hwc->idx; - writel(val, pa_pmu->base + PA_INT_MASK); + writel(val, pa_pmu->base + regs->mask_offset); } static u32 hisi_pa_pmu_get_int_status(struct hisi_pmu *pa_pmu) { - return readl(pa_pmu->base + PA_INT_STATUS); + struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private; + + return readl(pa_pmu->base + regs->status_offset); } static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx) { - writel(1 << idx, pa_pmu->base + PA_INT_CLEAR); -} + struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private; -static const struct 
acpi_device_id hisi_pa_pmu_acpi_match[] = { - { "HISI0273", }, - {} -}; -MODULE_DEVICE_TABLE(acpi, hisi_pa_pmu_acpi_match); + writel(1 << idx, pa_pmu->base + regs->clear_offset); +} static int hisi_pa_pmu_init_data(struct platform_device *pdev, struct hisi_pmu *pa_pmu) @@ -276,6 +288,10 @@ static int hisi_pa_pmu_init_data(struct platform_device *pdev, pa_pmu->ccl_id = -1; pa_pmu->sccl_id = -1; + pa_pmu->dev_info = device_get_match_data(&pdev->dev); + if (!pa_pmu->dev_info) + return -ENODEV; + pa_pmu->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pa_pmu->base)) { dev_err(&pdev->dev, "ioremap failed for pa_pmu resource.\n"); @@ -314,6 +330,32 @@ static const struct attribute_group hisi_pa_pmu_v2_events_group = { .attrs = hisi_pa_pmu_v2_events_attr, }; +static struct attribute *hisi_pa_pmu_v3_events_attr[] = { + HISI_PMU_EVENT_ATTR(tx_req, 0x0), + HISI_PMU_EVENT_ATTR(tx_dat, 0x1), + HISI_PMU_EVENT_ATTR(tx_snp, 0x2), + HISI_PMU_EVENT_ATTR(rx_req, 0x7), + HISI_PMU_EVENT_ATTR(rx_dat, 0x8), + HISI_PMU_EVENT_ATTR(rx_snp, 0x9), + NULL +}; + +static const struct attribute_group hisi_pa_pmu_v3_events_group = { + .name = "events", + .attrs = hisi_pa_pmu_v3_events_attr, +}; + +static struct attribute *hisi_h60pa_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(rx_flit, 0x50), + HISI_PMU_EVENT_ATTR(tx_flit, 0x65), + NULL +}; + +static const struct attribute_group hisi_h60pa_pmu_events_group = { + .name = "events", + .attrs = hisi_h60pa_pmu_events_attr, +}; + static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); static struct attribute *hisi_pa_pmu_cpumask_attrs[] = { @@ -337,6 +379,12 @@ static const struct attribute_group hisi_pa_pmu_identifier_group = { .attrs = hisi_pa_pmu_identifier_attrs, }; +static struct hisi_pa_pmu_int_regs hisi_pa_pmu_regs = { + .mask_offset = PA_INT_MASK, + .clear_offset = PA_INT_CLEAR, + .status_offset = PA_INT_STATUS, +}; + static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = { &hisi_pa_pmu_v2_format_group, &hisi_pa_pmu_v2_events_group, @@ -345,6 +393,46 @@ static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = { NULL }; +static const struct hisi_pmu_dev_info hisi_h32pa_v2 = { + .name = "pa", + .attr_groups = hisi_pa_pmu_v2_attr_groups, + .private = &hisi_pa_pmu_regs, +}; + +static const struct attribute_group *hisi_pa_pmu_v3_attr_groups[] = { + &hisi_pa_pmu_v2_format_group, + &hisi_pa_pmu_v3_events_group, + &hisi_pa_pmu_cpumask_attr_group, + &hisi_pa_pmu_identifier_group, + NULL +}; + +static const struct hisi_pmu_dev_info hisi_h32pa_v3 = { + .name = "pa", + .attr_groups = hisi_pa_pmu_v3_attr_groups, + .private = &hisi_pa_pmu_regs, +}; + +static struct hisi_pa_pmu_int_regs hisi_h60pa_pmu_regs = { + .mask_offset = H60PA_INT_MASK, + .clear_offset = H60PA_INT_STATUS, /* Clear on write */ + .status_offset = H60PA_INT_STATUS, +}; + +static const struct attribute_group *hisi_h60pa_pmu_attr_groups[] = { + &hisi_pa_pmu_v2_format_group, + &hisi_h60pa_pmu_events_group, + &hisi_pa_pmu_cpumask_attr_group, + &hisi_pa_pmu_identifier_group, + NULL +}; + +static const struct hisi_pmu_dev_info hisi_h60pa = { + .name = "h60pa", + .attr_groups = hisi_h60pa_pmu_attr_groups, + .private = &hisi_h60pa_pmu_regs, +}; + static const struct hisi_uncore_ops hisi_uncore_pa_ops = { .write_evtype = hisi_pa_pmu_write_evtype, .get_event_idx = hisi_uncore_pmu_get_event_idx, @@ -375,7 +463,7 @@ static int hisi_pa_pmu_dev_probe(struct platform_device *pdev, if (ret) return ret; - pa_pmu->pmu_events.attr_groups = hisi_pa_pmu_v2_attr_groups; + 
pa_pmu->pmu_events.attr_groups = pa_pmu->dev_info->attr_groups; pa_pmu->num_counters = PA_NR_COUNTERS; pa_pmu->ops = &hisi_uncore_pa_ops; pa_pmu->check_event = 0xB0; @@ -400,8 +488,9 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev) if (ret) return ret; - name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%u_pa%u", - pa_pmu->sicl_id, pa_pmu->index_id); + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_%s%u", + pa_pmu->sicl_id, pa_pmu->dev_info->name, + pa_pmu->index_id); if (!name) return -ENOMEM; @@ -435,6 +524,14 @@ static int hisi_pa_pmu_remove(struct platform_device *pdev) return 0; } +static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = { + { "HISI0273", (kernel_ulong_t)&hisi_h32pa_v2 }, + { "HISI0275", (kernel_ulong_t)&hisi_h32pa_v3 }, + { "HISI0274", (kernel_ulong_t)&hisi_h60pa }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_pa_pmu_acpi_match); + static struct platform_driver hisi_pa_pmu_driver = { .driver = { .name = "hisi_pa_pmu", diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 2823f381930d..04031450d5fe 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -20,7 +20,6 @@ #include "hisi_uncore_pmu.h" -#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff) #define HISI_MAX_PERIOD(nr) (GENMASK_ULL((nr) - 1, 0)) /* @@ -226,6 +225,9 @@ int hisi_uncore_pmu_event_init(struct perf_event *event) hwc->idx = -1; hwc->config_base = event->attr.config; + if (hisi_pmu->ops->check_filter && hisi_pmu->ops->check_filter(event)) + return -EINVAL; + /* Enforce to use the same CPU for all events in this PMU */ event->cpu = hisi_pmu->on_cpu; diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h index 07890a8e96ca..92402aa69d70 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.h +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h @@ -43,9 +43,15 @@ return FIELD_GET(GENMASK_ULL(hi, lo), event->attr.config); \ } +#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff) + +#define HISI_PMU_EVTYPE_BITS 8 +#define HISI_PMU_EVTYPE_SHIFT(idx) ((idx) % 4 * HISI_PMU_EVTYPE_BITS) + struct hisi_pmu; struct hisi_uncore_ops { + int (*check_filter)(struct perf_event *event); void (*write_evtype)(struct hisi_pmu *, int, u32); int (*get_event_idx)(struct perf_event *); u64 (*read_counter)(struct hisi_pmu *, struct hw_perf_event *); @@ -62,6 +68,13 @@ struct hisi_uncore_ops { void (*disable_filter)(struct perf_event *event); }; +/* Describes the HISI PMU chip features information */ +struct hisi_pmu_dev_info { + const char *name; + const struct attribute_group **attr_groups; + void *private; +}; + struct hisi_pmu_hwevents { struct perf_event *hw_events[HISI_MAX_COUNTERS]; DECLARE_BITMAP(used_mask, HISI_MAX_COUNTERS); @@ -72,6 +85,7 @@ struct hisi_pmu_hwevents { struct hisi_pmu { struct pmu pmu; const struct hisi_uncore_ops *ops; + const struct hisi_pmu_dev_info *dev_info; struct hisi_pmu_hwevents pmu_events; /* associated_cpus: All CPUs associated with the PMU */ cpumask_t associated_cpus; diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c new file mode 100644 index 000000000000..63da05e5831c --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC UC (unified cache) uncore Hardware event counters support + * + * Copyright (C) 2023 HiSilicon Limited + * + * This code is based on the uncore PMUs like 
hisi_uncore_l3c_pmu. + */ +#include <linux/cpuhotplug.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/mod_devicetable.h> +#include <linux/property.h> + +#include "hisi_uncore_pmu.h" + +/* Dynamic CPU hotplug state used by UC PMU */ +static enum cpuhp_state hisi_uc_pmu_online; + +/* UC register definition */ +#define HISI_UC_INT_MASK_REG 0x0800 +#define HISI_UC_INT_STS_REG 0x0808 +#define HISI_UC_INT_CLEAR_REG 0x080c +#define HISI_UC_TRACETAG_CTRL_REG 0x1b2c +#define HISI_UC_TRACETAG_REQ_MSK GENMASK(9, 7) +#define HISI_UC_TRACETAG_MARK_EN BIT(0) +#define HISI_UC_TRACETAG_REQ_EN (HISI_UC_TRACETAG_MARK_EN | BIT(2)) +#define HISI_UC_TRACETAG_SRCID_EN BIT(3) +#define HISI_UC_SRCID_CTRL_REG 0x1b40 +#define HISI_UC_SRCID_MSK GENMASK(14, 1) +#define HISI_UC_EVENT_CTRL_REG 0x1c00 +#define HISI_UC_EVENT_TRACETAG_EN BIT(29) +#define HISI_UC_EVENT_URING_MSK GENMASK(28, 27) +#define HISI_UC_EVENT_GLB_EN BIT(26) +#define HISI_UC_VERSION_REG 0x1cf0 +#define HISI_UC_EVTYPE_REGn(n) (0x1d00 + (n) * 4) +#define HISI_UC_EVTYPE_MASK GENMASK(7, 0) +#define HISI_UC_CNTR_REGn(n) (0x1e00 + (n) * 8) + +#define HISI_UC_NR_COUNTERS 0x8 +#define HISI_UC_V2_NR_EVENTS 0xFF +#define HISI_UC_CNTR_REG_BITS 64 + +#define HISI_UC_RD_REQ_TRACETAG 0x4 +#define HISI_UC_URING_EVENT_MIN 0x47 +#define HISI_UC_URING_EVENT_MAX 0x59 + +HISI_PMU_EVENT_ATTR_EXTRACTOR(rd_req_en, config1, 0, 0); +HISI_PMU_EVENT_ATTR_EXTRACTOR(uring_channel, config1, 5, 4); +HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid, config1, 19, 6); +HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_en, config1, 20, 20); + +static int hisi_uc_pmu_check_filter(struct perf_event *event) +{ + struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu); + + if (hisi_get_srcid_en(event) && !hisi_get_rd_req_en(event)) { + dev_err(uc_pmu->dev, + "srcid_en depends on rd_req_en being enabled!\n"); + return -EINVAL; + } + + if (!hisi_get_uring_channel(event)) + return 0; + + if ((HISI_GET_EVENTID(event) < HISI_UC_URING_EVENT_MIN) || + (HISI_GET_EVENTID(event) > HISI_UC_URING_EVENT_MAX)) + dev_warn(uc_pmu->dev, + "Only events: [%#x ~ %#x] support channel filtering!", + HISI_UC_URING_EVENT_MIN, HISI_UC_URING_EVENT_MAX); + + return 0; +} + +static void hisi_uc_pmu_config_req_tracetag(struct perf_event *event) +{ + struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu); + u32 val; + + if (!hisi_get_rd_req_en(event)) + return; + + val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG); + + /* The request-type has been configured */ + if (FIELD_GET(HISI_UC_TRACETAG_REQ_MSK, val) == HISI_UC_RD_REQ_TRACETAG) + return; + + /* Set request-type for tracetag, only read request is supported!
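+	 * (HISI_UC_RD_REQ_TRACETAG is placed in the 3-bit
+	 * HISI_UC_TRACETAG_REQ_MSK field, bits [9:7], alongside the
+	 * mark/request enables in HISI_UC_TRACETAG_REQ_EN.)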
*/ + val &= ~HISI_UC_TRACETAG_REQ_MSK; + val |= FIELD_PREP(HISI_UC_TRACETAG_REQ_MSK, HISI_UC_RD_REQ_TRACETAG); + val |= HISI_UC_TRACETAG_REQ_EN; + writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG); +} + +static void hisi_uc_pmu_clear_req_tracetag(struct perf_event *event) +{ + struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu); + u32 val; + + if (!hisi_get_rd_req_en(event)) + return; + + val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG); + + /* Do nothing, the request-type tracetag has been cleaned up */ + if (FIELD_GET(HISI_UC_TRACETAG_REQ_MSK, val) == 0) + return; + + /* Clear request-type */ + val &= ~HISI_UC_TRACETAG_REQ_MSK; + val &= ~HISI_UC_TRACETAG_REQ_EN; + writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG); +} + +static void hisi_uc_pmu_config_srcid_tracetag(struct perf_event *event) +{ + struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu); + u32 val; + + if (!hisi_get_srcid_en(event)) + return; + + val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG); + + /* Do nothing, the source id has been configured */ + if (FIELD_GET(HISI_UC_TRACETAG_SRCID_EN, val)) + return; + + /* Enable source id tracetag */ + val |= HISI_UC_TRACETAG_SRCID_EN; + writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG); + + val = readl(uc_pmu->base + HISI_UC_SRCID_CTRL_REG); + val &= ~HISI_UC_SRCID_MSK; + val |= FIELD_PREP(HISI_UC_SRCID_MSK, hisi_get_srcid(event)); + writel(val, uc_pmu->base + HISI_UC_SRCID_CTRL_REG); + + /* Depend on request-type tracetag enabled */ + hisi_uc_pmu_config_req_tracetag(event); +} + +static void hisi_uc_pmu_clear_srcid_tracetag(struct perf_event *event) +{ + struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu); + u32 val; + + if (!hisi_get_srcid_en(event)) + return; + + val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG); + + /* Do nothing, the source id has been cleaned up */ + if (FIELD_GET(HISI_UC_TRACETAG_SRCID_EN, val) == 0) + return; + + hisi_uc_pmu_clear_req_tracetag(event); + + /* Disable source id tracetag */ + val &= ~HISI_UC_TRACETAG_SRCID_EN; + writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG); + + val = readl(uc_pmu->base + HISI_UC_SRCID_CTRL_REG); + val &= ~HISI_UC_SRCID_MSK; + writel(val, uc_pmu->base + HISI_UC_SRCID_CTRL_REG); +} + +static void hisi_uc_pmu_config_uring_channel(struct perf_event *event) +{ + struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu); + u32 uring_channel = hisi_get_uring_channel(event); + u32 val; + + /* Do nothing if not being set or is set explicitly to zero (default) */ + if (uring_channel == 0) + return; + + val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG); + + /* Do nothing, the uring_channel has been configured */ + if (uring_channel == FIELD_GET(HISI_UC_EVENT_URING_MSK, val)) + return; + + val &= ~HISI_UC_EVENT_URING_MSK; + val |= FIELD_PREP(HISI_UC_EVENT_URING_MSK, uring_channel); + writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG); +} + +static void hisi_uc_pmu_clear_uring_channel(struct perf_event *event) +{ + struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu); + u32 val; + + /* Do nothing if not being set or is set explicitly to zero (default) */ + if (hisi_get_uring_channel(event) == 0) + return; + + val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG); + + /* Do nothing, the uring_channel has been cleaned up */ + if (FIELD_GET(HISI_UC_EVENT_URING_MSK, val) == 0) + return; + + val &= ~HISI_UC_EVENT_URING_MSK; + writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG); +} + +static void hisi_uc_pmu_enable_filter(struct perf_event *event) +{ + if (event->attr.config1 == 0) + return; + + 
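/*
 * Enable order: uring channel, then request tracetag, then srcid;
 * hisi_uc_pmu_disable_filter() below tears them down in reverse.
 */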
hisi_uc_pmu_config_uring_channel(event); + hisi_uc_pmu_config_req_tracetag(event); + hisi_uc_pmu_config_srcid_tracetag(event); +} + +static void hisi_uc_pmu_disable_filter(struct perf_event *event) +{ + if (event->attr.config1 == 0) + return; + + hisi_uc_pmu_clear_srcid_tracetag(event); + hisi_uc_pmu_clear_req_tracetag(event); + hisi_uc_pmu_clear_uring_channel(event); +} + +static void hisi_uc_pmu_write_evtype(struct hisi_pmu *uc_pmu, int idx, u32 type) +{ + u32 val; + + /* + * Select the appropriate event select register. + * There are 2 32-bit event select registers for the + * 8 hardware counters, each event code is 8-bit wide. + */ + val = readl(uc_pmu->base + HISI_UC_EVTYPE_REGn(idx / 4)); + val &= ~(HISI_UC_EVTYPE_MASK << HISI_PMU_EVTYPE_SHIFT(idx)); + val |= (type << HISI_PMU_EVTYPE_SHIFT(idx)); + writel(val, uc_pmu->base + HISI_UC_EVTYPE_REGn(idx / 4)); +} + +static void hisi_uc_pmu_start_counters(struct hisi_pmu *uc_pmu) +{ + u32 val; + + val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG); + val |= HISI_UC_EVENT_GLB_EN; + writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG); +} + +static void hisi_uc_pmu_stop_counters(struct hisi_pmu *uc_pmu) +{ + u32 val; + + val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG); + val &= ~HISI_UC_EVENT_GLB_EN; + writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG); +} + +static void hisi_uc_pmu_enable_counter(struct hisi_pmu *uc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index */ + val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG); + val |= (1 << hwc->idx); + writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG); +} + +static void hisi_uc_pmu_disable_counter(struct hisi_pmu *uc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index */ + val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG); + val &= ~(1 << hwc->idx); + writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG); +} + +static u64 hisi_uc_pmu_read_counter(struct hisi_pmu *uc_pmu, + struct hw_perf_event *hwc) +{ + return readq(uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx)); +} + +static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu, + struct hw_perf_event *hwc, u64 val) +{ + writeq(val, uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx)); +} + +static void hisi_uc_pmu_enable_counter_int(struct hisi_pmu *uc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(uc_pmu->base + HISI_UC_INT_MASK_REG); + val &= ~(1 << hwc->idx); + writel(val, uc_pmu->base + HISI_UC_INT_MASK_REG); +} + +static void hisi_uc_pmu_disable_counter_int(struct hisi_pmu *uc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(uc_pmu->base + HISI_UC_INT_MASK_REG); + val |= (1 << hwc->idx); + writel(val, uc_pmu->base + HISI_UC_INT_MASK_REG); +} + +static u32 hisi_uc_pmu_get_int_status(struct hisi_pmu *uc_pmu) +{ + return readl(uc_pmu->base + HISI_UC_INT_STS_REG); +} + +static void hisi_uc_pmu_clear_int_status(struct hisi_pmu *uc_pmu, int idx) +{ + writel(1 << idx, uc_pmu->base + HISI_UC_INT_CLEAR_REG); +} + +static int hisi_uc_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *uc_pmu) +{ + /* + * Use SCCL (Super CPU Cluster) ID and CCL (CPU Cluster) ID to + * identify the topology information of UC PMU devices in the chip. + * They have some CCLs per SCCL and then 4 UC PMU per CCL. 
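+	 * These IDs feed the PMU name, hisi_sccl<S>_uc<C>_<U>; see the
+	 * devm_kasprintf() in hisi_uc_pmu_probe() below.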
+ */ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &uc_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read uc sccl-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id", + &uc_pmu->ccl_id)) { + dev_err(&pdev->dev, "Can not read uc ccl-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id", + &uc_pmu->sub_id)) { + dev_err(&pdev->dev, "Can not read sub-id!\n"); + return -EINVAL; + } + + uc_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(uc_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for uc_pmu resource\n"); + return PTR_ERR(uc_pmu->base); + } + + uc_pmu->identifier = readl(uc_pmu->base + HISI_UC_VERSION_REG); + + return 0; +} + +static struct attribute *hisi_uc_pmu_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + HISI_PMU_FORMAT_ATTR(rd_req_en, "config1:0-0"), + HISI_PMU_FORMAT_ATTR(uring_channel, "config1:4-5"), + HISI_PMU_FORMAT_ATTR(srcid, "config1:6-19"), + HISI_PMU_FORMAT_ATTR(srcid_en, "config1:20-20"), + NULL +}; + +static const struct attribute_group hisi_uc_pmu_format_group = { + .name = "format", + .attrs = hisi_uc_pmu_format_attr, +}; + +static struct attribute *hisi_uc_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(sq_time, 0x00), + HISI_PMU_EVENT_ATTR(pq_time, 0x01), + HISI_PMU_EVENT_ATTR(hbm_time, 0x02), + HISI_PMU_EVENT_ATTR(iq_comp_time_cring, 0x03), + HISI_PMU_EVENT_ATTR(iq_comp_time_uring, 0x05), + HISI_PMU_EVENT_ATTR(cpu_rd, 0x10), + HISI_PMU_EVENT_ATTR(cpu_rd64, 0x17), + HISI_PMU_EVENT_ATTR(cpu_rs64, 0x19), + HISI_PMU_EVENT_ATTR(cpu_mru, 0x1a), + HISI_PMU_EVENT_ATTR(cycles, 0x9c), + HISI_PMU_EVENT_ATTR(spipe_hit, 0xb3), + HISI_PMU_EVENT_ATTR(hpipe_hit, 0xdb), + HISI_PMU_EVENT_ATTR(cring_rxdat_cnt, 0xfa), + HISI_PMU_EVENT_ATTR(cring_txdat_cnt, 0xfb), + HISI_PMU_EVENT_ATTR(uring_rxdat_cnt, 0xfc), + HISI_PMU_EVENT_ATTR(uring_txdat_cnt, 0xfd), + NULL +}; + +static const struct attribute_group hisi_uc_pmu_events_group = { + .name = "events", + .attrs = hisi_uc_pmu_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_uc_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_uc_pmu_cpumask_attr_group = { + .attrs = hisi_uc_pmu_cpumask_attrs, +}; + +static struct device_attribute hisi_uc_pmu_identifier_attr = + __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL); + +static struct attribute *hisi_uc_pmu_identifier_attrs[] = { + &hisi_uc_pmu_identifier_attr.attr, + NULL +}; + +static const struct attribute_group hisi_uc_pmu_identifier_group = { + .attrs = hisi_uc_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_uc_pmu_attr_groups[] = { + &hisi_uc_pmu_format_group, + &hisi_uc_pmu_events_group, + &hisi_uc_pmu_cpumask_attr_group, + &hisi_uc_pmu_identifier_group, + NULL +}; + +static const struct hisi_uncore_ops hisi_uncore_uc_pmu_ops = { + .check_filter = hisi_uc_pmu_check_filter, + .write_evtype = hisi_uc_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_uc_pmu_start_counters, + .stop_counters = hisi_uc_pmu_stop_counters, + .enable_counter = hisi_uc_pmu_enable_counter, + .disable_counter = hisi_uc_pmu_disable_counter, + .enable_counter_int = hisi_uc_pmu_enable_counter_int, + .disable_counter_int = hisi_uc_pmu_disable_counter_int, + .write_counter = hisi_uc_pmu_write_counter, + .read_counter = hisi_uc_pmu_read_counter, + .get_int_status = 
hisi_uc_pmu_get_int_status, + .clear_int_status = hisi_uc_pmu_clear_int_status, + .enable_filter = hisi_uc_pmu_enable_filter, + .disable_filter = hisi_uc_pmu_disable_filter, +}; + +static int hisi_uc_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *uc_pmu) +{ + int ret; + + ret = hisi_uc_pmu_init_data(pdev, uc_pmu); + if (ret) + return ret; + + ret = hisi_uncore_pmu_init_irq(uc_pmu, pdev); + if (ret) + return ret; + + uc_pmu->pmu_events.attr_groups = hisi_uc_pmu_attr_groups; + uc_pmu->check_event = HISI_UC_EVTYPE_MASK; + uc_pmu->ops = &hisi_uncore_uc_pmu_ops; + uc_pmu->counter_bits = HISI_UC_CNTR_REG_BITS; + uc_pmu->num_counters = HISI_UC_NR_COUNTERS; + uc_pmu->dev = &pdev->dev; + uc_pmu->on_cpu = -1; + + return 0; +} + +static void hisi_uc_pmu_remove_cpuhp_instance(void *hotplug_node) +{ + cpuhp_state_remove_instance_nocalls(hisi_uc_pmu_online, hotplug_node); +} + +static void hisi_uc_pmu_unregister_pmu(void *pmu) +{ + perf_pmu_unregister(pmu); +} + +static int hisi_uc_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *uc_pmu; + char *name; + int ret; + + uc_pmu = devm_kzalloc(&pdev->dev, sizeof(*uc_pmu), GFP_KERNEL); + if (!uc_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, uc_pmu); + + ret = hisi_uc_pmu_dev_probe(pdev, uc_pmu); + if (ret) + return ret; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%u", + uc_pmu->sccl_id, uc_pmu->ccl_id, uc_pmu->sub_id); + if (!name) + return -ENOMEM; + + ret = cpuhp_state_add_instance(hisi_uc_pmu_online, &uc_pmu->node); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Error registering hotplug\n"); + + ret = devm_add_action_or_reset(&pdev->dev, + hisi_uc_pmu_remove_cpuhp_instance, + &uc_pmu->node); + if (ret) + return ret; + + hisi_pmu_init(uc_pmu, THIS_MODULE); + + ret = perf_pmu_register(&uc_pmu->pmu, name, -1); + if (ret) + return ret; + + return devm_add_action_or_reset(&pdev->dev, + hisi_uc_pmu_unregister_pmu, + &uc_pmu->pmu); +} + +static const struct acpi_device_id hisi_uc_pmu_acpi_match[] = { + { "HISI0291", }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_uc_pmu_acpi_match); + +static struct platform_driver hisi_uc_pmu_driver = { + .driver = { + .name = "hisi_uc_pmu", + .acpi_match_table = hisi_uc_pmu_acpi_match, + /* + * We have not worked out a safe bind/unbind process, + * Forcefully unbinding during sampling will lead to a + * kernel panic, so this is not supported yet. 
+ */ + .suppress_bind_attrs = true, + }, + .probe = hisi_uc_pmu_probe, +}; + +static int __init hisi_uc_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/hisi/uc:online", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret < 0) { + pr_err("UC PMU: Error setup hotplug, ret = %d\n", ret); + return ret; + } + hisi_uc_pmu_online = ret; + + ret = platform_driver_register(&hisi_uc_pmu_driver); + if (ret) + cpuhp_remove_multi_state(hisi_uc_pmu_online); + + return ret; +} +module_init(hisi_uc_pmu_module_init); + +static void __exit hisi_uc_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_uc_pmu_driver); + cpuhp_remove_multi_state(hisi_uc_pmu_online); +} +module_exit(hisi_uc_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC UC uncore PMU driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Junhao He <hejunhao3@huawei.com>"); diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index aaca6db7d8f6..3f9a98c17a89 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -857,7 +857,6 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data) return -ENOMEM; INIT_LIST_HEAD(&cluster->next); - list_add(&cluster->next, &l2cache_pmu->clusters); cluster->cluster_id = fw_cluster_id; irq = platform_get_irq(sdev, 0); @@ -883,6 +882,7 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data) spin_lock_init(&cluster->pmu_lock); + list_add(&cluster->next, &l2cache_pmu->clusters); l2cache_pmu->num_pmus++; return 0; diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index f279b360c20d..43d3530bab48 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -30,6 +30,7 @@ #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinmux.h> +#include <linux/suspend.h> #include "core.h" #include "pinctrl-utils.h" @@ -636,9 +637,8 @@ static bool do_amd_gpio_irq_handler(int irq, void *dev_id) regval = readl(regs + i); if (regval & PIN_IRQ_PENDING) - dev_dbg(&gpio_dev->pdev->dev, - "GPIO %d is active: 0x%x", - irqnr + i, regval); + pm_pr_dbg("GPIO %d is active: 0x%x", + irqnr + i, regval); /* caused wake on resume context for shared IRQ */ if (irq < 0 && (regval & BIT(WAKE_STS_OFF))) diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c index dbe698f33128..e29c51cbfd71 100644 --- a/drivers/platform/chrome/cros_ec_i2c.c +++ b/drivers/platform/chrome/cros_ec_i2c.c @@ -372,7 +372,7 @@ static struct i2c_driver cros_ec_driver = { .of_match_table = of_match_ptr(cros_ec_i2c_of_match), .pm = &cros_ec_i2c_pm_ops, }, - .probe_new = cros_ec_i2c_probe, + .probe = cros_ec_i2c_probe, .remove = cros_ec_i2c_remove, .id_table = cros_ec_i2c_id, }; diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index 68bba0fcafab..500a61b093e4 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -16,6 +16,7 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/interrupt.h> +#include <linux/kobject.h> #include <linux/module.h> #include <linux/platform_data/cros_ec_commands.h> #include <linux/platform_data/cros_ec_proto.h> @@ -315,6 +316,7 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset, static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data) { + static const char *env[] = { "ERROR=PANIC", NULL }; struct cros_ec_device *ec_dev = data; 
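/*
 * The hunk continuing below reports an EC panic to userspace as a
 * "change" uevent carrying an extra environment string, so a daemon can
 * flush state before the forced shutdown. A minimal sketch of that
 * pattern; the ERROR=PANIC pair mirrors the driver, while the device
 * and ex_notify_panic() itself are illustrative:
 */
#include <linux/device.h>
#include <linux/kobject.h>

static void ex_notify_panic(struct device *dev)
{
	/* NULL-terminated KEY=VALUE strings delivered with the uevent */
	static char *envp[] = { "ERROR=PANIC", NULL };

	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}

/*
 * Userspace could then react with a udev rule along the lines of
 * ACTION=="change", ENV{ERROR}=="PANIC", RUN+="/sbin/ec-panic-handler"
 * (handler path shown for illustration only).
 */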
bool ec_has_more_events; int ret; @@ -324,6 +326,7 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data) if (value == ACPI_NOTIFY_CROS_EC_PANIC) { dev_emerg(ec_dev->dev, "CrOS EC Panic Reported. Shutdown is imminent!"); blocking_notifier_call_chain(&ec_dev->panic_notifier, 0, ec_dev); + kobject_uevent_env(&ec_dev->dev->kobj, KOBJ_CHANGE, (char **)env); /* Begin orderly shutdown. Force shutdown after 1 second. */ hw_protection_shutdown("CrOS EC Panic", 1000); /* Do not query for other events after a panic is reported */ @@ -543,23 +546,25 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = { MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table); #ifdef CONFIG_PM_SLEEP -static int cros_ec_lpc_suspend(struct device *dev) +static int cros_ec_lpc_prepare(struct device *dev) { struct cros_ec_device *ec_dev = dev_get_drvdata(dev); return cros_ec_suspend(ec_dev); } -static int cros_ec_lpc_resume(struct device *dev) +static void cros_ec_lpc_complete(struct device *dev) { struct cros_ec_device *ec_dev = dev_get_drvdata(dev); - - return cros_ec_resume(ec_dev); + cros_ec_resume(ec_dev); } #endif static const struct dev_pm_ops cros_ec_lpc_pm_ops = { - SET_LATE_SYSTEM_SLEEP_PM_OPS(cros_ec_lpc_suspend, cros_ec_lpc_resume) +#ifdef CONFIG_PM_SLEEP + .prepare = cros_ec_lpc_prepare, + .complete = cros_ec_lpc_complete +#endif }; static struct platform_driver cros_ec_lpc_driver = { diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c index 21143dba8970..3e88cc92e819 100644 --- a/drivers/platform/chrome/cros_ec_spi.c +++ b/drivers/platform/chrome/cros_ec_spi.c @@ -104,13 +104,7 @@ static void debug_packet(struct device *dev, const char *name, u8 *ptr, int len) { #ifdef DEBUG - int i; - - dev_dbg(dev, "%s: ", name); - for (i = 0; i < len; i++) - pr_cont(" %02x", ptr[i]); - - pr_cont("\n"); + dev_dbg(dev, "%s: %*ph\n", name, len, ptr); #endif } diff --git a/drivers/platform/chrome/cros_hps_i2c.c b/drivers/platform/chrome/cros_hps_i2c.c index 62ccb1acb5de..b31313080332 100644 --- a/drivers/platform/chrome/cros_hps_i2c.c +++ b/drivers/platform/chrome/cros_hps_i2c.c @@ -143,7 +143,7 @@ MODULE_DEVICE_TABLE(acpi, hps_acpi_id); #endif /* CONFIG_ACPI */ static struct i2c_driver hps_i2c_driver = { - .probe_new = hps_i2c_probe, + .probe = hps_i2c_probe, .remove = hps_i2c_remove, .id_table = hps_i2c_id, .driver = { diff --git a/drivers/platform/chrome/cros_typec_switch.c b/drivers/platform/chrome/cros_typec_switch.c index 752720483753..0eefdcf14d63 100644 --- a/drivers/platform/chrome/cros_typec_switch.c +++ b/drivers/platform/chrome/cros_typec_switch.c @@ -51,13 +51,18 @@ static int cros_typec_cmd_mux_set(struct cros_typec_switch_data *sdata, int port static int cros_typec_get_mux_state(unsigned long mode, struct typec_altmode *alt) { int ret = -EOPNOTSUPP; + u8 pin_assign; - if (mode == TYPEC_STATE_SAFE) + if (mode == TYPEC_STATE_SAFE) { ret = USB_PD_MUX_SAFE_MODE; - else if (mode == TYPEC_STATE_USB) + } else if (mode == TYPEC_STATE_USB) { ret = USB_PD_MUX_USB_ENABLED; - else if (alt && alt->svid == USB_TYPEC_DP_SID) + } else if (alt && alt->svid == USB_TYPEC_DP_SID) { ret = USB_PD_MUX_DP_ENABLED; + pin_assign = mode - TYPEC_STATE_MODAL; + if (pin_assign & DP_PIN_ASSIGN_D) + ret |= USB_PD_MUX_USB_ENABLED; + } return ret; } diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c index 427905714f79..1304cd6f13f6 100644 --- a/drivers/platform/x86/amd/pmc.c +++ b/drivers/platform/x86/amd/pmc.c @@ -543,7 +543,7 @@ static 
int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev, } if (dev) - dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val); + pm_pr_dbg("SMU idlemask s0i3: 0x%x\n", val); if (s) seq_printf(s, "SMU idlemask : 0x%x\n", val); @@ -769,7 +769,7 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg) *arg |= (duration << 16); rc = rtc_alarm_irq_enable(rtc_device, 0); - dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration); + pm_pr_dbg("wakeup timer programmed for %lld seconds\n", duration); return rc; } diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index ee5f124f78b6..7780705917b7 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -297,6 +297,8 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev) /* Enable Static Slider */ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { amd_pmf_init_sps(dev); + dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call; + power_supply_reg_notifier(&dev->pwr_src_notifier); dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n"); } @@ -315,8 +317,10 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev) static void amd_pmf_deinit_features(struct amd_pmf_dev *dev) { - if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) + if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { + power_supply_unreg_notifier(&dev->pwr_src_notifier); amd_pmf_deinit_sps(dev); + } if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) { amd_pmf_deinit_auto_mode(dev); @@ -399,9 +403,6 @@ static int amd_pmf_probe(struct platform_device *pdev) apmf_install_handler(dev); amd_pmf_dbgfs_register(dev); - dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call; - power_supply_reg_notifier(&dev->pwr_src_notifier); - dev_info(dev->dev, "registered PMF device successfully\n"); return 0; @@ -411,7 +412,6 @@ static void amd_pmf_remove(struct platform_device *pdev) { struct amd_pmf_dev *dev = platform_get_drvdata(pdev); - power_supply_unreg_notifier(&dev->pwr_src_notifier); amd_pmf_deinit_features(dev); apmf_acpi_deinit(dev); amd_pmf_dbgfs_unregister(dev); diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig index 90d33cd1b670..69ef8d081c98 100644 --- a/drivers/powercap/Kconfig +++ b/drivers/powercap/Kconfig @@ -18,10 +18,12 @@ if POWERCAP # Client driver configurations go here. config INTEL_RAPL_CORE tristate + depends on PCI + select IOSF_MBI config INTEL_RAPL tristate "Intel RAPL Support via MSR Interface" - depends on X86 && IOSF_MBI + depends on X86 && PCI select INTEL_RAPL_CORE help This enables support for the Intel Running Average Power Limit (RAPL) @@ -33,6 +35,20 @@ config INTEL_RAPL controller, CPU core (Power Plane 0), graphics uncore (Power Plane 1), etc. +config INTEL_RAPL_TPMI + tristate "Intel RAPL Support via TPMI Interface" + depends on X86 + depends on INTEL_TPMI + select INTEL_RAPL_CORE + help + This enables support for the Intel Running Average Power Limit (RAPL) + technology via TPMI interface, which allows power limits to be enforced + and monitored. + + In RAPL, the platform level settings are divided into domains for + fine grained control. These domains include processor package, DRAM + controller, platform, etc. 
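The INTEL_RAPL_CORE/INTEL_RAPL/INTEL_RAPL_TPMI split reflects how the reworked common code is driven: each interface driver fills a struct rapl_if_priv with its interface type, register map and raw accessors, then enumerates packages against it. A minimal sketch under the interfaces shown in this series; the ex_* names and the extern hardware accessors are illustrative assumptions, and a real driver would also fill .regs, .reg_unit and register a powercap control type:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/intel_rapl.h>

extern u64 ex_hw_read(int id, u64 reg);		/* hypothetical h/w access */
extern int ex_hw_write(int id, u64 reg, u64 val);

static int ex_read_raw(int id, struct reg_action *ra)
{
	ra->value = ex_hw_read(id, ra->reg) & ra->mask;
	return 0;
}

static int ex_write_raw(int id, struct reg_action *ra)
{
	u64 val = (ex_hw_read(id, ra->reg) & ~ra->mask) | ra->value;

	return ex_hw_write(id, ra->reg, val);
}

static struct rapl_if_priv ex_priv = {
	.type		= RAPL_IF_TPMI,
	.read_raw	= ex_read_raw,
	.write_raw	= ex_write_raw,
	/* PL1 is implied by the core; advertise PL2 for the package domain */
	.limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2),
};

static int ex_probe_package(int pkg_id)
{
	struct rapl_package *rp;

	/* id_is_cpu == false: enumerate by package id, not by a CPU number */
	rp = rapl_find_package_domain(pkg_id, &ex_priv, false);
	if (!rp)
		rp = rapl_add_package(pkg_id, &ex_priv, false);

	return IS_ERR(rp) ? PTR_ERR(rp) : 0;
}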
+ config IDLE_INJECT bool "Idle injection framework" depends on CPU_IDLE diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile index 4474201b4aa7..5ab0dce565b9 100644 --- a/drivers/powercap/Makefile +++ b/drivers/powercap/Makefile @@ -5,5 +5,6 @@ obj-$(CONFIG_DTPM_DEVFREQ) += dtpm_devfreq.o obj-$(CONFIG_POWERCAP) += powercap_sys.o obj-$(CONFIG_INTEL_RAPL_CORE) += intel_rapl_common.o obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o +obj-$(CONFIG_INTEL_RAPL_TPMI) += intel_rapl_tpmi.o obj-$(CONFIG_IDLE_INJECT) += idle_inject.o obj-$(CONFIG_ARM_SCMI_POWERCAP) += arm_scmi_powercap.o diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 8970c7b80884..4e646e5e48f6 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -75,6 +75,15 @@ #define PSYS_TIME_WINDOW1_MASK (0x7FULL<<19) #define PSYS_TIME_WINDOW2_MASK (0x7FULL<<51) +/* bitmasks for RAPL TPMI, used by primitive access functions */ +#define TPMI_POWER_LIMIT_MASK 0x3FFFF +#define TPMI_POWER_LIMIT_ENABLE BIT_ULL(62) +#define TPMI_TIME_WINDOW_MASK (0x7FULL<<18) +#define TPMI_INFO_SPEC_MASK 0x3FFFF +#define TPMI_INFO_MIN_MASK (0x3FFFFULL << 18) +#define TPMI_INFO_MAX_MASK (0x3FFFFULL << 36) +#define TPMI_INFO_MAX_TIME_WIN_MASK (0x7FULL << 54) + /* Non HW constants */ #define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */ #define RAPL_PRIMITIVE_DUMMY BIT(2) @@ -94,26 +103,120 @@ enum unit_type { #define DOMAIN_STATE_INACTIVE BIT(0) #define DOMAIN_STATE_POWER_LIMIT_SET BIT(1) -#define DOMAIN_STATE_BIOS_LOCKED BIT(2) -static const char pl1_name[] = "long_term"; -static const char pl2_name[] = "short_term"; -static const char pl4_name[] = "peak_power"; +static const char *pl_names[NR_POWER_LIMITS] = { + [POWER_LIMIT1] = "long_term", + [POWER_LIMIT2] = "short_term", + [POWER_LIMIT4] = "peak_power", +}; + +enum pl_prims { + PL_ENABLE, + PL_CLAMP, + PL_LIMIT, + PL_TIME_WINDOW, + PL_MAX_POWER, + PL_LOCK, +}; + +static bool is_pl_valid(struct rapl_domain *rd, int pl) +{ + if (pl < POWER_LIMIT1 || pl > POWER_LIMIT4) + return false; + return rd->rpl[pl].name ? true : false; +} + +static int get_pl_lock_prim(struct rapl_domain *rd, int pl) +{ + if (rd->rp->priv->type == RAPL_IF_TPMI) { + if (pl == POWER_LIMIT1) + return PL1_LOCK; + if (pl == POWER_LIMIT2) + return PL2_LOCK; + if (pl == POWER_LIMIT4) + return PL4_LOCK; + } + + /* MSR/MMIO Interface doesn't have Lock bit for PL4 */ + if (pl == POWER_LIMIT4) + return -EINVAL; + + /* + * Power Limit register that supports two power limits has a different + * bit position for the Lock bit. 
+ */ + if (rd->rp->priv->limits[rd->id] & BIT(POWER_LIMIT2)) + return FW_HIGH_LOCK; + return FW_LOCK; +} + +static int get_pl_prim(struct rapl_domain *rd, int pl, enum pl_prims prim) +{ + switch (pl) { + case POWER_LIMIT1: + if (prim == PL_ENABLE) + return PL1_ENABLE; + if (prim == PL_CLAMP && rd->rp->priv->type != RAPL_IF_TPMI) + return PL1_CLAMP; + if (prim == PL_LIMIT) + return POWER_LIMIT1; + if (prim == PL_TIME_WINDOW) + return TIME_WINDOW1; + if (prim == PL_MAX_POWER) + return THERMAL_SPEC_POWER; + if (prim == PL_LOCK) + return get_pl_lock_prim(rd, pl); + return -EINVAL; + case POWER_LIMIT2: + if (prim == PL_ENABLE) + return PL2_ENABLE; + if (prim == PL_CLAMP && rd->rp->priv->type != RAPL_IF_TPMI) + return PL2_CLAMP; + if (prim == PL_LIMIT) + return POWER_LIMIT2; + if (prim == PL_TIME_WINDOW) + return TIME_WINDOW2; + if (prim == PL_MAX_POWER) + return MAX_POWER; + if (prim == PL_LOCK) + return get_pl_lock_prim(rd, pl); + return -EINVAL; + case POWER_LIMIT4: + if (prim == PL_LIMIT) + return POWER_LIMIT4; + if (prim == PL_ENABLE) + return PL4_ENABLE; + /* PL4 would be around two times PL2, use same prim as PL2. */ + if (prim == PL_MAX_POWER) + return MAX_POWER; + if (prim == PL_LOCK) + return get_pl_lock_prim(rd, pl); + return -EINVAL; + default: + return -EINVAL; + } +} #define power_zone_to_rapl_domain(_zone) \ container_of(_zone, struct rapl_domain, power_zone) struct rapl_defaults { u8 floor_freq_reg_addr; - int (*check_unit)(struct rapl_package *rp, int cpu); + int (*check_unit)(struct rapl_domain *rd); void (*set_floor_freq)(struct rapl_domain *rd, bool mode); - u64 (*compute_time_window)(struct rapl_package *rp, u64 val, + u64 (*compute_time_window)(struct rapl_domain *rd, u64 val, bool to_raw); unsigned int dram_domain_energy_unit; unsigned int psys_domain_energy_unit; bool spr_psys_bits; }; -static struct rapl_defaults *rapl_defaults; +static struct rapl_defaults *defaults_msr; +static const struct rapl_defaults defaults_tpmi; + +static struct rapl_defaults *get_defaults(struct rapl_package *rp) +{ + return rp->priv->defaults; +} /* Sideband MBI registers */ #define IOSF_CPU_POWER_BUDGET_CTL_BYT (0x2) @@ -150,6 +253,12 @@ static int rapl_read_data_raw(struct rapl_domain *rd, static int rapl_write_data_raw(struct rapl_domain *rd, enum rapl_primitives prim, unsigned long long value); +static int rapl_read_pl_data(struct rapl_domain *rd, int pl, + enum pl_prims pl_prim, + bool xlate, u64 *data); +static int rapl_write_pl_data(struct rapl_domain *rd, int pl, + enum pl_prims pl_prim, + unsigned long long value); static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type, u64 value, int to_raw); static void package_power_limit_irq_save(struct rapl_package *rp); @@ -217,7 +326,7 @@ static int find_nr_power_limit(struct rapl_domain *rd) int i, nr_pl = 0; for (i = 0; i < NR_POWER_LIMITS; i++) { - if (rd->rpl[i].name) + if (is_pl_valid(rd, i)) nr_pl++; } @@ -227,37 +336,35 @@ static int find_nr_power_limit(struct rapl_domain *rd) static int set_domain_enable(struct powercap_zone *power_zone, bool mode) { struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone); - - if (rd->state & DOMAIN_STATE_BIOS_LOCKED) - return -EACCES; + struct rapl_defaults *defaults = get_defaults(rd->rp); + int ret; cpus_read_lock(); - rapl_write_data_raw(rd, PL1_ENABLE, mode); - if (rapl_defaults->set_floor_freq) - rapl_defaults->set_floor_freq(rd, mode); + ret = rapl_write_pl_data(rd, POWER_LIMIT1, PL_ENABLE, mode); + if (!ret && defaults->set_floor_freq) + defaults->set_floor_freq(rd, mode); 
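/*
 * With the accessors above, callers name a limit by its index
 * (POWER_LIMIT1..POWER_LIMIT4) plus a generic pl_prims operation, and
 * get_pl_prim() resolves the interface-specific primitive. A sketch of
 * the resulting calling convention; ex_cap_long_term() is illustrative,
 * and the value is assumed to use the same microwatt units as the
 * powercap zone callbacks:
 */
static int ex_cap_long_term(struct rapl_domain *rd, u64 power_uw)
{
	int ret;

	/* -EINVAL for an unsupported limit, -EACCES when BIOS-locked */
	ret = rapl_write_pl_data(rd, POWER_LIMIT1, PL_LIMIT, power_uw);
	if (ret)
		return ret;

	return rapl_write_pl_data(rd, POWER_LIMIT1, PL_ENABLE, 1);
}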
cpus_read_unlock(); - return 0; + return ret; } static int get_domain_enable(struct powercap_zone *power_zone, bool *mode) { struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone); u64 val; + int ret; - if (rd->state & DOMAIN_STATE_BIOS_LOCKED) { + if (rd->rpl[POWER_LIMIT1].locked) { *mode = false; return 0; } cpus_read_lock(); - if (rapl_read_data_raw(rd, PL1_ENABLE, true, &val)) { - cpus_read_unlock(); - return -EIO; - } - *mode = val; + ret = rapl_read_pl_data(rd, POWER_LIMIT1, PL_ENABLE, true, &val); + if (!ret) + *mode = val; cpus_read_unlock(); - return 0; + return ret; } /* per RAPL domain ops, in the order of rapl_domain_type */ @@ -313,8 +420,8 @@ static int contraint_to_pl(struct rapl_domain *rd, int cid) { int i, j; - for (i = 0, j = 0; i < NR_POWER_LIMITS; i++) { - if ((rd->rpl[i].name) && j++ == cid) { + for (i = POWER_LIMIT1, j = 0; i < NR_POWER_LIMITS; i++) { + if (is_pl_valid(rd, i) && j++ == cid) { pr_debug("%s: index %d\n", __func__, i); return i; } @@ -335,36 +442,11 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid, cpus_read_lock(); rd = power_zone_to_rapl_domain(power_zone); id = contraint_to_pl(rd, cid); - if (id < 0) { - ret = id; - goto set_exit; - } - rp = rd->rp; - if (rd->state & DOMAIN_STATE_BIOS_LOCKED) { - dev_warn(&power_zone->dev, - "%s locked by BIOS, monitoring only\n", rd->name); - ret = -EACCES; - goto set_exit; - } - - switch (rd->rpl[id].prim_id) { - case PL1_ENABLE: - rapl_write_data_raw(rd, POWER_LIMIT1, power_limit); - break; - case PL2_ENABLE: - rapl_write_data_raw(rd, POWER_LIMIT2, power_limit); - break; - case PL4_ENABLE: - rapl_write_data_raw(rd, POWER_LIMIT4, power_limit); - break; - default: - ret = -EINVAL; - } + ret = rapl_write_pl_data(rd, id, PL_LIMIT, power_limit); if (!ret) package_power_limit_irq_save(rp); -set_exit: cpus_read_unlock(); return ret; } @@ -374,38 +456,17 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid, { struct rapl_domain *rd; u64 val; - int prim; int ret = 0; int id; cpus_read_lock(); rd = power_zone_to_rapl_domain(power_zone); id = contraint_to_pl(rd, cid); - if (id < 0) { - ret = id; - goto get_exit; - } - switch (rd->rpl[id].prim_id) { - case PL1_ENABLE: - prim = POWER_LIMIT1; - break; - case PL2_ENABLE: - prim = POWER_LIMIT2; - break; - case PL4_ENABLE: - prim = POWER_LIMIT4; - break; - default: - cpus_read_unlock(); - return -EINVAL; - } - if (rapl_read_data_raw(rd, prim, true, &val)) - ret = -EIO; - else + ret = rapl_read_pl_data(rd, id, PL_LIMIT, true, &val); + if (!ret) *data = val; -get_exit: cpus_read_unlock(); return ret; @@ -421,23 +482,9 @@ static int set_time_window(struct powercap_zone *power_zone, int cid, cpus_read_lock(); rd = power_zone_to_rapl_domain(power_zone); id = contraint_to_pl(rd, cid); - if (id < 0) { - ret = id; - goto set_time_exit; - } - switch (rd->rpl[id].prim_id) { - case PL1_ENABLE: - rapl_write_data_raw(rd, TIME_WINDOW1, window); - break; - case PL2_ENABLE: - rapl_write_data_raw(rd, TIME_WINDOW2, window); - break; - default: - ret = -EINVAL; - } + ret = rapl_write_pl_data(rd, id, PL_TIME_WINDOW, window); -set_time_exit: cpus_read_unlock(); return ret; } @@ -453,33 +500,11 @@ static int get_time_window(struct powercap_zone *power_zone, int cid, cpus_read_lock(); rd = power_zone_to_rapl_domain(power_zone); id = contraint_to_pl(rd, cid); - if (id < 0) { - ret = id; - goto get_time_exit; - } - switch (rd->rpl[id].prim_id) { - case PL1_ENABLE: - ret = rapl_read_data_raw(rd, TIME_WINDOW1, true, &val); - break; - case PL2_ENABLE: 
- ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val); - break; - case PL4_ENABLE: - /* - * Time window parameter is not applicable for PL4 entry - * so assigining '0' as default value. - */ - val = 0; - break; - default: - cpus_read_unlock(); - return -EINVAL; - } + ret = rapl_read_pl_data(rd, id, PL_TIME_WINDOW, true, &val); if (!ret) *data = val; -get_time_exit: cpus_read_unlock(); return ret; @@ -499,36 +524,23 @@ static const char *get_constraint_name(struct powercap_zone *power_zone, return NULL; } -static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data) +static int get_max_power(struct powercap_zone *power_zone, int cid, u64 *data) { struct rapl_domain *rd; u64 val; - int prim; int ret = 0; + int id; cpus_read_lock(); rd = power_zone_to_rapl_domain(power_zone); - switch (rd->rpl[id].prim_id) { - case PL1_ENABLE: - prim = THERMAL_SPEC_POWER; - break; - case PL2_ENABLE: - prim = MAX_POWER; - break; - case PL4_ENABLE: - prim = MAX_POWER; - break; - default: - cpus_read_unlock(); - return -EINVAL; - } - if (rapl_read_data_raw(rd, prim, true, &val)) - ret = -EIO; - else + id = contraint_to_pl(rd, cid); + + ret = rapl_read_pl_data(rd, id, PL_MAX_POWER, true, &val); + if (!ret) *data = val; /* As a generalization rule, PL4 would be around two times PL2. */ - if (rd->rpl[id].prim_id == PL4_ENABLE) + if (id == POWER_LIMIT4) *data = *data * 2; cpus_read_unlock(); @@ -545,6 +557,12 @@ static const struct powercap_zone_constraint_ops constraint_ops = { .get_name = get_constraint_name, }; +/* Return the id used for read_raw/write_raw callback */ +static int get_rid(struct rapl_package *rp) +{ + return rp->lead_cpu >= 0 ? rp->lead_cpu : rp->id; +} + /* called after domain detection and package level data are set */ static void rapl_init_domains(struct rapl_package *rp) { @@ -554,6 +572,7 @@ static void rapl_init_domains(struct rapl_package *rp) for (i = 0; i < RAPL_DOMAIN_MAX; i++) { unsigned int mask = rp->domain_map & (1 << i); + int t; if (!mask) continue; @@ -562,51 +581,26 @@ static void rapl_init_domains(struct rapl_package *rp) if (i == RAPL_DOMAIN_PLATFORM && rp->id > 0) { snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "psys-%d", - topology_physical_package_id(rp->lead_cpu)); - } else + rp->lead_cpu >= 0 ? 
topology_physical_package_id(rp->lead_cpu) : + rp->id); + } else { snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "%s", rapl_domain_names[i]); + } rd->id = i; - rd->rpl[0].prim_id = PL1_ENABLE; - rd->rpl[0].name = pl1_name; - /* - * The PL2 power domain is applicable for limits two - * and limits three - */ - if (rp->priv->limits[i] >= 2) { - rd->rpl[1].prim_id = PL2_ENABLE; - rd->rpl[1].name = pl2_name; - } + /* PL1 is supported by default */ + rp->priv->limits[i] |= BIT(POWER_LIMIT1); - /* Enable PL4 domain if the total power limits are three */ - if (rp->priv->limits[i] == 3) { - rd->rpl[2].prim_id = PL4_ENABLE; - rd->rpl[2].name = pl4_name; + for (t = POWER_LIMIT1; t < NR_POWER_LIMITS; t++) { + if (rp->priv->limits[i] & BIT(t)) + rd->rpl[t].name = pl_names[t]; } for (j = 0; j < RAPL_DOMAIN_REG_MAX; j++) rd->regs[j] = rp->priv->regs[i][j]; - switch (i) { - case RAPL_DOMAIN_DRAM: - rd->domain_energy_unit = - rapl_defaults->dram_domain_energy_unit; - if (rd->domain_energy_unit) - pr_info("DRAM domain energy unit %dpj\n", - rd->domain_energy_unit); - break; - case RAPL_DOMAIN_PLATFORM: - rd->domain_energy_unit = - rapl_defaults->psys_domain_energy_unit; - if (rd->domain_energy_unit) - pr_info("Platform domain energy unit %dpj\n", - rd->domain_energy_unit); - break; - default: - break; - } rd++; } } @@ -615,23 +609,19 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type, u64 value, int to_raw) { u64 units = 1; - struct rapl_package *rp = rd->rp; + struct rapl_defaults *defaults = get_defaults(rd->rp); u64 scale = 1; switch (type) { case POWER_UNIT: - units = rp->power_unit; + units = rd->power_unit; break; case ENERGY_UNIT: scale = ENERGY_UNIT_SCALE; - /* per domain unit takes precedence */ - if (rd->domain_energy_unit) - units = rd->domain_energy_unit; - else - units = rp->energy_unit; + units = rd->energy_unit; break; case TIME_UNIT: - return rapl_defaults->compute_time_window(rp, value, to_raw); + return defaults->compute_time_window(rd, value, to_raw); case ARBITRARY_UNIT: default: return value; @@ -645,67 +635,141 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type, return div64_u64(value, scale); } -/* in the order of enum rapl_primitives */ -static struct rapl_primitive_info rpi[] = { +/* RAPL primitives for MSR and MMIO I/F */ +static struct rapl_primitive_info rpi_msr[NR_RAPL_PRIMITIVES] = { /* name, mask, shift, msr index, unit divisor */ - PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0, - RAPL_DOMAIN_REG_STATUS, ENERGY_UNIT, 0), - PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0, + [POWER_LIMIT1] = PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0, RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0), - PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32, + [POWER_LIMIT2] = PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32, RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0), - PRIMITIVE_INFO_INIT(POWER_LIMIT4, POWER_LIMIT4_MASK, 0, + [POWER_LIMIT4] = PRIMITIVE_INFO_INIT(POWER_LIMIT4, POWER_LIMIT4_MASK, 0, RAPL_DOMAIN_REG_PL4, POWER_UNIT, 0), - PRIMITIVE_INFO_INIT(FW_LOCK, POWER_LOW_LOCK, 31, + [ENERGY_COUNTER] = PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0, + RAPL_DOMAIN_REG_STATUS, ENERGY_UNIT, 0), + [FW_LOCK] = PRIMITIVE_INFO_INIT(FW_LOCK, POWER_LOW_LOCK, 31, RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), - PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15, + [FW_HIGH_LOCK] = PRIMITIVE_INFO_INIT(FW_LOCK, POWER_HIGH_LOCK, 63, RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), - PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 
16, + [PL1_ENABLE] = PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15, RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), - PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47, + [PL1_CLAMP] = PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16, RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), - PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48, + [PL2_ENABLE] = PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47, RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), - PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0, + [PL2_CLAMP] = PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48, + RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), + [PL4_ENABLE] = PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0, RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0), - PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17, + [TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17, RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0), - PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49, + [TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49, RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0), - PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK, + [THERMAL_SPEC_POWER] = PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK, 0, RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0), - PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32, + [MAX_POWER] = PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32, RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0), - PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16, + [MIN_POWER] = PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16, RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0), - PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48, + [MAX_TIME_WINDOW] = PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48, RAPL_DOMAIN_REG_INFO, TIME_UNIT, 0), - PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0, + [THROTTLED_TIME] = PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0, RAPL_DOMAIN_REG_PERF, TIME_UNIT, 0), - PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0, + [PRIORITY_LEVEL] = PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0, RAPL_DOMAIN_REG_POLICY, ARBITRARY_UNIT, 0), - PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT1, PSYS_POWER_LIMIT1_MASK, 0, + [PSYS_POWER_LIMIT1] = PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT1, PSYS_POWER_LIMIT1_MASK, 0, RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0), - PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT2, PSYS_POWER_LIMIT2_MASK, 32, + [PSYS_POWER_LIMIT2] = PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT2, PSYS_POWER_LIMIT2_MASK, 32, RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0), - PRIMITIVE_INFO_INIT(PSYS_PL1_ENABLE, PSYS_POWER_LIMIT1_ENABLE, 17, + [PSYS_PL1_ENABLE] = PRIMITIVE_INFO_INIT(PSYS_PL1_ENABLE, PSYS_POWER_LIMIT1_ENABLE, 17, RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), - PRIMITIVE_INFO_INIT(PSYS_PL2_ENABLE, PSYS_POWER_LIMIT2_ENABLE, 49, + [PSYS_PL2_ENABLE] = PRIMITIVE_INFO_INIT(PSYS_PL2_ENABLE, PSYS_POWER_LIMIT2_ENABLE, 49, RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), - PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW1, PSYS_TIME_WINDOW1_MASK, 19, + [PSYS_TIME_WINDOW1] = PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW1, PSYS_TIME_WINDOW1_MASK, 19, RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0), - PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW2, PSYS_TIME_WINDOW2_MASK, 51, + [PSYS_TIME_WINDOW2] = PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW2, PSYS_TIME_WINDOW2_MASK, 51, RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0), /* non-hardware */ - PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT, + [AVERAGE_POWER] = PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 
0, 0, POWER_UNIT, RAPL_PRIMITIVE_DERIVED), - {NULL, 0, 0, 0}, }; +/* RAPL primitives for TPMI I/F */ +static struct rapl_primitive_info rpi_tpmi[NR_RAPL_PRIMITIVES] = { + /* name, mask, shift, msr index, unit divisor */ + [POWER_LIMIT1] = PRIMITIVE_INFO_INIT(POWER_LIMIT1, TPMI_POWER_LIMIT_MASK, 0, + RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0), + [POWER_LIMIT2] = PRIMITIVE_INFO_INIT(POWER_LIMIT2, TPMI_POWER_LIMIT_MASK, 0, + RAPL_DOMAIN_REG_PL2, POWER_UNIT, 0), + [POWER_LIMIT4] = PRIMITIVE_INFO_INIT(POWER_LIMIT4, TPMI_POWER_LIMIT_MASK, 0, + RAPL_DOMAIN_REG_PL4, POWER_UNIT, 0), + [ENERGY_COUNTER] = PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0, + RAPL_DOMAIN_REG_STATUS, ENERGY_UNIT, 0), + [PL1_LOCK] = PRIMITIVE_INFO_INIT(PL1_LOCK, POWER_HIGH_LOCK, 63, + RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), + [PL2_LOCK] = PRIMITIVE_INFO_INIT(PL2_LOCK, POWER_HIGH_LOCK, 63, + RAPL_DOMAIN_REG_PL2, ARBITRARY_UNIT, 0), + [PL4_LOCK] = PRIMITIVE_INFO_INIT(PL4_LOCK, POWER_HIGH_LOCK, 63, + RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0), + [PL1_ENABLE] = PRIMITIVE_INFO_INIT(PL1_ENABLE, TPMI_POWER_LIMIT_ENABLE, 62, + RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), + [PL2_ENABLE] = PRIMITIVE_INFO_INIT(PL2_ENABLE, TPMI_POWER_LIMIT_ENABLE, 62, + RAPL_DOMAIN_REG_PL2, ARBITRARY_UNIT, 0), + [PL4_ENABLE] = PRIMITIVE_INFO_INIT(PL4_ENABLE, TPMI_POWER_LIMIT_ENABLE, 62, + RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0), + [TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TPMI_TIME_WINDOW_MASK, 18, + RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0), + [TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TPMI_TIME_WINDOW_MASK, 18, + RAPL_DOMAIN_REG_PL2, TIME_UNIT, 0), + [THERMAL_SPEC_POWER] = PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, TPMI_INFO_SPEC_MASK, 0, + RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0), + [MAX_POWER] = PRIMITIVE_INFO_INIT(MAX_POWER, TPMI_INFO_MAX_MASK, 36, + RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0), + [MIN_POWER] = PRIMITIVE_INFO_INIT(MIN_POWER, TPMI_INFO_MIN_MASK, 18, + RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0), + [MAX_TIME_WINDOW] = PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, TPMI_INFO_MAX_TIME_WIN_MASK, 54, + RAPL_DOMAIN_REG_INFO, TIME_UNIT, 0), + [THROTTLED_TIME] = PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0, + RAPL_DOMAIN_REG_PERF, TIME_UNIT, 0), + /* non-hardware */ + [AVERAGE_POWER] = PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, + POWER_UNIT, RAPL_PRIMITIVE_DERIVED), +}; + +static struct rapl_primitive_info *get_rpi(struct rapl_package *rp, int prim) +{ + struct rapl_primitive_info *rpi = rp->priv->rpi; + + if (prim < 0 || prim > NR_RAPL_PRIMITIVES || !rpi) + return NULL; + + return &rpi[prim]; +} + +static int rapl_config(struct rapl_package *rp) +{ + switch (rp->priv->type) { + /* MMIO I/F shares the same register layout as MSR registers */ + case RAPL_IF_MMIO: + case RAPL_IF_MSR: + rp->priv->defaults = (void *)defaults_msr; + rp->priv->rpi = (void *)rpi_msr; + break; + case RAPL_IF_TPMI: + rp->priv->defaults = (void *)&defaults_tpmi; + rp->priv->rpi = (void *)rpi_tpmi; + break; + default: + return -EINVAL; + } + return 0; +} + static enum rapl_primitives prim_fixups(struct rapl_domain *rd, enum rapl_primitives prim) { - if (!rapl_defaults->spr_psys_bits) + struct rapl_defaults *defaults = get_defaults(rd->rp); + + if (!defaults->spr_psys_bits) return prim; if (rd->id != RAPL_DOMAIN_PLATFORM) @@ -747,41 +811,33 @@ static int rapl_read_data_raw(struct rapl_domain *rd, { u64 value; enum rapl_primitives prim_fixed = prim_fixups(rd, prim); - struct rapl_primitive_info *rp = &rpi[prim_fixed]; + struct rapl_primitive_info *rpi = 
get_rpi(rd->rp, prim_fixed); struct reg_action ra; - int cpu; - if (!rp->name || rp->flag & RAPL_PRIMITIVE_DUMMY) + if (!rpi || !rpi->name || rpi->flag & RAPL_PRIMITIVE_DUMMY) return -EINVAL; - ra.reg = rd->regs[rp->id]; + ra.reg = rd->regs[rpi->id]; if (!ra.reg) return -EINVAL; - cpu = rd->rp->lead_cpu; - - /* domain with 2 limits has different bit */ - if (prim == FW_LOCK && rd->rp->priv->limits[rd->id] == 2) { - rp->mask = POWER_HIGH_LOCK; - rp->shift = 63; - } /* non-hardware data are collected by the polling thread */ - if (rp->flag & RAPL_PRIMITIVE_DERIVED) { + if (rpi->flag & RAPL_PRIMITIVE_DERIVED) { *data = rd->rdd.primitives[prim]; return 0; } - ra.mask = rp->mask; + ra.mask = rpi->mask; - if (rd->rp->priv->read_raw(cpu, &ra)) { - pr_debug("failed to read reg 0x%llx on cpu %d\n", ra.reg, cpu); + if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { + pr_debug("failed to read reg 0x%llx for %s:%s\n", ra.reg, rd->rp->name, rd->name); return -EIO; } - value = ra.value >> rp->shift; + value = ra.value >> rpi->shift; if (xlate) - *data = rapl_unit_xlate(rd, rp->unit, value, 0); + *data = rapl_unit_xlate(rd, rpi->unit, value, 0); else *data = value; @@ -794,28 +850,56 @@ static int rapl_write_data_raw(struct rapl_domain *rd, unsigned long long value) { enum rapl_primitives prim_fixed = prim_fixups(rd, prim); - struct rapl_primitive_info *rp = &rpi[prim_fixed]; - int cpu; + struct rapl_primitive_info *rpi = get_rpi(rd->rp, prim_fixed); u64 bits; struct reg_action ra; int ret; - cpu = rd->rp->lead_cpu; - bits = rapl_unit_xlate(rd, rp->unit, value, 1); - bits <<= rp->shift; - bits &= rp->mask; + if (!rpi || !rpi->name || rpi->flag & RAPL_PRIMITIVE_DUMMY) + return -EINVAL; + + bits = rapl_unit_xlate(rd, rpi->unit, value, 1); + bits <<= rpi->shift; + bits &= rpi->mask; memset(&ra, 0, sizeof(ra)); - ra.reg = rd->regs[rp->id]; - ra.mask = rp->mask; + ra.reg = rd->regs[rpi->id]; + ra.mask = rpi->mask; ra.value = bits; - ret = rd->rp->priv->write_raw(cpu, &ra); + ret = rd->rp->priv->write_raw(get_rid(rd->rp), &ra); return ret; } +static int rapl_read_pl_data(struct rapl_domain *rd, int pl, + enum pl_prims pl_prim, bool xlate, u64 *data) +{ + enum rapl_primitives prim = get_pl_prim(rd, pl, pl_prim); + + if (!is_pl_valid(rd, pl)) + return -EINVAL; + + return rapl_read_data_raw(rd, prim, xlate, data); +} + +static int rapl_write_pl_data(struct rapl_domain *rd, int pl, + enum pl_prims pl_prim, + unsigned long long value) +{ + enum rapl_primitives prim = get_pl_prim(rd, pl, pl_prim); + + if (!is_pl_valid(rd, pl)) + return -EINVAL; + + if (rd->rpl[pl].locked) { + pr_warn("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]); + return -EACCES; + } + + return rapl_write_data_raw(rd, prim, value); +} /* * Raw RAPL data stored in MSRs are in certain scales. 
We need to * convert them into standard units based on the units reported in @@ -827,58 +911,58 @@ static int rapl_write_data_raw(struct rapl_domain *rd, * power unit : microWatts : Represented in milliWatts by default * time unit : microseconds: Represented in seconds by default */ -static int rapl_check_unit_core(struct rapl_package *rp, int cpu) +static int rapl_check_unit_core(struct rapl_domain *rd) { struct reg_action ra; u32 value; - ra.reg = rp->priv->reg_unit; + ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT]; ra.mask = ~0; - if (rp->priv->read_raw(cpu, &ra)) { - pr_err("Failed to read power unit REG 0x%llx on CPU %d, exit.\n", - rp->priv->reg_unit, cpu); + if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { + pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n", + ra.reg, rd->rp->name, rd->name); return -ENODEV; } value = (ra.value & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; - rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value); + rd->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value); value = (ra.value & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; - rp->power_unit = 1000000 / (1 << value); + rd->power_unit = 1000000 / (1 << value); value = (ra.value & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; - rp->time_unit = 1000000 / (1 << value); + rd->time_unit = 1000000 / (1 << value); - pr_debug("Core CPU %s energy=%dpJ, time=%dus, power=%duW\n", - rp->name, rp->energy_unit, rp->time_unit, rp->power_unit); + pr_debug("Core CPU %s:%s energy=%dpJ, time=%dus, power=%duW\n", + rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit); return 0; } -static int rapl_check_unit_atom(struct rapl_package *rp, int cpu) +static int rapl_check_unit_atom(struct rapl_domain *rd) { struct reg_action ra; u32 value; - ra.reg = rp->priv->reg_unit; + ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT]; ra.mask = ~0; - if (rp->priv->read_raw(cpu, &ra)) { - pr_err("Failed to read power unit REG 0x%llx on CPU %d, exit.\n", - rp->priv->reg_unit, cpu); + if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { + pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n", + ra.reg, rd->rp->name, rd->name); return -ENODEV; } value = (ra.value & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; - rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value; + rd->energy_unit = ENERGY_UNIT_SCALE * 1 << value; value = (ra.value & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; - rp->power_unit = (1 << value) * 1000; + rd->power_unit = (1 << value) * 1000; value = (ra.value & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; - rp->time_unit = 1000000 / (1 << value); + rd->time_unit = 1000000 / (1 << value); - pr_debug("Atom %s energy=%dpJ, time=%dus, power=%duW\n", - rp->name, rp->energy_unit, rp->time_unit, rp->power_unit); + pr_debug("Atom %s:%s energy=%dpJ, time=%dus, power=%duW\n", + rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit); return 0; } @@ -910,6 +994,9 @@ static void power_limit_irq_save_cpu(void *info) static void package_power_limit_irq_save(struct rapl_package *rp) { + if (rp->lead_cpu < 0) + return; + if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN)) return; @@ -924,6 +1011,9 @@ static void package_power_limit_irq_restore(struct rapl_package *rp) { u32 l, h; + if (rp->lead_cpu < 0) + return; + if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN)) return; @@ -943,33 +1033,33 @@ static void package_power_limit_irq_restore(struct rapl_package *rp) static void set_floor_freq_default(struct rapl_domain *rd, bool mode) { - int nr_powerlimit = find_nr_power_limit(rd); + int i; /* 
always enable clamp such that p-state can go below OS requested * range. power capping priority over guaranteed frequency. */ - rapl_write_data_raw(rd, PL1_CLAMP, mode); + rapl_write_pl_data(rd, POWER_LIMIT1, PL_CLAMP, mode); - /* some domains have pl2 */ - if (nr_powerlimit > 1) { - rapl_write_data_raw(rd, PL2_ENABLE, mode); - rapl_write_data_raw(rd, PL2_CLAMP, mode); + for (i = POWER_LIMIT2; i < NR_POWER_LIMITS; i++) { + rapl_write_pl_data(rd, i, PL_ENABLE, mode); + rapl_write_pl_data(rd, i, PL_CLAMP, mode); } } static void set_floor_freq_atom(struct rapl_domain *rd, bool enable) { static u32 power_ctrl_orig_val; + struct rapl_defaults *defaults = get_defaults(rd->rp); u32 mdata; - if (!rapl_defaults->floor_freq_reg_addr) { + if (!defaults->floor_freq_reg_addr) { pr_err("Invalid floor frequency config register\n"); return; } if (!power_ctrl_orig_val) iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_CR_READ, - rapl_defaults->floor_freq_reg_addr, + defaults->floor_freq_reg_addr, &power_ctrl_orig_val); mdata = power_ctrl_orig_val; if (enable) { @@ -977,10 +1067,10 @@ static void set_floor_freq_atom(struct rapl_domain *rd, bool enable) mdata |= 1 << 8; } iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_CR_WRITE, - rapl_defaults->floor_freq_reg_addr, mdata); + defaults->floor_freq_reg_addr, mdata); } -static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value, +static u64 rapl_compute_time_window_core(struct rapl_domain *rd, u64 value, bool to_raw) { u64 f, y; /* fraction and exp. used for time unit */ @@ -992,12 +1082,12 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value, if (!to_raw) { f = (value & 0x60) >> 5; y = value & 0x1f; - value = (1 << y) * (4 + f) * rp->time_unit / 4; + value = (1 << y) * (4 + f) * rd->time_unit / 4; } else { - if (value < rp->time_unit) + if (value < rd->time_unit) return 0; - do_div(value, rp->time_unit); + do_div(value, rd->time_unit); y = ilog2(value); /* @@ -1013,7 +1103,7 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value, return value; } -static u64 rapl_compute_time_window_atom(struct rapl_package *rp, u64 value, +static u64 rapl_compute_time_window_atom(struct rapl_domain *rd, u64 value, bool to_raw) { /* @@ -1021,13 +1111,56 @@ static u64 rapl_compute_time_window_atom(struct rapl_package *rp, u64 value, * where time_unit defaults to 1 sec. Never 0. */ if (!to_raw) - return (value) ?
value * rd->time_unit : rd->time_unit; - value = div64_u64(value, rp->time_unit); + value = div64_u64(value, rd->time_unit); return value; } +/* TPMI Unit register has different layout */ +#define TPMI_POWER_UNIT_OFFSET POWER_UNIT_OFFSET +#define TPMI_POWER_UNIT_MASK POWER_UNIT_MASK +#define TPMI_ENERGY_UNIT_OFFSET 0x06 +#define TPMI_ENERGY_UNIT_MASK 0x7C0 +#define TPMI_TIME_UNIT_OFFSET 0x0C +#define TPMI_TIME_UNIT_MASK 0xF000 + +static int rapl_check_unit_tpmi(struct rapl_domain *rd) +{ + struct reg_action ra; + u32 value; + + ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT]; + ra.mask = ~0; + if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { + pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n", + ra.reg, rd->rp->name, rd->name); + return -ENODEV; + } + + value = (ra.value & TPMI_ENERGY_UNIT_MASK) >> TPMI_ENERGY_UNIT_OFFSET; + rd->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value); + + value = (ra.value & TPMI_POWER_UNIT_MASK) >> TPMI_POWER_UNIT_OFFSET; + rd->power_unit = 1000000 / (1 << value); + + value = (ra.value & TPMI_TIME_UNIT_MASK) >> TPMI_TIME_UNIT_OFFSET; + rd->time_unit = 1000000 / (1 << value); + + pr_debug("Core CPU %s:%s energy=%dpJ, time=%dus, power=%duW\n", + rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit); + + return 0; +} + +static const struct rapl_defaults defaults_tpmi = { + .check_unit = rapl_check_unit_tpmi, + /* Reuse existing logic, ignore the PL_CLAMP failures and enable all Power Limits */ + .set_floor_freq = set_floor_freq_default, + .compute_time_window = rapl_compute_time_window_core, +}; + static const struct rapl_defaults rapl_defaults_core = { .floor_freq_reg_addr = 0, .check_unit = rapl_check_unit_core, @@ -1159,8 +1292,10 @@ static void rapl_update_domain_data(struct rapl_package *rp) rp->domains[dmn].name); /* exclude non-raw primitives */ for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) { + struct rapl_primitive_info *rpi = get_rpi(rp, prim); + if (!rapl_read_data_raw(&rp->domains[dmn], prim, - rpi[prim].unit, &val)) + rpi->unit, &val)) rp->domains[dmn].rdd.primitives[prim] = val; } } @@ -1239,7 +1374,7 @@ err_cleanup: return ret; } -static int rapl_check_domain(int cpu, int domain, struct rapl_package *rp) +static int rapl_check_domain(int domain, struct rapl_package *rp) { struct reg_action ra; @@ -1260,9 +1395,43 @@ static int rapl_check_domain(int cpu, int domain, struct rapl_package *rp) */ ra.mask = ENERGY_STATUS_MASK; - if (rp->priv->read_raw(cpu, &ra) || !ra.value) + if (rp->priv->read_raw(get_rid(rp), &ra) || !ra.value) + return -ENODEV; + + return 0; +} + +/* + * Get per domain energy/power/time unit. + * RAPL Interfaces without per domain unit register will use the package + * scope unit register to set per domain units. 
+ */ +static int rapl_get_domain_unit(struct rapl_domain *rd) +{ + struct rapl_defaults *defaults = get_defaults(rd->rp); + int ret; + + if (!rd->regs[RAPL_DOMAIN_REG_UNIT]) { + if (!rd->rp->priv->reg_unit) { + pr_err("No valid Unit register found\n"); + return -ENODEV; + } + rd->regs[RAPL_DOMAIN_REG_UNIT] = rd->rp->priv->reg_unit; + } + + if (!defaults->check_unit) { + pr_err("missing .check_unit() callback\n"); return -ENODEV; + } + + ret = defaults->check_unit(rd); + if (ret) + return ret; + if (rd->id == RAPL_DOMAIN_DRAM && defaults->dram_domain_energy_unit) + rd->energy_unit = defaults->dram_domain_energy_unit; + if (rd->id == RAPL_DOMAIN_PLATFORM && defaults->psys_domain_energy_unit) + rd->energy_unit = defaults->psys_domain_energy_unit; return 0; } @@ -1280,19 +1449,16 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd) u64 val64; int i; - /* check if the domain is locked by BIOS, ignore if MSR doesn't exist */ - if (!rapl_read_data_raw(rd, FW_LOCK, false, &val64)) { - if (val64) { - pr_info("RAPL %s domain %s locked by BIOS\n", - rd->rp->name, rd->name); - rd->state |= DOMAIN_STATE_BIOS_LOCKED; + for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) { + if (!rapl_read_pl_data(rd, i, PL_LOCK, false, &val64)) { + if (val64) { + rd->rpl[i].locked = true; + pr_info("%s:%s:%s locked by BIOS\n", + rd->rp->name, rd->name, pl_names[i]); + } } - } - /* check if power limit MSR exists, otherwise domain is monitoring only */ - for (i = 0; i < NR_POWER_LIMITS; i++) { - int prim = rd->rpl[i].prim_id; - if (rapl_read_pl_data(rd, i, PL_ENABLE, false, &val64)) rd->rpl[i].name = NULL; } } @@ -1300,14 +1466,14 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd) /* Detect active and valid domains for the given CPU, caller must * ensure the CPU belongs to the targeted package and CPU hotplug is disabled.
*/ -static int rapl_detect_domains(struct rapl_package *rp, int cpu) +static int rapl_detect_domains(struct rapl_package *rp) { struct rapl_domain *rd; int i; for (i = 0; i < RAPL_DOMAIN_MAX; i++) { /* use physical package id to read counters */ - if (!rapl_check_domain(cpu, i, rp)) { + if (!rapl_check_domain(i, rp)) { rp->domain_map |= 1 << i; pr_info("Found RAPL domain %s\n", rapl_domain_names[i]); } @@ -1326,8 +1492,10 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu) rapl_init_domains(rp); - for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) + for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { + rapl_get_domain_unit(rd); rapl_detect_powerlimit(rd); + } return 0; } @@ -1340,13 +1508,13 @@ void rapl_remove_package(struct rapl_package *rp) package_power_limit_irq_restore(rp); for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { - rapl_write_data_raw(rd, PL1_ENABLE, 0); - rapl_write_data_raw(rd, PL1_CLAMP, 0); - if (find_nr_power_limit(rd) > 1) { - rapl_write_data_raw(rd, PL2_ENABLE, 0); - rapl_write_data_raw(rd, PL2_CLAMP, 0); - rapl_write_data_raw(rd, PL4_ENABLE, 0); + int i; + + for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) { + rapl_write_pl_data(rd, i, PL_ENABLE, 0); + rapl_write_pl_data(rd, i, PL_CLAMP, 0); } + if (rd->id == RAPL_DOMAIN_PACKAGE) { rd_package = rd; continue; @@ -1365,13 +1533,18 @@ void rapl_remove_package(struct rapl_package *rp) EXPORT_SYMBOL_GPL(rapl_remove_package); /* caller to ensure CPU hotplug lock is held */ -struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv) +struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu) { - int id = topology_logical_die_id(cpu); struct rapl_package *rp; + int uid; + + if (id_is_cpu) + uid = topology_logical_die_id(id); + else + uid = id; list_for_each_entry(rp, &rapl_packages, plist) { - if (rp->id == id + if (rp->id == uid && rp->priv->control_type == priv->control_type) return rp; } @@ -1381,34 +1554,37 @@ struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv EXPORT_SYMBOL_GPL(rapl_find_package_domain); /* called from CPU hotplug notifier, hotplug lock held */ -struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv) +struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu) { - int id = topology_logical_die_id(cpu); struct rapl_package *rp; int ret; - if (!rapl_defaults) - return ERR_PTR(-ENODEV); - rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL); if (!rp) return ERR_PTR(-ENOMEM); - /* add the new package to the list */ - rp->id = id; - rp->lead_cpu = cpu; - rp->priv = priv; + if (id_is_cpu) { + rp->id = topology_logical_die_id(id); + rp->lead_cpu = id; + if (topology_max_die_per_package() > 1) + snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d-die-%d", + topology_physical_package_id(id), topology_die_id(id)); + else + snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d", + topology_physical_package_id(id)); + } else { + rp->id = id; + rp->lead_cpu = -1; + snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d", id); + } - if (topology_max_die_per_package() > 1) - snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, - "package-%d-die-%d", - topology_physical_package_id(cpu), topology_die_id(cpu)); - else - snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d", - topology_physical_package_id(cpu)); + rp->priv = priv; + ret = rapl_config(rp); + if (ret) + goto err_free_package; /* check if the 
package contains valid domains */ - if (rapl_detect_domains(rp, cpu) || rapl_defaults->check_unit(rp, cpu)) { + if (rapl_detect_domains(rp)) { ret = -ENODEV; goto err_free_package; } @@ -1430,38 +1606,18 @@ static void power_limit_state_save(void) { struct rapl_package *rp; struct rapl_domain *rd; - int nr_pl, ret, i; + int ret, i; cpus_read_lock(); list_for_each_entry(rp, &rapl_packages, plist) { if (!rp->power_zone) continue; rd = power_zone_to_rapl_domain(rp->power_zone); - nr_pl = find_nr_power_limit(rd); - for (i = 0; i < nr_pl; i++) { - switch (rd->rpl[i].prim_id) { - case PL1_ENABLE: - ret = rapl_read_data_raw(rd, - POWER_LIMIT1, true, - &rd->rpl[i].last_power_limit); - if (ret) - rd->rpl[i].last_power_limit = 0; - break; - case PL2_ENABLE: - ret = rapl_read_data_raw(rd, - POWER_LIMIT2, true, + for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) { + ret = rapl_read_pl_data(rd, i, PL_LIMIT, true, &rd->rpl[i].last_power_limit); - if (ret) - rd->rpl[i].last_power_limit = 0; - break; - case PL4_ENABLE: - ret = rapl_read_data_raw(rd, - POWER_LIMIT4, true, - &rd->rpl[i].last_power_limit); - if (ret) - rd->rpl[i].last_power_limit = 0; - break; - } + if (ret) + rd->rpl[i].last_power_limit = 0; } } cpus_read_unlock(); @@ -1471,33 +1627,17 @@ static void power_limit_state_restore(void) { struct rapl_package *rp; struct rapl_domain *rd; - int nr_pl, i; + int i; cpus_read_lock(); list_for_each_entry(rp, &rapl_packages, plist) { if (!rp->power_zone) continue; rd = power_zone_to_rapl_domain(rp->power_zone); - nr_pl = find_nr_power_limit(rd); - for (i = 0; i < nr_pl; i++) { - switch (rd->rpl[i].prim_id) { - case PL1_ENABLE: - if (rd->rpl[i].last_power_limit) - rapl_write_data_raw(rd, POWER_LIMIT1, - rd->rpl[i].last_power_limit); - break; - case PL2_ENABLE: - if (rd->rpl[i].last_power_limit) - rapl_write_data_raw(rd, POWER_LIMIT2, - rd->rpl[i].last_power_limit); - break; - case PL4_ENABLE: - if (rd->rpl[i].last_power_limit) - rapl_write_data_raw(rd, POWER_LIMIT4, - rd->rpl[i].last_power_limit); - break; - } - } + for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) + if (rd->rpl[i].last_power_limit) + rapl_write_pl_data(rd, i, PL_LIMIT, + rd->rpl[i].last_power_limit); } cpus_read_unlock(); } @@ -1528,32 +1668,25 @@ static int __init rapl_init(void) int ret; id = x86_match_cpu(rapl_ids); - if (!id) { - pr_err("driver does not support CPU family %d model %d\n", - boot_cpu_data.x86, boot_cpu_data.x86_model); - - return -ENODEV; - } + if (id) { + defaults_msr = (struct rapl_defaults *)id->driver_data; - rapl_defaults = (struct rapl_defaults *)id->driver_data; - - ret = register_pm_notifier(&rapl_pm_notifier); - if (ret) - return ret; + rapl_msr_platdev = platform_device_alloc("intel_rapl_msr", 0); + if (!rapl_msr_platdev) + return -ENOMEM; - rapl_msr_platdev = platform_device_alloc("intel_rapl_msr", 0); - if (!rapl_msr_platdev) { - ret = -ENOMEM; - goto end; + ret = platform_device_add(rapl_msr_platdev); + if (ret) { + platform_device_put(rapl_msr_platdev); + return ret; + } } - ret = platform_device_add(rapl_msr_platdev); - if (ret) + ret = register_pm_notifier(&rapl_pm_notifier); + if (ret && rapl_msr_platdev) { + platform_device_del(rapl_msr_platdev); platform_device_put(rapl_msr_platdev); - -end: - if (ret) - unregister_pm_notifier(&rapl_pm_notifier); + } return ret; } diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c index a27673706c3d..569e25eab1e1 100644 --- a/drivers/powercap/intel_rapl_msr.c +++ b/drivers/powercap/intel_rapl_msr.c @@ -22,7 +22,6 @@ #include 
<linux/processor.h> #include <linux/platform_device.h> -#include <asm/iosf_mbi.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> @@ -34,6 +33,7 @@ static struct rapl_if_priv *rapl_msr_priv; static struct rapl_if_priv rapl_msr_priv_intel = { + .type = RAPL_IF_MSR, .reg_unit = MSR_RAPL_POWER_UNIT, .regs[RAPL_DOMAIN_PACKAGE] = { MSR_PKG_POWER_LIMIT, MSR_PKG_ENERGY_STATUS, MSR_PKG_PERF_STATUS, 0, MSR_PKG_POWER_INFO }, @@ -45,11 +45,12 @@ static struct rapl_if_priv rapl_msr_priv_intel = { MSR_DRAM_POWER_LIMIT, MSR_DRAM_ENERGY_STATUS, MSR_DRAM_PERF_STATUS, 0, MSR_DRAM_POWER_INFO }, .regs[RAPL_DOMAIN_PLATFORM] = { MSR_PLATFORM_POWER_LIMIT, MSR_PLATFORM_ENERGY_STATUS, 0, 0, 0}, - .limits[RAPL_DOMAIN_PACKAGE] = 2, - .limits[RAPL_DOMAIN_PLATFORM] = 2, + .limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2), + .limits[RAPL_DOMAIN_PLATFORM] = BIT(POWER_LIMIT2), }; static struct rapl_if_priv rapl_msr_priv_amd = { + .type = RAPL_IF_MSR, .reg_unit = MSR_AMD_RAPL_POWER_UNIT, .regs[RAPL_DOMAIN_PACKAGE] = { 0, MSR_AMD_PKG_ENERGY_STATUS, 0, 0, 0 }, @@ -68,9 +69,9 @@ static int rapl_cpu_online(unsigned int cpu) { struct rapl_package *rp; - rp = rapl_find_package_domain(cpu, rapl_msr_priv); + rp = rapl_find_package_domain(cpu, rapl_msr_priv, true); if (!rp) { - rp = rapl_add_package(cpu, rapl_msr_priv); + rp = rapl_add_package(cpu, rapl_msr_priv, true); if (IS_ERR(rp)) return PTR_ERR(rp); } @@ -83,7 +84,7 @@ static int rapl_cpu_down_prep(unsigned int cpu) struct rapl_package *rp; int lead_cpu; - rp = rapl_find_package_domain(cpu, rapl_msr_priv); + rp = rapl_find_package_domain(cpu, rapl_msr_priv, true); if (!rp) return 0; @@ -137,14 +138,14 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra) /* List of verified CPUs. */ static const struct x86_cpu_id pl4_support_ids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE, X86_FEATURE_ANY }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_L, X86_FEATURE_ANY }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_N, X86_FEATURE_ANY }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE, X86_FEATURE_ANY }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE_P, X86_FEATURE_ANY }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_METEORLAKE, X86_FEATURE_ANY }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_METEORLAKE_L, X86_FEATURE_ANY }, + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, NULL), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL), + X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL), {} }; @@ -169,7 +170,7 @@ static int rapl_msr_probe(struct platform_device *pdev) rapl_msr_priv->write_raw = rapl_msr_write_raw; if (id) { - rapl_msr_priv->limits[RAPL_DOMAIN_PACKAGE] = 3; + rapl_msr_priv->limits[RAPL_DOMAIN_PACKAGE] |= BIT(POWER_LIMIT4); rapl_msr_priv->regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4] = MSR_VR_CURRENT_CONFIG; pr_info("PL4 support detected.\n"); diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c new file mode 100644 index 000000000000..4f4f13ded225 --- /dev/null +++ b/drivers/powercap/intel_rapl_tpmi.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * intel_rapl_tpmi: Intel RAPL driver via TPMI interface + * + * Copyright (c) 2023, Intel Corporation. + * All Rights Reserved. 
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/auxiliary_bus.h> +#include <linux/io.h> +#include <linux/intel_tpmi.h> +#include <linux/intel_rapl.h> +#include <linux/module.h> +#include <linux/slab.h> + +#define TPMI_RAPL_VERSION 1 + +/* 1 header + 10 registers + 5 reserved. 8 bytes for each. */ +#define TPMI_RAPL_DOMAIN_SIZE 128 + +enum tpmi_rapl_domain_type { + TPMI_RAPL_DOMAIN_INVALID, + TPMI_RAPL_DOMAIN_SYSTEM, + TPMI_RAPL_DOMAIN_PACKAGE, + TPMI_RAPL_DOMAIN_RESERVED, + TPMI_RAPL_DOMAIN_MEMORY, + TPMI_RAPL_DOMAIN_MAX, +}; + +enum tpmi_rapl_register { + TPMI_RAPL_REG_HEADER, + TPMI_RAPL_REG_UNIT, + TPMI_RAPL_REG_PL1, + TPMI_RAPL_REG_PL2, + TPMI_RAPL_REG_PL3, + TPMI_RAPL_REG_PL4, + TPMI_RAPL_REG_RESERVED, + TPMI_RAPL_REG_ENERGY_STATUS, + TPMI_RAPL_REG_PERF_STATUS, + TPMI_RAPL_REG_POWER_INFO, + TPMI_RAPL_REG_INTERRUPT, + TPMI_RAPL_REG_MAX = 15, +}; + +struct tpmi_rapl_package { + struct rapl_if_priv priv; + struct intel_tpmi_plat_info *tpmi_info; + struct rapl_package *rp; + void __iomem *base; + struct list_head node; +}; + +static LIST_HEAD(tpmi_rapl_packages); +static DEFINE_MUTEX(tpmi_rapl_lock); + +static struct powercap_control_type *tpmi_control_type; + +static int tpmi_rapl_read_raw(int id, struct reg_action *ra) +{ + if (!ra->reg) + return -EINVAL; + + ra->value = readq((void __iomem *)ra->reg); + + ra->value &= ra->mask; + return 0; +} + +static int tpmi_rapl_write_raw(int id, struct reg_action *ra) +{ + u64 val; + + if (!ra->reg) + return -EINVAL; + + val = readq((void __iomem *)ra->reg); + + val &= ~ra->mask; + val |= ra->value; + + writeq(val, (void __iomem *)ra->reg); + return 0; +} + +static struct tpmi_rapl_package *trp_alloc(int pkg_id) +{ + struct tpmi_rapl_package *trp; + int ret; + + mutex_lock(&tpmi_rapl_lock); + + if (list_empty(&tpmi_rapl_packages)) { + tpmi_control_type = powercap_register_control_type(NULL, "intel-rapl", NULL); + if (IS_ERR(tpmi_control_type)) { + ret = PTR_ERR(tpmi_control_type); + goto err_unlock; + } + } + + trp = kzalloc(sizeof(*trp), GFP_KERNEL); + if (!trp) { + ret = -ENOMEM; + goto err_del_powercap; + } + + list_add(&trp->node, &tpmi_rapl_packages); + + mutex_unlock(&tpmi_rapl_lock); + return trp; + +err_del_powercap: + if (list_empty(&tpmi_rapl_packages)) + powercap_unregister_control_type(tpmi_control_type); +err_unlock: + mutex_unlock(&tpmi_rapl_lock); + return ERR_PTR(ret); +} + +static void trp_release(struct tpmi_rapl_package *trp) +{ + mutex_lock(&tpmi_rapl_lock); + list_del(&trp->node); + + if (list_empty(&tpmi_rapl_packages)) + powercap_unregister_control_type(tpmi_control_type); + + kfree(trp); + mutex_unlock(&tpmi_rapl_lock); +} + +static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) +{ + u8 tpmi_domain_version; + enum rapl_domain_type domain_type; + enum tpmi_rapl_domain_type tpmi_domain_type; + enum tpmi_rapl_register reg_index; + enum rapl_domain_reg_id reg_id; + int tpmi_domain_size, tpmi_domain_flags; + u64 *tpmi_rapl_regs = trp->base + offset; + u64 tpmi_domain_header = readq((void __iomem *)tpmi_rapl_regs); + + /* Domain Parent bits are ignored for now */ + tpmi_domain_version = tpmi_domain_header & 0xff; + tpmi_domain_type = tpmi_domain_header >> 8 & 0xff; + tpmi_domain_size = tpmi_domain_header >> 16 & 0xff; + tpmi_domain_flags = tpmi_domain_header >> 32 & 0xffff; + + if (tpmi_domain_version != TPMI_RAPL_VERSION) { + pr_warn(FW_BUG "Unsupported version:%d\n", tpmi_domain_version); + return -ENODEV; + } + + /* Domain size: in unit of 128 Bytes */ + if (tpmi_domain_size != 1) { + 
pr_warn(FW_BUG "Invalid Domain size %d\n", tpmi_domain_size); + return -EINVAL; + } + + /* Unit register and Energy Status register are mandatory for each domain */ + if (!(tpmi_domain_flags & BIT(TPMI_RAPL_REG_UNIT)) || + !(tpmi_domain_flags & BIT(TPMI_RAPL_REG_ENERGY_STATUS))) { + pr_warn(FW_BUG "Invalid Domain flag 0x%x\n", tpmi_domain_flags); + return -EINVAL; + } + + switch (tpmi_domain_type) { + case TPMI_RAPL_DOMAIN_PACKAGE: + domain_type = RAPL_DOMAIN_PACKAGE; + break; + case TPMI_RAPL_DOMAIN_SYSTEM: + domain_type = RAPL_DOMAIN_PLATFORM; + break; + case TPMI_RAPL_DOMAIN_MEMORY: + domain_type = RAPL_DOMAIN_DRAM; + break; + default: + pr_warn(FW_BUG "Unsupported Domain type %d\n", tpmi_domain_type); + return -EINVAL; + } + + if (trp->priv.regs[domain_type][RAPL_DOMAIN_REG_UNIT]) { + pr_warn(FW_BUG "Duplicate Domain type %d\n", tpmi_domain_type); + return -EINVAL; + } + + reg_index = TPMI_RAPL_REG_HEADER; + while (++reg_index != TPMI_RAPL_REG_MAX) { + if (!(tpmi_domain_flags & BIT(reg_index))) + continue; + + switch (reg_index) { + case TPMI_RAPL_REG_UNIT: + reg_id = RAPL_DOMAIN_REG_UNIT; + break; + case TPMI_RAPL_REG_PL1: + reg_id = RAPL_DOMAIN_REG_LIMIT; + trp->priv.limits[domain_type] |= BIT(POWER_LIMIT1); + break; + case TPMI_RAPL_REG_PL2: + reg_id = RAPL_DOMAIN_REG_PL2; + trp->priv.limits[domain_type] |= BIT(POWER_LIMIT2); + break; + case TPMI_RAPL_REG_PL4: + reg_id = RAPL_DOMAIN_REG_PL4; + trp->priv.limits[domain_type] |= BIT(POWER_LIMIT4); + break; + case TPMI_RAPL_REG_ENERGY_STATUS: + reg_id = RAPL_DOMAIN_REG_STATUS; + break; + case TPMI_RAPL_REG_PERF_STATUS: + reg_id = RAPL_DOMAIN_REG_PERF; + break; + case TPMI_RAPL_REG_POWER_INFO: + reg_id = RAPL_DOMAIN_REG_INFO; + break; + default: + continue; + } + trp->priv.regs[domain_type][reg_id] = (u64)&tpmi_rapl_regs[reg_index]; + } + + return 0; +} + +static int intel_rapl_tpmi_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) +{ + struct tpmi_rapl_package *trp; + struct intel_tpmi_plat_info *info; + struct resource *res; + u32 offset; + int ret; + + info = tpmi_get_platform_data(auxdev); + if (!info) + return -ENODEV; + + trp = trp_alloc(info->package_id); + if (IS_ERR(trp)) + return PTR_ERR(trp); + + if (tpmi_get_resource_count(auxdev) > 1) { + dev_err(&auxdev->dev, "does not support multiple resources\n"); + ret = -EINVAL; + goto err; + } + + res = tpmi_get_resource_at_index(auxdev, 0); + if (!res) { + dev_err(&auxdev->dev, "can't fetch device resource info\n"); + ret = -EIO; + goto err; + } + + trp->base = devm_ioremap_resource(&auxdev->dev, res); + if (IS_ERR(trp->base)) { + ret = PTR_ERR(trp->base); + goto err; + } + + for (offset = 0; offset < resource_size(res); offset += TPMI_RAPL_DOMAIN_SIZE) { + ret = parse_one_domain(trp, offset); + if (ret) + goto err; + } + + trp->tpmi_info = info; + trp->priv.type = RAPL_IF_TPMI; + trp->priv.read_raw = tpmi_rapl_read_raw; + trp->priv.write_raw = tpmi_rapl_write_raw; + trp->priv.control_type = tpmi_control_type; + + /* RAPL TPMI I/F is per physical package */ + trp->rp = rapl_find_package_domain(info->package_id, &trp->priv, false); + if (trp->rp) { + dev_err(&auxdev->dev, "Domain for Package%d already exists\n", info->package_id); + ret = -EEXIST; + goto err; + } + + trp->rp = rapl_add_package(info->package_id, &trp->priv, false); + if (IS_ERR(trp->rp)) { + dev_err(&auxdev->dev, "Failed to add RAPL Domain for Package%d, %ld\n", + info->package_id, PTR_ERR(trp->rp)); + ret = PTR_ERR(trp->rp); + goto err; + } + + auxiliary_set_drvdata(auxdev, trp); + + 
return 0; +err: + trp_release(trp); + return ret; +} + +static void intel_rapl_tpmi_remove(struct auxiliary_device *auxdev) +{ + struct tpmi_rapl_package *trp = auxiliary_get_drvdata(auxdev); + + rapl_remove_package(trp->rp); + trp_release(trp); +} + +static const struct auxiliary_device_id intel_rapl_tpmi_ids[] = { + {.name = "intel_vsec.tpmi-rapl" }, + { } +}; + +MODULE_DEVICE_TABLE(auxiliary, intel_rapl_tpmi_ids); + +static struct auxiliary_driver intel_rapl_tpmi_driver = { + .probe = intel_rapl_tpmi_probe, + .remove = intel_rapl_tpmi_remove, + .id_table = intel_rapl_tpmi_ids, +}; + +module_auxiliary_driver(intel_rapl_tpmi_driver) + +MODULE_IMPORT_NS(INTEL_TPMI); + +MODULE_DESCRIPTION("Intel RAPL TPMI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c index 0c567d9623cd..5f7d286871cf 100644 --- a/drivers/pwm/pwm-atmel.c +++ b/drivers/pwm/pwm-atmel.c @@ -6,7 +6,7 @@ * Bo Shen <voice.shen@atmel.com> * * Links to reference manuals for the supported PWM chips can be found in - * Documentation/arm/microchip.rst. + * Documentation/arch/arm/microchip.rst. * * Limitations: * - Periods start with the inactive level. diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c index 46ed668bd141..762429d5647f 100644 --- a/drivers/pwm/pwm-pxa.c +++ b/drivers/pwm/pwm-pxa.c @@ -8,7 +8,7 @@ * eric miao <eric.miao@marvell.com> * * Links to reference manuals for some of the supported PWM chips can be found - * in Documentation/arm/marvell.rst. + * in Documentation/arch/arm/marvell.rst. * * Limitations: * - When PWM is stopped, the current PWM period stops abruptly at the next diff --git a/drivers/ras/debugfs.c b/drivers/ras/debugfs.c index f0a6391b1146..ffb973c328e3 100644 --- a/drivers/ras/debugfs.c +++ b/drivers/ras/debugfs.c @@ -46,7 +46,7 @@ int __init ras_add_daemon_trace(void) fentry = debugfs_create_file("daemon_active", S_IRUSR, ras_debugfs_dir, NULL, &trace_fops); - if (!fentry) + if (IS_ERR(fentry)) return -ENODEV; return 0; diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index b0a58c62b1e2..f3b280af0773 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -1057,21 +1057,21 @@ static const struct rpmh_vreg_init_data pm8450_vreg_data[] = { }; static const struct rpmh_vreg_init_data pm8550_vreg_data[] = { - RPMH_VREG("ldo1", "ldo%s1", &pmic5_pldo, "vdd-l1-l4-l10"), + RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1-l4-l10"), RPMH_VREG("ldo2", "ldo%s2", &pmic5_pldo, "vdd-l2-l13-l14"), - RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"), - RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l1-l4-l10"), + RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), + RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo515, "vdd-l1-l4-l10"), RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l16"), - RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo_lv, "vdd-l6-l7"), - RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l6-l7"), - RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l8-l9"), + RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l6-l7"), + RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l6-l7"), + RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo, "vdd-l8-l9"), RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l8-l9"), - RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo, "vdd-l1-l4-l10"), - RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l11"), + RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo515, "vdd-l1-l4-l10"), + RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo515, "vdd-l11"), RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo, 
"vdd-l12"), RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l2-l13-l14"), RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo, "vdd-l2-l13-l14"), - RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo, "vdd-l15"), + RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo515, "vdd-l15"), RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l5-l16"), RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l17"), RPMH_VREG("bob1", "bob%s1", &pmic5_bob, "vdd-bob1"), @@ -1086,9 +1086,9 @@ static const struct rpmh_vreg_init_data pm8550vs_vreg_data[] = { RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_lv, "vdd-s4"), RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"), RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_mv, "vdd-s6"), - RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"), - RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2"), - RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"), + RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), + RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"), + RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), {} }; @@ -1101,9 +1101,9 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = { RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"), RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"), RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"), - RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"), - RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2"), - RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"), + RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), + RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"), + RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), {} }; diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 9fbfce735d56..45788955c4e6 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -3234,12 +3234,12 @@ struct blk_mq_ops dasd_mq_ops = { .exit_hctx = dasd_exit_hctx, }; -static int dasd_open(struct block_device *bdev, fmode_t mode) +static int dasd_open(struct gendisk *disk, blk_mode_t mode) { struct dasd_device *base; int rc; - base = dasd_device_from_gendisk(bdev->bd_disk); + base = dasd_device_from_gendisk(disk); if (!base) return -ENODEV; @@ -3268,14 +3268,12 @@ static int dasd_open(struct block_device *bdev, fmode_t mode) rc = -ENODEV; goto out; } - - if ((mode & FMODE_WRITE) && + if ((mode & BLK_OPEN_WRITE) && (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || (base->features & DASD_FEATURE_READONLY))) { rc = -EROFS; goto out; } - dasd_put_device(base); return 0; @@ -3287,7 +3285,7 @@ unlock: return rc; } -static void dasd_release(struct gendisk *disk, fmode_t mode) +static void dasd_release(struct gendisk *disk) { struct dasd_device *base = dasd_device_from_gendisk(disk); if (base) { diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index 998a961e1704..fe5108a1b332 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -130,7 +130,8 @@ int dasd_scan_partitions(struct dasd_block *block) struct block_device *bdev; int rc; - bdev = blkdev_get_by_dev(disk_devt(block->gdp), FMODE_READ, NULL); + bdev = blkdev_get_by_dev(disk_devt(block->gdp), BLK_OPEN_READ, NULL, + NULL); if (IS_ERR(bdev)) { DBF_DEV_EVENT(DBF_ERR, block->base, "scan partitions error, blkdev_get returned %ld", @@ -179,7 +180,7 @@ void dasd_destroy_partitions(struct dasd_block *block) mutex_unlock(&bdev->bd_disk->open_mutex); /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. 
*/ - blkdev_put(bdev, FMODE_READ); + blkdev_put(bdev, NULL); } int dasd_gendisk_init(void) diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 33f812f0e515..0aa56351da72 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -965,7 +965,8 @@ int dasd_scan_partitions(struct dasd_block *); void dasd_destroy_partitions(struct dasd_block *); /* externals in dasd_ioctl.c */ -int dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long); +int dasd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, + unsigned long arg); int dasd_set_read_only(struct block_device *bdev, bool ro); /* externals in dasd_proc.c */ diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 8fca725b3dae..513a7e6eee63 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -612,7 +612,7 @@ static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, return ret; } -int dasd_ioctl(struct block_device *bdev, fmode_t mode, +int dasd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct dasd_block *block; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index c09f2e053bf8..200f88f0e451 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -28,8 +28,8 @@ #define DCSSBLK_PARM_LEN 400 #define DCSS_BUS_ID_SIZE 20 -static int dcssblk_open(struct block_device *bdev, fmode_t mode); -static void dcssblk_release(struct gendisk *disk, fmode_t mode); +static int dcssblk_open(struct gendisk *disk, blk_mode_t mode); +static void dcssblk_release(struct gendisk *disk); static void dcssblk_submit_bio(struct bio *bio); static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, void **kaddr, @@ -809,12 +809,11 @@ out_buf: } static int -dcssblk_open(struct block_device *bdev, fmode_t mode) +dcssblk_open(struct gendisk *disk, blk_mode_t mode) { - struct dcssblk_dev_info *dev_info; + struct dcssblk_dev_info *dev_info = disk->private_data; int rc; - dev_info = bdev->bd_disk->private_data; if (NULL == dev_info) { rc = -ENODEV; goto out; @@ -826,7 +825,7 @@ out: } static void -dcssblk_release(struct gendisk *disk, fmode_t mode) +dcssblk_release(struct gendisk *disk) { struct dcssblk_dev_info *dev_info = disk->private_data; struct segment_info *entry; diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 599f547310f8..942c73a11ca3 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -51,6 +51,7 @@ static struct dentry *zcore_dir; static struct dentry *zcore_reipl_file; static struct dentry *zcore_hsa_file; static struct ipl_parameter_block *zcore_ipl_block; +static unsigned long os_info_flags; static DEFINE_MUTEX(hsa_buf_mutex); static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE); @@ -139,7 +140,13 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf, { if (zcore_ipl_block) { diag308(DIAG308_SET, zcore_ipl_block); - diag308(DIAG308_LOAD_CLEAR, NULL); + if (os_info_flags & OS_INFO_FLAG_REIPL_CLEAR) + diag308(DIAG308_LOAD_CLEAR, NULL); + /* Use special diag308 subcode for CCW normal ipl */ + if (zcore_ipl_block->pb0_hdr.pbt == IPL_PBT_CCW) + diag308(DIAG308_LOAD_NORMAL_DUMP, NULL); + else + diag308(DIAG308_LOAD_NORMAL, NULL); } return count; } @@ -212,7 +219,10 @@ static int __init check_sdias(void) */ static int __init zcore_reipl_init(void) { + struct os_info_entry *entry; 
struct ipib_info ipib_info; + unsigned long os_info_addr; + struct os_info *os_info; int rc; rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info)); @@ -234,6 +244,35 @@ static int __init zcore_reipl_init(void) free_page((unsigned long) zcore_ipl_block); zcore_ipl_block = NULL; } + /* + * Read the bit-flags field from os_info flags entry. + * Return zero even for os_info read or entry checksum errors in order + * to continue dump processing, considering that os_info could be + * corrupted on the panicked system. + */ + os_info = (void *)__get_free_page(GFP_KERNEL); + if (!os_info) + return -ENOMEM; + rc = memcpy_hsa_kernel(&os_info_addr, __LC_OS_INFO, sizeof(os_info_addr)); + if (rc) + goto out; + if (os_info_addr < sclp.hsa_size) + rc = memcpy_hsa_kernel(os_info, os_info_addr, PAGE_SIZE); + else + rc = memcpy_real(os_info, os_info_addr, PAGE_SIZE); + if (rc || os_info_csum(os_info) != os_info->csum) + goto out; + entry = &os_info->entry[OS_INFO_FLAGS_ENTRY]; + if (entry->addr && entry->size) { + if (entry->addr < sclp.hsa_size) + rc = memcpy_hsa_kernel(&os_info_flags, entry->addr, sizeof(os_info_flags)); + else + rc = memcpy_real(&os_info_flags, entry->addr, sizeof(os_info_flags)); + if (rc || (__force u32)csum_partial(&os_info_flags, entry->size, 0) != entry->csum) + os_info_flags = 0; + } +out: + free_page((unsigned long)os_info); return 0; } diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index ff538a086fc7..43601816ea4e 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -171,7 +171,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) return -ENODEV; } - parent = kzalloc(sizeof(*parent), GFP_KERNEL); + parent = kzalloc(struct_size(parent, mdev_types, 1), GFP_KERNEL); if (!parent) return -ENOMEM; diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h index b441ae6700fd..b62bbc5c6376 100644 --- a/drivers/s390/cio/vfio_ccw_private.h +++ b/drivers/s390/cio/vfio_ccw_private.h @@ -79,7 +79,7 @@ struct vfio_ccw_parent { struct mdev_parent parent; struct mdev_type mdev_type; - struct mdev_type *mdev_types[1]; + struct mdev_type *mdev_types[]; }; /** diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index a8def50c149b..e58bfd225323 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -2,7 +2,8 @@ /* * pkey device driver * - * Copyright IBM Corp. 2017,2019 + * Copyright IBM Corp. 
2017, 2023 + * * Author(s): Harald Freudenberger */ @@ -32,8 +33,10 @@ MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("s390 protected key interface"); #define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */ +#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header)) #define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */ #define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */ +#define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */ /* * debug feature data and functions @@ -71,49 +74,106 @@ struct protaeskeytoken { } __packed; /* inside view of a clear key token (type 0x00 version 0x02) */ -struct clearaeskeytoken { - u8 type; /* 0x00 for PAES specific key tokens */ +struct clearkeytoken { + u8 type; /* 0x00 for PAES specific key tokens */ u8 res0[3]; - u8 version; /* 0x02 for clear AES key token */ + u8 version; /* 0x02 for clear key token */ u8 res1[3]; - u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ - u32 len; /* bytes actually stored in clearkey[] */ + u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */ + u32 len; /* bytes actually stored in clearkey[] */ u8 clearkey[]; /* clear key value */ } __packed; +/* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */ +static inline u32 pkey_keytype_aes_to_size(u32 keytype) +{ + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + return 16; + case PKEY_KEYTYPE_AES_192: + return 24; + case PKEY_KEYTYPE_AES_256: + return 32; + default: + return 0; + } +} + /* - * Create a protected key from a clear key value. + * Create a protected key from a clear key value via PCKMO instruction. */ -static int pkey_clr2protkey(u32 keytype, - const struct pkey_clrkey *clrkey, - struct pkey_protkey *protkey) +static int pkey_clr2protkey(u32 keytype, const u8 *clrkey, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) { /* mask of available pckmo subfunctions */ static cpacf_mask_t pckmo_functions; - long fc; + u8 paramblock[112]; + u32 pkeytype; int keysize; - u8 paramblock[64]; + long fc; switch (keytype) { case PKEY_KEYTYPE_AES_128: + /* 16 byte key, 32 byte aes wkvp, total 48 bytes */ keysize = 16; + pkeytype = keytype; fc = CPACF_PCKMO_ENC_AES_128_KEY; break; case PKEY_KEYTYPE_AES_192: + /* 24 byte key, 32 byte aes wkvp, total 56 bytes */ keysize = 24; + pkeytype = keytype; fc = CPACF_PCKMO_ENC_AES_192_KEY; break; case PKEY_KEYTYPE_AES_256: + /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ keysize = 32; + pkeytype = keytype; fc = CPACF_PCKMO_ENC_AES_256_KEY; break; + case PKEY_KEYTYPE_ECC_P256: + /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ + keysize = 32; + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_P256_KEY; + break; + case PKEY_KEYTYPE_ECC_P384: + /* 48 byte key, 32 byte aes wkvp, total 80 bytes */ + keysize = 48; + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_P384_KEY; + break; + case PKEY_KEYTYPE_ECC_P521: + /* 80 byte key, 32 byte aes wkvp, total 112 bytes */ + keysize = 80; + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_P521_KEY; + break; + case PKEY_KEYTYPE_ECC_ED25519: + /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ + keysize = 32; + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY; + break; + case PKEY_KEYTYPE_ECC_ED448: + /* 64 byte key, 32 byte aes wkvp, total 96 bytes */ + keysize = 64; + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_ED448_KEY; + break; default: - DEBUG_ERR("%s unknown/unsupported keytype %d\n", + DEBUG_ERR("%s unknown/unsupported 
keytype %u\n", __func__, keytype); return -EINVAL; } + if (*protkeylen < keysize + AES_WK_VP_SIZE) { + DEBUG_ERR("%s prot key buffer size too small: %u < %d\n", + __func__, *protkeylen, keysize + AES_WK_VP_SIZE); + return -EINVAL; + } + /* Did we already check for PCKMO ? */ if (!pckmo_functions.bytes[0]) { /* no, so check now */ @@ -128,15 +188,15 @@ static int pkey_clr2protkey(u32 keytype, /* prepare param block */ memset(paramblock, 0, sizeof(paramblock)); - memcpy(paramblock, clrkey->clrkey, keysize); + memcpy(paramblock, clrkey, keysize); /* call the pckmo instruction */ cpacf_pckmo(fc, paramblock); - /* copy created protected key */ - protkey->type = keytype; - protkey->len = keysize + 32; - memcpy(protkey->protkey, paramblock, keysize + 32); + /* copy created protected key to key buffer including the wkvp block */ + *protkeylen = keysize + AES_WK_VP_SIZE; + memcpy(protkey, paramblock, *protkeylen); + *protkeytype = pkeytype; return 0; } @@ -144,11 +204,12 @@ static int pkey_clr2protkey(u32 keytype, /* * Find card and transform secure key into protected key. */ -static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey) +static int pkey_skey2pkey(const u8 *key, u8 *protkey, + u32 *protkeylen, u32 *protkeytype) { - int rc, verify; - u16 cardnr, domain; struct keytoken_header *hdr = (struct keytoken_header *)key; + u16 cardnr, domain; + int rc, verify; zcrypt_wait_api_operational(); @@ -167,14 +228,13 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey) continue; switch (hdr->version) { case TOKVER_CCA_AES: - rc = cca_sec2protkey(cardnr, domain, - key, pkey->protkey, - &pkey->len, &pkey->type); + rc = cca_sec2protkey(cardnr, domain, key, + protkey, protkeylen, protkeytype); break; case TOKVER_CCA_VLSC: - rc = cca_cipher2protkey(cardnr, domain, - key, pkey->protkey, - &pkey->len, &pkey->type); + rc = cca_cipher2protkey(cardnr, domain, key, + protkey, protkeylen, + protkeytype); break; default: return -EINVAL; @@ -195,9 +255,9 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey) static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen, u8 *keybuf, size_t *keybuflen) { - int i, rc; - u16 card, dom; u32 nr_apqns, *apqns = NULL; + u16 card, dom; + int i, rc; zcrypt_wait_api_operational(); @@ -227,12 +287,13 @@ out: /* * Find card and transform EP11 secure key into protected key. 
*/ -static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey) +static int pkey_ep11key2pkey(const u8 *key, u8 *protkey, + u32 *protkeylen, u32 *protkeytype) { - int i, rc; - u16 card, dom; - u32 nr_apqns, *apqns = NULL; struct ep11keyblob *kb = (struct ep11keyblob *)key; + u32 nr_apqns, *apqns = NULL; + u16 card, dom; + int i, rc; zcrypt_wait_api_operational(); @@ -246,9 +307,8 @@ static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey) for (rc = -ENODEV, i = 0; i < nr_apqns; i++) { card = apqns[i] >> 16; dom = apqns[i] & 0xFFFF; - pkey->len = sizeof(pkey->protkey); rc = ep11_kblob2protkey(card, dom, key, kb->head.len, - pkey->protkey, &pkey->len, &pkey->type); + protkey, protkeylen, protkeytype); if (rc == 0) break; } @@ -306,38 +366,31 @@ out: /* * Generate a random protected key */ -static int pkey_genprotkey(u32 keytype, struct pkey_protkey *protkey) +static int pkey_genprotkey(u32 keytype, u8 *protkey, + u32 *protkeylen, u32 *protkeytype) { - struct pkey_clrkey clrkey; + u8 clrkey[32]; int keysize; int rc; - switch (keytype) { - case PKEY_KEYTYPE_AES_128: - keysize = 16; - break; - case PKEY_KEYTYPE_AES_192: - keysize = 24; - break; - case PKEY_KEYTYPE_AES_256: - keysize = 32; - break; - default: + keysize = pkey_keytype_aes_to_size(keytype); + if (!keysize) { DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__, keytype); return -EINVAL; } /* generate a dummy random clear key */ - get_random_bytes(clrkey.clrkey, keysize); + get_random_bytes(clrkey, keysize); /* convert it to a dummy protected key */ - rc = pkey_clr2protkey(keytype, &clrkey, protkey); + rc = pkey_clr2protkey(keytype, clrkey, + protkey, protkeylen, protkeytype); if (rc) return rc; /* replace the key part of the protected key with random bytes */ - get_random_bytes(protkey->protkey, keysize); + get_random_bytes(protkey, keysize); return 0; } @@ -345,37 +398,46 @@ static int pkey_genprotkey(u32 keytype, struct pkey_protkey *protkey) /* * Verify if a protected key is still valid */ -static int pkey_verifyprotkey(const struct pkey_protkey *protkey) +static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen, + u32 protkeytype) { - unsigned long fc; struct { u8 iv[AES_BLOCK_SIZE]; u8 key[MAXPROTKEYSIZE]; } param; u8 null_msg[AES_BLOCK_SIZE]; u8 dest_buf[AES_BLOCK_SIZE]; - unsigned int k; + unsigned int k, pkeylen; + unsigned long fc; - switch (protkey->type) { + switch (protkeytype) { case PKEY_KEYTYPE_AES_128: + pkeylen = 16 + AES_WK_VP_SIZE; fc = CPACF_KMC_PAES_128; break; case PKEY_KEYTYPE_AES_192: + pkeylen = 24 + AES_WK_VP_SIZE; fc = CPACF_KMC_PAES_192; break; case PKEY_KEYTYPE_AES_256: + pkeylen = 32 + AES_WK_VP_SIZE; fc = CPACF_KMC_PAES_256; break; default: - DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__, - protkey->type); + DEBUG_ERR("%s unknown/unsupported keytype %u\n", __func__, + protkeytype); + return -EINVAL; + } + if (protkeylen != pkeylen) { + DEBUG_ERR("%s invalid protected key size %u for keytype %u\n", + __func__, protkeylen, protkeytype); return -EINVAL; } memset(null_msg, 0, sizeof(null_msg)); memset(param.iv, 0, sizeof(param.iv)); - memcpy(param.key, protkey->protkey, sizeof(param.key)); + memcpy(param.key, protkey, protkeylen); k = cpacf_kmc(fc | CPACF_ENCRYPT, ¶m, null_msg, dest_buf, sizeof(null_msg)); @@ -387,15 +449,119 @@ static int pkey_verifyprotkey(const struct pkey_protkey *protkey) return 0; } +/* Helper for pkey_nonccatok2pkey, handles aes clear key token */ +static int nonccatokaes2pkey(const struct clearkeytoken *t, + u8 *protkey, u32 
*protkeylen, u32 *protkeytype) +{ + size_t tmpbuflen = max_t(size_t, SECKEYBLOBSIZE, MAXEP11AESKEYBLOBSIZE); + u8 *tmpbuf = NULL; + u32 keysize; + int rc; + + keysize = pkey_keytype_aes_to_size(t->keytype); + if (!keysize) { + DEBUG_ERR("%s unknown/unsupported keytype %u\n", + __func__, t->keytype); + return -EINVAL; + } + if (t->len != keysize) { + DEBUG_ERR("%s non clear key aes token: invalid key len %u\n", + __func__, t->len); + return -EINVAL; + } + + /* try direct way with the PCKMO instruction */ + rc = pkey_clr2protkey(t->keytype, t->clearkey, + protkey, protkeylen, protkeytype); + if (!rc) + goto out; + + /* PCKMO failed, so try the CCA secure key way */ + tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC); + if (!tmpbuf) + return -ENOMEM; + zcrypt_wait_api_operational(); + rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, t->clearkey, tmpbuf); + if (rc) + goto try_via_ep11; + rc = pkey_skey2pkey(tmpbuf, + protkey, protkeylen, protkeytype); + if (!rc) + goto out; + +try_via_ep11: + /* if the CCA way also failed, let's try via EP11 */ + rc = pkey_clr2ep11key(t->clearkey, t->len, + tmpbuf, &tmpbuflen); + if (rc) + goto failure; + rc = pkey_ep11key2pkey(tmpbuf, + protkey, protkeylen, protkeytype); + if (!rc) + goto out; + +failure: + DEBUG_ERR("%s unable to build protected key from clear", __func__); + +out: + kfree(tmpbuf); + return rc; +} + +/* Helper for pkey_nonccatok2pkey, handles ecc clear key token */ +static int nonccatokecc2pkey(const struct clearkeytoken *t, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + u32 keylen; + int rc; + + switch (t->keytype) { + case PKEY_KEYTYPE_ECC_P256: + keylen = 32; + break; + case PKEY_KEYTYPE_ECC_P384: + keylen = 48; + break; + case PKEY_KEYTYPE_ECC_P521: + keylen = 80; + break; + case PKEY_KEYTYPE_ECC_ED25519: + keylen = 32; + break; + case PKEY_KEYTYPE_ECC_ED448: + keylen = 64; + break; + default: + DEBUG_ERR("%s unknown/unsupported keytype %u\n", + __func__, t->keytype); + return -EINVAL; + } + + if (t->len != keylen) { + DEBUG_ERR("%s non clear key ecc token: invalid key len %u\n", + __func__, t->len); + return -EINVAL; + } + + /* only one path possible: via PCKMO instruction */ + rc = pkey_clr2protkey(t->keytype, t->clearkey, + protkey, protkeylen, protkeytype); + if (rc) { + DEBUG_ERR("%s unable to build protected key from clear", + __func__); + } + + return rc; +} + /* * Transform a non-CCA key token into a protected key */ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, - struct pkey_protkey *protkey) + u8 *protkey, u32 *protkeylen, u32 *protkeytype) { - int rc = -EINVAL; - u8 *tmpbuf = NULL; struct keytoken_header *hdr = (struct keytoken_header *)key; + int rc = -EINVAL; switch (hdr->version) { case TOKVER_PROTECTED_KEY: { @@ -404,59 +570,40 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, if (keylen != sizeof(struct protaeskeytoken)) goto out; t = (struct protaeskeytoken *)key; - protkey->len = t->len; - protkey->type = t->keytype; - memcpy(protkey->protkey, t->protkey, - sizeof(protkey->protkey)); - rc = pkey_verifyprotkey(protkey); + rc = pkey_verifyprotkey(t->protkey, t->len, t->keytype); + if (rc) + goto out; + memcpy(protkey, t->protkey, t->len); + *protkeylen = t->len; + *protkeytype = t->keytype; break; } case TOKVER_CLEAR_KEY: { - struct clearaeskeytoken *t; - struct pkey_clrkey ckey; - union u_tmpbuf { - u8 skey[SECKEYBLOBSIZE]; - u8 ep11key[MAXEP11AESKEYBLOBSIZE]; - }; - size_t tmpbuflen = sizeof(union u_tmpbuf); - - if (keylen < sizeof(struct clearaeskeytoken)) - goto out; - t = (struct clearaeskeytoken 
*)key; - if (keylen != sizeof(*t) + t->len) - goto out; - if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16) || - (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24) || - (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32)) - memcpy(ckey.clrkey, t->clearkey, t->len); - else - goto out; - /* alloc temp key buffer space */ - tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC); - if (!tmpbuf) { - rc = -ENOMEM; + struct clearkeytoken *t = (struct clearkeytoken *)key; + + if (keylen < sizeof(struct clearkeytoken) || + keylen != sizeof(*t) + t->len) goto out; - } - /* try direct way with the PCKMO instruction */ - rc = pkey_clr2protkey(t->keytype, &ckey, protkey); - if (rc == 0) + switch (t->keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + rc = nonccatokaes2pkey(t, protkey, + protkeylen, protkeytype); break; - /* PCKMO failed, so try the CCA secure key way */ - zcrypt_wait_api_operational(); - rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, - ckey.clrkey, tmpbuf); - if (rc == 0) - rc = pkey_skey2pkey(tmpbuf, protkey); - if (rc == 0) + case PKEY_KEYTYPE_ECC_P256: + case PKEY_KEYTYPE_ECC_P384: + case PKEY_KEYTYPE_ECC_P521: + case PKEY_KEYTYPE_ECC_ED25519: + case PKEY_KEYTYPE_ECC_ED448: + rc = nonccatokecc2pkey(t, protkey, + protkeylen, protkeytype); break; - /* if the CCA way also failed, let's try via EP11 */ - rc = pkey_clr2ep11key(ckey.clrkey, t->len, - tmpbuf, &tmpbuflen); - if (rc == 0) - rc = pkey_ep11key2pkey(tmpbuf, protkey); - /* now we should really have an protected key */ - DEBUG_ERR("%s unable to build protected key from clear", - __func__); + default: + DEBUG_ERR("%s unknown/unsupported non cca clear key type %u\n", + __func__, t->keytype); + return -EINVAL; + } break; } case TOKVER_EP11_AES: { @@ -464,7 +611,8 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); if (rc) goto out; - rc = pkey_ep11key2pkey(key, protkey); + rc = pkey_ep11key2pkey(key, + protkey, protkeylen, protkeytype); break; } case TOKVER_EP11_AES_WITH_HEADER: @@ -473,16 +621,14 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, if (rc) goto out; rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header), - protkey); + protkey, protkeylen, protkeytype); break; default: DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n", __func__, hdr->version); - rc = -EINVAL; } out: - kfree(tmpbuf); return rc; } @@ -490,7 +636,7 @@ out: * Transform a CCA internal key token into a protected key */ static int pkey_ccainttok2pkey(const u8 *key, u32 keylen, - struct pkey_protkey *protkey) + u8 *protkey, u32 *protkeylen, u32 *protkeytype) { struct keytoken_header *hdr = (struct keytoken_header *)key; @@ -509,17 +655,17 @@ static int pkey_ccainttok2pkey(const u8 *key, u32 keylen, return -EINVAL; } - return pkey_skey2pkey(key, protkey); + return pkey_skey2pkey(key, protkey, protkeylen, protkeytype); } /* * Transform a key blob (of any type) into a protected key */ int pkey_keyblob2pkey(const u8 *key, u32 keylen, - struct pkey_protkey *protkey) + u8 *protkey, u32 *protkeylen, u32 *protkeytype) { - int rc; struct keytoken_header *hdr = (struct keytoken_header *)key; + int rc; if (keylen < sizeof(struct keytoken_header)) { DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen); @@ -528,10 +674,12 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen, switch (hdr->type) { case TOKTYPE_NON_CCA: - rc = pkey_nonccatok2pkey(key, keylen, protkey); + rc = pkey_nonccatok2pkey(key, keylen, + protkey, protkeylen, 
protkeytype); break; case TOKTYPE_CCA_INTERNAL: - rc = pkey_ccainttok2pkey(key, keylen, protkey); + rc = pkey_ccainttok2pkey(key, keylen, + protkey, protkeylen, protkeytype); break; default: DEBUG_ERR("%s unknown/unsupported blob type %d\n", @@ -663,9 +811,9 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, enum pkey_key_type *ktype, enum pkey_key_size *ksize, u32 *flags) { - int rc; - u32 _nr_apqns, *_apqns = NULL; struct keytoken_header *hdr = (struct keytoken_header *)key; + u32 _nr_apqns, *_apqns = NULL; + int rc; if (keylen < sizeof(struct keytoken_header)) return -EINVAL; @@ -771,10 +919,10 @@ out: static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, size_t keylen, - struct pkey_protkey *pkey) + u8 *protkey, u32 *protkeylen, u32 *protkeytype) { - int i, card, dom, rc; struct keytoken_header *hdr = (struct keytoken_header *)key; + int i, card, dom, rc; /* check for at least one apqn given */ if (!apqns || !nr_apqns) @@ -806,7 +954,9 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) return -EINVAL; } else { - return pkey_nonccatok2pkey(key, keylen, pkey); + return pkey_nonccatok2pkey(key, keylen, + protkey, protkeylen, + protkeytype); } } else { DEBUG_ERR("%s unknown/unsupported blob type %d\n", @@ -822,20 +972,20 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, dom = apqns[i].domain; if (hdr->type == TOKTYPE_CCA_INTERNAL && hdr->version == TOKVER_CCA_AES) { - rc = cca_sec2protkey(card, dom, key, pkey->protkey, - &pkey->len, &pkey->type); + rc = cca_sec2protkey(card, dom, key, + protkey, protkeylen, protkeytype); } else if (hdr->type == TOKTYPE_CCA_INTERNAL && hdr->version == TOKVER_CCA_VLSC) { - rc = cca_cipher2protkey(card, dom, key, pkey->protkey, - &pkey->len, &pkey->type); + rc = cca_cipher2protkey(card, dom, key, + protkey, protkeylen, + protkeytype); } else { /* EP11 AES secure key blob */ struct ep11keyblob *kb = (struct ep11keyblob *)key; - pkey->len = sizeof(pkey->protkey); rc = ep11_kblob2protkey(card, dom, key, kb->head.len, - pkey->protkey, &pkey->len, - &pkey->type); + protkey, protkeylen, + protkeytype); } if (rc == 0) break; @@ -847,9 +997,9 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, struct pkey_apqn *apqns, size_t *nr_apqns) { - int rc; - u32 _nr_apqns, *_apqns = NULL; struct keytoken_header *hdr = (struct keytoken_header *)key; + u32 _nr_apqns, *_apqns = NULL; + int rc; if (keylen < sizeof(struct keytoken_header) || flags == 0) return -EINVAL; @@ -860,9 +1010,9 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, (hdr->version == TOKVER_EP11_AES_WITH_HEADER || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { - int minhwtype = 0, api = 0; struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(struct ep11kblob_header)); + int minhwtype = 0, api = 0; if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) return -EINVAL; @@ -877,8 +1027,8 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, } else if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES && is_ep11_keyblob(key)) { - int minhwtype = 0, api = 0; struct ep11keyblob *kb = (struct ep11keyblob *)key; + int minhwtype = 0, api = 0; if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) return -EINVAL; @@ -891,8 +1041,8 @@ static int pkey_apqns4key(const u8 *key, size_t 
keylen, u32 flags, if (rc) goto out; } else if (hdr->type == TOKTYPE_CCA_INTERNAL) { - int minhwtype = ZCRYPT_CEX3C; u64 cur_mkvp = 0, old_mkvp = 0; + int minhwtype = ZCRYPT_CEX3C; if (hdr->version == TOKVER_CCA_AES) { struct secaeskeytoken *t = (struct secaeskeytoken *)key; @@ -919,8 +1069,8 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, if (rc) goto out; } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { - u64 cur_mkvp = 0, old_mkvp = 0; struct eccprivkeytoken *t = (struct eccprivkeytoken *)key; + u64 cur_mkvp = 0, old_mkvp = 0; if (t->secid == 0x20) { if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) @@ -957,8 +1107,8 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, struct pkey_apqn *apqns, size_t *nr_apqns) { - int rc; u32 _nr_apqns, *_apqns = NULL; + int rc; zcrypt_wait_api_operational(); @@ -1020,11 +1170,11 @@ out: } static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, - const u8 *key, size_t keylen, u32 *protkeytype, - u8 *protkey, u32 *protkeylen) + const u8 *key, size_t keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) { - int i, card, dom, rc; struct keytoken_header *hdr = (struct keytoken_header *)key; + int i, card, dom, rc; /* check for at least one apqn given */ if (!apqns || !nr_apqns) @@ -1076,15 +1226,8 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1)) return -EINVAL; } else if (hdr->type == TOKTYPE_NON_CCA) { - struct pkey_protkey pkey; - - rc = pkey_nonccatok2pkey(key, keylen, &pkey); - if (rc) - return rc; - memcpy(protkey, pkey.protkey, pkey.len); - *protkeylen = pkey.len; - *protkeytype = pkey.type; - return 0; + return pkey_nonccatok2pkey(key, keylen, + protkey, protkeylen, protkeytype); } else { DEBUG_ERR("%s unknown/unsupported blob type %d\n", __func__, hdr->type); @@ -1130,7 +1273,7 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, static void *_copy_key_from_user(void __user *ukey, size_t keylen) { - if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE) + if (!ukey || keylen < MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE) return ERR_PTR(-EINVAL); return memdup_user(ukey, keylen); @@ -1187,6 +1330,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, if (copy_from_user(&ksp, usp, sizeof(ksp))) return -EFAULT; + ksp.protkey.len = sizeof(ksp.protkey.protkey); rc = cca_sec2protkey(ksp.cardnr, ksp.domain, ksp.seckey.seckey, ksp.protkey.protkey, &ksp.protkey.len, &ksp.protkey.type); @@ -1203,8 +1347,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, if (copy_from_user(&kcp, ucp, sizeof(kcp))) return -EFAULT; - rc = pkey_clr2protkey(kcp.keytype, - &kcp.clrkey, &kcp.protkey); + kcp.protkey.len = sizeof(kcp.protkey.protkey); + rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey, + kcp.protkey.protkey, + &kcp.protkey.len, &kcp.protkey.type); DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc); if (rc) break; @@ -1234,7 +1380,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, if (copy_from_user(&ksp, usp, sizeof(ksp))) return -EFAULT; - rc = pkey_skey2pkey(ksp.seckey.seckey, &ksp.protkey); + ksp.protkey.len = sizeof(ksp.protkey.protkey); + rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey, + &ksp.protkey.len, &ksp.protkey.type); DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc); if (rc) break; @@ -1263,7 +1411,9 @@ static long pkey_unlocked_ioctl(struct file 
*filp, unsigned int cmd, if (copy_from_user(&kgp, ugp, sizeof(kgp))) return -EFAULT; - rc = pkey_genprotkey(kgp.keytype, &kgp.protkey); + kgp.protkey.len = sizeof(kgp.protkey.protkey); + rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey, + &kgp.protkey.len, &kgp.protkey.type); DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc); if (rc) break; @@ -1277,7 +1427,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, if (copy_from_user(&kvp, uvp, sizeof(kvp))) return -EFAULT; - rc = pkey_verifyprotkey(&kvp.protkey); + rc = pkey_verifyprotkey(kvp.protkey.protkey, + kvp.protkey.len, kvp.protkey.type); DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc); break; } @@ -1291,7 +1442,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, kkey = _copy_key_from_user(ktp.key, ktp.keylen); if (IS_ERR(kkey)) return PTR_ERR(kkey); - rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey); + ktp.protkey.len = sizeof(ktp.protkey.protkey); + rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey, + &ktp.protkey.len, &ktp.protkey.type); DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc); memzero_explicit(kkey, ktp.keylen); kfree(kkey); @@ -1303,9 +1456,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, } case PKEY_GENSECK2: { struct pkey_genseck2 __user *ugs = (void __user *)arg; + size_t klen = KEYBLOBBUFSIZE; struct pkey_genseck2 kgs; struct pkey_apqn *apqns; - size_t klen = KEYBLOBBUFSIZE; u8 *kkey; if (copy_from_user(&kgs, ugs, sizeof(kgs))) @@ -1345,9 +1498,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, } case PKEY_CLR2SECK2: { struct pkey_clr2seck2 __user *ucs = (void __user *)arg; + size_t klen = KEYBLOBBUFSIZE; struct pkey_clr2seck2 kcs; struct pkey_apqn *apqns; - size_t klen = KEYBLOBBUFSIZE; u8 *kkey; if (copy_from_user(&kcs, ucs, sizeof(kcs))) @@ -1409,8 +1562,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, } case PKEY_KBLOB2PROTK2: { struct pkey_kblob2pkey2 __user *utp = (void __user *)arg; - struct pkey_kblob2pkey2 ktp; struct pkey_apqn *apqns = NULL; + struct pkey_kblob2pkey2 ktp; u8 *kkey; if (copy_from_user(&ktp, utp, sizeof(ktp))) @@ -1423,8 +1576,11 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, kfree(apqns); return PTR_ERR(kkey); } + ktp.protkey.len = sizeof(ktp.protkey.protkey); rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries, - kkey, ktp.keylen, &ktp.protkey); + kkey, ktp.keylen, + ktp.protkey.protkey, &ktp.protkey.len, + &ktp.protkey.type); DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc); kfree(apqns); memzero_explicit(kkey, ktp.keylen); @@ -1437,8 +1593,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, } case PKEY_APQNS4K: { struct pkey_apqns4key __user *uak = (void __user *)arg; - struct pkey_apqns4key kak; struct pkey_apqn *apqns = NULL; + struct pkey_apqns4key kak; size_t nr_apqns, len; u8 *kkey; @@ -1486,8 +1642,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, } case PKEY_APQNS4KT: { struct pkey_apqns4keytype __user *uat = (void __user *)arg; - struct pkey_apqns4keytype kat; struct pkey_apqn *apqns = NULL; + struct pkey_apqns4keytype kat; size_t nr_apqns, len; if (copy_from_user(&kat, uat, sizeof(kat))) @@ -1528,9 +1684,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, } case PKEY_KBLOB2PROTK3: { struct pkey_kblob2pkey3 __user *utp = (void __user *)arg; - struct pkey_kblob2pkey3 ktp; - struct pkey_apqn *apqns = NULL; u32 protkeylen = 
PROTKEYBLOBBUFSIZE; + struct pkey_apqn *apqns = NULL; + struct pkey_kblob2pkey3 ktp; u8 *kkey, *protkey; if (copy_from_user(&ktp, utp, sizeof(ktp))) @@ -1549,9 +1705,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, kfree(kkey); return -ENOMEM; } - rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, kkey, - ktp.keylen, &ktp.pkeytype, - protkey, &protkeylen); + rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, + kkey, ktp.keylen, + protkey, &protkeylen, &ktp.pkeytype); DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc); kfree(apqns); memzero_explicit(kkey, ktp.keylen); @@ -1612,7 +1768,9 @@ static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf, protkeytoken.version = TOKVER_PROTECTED_KEY; protkeytoken.keytype = keytype; - rc = pkey_genprotkey(protkeytoken.keytype, &protkey); + protkey.len = sizeof(protkey.protkey); + rc = pkey_genprotkey(protkeytoken.keytype, + protkey.protkey, &protkey.len, &protkey.type); if (rc) return rc; @@ -1622,7 +1780,10 @@ static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf, memcpy(buf, &protkeytoken, sizeof(protkeytoken)); if (is_xts) { - rc = pkey_genprotkey(protkeytoken.keytype, &protkey); + /* xts needs a second protected key, reuse protkey struct */ + protkey.len = sizeof(protkey.protkey); + rc = pkey_genprotkey(protkeytoken.keytype, + protkey.protkey, &protkey.len, &protkey.type); if (rc) return rc; @@ -1717,8 +1878,8 @@ static struct attribute_group protkey_attr_group = { static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf, loff_t off, size_t count) { - int rc; struct pkey_seckey *seckey = (struct pkey_seckey *)buf; + int rc; if (off != 0 || count < sizeof(struct secaeskeytoken)) return -EINVAL; @@ -1824,9 +1985,9 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits, bool is_xts, char *buf, loff_t off, size_t count) { - int i, rc, card, dom; - u32 nr_apqns, *apqns = NULL; size_t keysize = CCACIPHERTOKENSIZE; + u32 nr_apqns, *apqns = NULL; + int i, rc, card, dom; if (off != 0 || count < CCACIPHERTOKENSIZE) return -EINVAL; @@ -1947,9 +2108,9 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits, bool is_xts, char *buf, loff_t off, size_t count) { - int i, rc, card, dom; - u32 nr_apqns, *apqns = NULL; size_t keysize = MAXEP11AESKEYBLOBSIZE; + u32 nr_apqns, *apqns = NULL; + int i, rc, card, dom; if (off != 0 || count < MAXEP11AESKEYBLOBSIZE) return -EINVAL; diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index cfbcb864ab63..a8f58e133e6e 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -716,6 +716,7 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev) ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev); if (ret) goto err_put_vdev; + matrix_mdev->req_trigger = NULL; dev_set_drvdata(&mdev->dev, matrix_mdev); mutex_lock(&matrix_dev->mdevs_lock); list_add(&matrix_mdev->node, &matrix_dev->mdev_list); @@ -1735,6 +1736,26 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev) vfio_ap_mdev_unset_kvm(matrix_mdev); } +static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count) +{ + struct device *dev = vdev->dev; + struct ap_matrix_mdev *matrix_mdev; + + matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev); + + if (matrix_mdev->req_trigger) { + if (!(count % 10)) + dev_notice_ratelimited(dev, + "Relaying device request to user (#%u)\n", + count); + + eventfd_signal(matrix_mdev->req_trigger, 1); + } else if 
(count == 0) { + dev_notice(dev, + "No device request registered, blocked until released by user\n"); + } +} + static int vfio_ap_mdev_get_device_info(unsigned long arg) { unsigned long minsz; @@ -1750,11 +1771,115 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg) info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET; info.num_regions = 0; - info.num_irqs = 0; + info.num_irqs = VFIO_AP_NUM_IRQS; return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } +static ssize_t vfio_ap_get_irq_info(unsigned long arg) +{ + unsigned long minsz; + struct vfio_irq_info info; + + minsz = offsetofend(struct vfio_irq_info, count); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz || info.index >= VFIO_AP_NUM_IRQS) + return -EINVAL; + + switch (info.index) { + case VFIO_AP_REQ_IRQ_INDEX: + info.count = 1; + info.flags = VFIO_IRQ_INFO_EVENTFD; + break; + default: + return -EINVAL; + } + + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; +} + +static int vfio_ap_irq_set_init(struct vfio_irq_set *irq_set, unsigned long arg) +{ + int ret; + size_t data_size; + unsigned long minsz; + + minsz = offsetofend(struct vfio_irq_set, count); + + if (copy_from_user(irq_set, (void __user *)arg, minsz)) + return -EFAULT; + + ret = vfio_set_irqs_validate_and_prepare(irq_set, 1, VFIO_AP_NUM_IRQS, + &data_size); + if (ret) + return ret; + + if (!(irq_set->flags & VFIO_IRQ_SET_ACTION_TRIGGER)) + return -EINVAL; + + return 0; +} + +static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev, + unsigned long arg) +{ + s32 fd; + void __user *data; + unsigned long minsz; + struct eventfd_ctx *req_trigger; + + minsz = offsetofend(struct vfio_irq_set, count); + data = (void __user *)(arg + minsz); + + if (get_user(fd, (s32 __user *)data)) + return -EFAULT; + + if (fd == -1) { + if (matrix_mdev->req_trigger) + eventfd_ctx_put(matrix_mdev->req_trigger); + matrix_mdev->req_trigger = NULL; + } else if (fd >= 0) { + req_trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(req_trigger)) + return PTR_ERR(req_trigger); + + if (matrix_mdev->req_trigger) + eventfd_ctx_put(matrix_mdev->req_trigger); + + matrix_mdev->req_trigger = req_trigger; + } else { + return -EINVAL; + } + + return 0; +} + +static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev, + unsigned long arg) +{ + int ret; + struct vfio_irq_set irq_set; + + ret = vfio_ap_irq_set_init(&irq_set, arg); + if (ret) + return ret; + + switch (irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { + case VFIO_IRQ_SET_DATA_EVENTFD: + switch (irq_set.index) { + case VFIO_AP_REQ_IRQ_INDEX: + return vfio_ap_set_request_irq(matrix_mdev, arg); + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev, unsigned int cmd, unsigned long arg) { @@ -1770,6 +1895,12 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev, case VFIO_DEVICE_RESET: ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable); break; + case VFIO_DEVICE_GET_IRQ_INFO: + ret = vfio_ap_get_irq_info(arg); + break; + case VFIO_DEVICE_SET_IRQS: + ret = vfio_ap_set_irqs(matrix_mdev, arg); + break; default: ret = -EOPNOTSUPP; break; @@ -1844,6 +1975,7 @@ static const struct vfio_device_ops vfio_ap_matrix_dev_ops = { .bind_iommufd = vfio_iommufd_emulated_bind, .unbind_iommufd = vfio_iommufd_emulated_unbind, .attach_ioas = vfio_iommufd_emulated_attach_ioas, + .request = vfio_ap_mdev_request }; static struct mdev_driver vfio_ap_matrix_driver = { diff --git 
a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index 976a65f32e7d..4642bbdbd1b2 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -15,6 +15,7 @@ #include <linux/types.h> #include <linux/mdev.h> #include <linux/delay.h> +#include <linux/eventfd.h> #include <linux/mutex.h> #include <linux/kvm_host.h> #include <linux/vfio.h> @@ -103,6 +104,7 @@ struct ap_queue_table { * PQAP(AQIC) instruction. * @mdev: the mediated device * @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev + * @req_trigger eventfd ctx for signaling userspace to return a device * @apm_add: bitmap of APIDs added to the host's AP configuration * @aqm_add: bitmap of APQIs added to the host's AP configuration * @adm_add: bitmap of control domain numbers added to the host's AP @@ -117,6 +119,7 @@ struct ap_matrix_mdev { crypto_hook pqap_hook; struct mdev_device *mdev; struct ap_queue_table qtable; + struct eventfd_ctx *req_trigger; DECLARE_BITMAP(apm_add, AP_DEVICES); DECLARE_BITMAP(aqm_add, AP_DOMAINS); DECLARE_BITMAP(adm_add, AP_DOMAINS); diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index 8acb9eba691b..c2096e4bba31 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -771,14 +771,6 @@ static int __init ism_init(void) static void __exit ism_exit(void) { - struct ism_dev *ism; - - mutex_lock(&ism_dev_list.mutex); - list_for_each_entry(ism, &ism_dev_list.list, list) { - ism_dev_exit(ism); - } - mutex_unlock(&ism_dev_list.mutex); - pci_unregister_driver(&ism_driver); debug_unregister(ism_debug_info); } diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 38d20a69ee12..f925f8664c2c 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -617,7 +617,7 @@ static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed) } /* Load rest of compatibility struct */ - strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, + strscpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, sizeof(tw_dev->tw_compat_info.driver_version)); tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL; tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH; diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index ca85bddb582b..cea3a79d538e 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -417,7 +417,7 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) INIT_WORK(&hostdata->main_task, NCR5380_main); hostdata->work_q = alloc_workqueue("ncr5380_%d", WQ_UNBOUND | WQ_MEM_RECLAIM, - 1, instance->host_no); + 0, instance->host_no); if (!hostdata->work_q) return -ENOMEM; diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 24c049eff157..70e1cac1975e 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -3289,7 +3289,7 @@ static int query_disk(struct aac_dev *dev, void __user *arg) else qd.unmapped = 0; - strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname, + strscpy(qd.name, fsa_dev_ptr[qd.cnum].devname, min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1)); if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk))) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 5e115e8b2ba4..7c6efde75da6 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1678,6 +1678,7 @@ struct aac_dev u32 handle_pci_error; bool init_reset; u8 soft_reset_support; + u8 use_map_queue; }; #define 
aac_adapter_interrupt(dev) \ diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index deb32c9f4b3e..3f062e4013ab 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -223,8 +223,12 @@ int aac_fib_setup(struct aac_dev * dev) struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd) { struct fib *fibptr; + u32 blk_tag; + int i; - fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag]; + blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + i = blk_mq_unique_tag_to_tag(blk_tag); + fibptr = &dev->fibs[i]; /* * Null out fields that depend on being zero at the start of * each I/O diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 68f4dbcfff49..c4a36c0be527 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -19,6 +19,7 @@ #include <linux/compat.h> #include <linux/blkdev.h> +#include <linux/blk-mq-pci.h> #include <linux/completion.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -504,6 +505,15 @@ common_config: return 0; } +static void aac_map_queues(struct Scsi_Host *shost) +{ + struct aac_dev *aac = (struct aac_dev *)shost->hostdata; + + blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], + aac->pdev, 0); + aac->use_map_queue = true; +} + /** * aac_change_queue_depth - alter queue depths * @sdev: SCSI device we are considering @@ -1488,6 +1498,7 @@ static const struct scsi_host_template aac_driver_template = { .bios_param = aac_biosparm, .shost_groups = aac_host_groups, .slave_configure = aac_slave_configure, + .map_queues = aac_map_queues, .change_queue_depth = aac_change_queue_depth, .sdev_groups = aac_dev_groups, .eh_abort_handler = aac_eh_abort, @@ -1775,6 +1786,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) shost->max_lun = AAC_MAX_LUN; pci_set_drvdata(pdev, shost); + shost->nr_hw_queues = aac->max_msix; + shost->host_tagset = 1; error = scsi_add_host(shost, &pdev->dev); if (error) @@ -1906,6 +1919,7 @@ static void aac_remove_one(struct pci_dev *pdev) struct aac_dev *aac = (struct aac_dev *)shost->hostdata; aac_cancel_rescan_worker(aac); + aac->use_map_queue = false; scsi_remove_host(shost); __aac_shutdown(aac); diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 11ef58204e96..61949f374188 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -493,6 +493,10 @@ static int aac_src_deliver_message(struct fib *fib) #endif u16 vector_no; + struct scsi_cmnd *scmd; + u32 blk_tag; + struct Scsi_Host *shost = dev->scsi_host_ptr; + struct blk_mq_queue_map *qmap; atomic_inc(&q->numpending); @@ -505,8 +509,25 @@ static int aac_src_deliver_message(struct fib *fib) if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) && dev->sa_firmware) vector_no = aac_get_vector(dev); - else - vector_no = fib->vector_no; + else { + if (!fib->vector_no || !fib->callback_data) { + if (shost && dev->use_map_queue) { + qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; + vector_no = qmap->mq_map[raw_smp_processor_id()]; + } + /* + * We hardcode the vector_no for + * reserved commands as a valid shost is + * absent during the init + */ + else + vector_no = 0; + } else { + scmd = (struct scsi_cmnd *)fib->callback_data; + blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + vector_no = blk_mq_unique_tag_to_hwq(blk_tag); + } + } if (native_hba) { if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) { diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 2b3f0c10478e..872ad37e2a6e 
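
The aacraid changes above switch the driver to a host-wide tagset with one hardware queue per MSI-X vector (nr_hw_queues, host_tagset, blk_mq_pci_map_queues()) and derive both the FIB index and the delivery vector from the block layer's unique tag. A minimal sketch of that decomposition, using the stock <linux/blk-mq.h> helpers:

        /* Sketch: splitting blk-mq's unique tag the way the aacraid hunks do.
         * blk_mq_unique_tag() packs (hw queue << 16) | per-queue tag, so one
         * value both indexes the FIB array and picks the MSI-X vector.
         */
        #include <linux/blk-mq.h>
        #include <scsi/scsi_cmnd.h>

        static void decode_unique_tag(struct scsi_cmnd *scmd,
                                      u16 *fib_index, u16 *vector_no)
        {
                u32 unique = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

                *fib_index = blk_mq_unique_tag_to_tag(unique);  /* low 16 bits */
                *vector_no = blk_mq_unique_tag_to_hwq(unique);  /* high 16 bits */
        }

In src.c the high half picks the MSI-X vector for commands that carry a SCSI request, while reserved commands issued before the Scsi_Host exists fall back to vector 0, as the hunk's comment notes.
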
100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -383,7 +383,7 @@ int bnx2i_get_stats(void *handle) if (!stats) return -ENOMEM; - strlcpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version)); + strscpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version)); memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN); stats->max_frame_size = hba->netdev->mtu; diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index ac648bb8f7e7..cb0a399be1cc 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c @@ -877,7 +877,8 @@ static long ch_ioctl(struct file *file, } default: - return scsi_ioctl(ch->device, file->f_mode, cmd, argp); + return scsi_ioctl(ch->device, file->f_mode & FMODE_WRITE, cmd, + argp); } } diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index 06ccb51bf6a9..f5334ccbf2ca 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -1394,8 +1394,8 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id) host->cmd_per_lun = le32_to_cpu(iop_config.max_requests); host->max_cmd_len = 16; - req_size = struct_size((struct hpt_iop_request_scsi_command *)0, - sg_list, hba->max_sg_descriptors); + req_size = struct_size_t(struct hpt_iop_request_scsi_command, + sg_list, hba->max_sg_descriptors); if ((req_size & 0x1f) != 0) req_size = (req_size + 0x1f) & ~0x1f; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 63f32f843e75..59599299615d 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -250,7 +250,7 @@ static void gather_partition_info(void) ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL); if (ppartition_name) - strlcpy(partition_name, ppartition_name, + strscpy(partition_name, ppartition_name, sizeof(partition_name)); p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL); if (p_number_ptr) @@ -1282,12 +1282,12 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) if (hostdata->client_migrated) hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED); - strlcpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), + strscpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), sizeof(hostdata->caps.name)); location = of_get_property(of_node, "ibm,loc-code", NULL); location = location ? 
location : dev_name(hostdata->dev); - strlcpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); + strscpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE); req->buffer = cpu_to_be64(hostdata->caps_addr); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 9a322a3a2150..595dca92e8db 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -889,7 +889,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocbq) { uint32_t evt_req_id = 0; - uint32_t cmd; + u16 cmd; struct lpfc_dmabuf *dmabuf = NULL; struct lpfc_bsg_event *evt; struct event_data *evt_dat = NULL; @@ -915,7 +915,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt; evt_req_id = ct_req->FsType; - cmd = ct_req->CommandResponse.bits.CmdRsp; + cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); spin_lock_irqsave(&phba->ct_ev_lock, flags); list_for_each_entry(evt, &phba->ct_ev_waiters, node) { @@ -3186,8 +3186,8 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) ctreq->RevisionId.bits.InId = 0; ctreq->FsType = SLI_CT_ELX_LOOPBACK; ctreq->FsSubType = 0; - ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA; - ctreq->CommandResponse.bits.Size = size; + ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(ELX_LOOPBACK_DATA); + ctreq->CommandResponse.bits.Size = cpu_to_be16(size); segment_offset = ELX_LOOPBACK_HEADER_SZ; } else segment_offset = 0; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 317c944c68e3..050eed8e2684 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5153,8 +5153,8 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance) fusion->max_map_sz = ventura_map_sz; } else { fusion->old_map_sz = - struct_size((struct MR_FW_RAID_MAP *)0, ldSpanMap, - instance->fw_supported_vd_count); + struct_size_t(struct MR_FW_RAID_MAP, ldSpanMap, + instance->fw_supported_vd_count); fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); fusion->max_map_sz = @@ -5789,8 +5789,8 @@ megasas_setup_jbod_map(struct megasas_instance *instance) struct fusion_context *fusion = instance->ctrl_context; size_t pd_seq_map_sz; - pd_seq_map_sz = struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0, seq, - MAX_PHYSICAL_DEVICES); + pd_seq_map_sz = struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC, seq, + MAX_PHYSICAL_DEVICES); instance->use_seqnum_jbod_fp = instance->support_seqnum_jbod_fp; @@ -8033,8 +8033,8 @@ skip_firing_dcmds: if (instance->adapter_type != MFI_SERIES) { megasas_release_fusion(instance); pd_seq_map_sz = - struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0, - seq, MAX_PHYSICAL_DEVICES); + struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC, + seq, MAX_PHYSICAL_DEVICES); for (i = 0; i < 2 ; i++) { if (fusion->ld_map[i]) dma_free_coherent(&instance->pdev->dev, diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 4463a538102a..b8b388a4e28f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -326,9 +326,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id) else if (instance->supportmax256vd) expected_size = sizeof(struct MR_FW_RAID_MAP_EXT); else - expected_size = struct_size((struct MR_FW_RAID_MAP *)0, - ldSpanMap, - le16_to_cpu(pDrvRaidMap->ldCount)); + 
expected_size = struct_size_t(struct MR_FW_RAID_MAP, + ldSpanMap, + le16_to_cpu(pDrvRaidMap->ldCount)); if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) { dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x", diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 45d359554182..450522b204d6 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -2593,7 +2593,7 @@ retry_probe: sp_params.drv_minor = QEDI_DRIVER_MINOR_VER; sp_params.drv_rev = QEDI_DRIVER_REV_VER; sp_params.drv_eng = QEDI_DRIVER_ENG_VER; - strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE); + strscpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE); rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n"); diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c index 96ee35256a16..a9a9ec086a7e 100644 --- a/drivers/scsi/scsi_bsg.c +++ b/drivers/scsi/scsi_bsg.c @@ -10,7 +10,7 @@ #define uptr64(val) ((void __user *)(uintptr_t)(val)) static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr, - fmode_t mode, unsigned int timeout) + bool open_for_write, unsigned int timeout) { struct scsi_cmnd *scmd; struct request *rq; @@ -42,7 +42,7 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr, if (copy_from_user(scmd->cmnd, uptr64(hdr->request), scmd->cmd_len)) goto out_put_request; ret = -EPERM; - if (!scsi_cmd_allowed(scmd->cmnd, mode)) + if (!scsi_cmd_allowed(scmd->cmnd, open_for_write)) goto out_put_request; ret = 0; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index e3b31d32b6a9..6f6c5973c3ea 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -248,7 +248,7 @@ static int scsi_send_start_stop(struct scsi_device *sdev, int data) * Only a subset of commands are allowed for unprivileged users. Commands used * to format the media, update the firmware, etc. are not permitted. */ -bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode) +bool scsi_cmd_allowed(unsigned char *cmd, bool open_for_write) { /* root can do any command. 
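
Several hunks in this range (hptiop, megaraid_sas, and smartpqi a little further down) swap struct_size((struct foo *)0, member, n) for struct_size_t(struct foo, member, n). Both come from <linux/overflow.h> and return the overflow-checked size of a struct plus n trailing flexible-array elements; the _t variant simply names the type instead of faking a null pointer. A stand-alone sketch, with an invented struct for illustration:

        #include <linux/overflow.h>
        #include <linux/types.h>

        struct demo_req {               /* invented stand-in */
                u32 header;
                u64 sg_list[];          /* flexible array member */
        };

        static size_t demo_req_size(unsigned int nr_sg)
        {
                /* Old spelling, as removed by these hunks:
                 *   struct_size((struct demo_req *)0, sg_list, nr_sg);
                 * New spelling: same overflow-checked result, type named
                 * directly, no bogus null-pointer cast.
                 */
                return struct_size_t(struct demo_req, sg_list, nr_sg);
        }
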
*/ if (capable(CAP_SYS_RAWIO)) @@ -338,7 +338,7 @@ bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode) case GPCMD_SET_READ_AHEAD: /* ZBC */ case ZBC_OUT: - return (mode & FMODE_WRITE); + return open_for_write; default: return false; } @@ -346,7 +346,7 @@ bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode) EXPORT_SYMBOL(scsi_cmd_allowed); static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq, - struct sg_io_hdr *hdr, fmode_t mode) + struct sg_io_hdr *hdr, bool open_for_write) { struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); @@ -354,7 +354,7 @@ static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq, return -EMSGSIZE; if (copy_from_user(scmd->cmnd, hdr->cmdp, hdr->cmd_len)) return -EFAULT; - if (!scsi_cmd_allowed(scmd->cmnd, mode)) + if (!scsi_cmd_allowed(scmd->cmnd, open_for_write)) return -EPERM; scmd->cmd_len = hdr->cmd_len; @@ -407,7 +407,8 @@ static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, return ret; } -static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode) +static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, + bool open_for_write) { unsigned long start_time; ssize_t ret = 0; @@ -448,7 +449,7 @@ static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode) goto out_put_request; } - ret = scsi_fill_sghdr_rq(sdev, rq, hdr, mode); + ret = scsi_fill_sghdr_rq(sdev, rq, hdr, open_for_write); if (ret < 0) goto out_put_request; @@ -477,8 +478,7 @@ out_put_request: /** * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl * @q: request queue to send scsi commands down - * @mode: mode used to open the file through which the ioctl has been - * submitted + * @open_for_write: is the file / block device opened for writing? * @sic: userspace structure describing the command to perform * * Send down the scsi command described by @sic to the device below @@ -501,7 +501,7 @@ out_put_request: * Positive numbers returned are the compacted SCSI error codes (4 * bytes in one int) where the lowest byte is the SCSI status. */ -static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode, +static int sg_scsi_ioctl(struct request_queue *q, bool open_for_write, struct scsi_ioctl_command __user *sic) { struct request *rq; @@ -554,7 +554,7 @@ static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode, goto error; err = -EPERM; - if (!scsi_cmd_allowed(scmd->cmnd, mode)) + if (!scsi_cmd_allowed(scmd->cmnd, open_for_write)) goto error; /* default. 
possible overridden later */ @@ -776,7 +776,7 @@ static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc, return 0; } -static int scsi_cdrom_send_packet(struct scsi_device *sdev, fmode_t mode, +static int scsi_cdrom_send_packet(struct scsi_device *sdev, bool open_for_write, void __user *arg) { struct cdrom_generic_command cgc; @@ -817,7 +817,7 @@ static int scsi_cdrom_send_packet(struct scsi_device *sdev, fmode_t mode, hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd; hdr.cmd_len = sizeof(cgc.cmd); - err = sg_io(sdev, &hdr, mode); + err = sg_io(sdev, &hdr, open_for_write); if (err == -EFAULT) return -EFAULT; @@ -832,7 +832,7 @@ static int scsi_cdrom_send_packet(struct scsi_device *sdev, fmode_t mode, return err; } -static int scsi_ioctl_sg_io(struct scsi_device *sdev, fmode_t mode, +static int scsi_ioctl_sg_io(struct scsi_device *sdev, bool open_for_write, void __user *argp) { struct sg_io_hdr hdr; @@ -841,7 +841,7 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, fmode_t mode, error = get_sg_io_hdr(&hdr, argp); if (error) return error; - error = sg_io(sdev, &hdr, mode); + error = sg_io(sdev, &hdr, open_for_write); if (error == -EFAULT) return error; if (put_sg_io_hdr(&hdr, argp)) @@ -852,7 +852,7 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, fmode_t mode, /** * scsi_ioctl - Dispatch ioctl to scsi device * @sdev: scsi device receiving ioctl - * @mode: mode the block/char device is opened with + * @open_for_write: is the file / block device opened for writing? * @cmd: which ioctl is it * @arg: data associated with ioctl * @@ -860,7 +860,7 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, fmode_t mode, * does not take a major/minor number as the dev field. Rather, it takes * a pointer to a &struct scsi_device. */ -int scsi_ioctl(struct scsi_device *sdev, fmode_t mode, int cmd, +int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd, void __user *arg) { struct request_queue *q = sdev->request_queue; @@ -896,11 +896,11 @@ int scsi_ioctl(struct scsi_device *sdev, fmode_t mode, int cmd, case SG_EMULATED_HOST: return sg_emulated_host(q, arg); case SG_IO: - return scsi_ioctl_sg_io(sdev, mode, arg); + return scsi_ioctl_sg_io(sdev, open_for_write, arg); case SCSI_IOCTL_SEND_COMMAND: - return sg_scsi_ioctl(q, mode, arg); + return sg_scsi_ioctl(q, open_for_write, arg); case CDROM_SEND_PACKET: - return scsi_cdrom_send_packet(sdev, mode, arg); + return scsi_cdrom_send_packet(sdev, open_for_write, arg); case CDROMCLOSETRAY: return scsi_send_start_stop(sdev, 3); case CDROMEJECT: diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 1624d528aa1f..ab216976dbdc 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1280,11 +1280,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt) mempool_free(rq->special_vec.bv_page, sd_page_pool); } -static bool sd_need_revalidate(struct block_device *bdev, - struct scsi_disk *sdkp) +static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp) { if (sdkp->device->removable || sdkp->write_prot) { - if (bdev_check_media_change(bdev)) + if (disk_check_media_change(disk)) return true; } @@ -1293,13 +1292,13 @@ static bool sd_need_revalidate(struct block_device *bdev, * nothing to do with partitions, BLKRRPART is used to force a full * revalidate after things like a format for historical reasons. 
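
The scsi_ioctl.c conversion above collapses the fmode_t parameter into a single bool: the only question scsi_cmd_allowed() ever asked of the mode was "is this open writable?". Callers now reduce their context to that one bit up front, which is what the ch, sg, and st hunks do with file->f_mode and the sd/sr hunks do with blk_mode_t. The two caller-side reductions, sketched:

        #include <linux/blkdev.h>
        #include <linux/fs.h>

        /* Char-device path (ch, sg, st): derive it from the struct file. */
        static bool writable_from_file(struct file *filp)
        {
                return filp->f_mode & FMODE_WRITE;
        }

        /* Block-device path (sd, sr): derive it from the open flags. */
        static bool writable_from_blk_mode(blk_mode_t mode)
        {
                return mode & BLK_OPEN_WRITE;
        }
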
*/ - return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state); + return test_bit(GD_NEED_PART_SCAN, &disk->state); } /** * sd_open - open a scsi disk device - * @bdev: Block device of the scsi disk to open - * @mode: FMODE_* mask + * @disk: disk to open + * @mode: open mode * * Returns 0 if successful. Returns a negated errno value in case * of error. @@ -1309,11 +1308,11 @@ static bool sd_need_revalidate(struct block_device *bdev, * In the latter case @inode and @filp carry an abridged amount * of information as noted above. * - * Locking: called with bdev->bd_disk->open_mutex held. + * Locking: called with disk->open_mutex held. **/ -static int sd_open(struct block_device *bdev, fmode_t mode) +static int sd_open(struct gendisk *disk, blk_mode_t mode) { - struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); + struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdev = sdkp->device; int retval; @@ -1330,14 +1329,15 @@ static int sd_open(struct block_device *bdev, fmode_t mode) if (!scsi_block_when_processing_errors(sdev)) goto error_out; - if (sd_need_revalidate(bdev, sdkp)) - sd_revalidate_disk(bdev->bd_disk); + if (sd_need_revalidate(disk, sdkp)) + sd_revalidate_disk(disk); /* * If the drive is empty, just let the open fail. */ retval = -ENOMEDIUM; - if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY)) + if (sdev->removable && !sdkp->media_present && + !(mode & BLK_OPEN_NDELAY)) goto error_out; /* @@ -1345,7 +1345,7 @@ static int sd_open(struct block_device *bdev, fmode_t mode) * if the user expects to be able to write to the thing. */ retval = -EROFS; - if (sdkp->write_prot && (mode & FMODE_WRITE)) + if (sdkp->write_prot && (mode & BLK_OPEN_WRITE)) goto error_out; /* @@ -1374,16 +1374,15 @@ error_out: * sd_release - invoked when the (last) close(2) is called on this * scsi disk. * @disk: disk to release - * @mode: FMODE_* mask * * Returns 0. * * Note: may block (uninterruptible) if error recovery is underway * on this disk. * - * Locking: called with bdev->bd_disk->open_mutex held. + * Locking: called with disk->open_mutex held. **/ -static void sd_release(struct gendisk *disk, fmode_t mode) +static void sd_release(struct gendisk *disk) { struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdev = sdkp->device; @@ -1426,7 +1425,7 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) /** * sd_ioctl - process an ioctl * @bdev: target block device - * @mode: FMODE_* mask + * @mode: open mode * @cmd: ioctl command number * @arg: this is third argument given to ioctl(2) system call. * Often contains a pointer. @@ -1437,7 +1436,7 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) * Note: most ioctls are forward onto the block subsystem or further * down in the scsi subsystem. **/ -static int sd_ioctl(struct block_device *bdev, fmode_t mode, +static int sd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct gendisk *disk = bdev->bd_disk; @@ -1459,13 +1458,13 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode, * access to the device is prohibited. 
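
sd_open()/sd_release() above track the block layer's reworked open interface: ->open now receives the gendisk plus a blk_mode_t (BLK_OPEN_* flags replacing FMODE_*), and ->release loses the mode argument entirely, since there is no meaningful per-opener mode at last close. A minimal sketch of the block_device_operations shape these hunks implement (demo_* names are invented):

        #include <linux/blkdev.h>
        #include <linux/module.h>

        static bool demo_media_present(struct gendisk *disk)
        {
                return true;            /* stand-in for a real medium check */
        }

        static int demo_open(struct gendisk *disk, blk_mode_t mode)
        {
                /* Non-blocking opens skip the medium check, as sd_open() does. */
                if (!(mode & BLK_OPEN_NDELAY) && !demo_media_present(disk))
                        return -ENOMEDIUM;
                return 0;
        }

        static void demo_release(struct gendisk *disk)
        {
        }

        static const struct block_device_operations demo_fops = {
                .owner   = THIS_MODULE,
                .open    = demo_open,
                .release = demo_release,
        };
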
*/ error = scsi_ioctl_block_when_processing_errors(sdp, cmd, - (mode & FMODE_NDELAY) != 0); + (mode & BLK_OPEN_NDELAY)); if (error) return error; if (is_sed_ioctl(cmd)) return sed_ioctl(sdkp->opal_dev, cmd, p); - return scsi_ioctl(sdp, mode, cmd, p); + return scsi_ioctl(sdp, mode & BLK_OPEN_WRITE, cmd, p); } static void set_media_not_present(struct scsi_disk *sdkp) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 037f8c98a6d3..dcb73787c29d 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -237,7 +237,7 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd) if (sfp->parentdp->device->type == TYPE_SCANNER) return 0; - if (!scsi_cmd_allowed(cmd, filp->f_mode)) + if (!scsi_cmd_allowed(cmd, filp->f_mode & FMODE_WRITE)) return -EPERM; return 0; } @@ -1103,7 +1103,8 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp, case SCSI_IOCTL_SEND_COMMAND: if (atomic_read(&sdp->detaching)) return -ENODEV; - return scsi_ioctl(sdp->device, filp->f_mode, cmd_in, p); + return scsi_ioctl(sdp->device, filp->f_mode & FMODE_WRITE, + cmd_in, p); case SG_SET_DEBUG: result = get_user(val, ip); if (result) @@ -1159,7 +1160,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p); if (ret != -ENOIOCTLCMD) return ret; - return scsi_ioctl(sdp->device, filp->f_mode, cmd_in, p); + return scsi_ioctl(sdp->device, filp->f_mode & FMODE_WRITE, cmd_in, p); } static __poll_t @@ -1496,6 +1497,10 @@ sg_add_device(struct device *cl_dev) int error; unsigned long iflags; + error = blk_get_queue(scsidp->request_queue); + if (error) + return error; + error = -ENOMEM; cdev = cdev_alloc(); if (!cdev) { @@ -1553,6 +1558,7 @@ cdev_add_err: out: if (cdev) cdev_del(cdev); + blk_put_queue(scsidp->request_queue); return error; } @@ -1560,6 +1566,7 @@ static void sg_device_destroy(struct kref *kref) { struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); + struct request_queue *q = sdp->device->request_queue; unsigned long flags; /* CAUTION! Note that the device can still be found via idr_find() @@ -1567,6 +1574,9 @@ sg_device_destroy(struct kref *kref) * any other cleanup. 
*/ + blk_trace_remove(q); + blk_put_queue(q); + write_lock_irqsave(&sg_index_lock, flags); idr_remove(&sg_index_idr, sdp->index); write_unlock_irqrestore(&sg_index_lock, flags); diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 03de97cd72c2..f4e0aa262164 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -5015,7 +5015,7 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) } #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ - struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS) + struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS) static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, bool enable_events) diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 12869e6d4ebd..ce886c8c9dbe 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -484,9 +484,9 @@ static void sr_revalidate_disk(struct scsi_cd *cd) get_sectorsize(cd); } -static int sr_block_open(struct block_device *bdev, fmode_t mode) +static int sr_block_open(struct gendisk *disk, blk_mode_t mode) { - struct scsi_cd *cd = scsi_cd(bdev->bd_disk); + struct scsi_cd *cd = scsi_cd(disk); struct scsi_device *sdev = cd->device; int ret; @@ -494,11 +494,11 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode) return -ENXIO; scsi_autopm_get_device(sdev); - if (bdev_check_media_change(bdev)) + if (disk_check_media_change(disk)) sr_revalidate_disk(cd); mutex_lock(&cd->lock); - ret = cdrom_open(&cd->cdi, bdev, mode); + ret = cdrom_open(&cd->cdi, mode); mutex_unlock(&cd->lock); scsi_autopm_put_device(sdev); @@ -507,19 +507,19 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode) return ret; } -static void sr_block_release(struct gendisk *disk, fmode_t mode) +static void sr_block_release(struct gendisk *disk) { struct scsi_cd *cd = scsi_cd(disk); mutex_lock(&cd->lock); - cdrom_release(&cd->cdi, mode); + cdrom_release(&cd->cdi); mutex_unlock(&cd->lock); scsi_device_put(cd->device); } -static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, - unsigned long arg) +static int sr_block_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned cmd, unsigned long arg) { struct scsi_cd *cd = scsi_cd(bdev->bd_disk); struct scsi_device *sdev = cd->device; @@ -532,18 +532,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, mutex_lock(&cd->lock); ret = scsi_ioctl_block_when_processing_errors(sdev, cmd, - (mode & FMODE_NDELAY) != 0); + (mode & BLK_OPEN_NDELAY)); if (ret) goto out; scsi_autopm_get_device(sdev); if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) { - ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); + ret = cdrom_ioctl(&cd->cdi, bdev, cmd, arg); if (ret != -ENOSYS) goto put; } - ret = scsi_ioctl(sdev, mode, cmd, argp); + ret = scsi_ioctl(sdev, mode & BLK_OPEN_WRITE, cmd, argp); put: scsi_autopm_put_device(sdev); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index b90a440e135d..14d7981ddcdd 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -3832,7 +3832,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) break; } - retval = scsi_ioctl(STp->device, file->f_mode, cmd_in, p); + retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE, cmd_in, p); if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { /* unload */ STp->rew_at_close = 0; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index e6bc622954cf..659196a2f63a 100644 --- 
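
The sg lifetime fix just above pairs blk_get_queue() in sg_add_device() with blk_put_queue() in sg_device_destroy(), so the late blk_trace_remove() can never run against a request_queue that was freed when the SCSI device went away. The general shape of the pattern, sketched with invented names and assuming the same 0-on-success return convention the hunk uses for blk_get_queue():

        #include <linux/blkdev.h>
        #include <linux/blktrace_api.h>
        #include <linux/kref.h>
        #include <linux/slab.h>

        struct demo_dev {
                struct request_queue *q;
                struct kref ref;
        };

        static int demo_add(struct demo_dev *d, struct request_queue *q)
        {
                int error = blk_get_queue(q);   /* pin the queue for our lifetime */

                if (error)
                        return error;
                d->q = q;
                kref_init(&d->ref);
                return 0;
        }

        static void demo_destroy(struct kref *kref)
        {
                struct demo_dev *d = container_of(kref, struct demo_dev, ref);

                blk_trace_remove(d->q);         /* safe: queue still referenced */
                blk_put_queue(d->q);
                kfree(d);
        }
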
a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1567,6 +1567,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice) { blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ)); + /* storvsc devices don't support MAINTENANCE_IN SCSI cmd */ + sdevice->no_report_opcodes = 1; sdevice->no_write_same = 1; /* diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 6ddb2dfc0f00..32449bef4415 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1756,8 +1756,11 @@ static int cqspi_probe(struct platform_device *pdev) cqspi->slow_sram = true; if (of_device_is_compatible(pdev->dev.of_node, - "xlnx,versal-ospi-1.0")) - dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + "xlnx,versal-ospi-1.0")) { + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) + goto probe_reset_failed; + } } ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index 5f2aee69c1c1..15f5e9cb54ad 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -274,7 +274,7 @@ static void dw_spi_elba_set_cs(struct spi_device *spi, bool enable) */ spi_set_chipselect(spi, 0, 0); dw_spi_set_cs(spi, enable); - spi_get_chipselect(spi, cs); + spi_set_chipselect(spi, 0, cs); } static int dw_spi_elba_init(struct platform_device *pdev, diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 4339485d202c..674cfe05f411 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1002,7 +1002,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, static int dspi_setup(struct spi_device *spi) { struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller); + u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz); unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0; + u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4); u32 cs_sck_delay = 0, sck_cs_delay = 0; struct fsl_dspi_platform_data *pdata; unsigned char pasc = 0, asc = 0; @@ -1031,6 +1033,19 @@ static int dspi_setup(struct spi_device *spi) sck_cs_delay = pdata->sck_cs_delay; } + /* Since tCSC and tASC apply to continuous transfers too, avoid SCK + * glitches of half a cycle by never allowing tCSC + tASC to go below + * half a SCK period. + */ + if (cs_sck_delay < quarter_period_ns) + cs_sck_delay = quarter_period_ns; + if (sck_cs_delay < quarter_period_ns) + sck_cs_delay = quarter_period_ns; + + dev_dbg(&spi->dev, + "DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n", + cs_sck_delay, sck_cs_delay); + clkrate = clk_get_rate(dspi->clk); hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate); diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index a98b781b103a..b293428760bc 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -646,6 +646,8 @@ static int spi_geni_init(struct spi_geni_master *mas) geni_se_select_mode(se, GENI_GPI_DMA); dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n"); break; + } else if (ret == -EPROBE_DEFER) { + goto out_pm; } /* * in case of failure to get gpi dma channel, we can still do the diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 39d94c850839..8d009275a59d 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -237,7 +237,7 @@ static int spidev_message(struct spidev_data *spidev, /* Ensure that also following allocations from rx_buf/tx_buf will meet * DMA alignment requirements. 
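
In the spi-fsl-dspi hunk above, dspi_setup() floors both tCSC (CS-to-SCK) and tASC (SCK-to-CS) at a quarter of the SCK period, so their sum can never drop below half a period and clip an SCK edge during continuous transfers. The arithmetic is small enough to show worked: at max_speed_hz = 10 MHz the period is 100 ns, so each delay is raised to at least 25 ns. As a sketch:

        #include <linux/math.h>
        #include <linux/minmax.h>
        #include <linux/time64.h>       /* NSEC_PER_SEC */

        static void clamp_cs_delays(u32 speed_hz, u32 *cs_sck_ns, u32 *sck_cs_ns)
        {
                u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, speed_hz);
                u32 quarter_ns = DIV_ROUND_UP(period_ns, 4);

                /* 10 MHz -> period 100 ns -> both delays floored at 25 ns,
                 * keeping tCSC + tASC >= half an SCK period.
                 */
                *cs_sck_ns = max(*cs_sck_ns, quarter_ns);
                *sck_cs_ns = max(*sck_cs_ns, quarter_ns);
        }
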
*/ - unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN); + unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_DMA_MINALIGN); k_tmp->len = u_tmp->len; diff --git a/drivers/staging/octeon/TODO b/drivers/staging/octeon/TODO index 67a0a1f6b922..044e48e3d65f 100644 --- a/drivers/staging/octeon/TODO +++ b/drivers/staging/octeon/TODO @@ -6,4 +6,3 @@ TODO: - make driver self-contained instead of being split between staging and arch/mips/cavium-octeon. -Contact: Aaro Koskinen <aaro.koskinen@iki.fi> diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index cc838ffd1294..3c462d69daca 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -90,7 +90,7 @@ static int iblock_configure_device(struct se_device *dev) struct request_queue *q; struct block_device *bd = NULL; struct blk_integrity *bi; - fmode_t mode; + blk_mode_t mode = BLK_OPEN_READ; unsigned int max_write_zeroes_sectors; int ret; @@ -108,13 +108,12 @@ static int iblock_configure_device(struct se_device *dev) pr_debug( "IBLOCK: Claiming struct block_device: %s\n", ib_dev->ibd_udev_path); - mode = FMODE_READ|FMODE_EXCL; if (!ib_dev->ibd_readonly) - mode |= FMODE_WRITE; + mode |= BLK_OPEN_WRITE; else dev->dev_flags |= DF_READ_ONLY; - bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); + bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev, NULL); if (IS_ERR(bd)) { ret = PTR_ERR(bd); goto out_free_bioset; @@ -175,7 +174,7 @@ static int iblock_configure_device(struct se_device *dev) return 0; out_blkdev_put: - blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); + blkdev_put(ib_dev->ibd_bd, ib_dev); out_free_bioset: bioset_exit(&ib_dev->ibd_bio_set); out: @@ -201,7 +200,7 @@ static void iblock_destroy_device(struct se_device *dev) struct iblock_dev *ib_dev = IBLOCK_DEV(dev); if (ib_dev->ibd_bd != NULL) - blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); + blkdev_put(ib_dev->ibd_bd, ib_dev); bioset_exit(&ib_dev->ibd_bio_set); } diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index e7425549e39c..0d4f09693ef4 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -366,8 +366,8 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) * Claim exclusive struct block_device access to struct scsi_device * for TYPE_DISK and TYPE_ZBC using supplied udev_path */ - bd = blkdev_get_by_path(dev->udev_path, - FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); + bd = blkdev_get_by_path(dev->udev_path, BLK_OPEN_WRITE | BLK_OPEN_READ, + pdv, NULL); if (IS_ERR(bd)) { pr_err("pSCSI: blkdev_get_by_path() failed\n"); scsi_device_put(sd); @@ -377,7 +377,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) ret = pscsi_add_device_to_list(dev, sd); if (ret) { - blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); + blkdev_put(pdv->pdv_bd, pdv); scsi_device_put(sd); return ret; } @@ -565,8 +565,7 @@ static void pscsi_destroy_device(struct se_device *dev) */ if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) && pdv->pdv_bd) { - blkdev_put(pdv->pdv_bd, - FMODE_WRITE|FMODE_READ|FMODE_EXCL); + blkdev_put(pdv->pdv_bd, pdv); pdv->pdv_bd = NULL; } /* diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 86adff2a86ed..687adc9e086c 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -504,6 +504,8 @@ target_setup_session(struct 
se_portal_group *tpg, free_sess: transport_free_session(sess); + return ERR_PTR(rc); + free_cnt: target_free_cmd_counter(cmd_cnt); return ERR_PTR(rc); diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 4cd7ab707315..19a4b33cb564 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -130,6 +130,14 @@ config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR system and device power allocation. This governor can only operate on cooling devices that implement the power API. +config THERMAL_DEFAULT_GOV_BANG_BANG + bool "bang_bang" + depends on THERMAL_GOV_BANG_BANG + help + Use the bang_bang governor as default. This throttles the + devices one step at the time, taking into account the trip + point hysteresis. + endchoice config THERMAL_GOV_FAIR_SHARE diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c index 3abc2dcef408..756b218880a7 100644 --- a/drivers/thermal/amlogic_thermal.c +++ b/drivers/thermal/amlogic_thermal.c @@ -282,8 +282,7 @@ static int amlogic_thermal_probe(struct platform_device *pdev) return ret; } - if (devm_thermal_add_hwmon_sysfs(&pdev->dev, pdata->tzd)) - dev_warn(&pdev->dev, "Failed to add hwmon sysfs attributes\n"); + devm_thermal_add_hwmon_sysfs(&pdev->dev, pdata->tzd); ret = amlogic_thermal_initialize(pdata); if (ret) diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c index 0e8dfa6a7757..9f6dc4fc9112 100644 --- a/drivers/thermal/armada_thermal.c +++ b/drivers/thermal/armada_thermal.c @@ -231,7 +231,7 @@ static void armada380_init(struct platform_device *pdev, regmap_write(priv->syscon, data->syscon_control0_off, reg); } -static void armada_ap806_init(struct platform_device *pdev, +static void armada_ap80x_init(struct platform_device *pdev, struct armada_thermal_priv *priv) { struct armada_thermal_data *data = priv->data; @@ -614,7 +614,7 @@ static const struct armada_thermal_data armada380_data = { }; static const struct armada_thermal_data armada_ap806_data = { - .init = armada_ap806_init, + .init = armada_ap80x_init, .is_valid_bit = BIT(16), .temp_shift = 0, .temp_mask = 0x3ff, @@ -637,6 +637,30 @@ static const struct armada_thermal_data armada_ap806_data = { .cpu_nr = 4, }; +static const struct armada_thermal_data armada_ap807_data = { + .init = armada_ap80x_init, + .is_valid_bit = BIT(16), + .temp_shift = 0, + .temp_mask = 0x3ff, + .thresh_shift = 3, + .hyst_shift = 19, + .hyst_mask = 0x3, + .coef_b = -128900LL, + .coef_m = 394ULL, + .coef_div = 1, + .inverted = true, + .signed_sample = true, + .syscon_control0_off = 0x84, + .syscon_control1_off = 0x88, + .syscon_status_off = 0x8C, + .dfx_irq_cause_off = 0x108, + .dfx_irq_mask_off = 0x10C, + .dfx_overheat_irq = BIT(22), + .dfx_server_irq_mask_off = 0x104, + .dfx_server_irq_en = BIT(1), + .cpu_nr = 4, +}; + static const struct armada_thermal_data armada_cp110_data = { .init = armada_cp110_init, .is_valid_bit = BIT(10), @@ -681,6 +705,10 @@ static const struct of_device_id armada_thermal_id_table[] = { .data = &armada_ap806_data, }, { + .compatible = "marvell,armada-ap807-thermal", + .data = &armada_ap807_data, + }, + { .compatible = "marvell,armada-cp110-thermal", .data = &armada_cp110_data, }, diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c index d8005e9ec992..d4b40869c7d7 100644 --- a/drivers/thermal/imx8mm_thermal.c +++ b/drivers/thermal/imx8mm_thermal.c @@ -343,8 +343,7 @@ static int imx8mm_tmu_probe(struct platform_device *pdev) } tmu->sensors[i].hw_id = i; - if 
(devm_thermal_add_hwmon_sysfs(&pdev->dev, tmu->sensors[i].tzd)) - dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n"); + devm_thermal_add_hwmon_sysfs(&pdev->dev, tmu->sensors[i].tzd); } platform_set_drvdata(pdev, tmu); diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c index 839bb9958f60..8d6b4ef23746 100644 --- a/drivers/thermal/imx_sc_thermal.c +++ b/drivers/thermal/imx_sc_thermal.c @@ -116,8 +116,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev) return ret; } - if (devm_thermal_add_hwmon_sysfs(&pdev->dev, sensor->tzd)) - dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n"); + devm_thermal_add_hwmon_sysfs(&pdev->dev, sensor->tzd); } return 0; diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c index 01b80331eab6..dc519a665c18 100644 --- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c +++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c @@ -203,6 +203,151 @@ end: } EXPORT_SYMBOL(acpi_parse_art); +/* + * acpi_parse_psvt - Passive Table (PSVT) for passive cooling + * + * @handle: ACPI handle of the device which contains PSVT + * @psvt_count: the number of valid entries resulted from parsing PSVT + * @psvtp: pointer to array of psvt entries + * + */ +static int acpi_parse_psvt(acpi_handle handle, int *psvt_count, struct psvt **psvtp) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + int nr_bad_entries = 0, revision = 0; + union acpi_object *p; + acpi_status status; + int i, result = 0; + struct psvt *psvts; + + if (!acpi_has_method(handle, "PSVT")) + return -ENODEV; + + status = acpi_evaluate_object(handle, "PSVT", NULL, &buffer); + if (ACPI_FAILURE(status)) + return -ENODEV; + + p = buffer.pointer; + if (!p || (p->type != ACPI_TYPE_PACKAGE)) { + result = -EFAULT; + goto end; + } + + /* first package is the revision number */ + if (p->package.count > 0) { + union acpi_object *prev = &(p->package.elements[0]); + + if (prev->type == ACPI_TYPE_INTEGER) + revision = (int)prev->integer.value; + } else { + result = -EFAULT; + goto end; + } + + /* Support only version 2 */ + if (revision != 2) { + result = -EFAULT; + goto end; + } + + *psvt_count = p->package.count - 1; + if (!*psvt_count) { + result = -EFAULT; + goto end; + } + + psvts = kcalloc(*psvt_count, sizeof(*psvts), GFP_KERNEL); + if (!psvts) { + result = -ENOMEM; + goto end; + } + + /* Start index is 1 because the first package is the revision number */ + for (i = 1; i < p->package.count; i++) { + struct acpi_buffer psvt_int_format = { sizeof("RRNNNNNNNNNN"), "RRNNNNNNNNNN" }; + struct acpi_buffer psvt_str_format = { sizeof("RRNNNNNSNNNN"), "RRNNNNNSNNNN" }; + union acpi_object *package = &(p->package.elements[i]); + struct psvt *psvt = &psvts[i - 1 - nr_bad_entries]; + struct acpi_buffer *psvt_format = &psvt_int_format; + struct acpi_buffer element = { 0, NULL }; + union acpi_object *knob; + struct acpi_device *res; + struct psvt *psvt_ptr; + + element.length = ACPI_ALLOCATE_BUFFER; + element.pointer = NULL; + + if (package->package.count >= ACPI_NR_PSVT_ELEMENTS) { + knob = &(package->package.elements[ACPI_PSVT_CONTROL_KNOB]); + } else { + nr_bad_entries++; + pr_info("PSVT package %d is invalid, ignored\n", i); + continue; + } + + if (knob->type == ACPI_TYPE_STRING) { + psvt_format = &psvt_str_format; + if (knob->string.length > ACPI_LIMIT_STR_MAX_LEN - 1) { + pr_info("PSVT package %d limit string len exceeds max\n", i); + knob->string.length = 
ACPI_LIMIT_STR_MAX_LEN - 1; + } + } + + status = acpi_extract_package(&(p->package.elements[i]), psvt_format, &element); + if (ACPI_FAILURE(status)) { + nr_bad_entries++; + pr_info("PSVT package %d is invalid, ignored\n", i); + continue; + } + + psvt_ptr = (struct psvt *)element.pointer; + + memcpy(psvt, psvt_ptr, sizeof(*psvt)); + + /* The limit element can be string or U64 */ + psvt->control_knob_type = (u64)knob->type; + + if (knob->type == ACPI_TYPE_STRING) { + memset(&psvt->limit, 0, sizeof(u64)); + strncpy(psvt->limit.string, psvt_ptr->limit.str_ptr, knob->string.length); + } else { + psvt->limit.integer = psvt_ptr->limit.integer; + } + + kfree(element.pointer); + + res = acpi_fetch_acpi_dev(psvt->source); + if (!res) { + nr_bad_entries++; + pr_info("Failed to get source ACPI device\n"); + continue; + } + + res = acpi_fetch_acpi_dev(psvt->target); + if (!res) { + nr_bad_entries++; + pr_info("Failed to get target ACPI device\n"); + continue; + } + } + + /* don't count bad entries */ + *psvt_count -= nr_bad_entries; + + if (!*psvt_count) { + result = -EFAULT; + kfree(psvts); + goto end; + } + + *psvtp = psvts; + + return 0; + +end: + kfree(buffer.pointer); + return result; +} /* get device name from acpi handle */ static void get_single_name(acpi_handle handle, char *name) @@ -289,6 +434,57 @@ free_trt: return ret; } +static int fill_psvt(char __user *ubuf) +{ + int i, ret, count, psvt_len; + union psvt_object *psvt_user; + struct psvt *psvts; + + ret = acpi_parse_psvt(acpi_thermal_rel_handle, &count, &psvts); + if (ret) + return ret; + + psvt_len = count * sizeof(*psvt_user); + + psvt_user = kzalloc(psvt_len, GFP_KERNEL); + if (!psvt_user) { + ret = -ENOMEM; + goto free_psvt; + } + + /* now fill in user psvt data */ + for (i = 0; i < count; i++) { + /* userspace psvt needs device name instead of acpi reference */ + get_single_name(psvts[i].source, psvt_user[i].source_device); + get_single_name(psvts[i].target, psvt_user[i].target_device); + + psvt_user[i].priority = psvts[i].priority; + psvt_user[i].sample_period = psvts[i].sample_period; + psvt_user[i].passive_temp = psvts[i].passive_temp; + psvt_user[i].source_domain = psvts[i].source_domain; + psvt_user[i].control_knob = psvts[i].control_knob; + psvt_user[i].step_size = psvts[i].step_size; + psvt_user[i].limit_coeff = psvts[i].limit_coeff; + psvt_user[i].unlimit_coeff = psvts[i].unlimit_coeff; + psvt_user[i].control_knob_type = psvts[i].control_knob_type; + if (psvt_user[i].control_knob_type == ACPI_TYPE_STRING) + strncpy(psvt_user[i].limit.string, psvts[i].limit.string, + ACPI_LIMIT_STR_MAX_LEN); + else + psvt_user[i].limit.integer = psvts[i].limit.integer; + + } + + if (copy_to_user(ubuf, psvt_user, psvt_len)) + ret = -EFAULT; + + kfree(psvt_user); + +free_psvt: + kfree(psvts); + return ret; +} + static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd, unsigned long __arg) { @@ -298,6 +494,7 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd, char __user *arg = (void __user *)__arg; struct trt *trts = NULL; struct art *arts = NULL; + struct psvt *psvts; switch (cmd) { case ACPI_THERMAL_GET_TRT_COUNT: @@ -336,6 +533,27 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd, case ACPI_THERMAL_GET_ART: return fill_art(arg); + case ACPI_THERMAL_GET_PSVT_COUNT: + ret = acpi_parse_psvt(acpi_thermal_rel_handle, &count, &psvts); + if (!ret) { + kfree(psvts); + return put_user(count, (unsigned long __user *)__arg); + } + return ret; + + case ACPI_THERMAL_GET_PSVT_LEN: + /* total length of 
the data retrieved (count * PSVT entry size) */ + ret = acpi_parse_psvt(acpi_thermal_rel_handle, &count, &psvts); + length = count * sizeof(union psvt_object); + if (!ret) { + kfree(psvts); + return put_user(length, (unsigned long __user *)__arg); + } + return ret; + + case ACPI_THERMAL_GET_PSVT: + return fill_psvt(arg); + default: return -ENOTTY; } diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h index 78d942477035..ac376d8f9ee4 100644 --- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h +++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h @@ -14,6 +14,16 @@ #define ACPI_THERMAL_GET_TRT _IOR(ACPI_THERMAL_MAGIC, 5, unsigned long) #define ACPI_THERMAL_GET_ART _IOR(ACPI_THERMAL_MAGIC, 6, unsigned long) +/* + * ACPI_THERMAL_GET_PSVT_COUNT = Number of PSVT entries + * ACPI_THERMAL_GET_PSVT_LEN = Total return data size (PSVT count x each + * PSVT entry size) + * ACPI_THERMAL_GET_PSVT = Get the data as an array of psvt_objects + */ +#define ACPI_THERMAL_GET_PSVT_LEN _IOR(ACPI_THERMAL_MAGIC, 7, unsigned long) +#define ACPI_THERMAL_GET_PSVT_COUNT _IOR(ACPI_THERMAL_MAGIC, 8, unsigned long) +#define ACPI_THERMAL_GET_PSVT _IOR(ACPI_THERMAL_MAGIC, 9, unsigned long) + struct art { acpi_handle source; acpi_handle target; @@ -43,6 +53,32 @@ struct trt { u64 reserved4; } __packed; +#define ACPI_NR_PSVT_ELEMENTS 12 +#define ACPI_PSVT_CONTROL_KNOB 7 +#define ACPI_LIMIT_STR_MAX_LEN 8 + +struct psvt { + acpi_handle source; + acpi_handle target; + u64 priority; + u64 sample_period; + u64 passive_temp; + u64 source_domain; + u64 control_knob; + union { + /* For limit_type = ACPI_TYPE_INTEGER */ + u64 integer; + /* For limit_type = ACPI_TYPE_STRING */ + char string[ACPI_LIMIT_STR_MAX_LEN]; + char *str_ptr; + } limit; + u64 step_size; + u64 limit_coeff; + u64 unlimit_coeff; + /* Spec calls this field reserved, so we borrow it for type info */ + u64 control_knob_type; /* ACPI_TYPE_STRING or ACPI_TYPE_INTEGER */ +} __packed; + #define ACPI_NR_ART_ELEMENTS 13 /* for usrspace */ union art_object { @@ -77,6 +113,27 @@ union trt_object { u64 __data[8]; }; +union psvt_object { + struct { + char source_device[8]; + char target_device[8]; + u64 priority; + u64 sample_period; + u64 passive_temp; + u64 source_domain; + u64 control_knob; + union { + u64 integer; + char string[ACPI_LIMIT_STR_MAX_LEN]; + } limit; + u64 step_size; + u64 limit_coeff; + u64 unlimit_coeff; + u64 control_knob_type; + }; + u64 __data[ACPI_NR_PSVT_ELEMENTS]; +}; + #ifdef __KERNEL__ int acpi_thermal_rel_misc_device_add(acpi_handle handle); int acpi_thermal_rel_misc_device_remove(acpi_handle handle); diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c index a205221ec8df..013f1633f082 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c @@ -15,8 +15,8 @@ static const struct rapl_mmio_regs rapl_mmio_default = { .reg_unit = 0x5938, .regs[RAPL_DOMAIN_PACKAGE] = { 0x59a0, 0x593c, 0x58f0, 0, 0x5930}, .regs[RAPL_DOMAIN_DRAM] = { 0x58e0, 0x58e8, 0x58ec, 0, 0}, - .limits[RAPL_DOMAIN_PACKAGE] = 2, - .limits[RAPL_DOMAIN_DRAM] = 2, + .limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2), + .limits[RAPL_DOMAIN_DRAM] = BIT(POWER_LIMIT2), }; static int rapl_mmio_cpu_online(unsigned int cpu) @@ -27,9 +27,9 @@ static int rapl_mmio_cpu_online(unsigned int cpu) if (topology_physical_package_id(cpu)) 
return 0; - rp = rapl_find_package_domain(cpu, &rapl_mmio_priv); + rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true); if (!rp) { - rp = rapl_add_package(cpu, &rapl_mmio_priv); + rp = rapl_add_package(cpu, &rapl_mmio_priv, true); if (IS_ERR(rp)) return PTR_ERR(rp); } @@ -42,7 +42,7 @@ static int rapl_mmio_cpu_down_prep(unsigned int cpu) struct rapl_package *rp; int lead_cpu; - rp = rapl_find_package_domain(cpu, &rapl_mmio_priv); + rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true); if (!rp) return 0; @@ -97,6 +97,7 @@ int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc rapl_regs->regs[domain][reg]; rapl_mmio_priv.limits[domain] = rapl_regs->limits[domain]; } + rapl_mmio_priv.type = RAPL_IF_MMIO; rapl_mmio_priv.reg_unit = (u64)proc_priv->mmio_base + rapl_regs->reg_unit; rapl_mmio_priv.read_raw = rapl_mmio_read_raw; diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c index f99dc7e4ae89..db97499f4f0a 100644 --- a/drivers/thermal/intel/intel_soc_dts_iosf.c +++ b/drivers/thermal/intel/intel_soc_dts_iosf.c @@ -398,7 +398,7 @@ struct intel_soc_dts_sensors *intel_soc_dts_iosf_init( spin_lock_init(&sensors->intr_notify_lock); mutex_init(&sensors->dts_update_lock); sensors->intr_type = intr_type; - sensors->tj_max = tj_max; + sensors->tj_max = tj_max * 1000; if (intr_type == INTEL_SOC_DTS_INTERRUPT_NONE) notification = false; else diff --git a/drivers/thermal/k3_bandgap.c b/drivers/thermal/k3_bandgap.c index 791210458606..1c3e590157ec 100644 --- a/drivers/thermal/k3_bandgap.c +++ b/drivers/thermal/k3_bandgap.c @@ -222,8 +222,7 @@ static int k3_bandgap_probe(struct platform_device *pdev) goto err_alloc; } - if (devm_thermal_add_hwmon_sysfs(dev, data[id].tzd)) - dev_warn(dev, "Failed to add hwmon sysfs attributes\n"); + devm_thermal_add_hwmon_sysfs(dev, data[id].tzd); } platform_set_drvdata(pdev, bgp); diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c index 0b5528804bbd..f59d36de20a0 100644 --- a/drivers/thermal/mediatek/auxadc_thermal.c +++ b/drivers/thermal/mediatek/auxadc_thermal.c @@ -1222,12 +1222,7 @@ static int mtk_thermal_probe(struct platform_device *pdev) return -ENODEV; } - auxadc_base = devm_of_iomap(&pdev->dev, auxadc, 0, NULL); - if (IS_ERR(auxadc_base)) { - of_node_put(auxadc); - return PTR_ERR(auxadc_base); - } - + auxadc_base = of_iomap(auxadc, 0); auxadc_phys_base = of_get_phys_base(auxadc); of_node_put(auxadc); @@ -1243,12 +1238,7 @@ static int mtk_thermal_probe(struct platform_device *pdev) return -ENODEV; } - apmixed_base = devm_of_iomap(&pdev->dev, apmixedsys, 0, NULL); - if (IS_ERR(apmixed_base)) { - of_node_put(apmixedsys); - return PTR_ERR(apmixed_base); - } - + apmixed_base = of_iomap(apmixedsys, 0); apmixed_phys_base = of_get_phys_base(apmixedsys); of_node_put(apmixedsys); diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c index d0a3f95b7884..b693fac2d677 100644 --- a/drivers/thermal/mediatek/lvts_thermal.c +++ b/drivers/thermal/mediatek/lvts_thermal.c @@ -19,6 +19,8 @@ #include <linux/thermal.h> #include <dt-bindings/thermal/mediatek,lvts-thermal.h> +#include "../thermal_hwmon.h" + #define LVTS_MONCTL0(__base) (__base + 0x0000) #define LVTS_MONCTL1(__base) (__base + 0x0004) #define LVTS_MONCTL2(__base) (__base + 0x0008) @@ -996,6 +998,8 @@ static int lvts_ctrl_start(struct device *dev, struct lvts_ctrl *lvts_ctrl) return PTR_ERR(tz); } + devm_thermal_add_hwmon_sysfs(dev, tz); + /* * The 
thermal zone pointer will be needed in the * interrupt handler, we store it in the sensor diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c index 5749149ae2e4..5ddc39b2be32 100644 --- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c +++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c @@ -689,9 +689,7 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm) return PTR_ERR(tzd); } adc_tm->channels[i].tzd = tzd; - if (devm_thermal_add_hwmon_sysfs(adc_tm->dev, tzd)) - dev_warn(adc_tm->dev, - "Failed to add hwmon sysfs attributes\n"); + devm_thermal_add_hwmon_sysfs(adc_tm->dev, tzd); } return 0; diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index 0f88e98428ac..0e8ebfcd84c5 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -411,22 +411,19 @@ static int qpnp_tm_probe(struct platform_device *pdev) chip->base = res; ret = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, &type); - if (ret < 0) { - dev_err(&pdev->dev, "could not read type\n"); - return ret; - } + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "could not read type\n"); ret = qpnp_tm_read(chip, QPNP_TM_REG_SUBTYPE, &subtype); - if (ret < 0) { - dev_err(&pdev->dev, "could not read subtype\n"); - return ret; - } + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "could not read subtype\n"); ret = qpnp_tm_read(chip, QPNP_TM_REG_DIG_MAJOR, &dig_major); - if (ret < 0) { - dev_err(&pdev->dev, "could not read dig_major\n"); - return ret; - } + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "could not read dig_major\n"); if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1 && subtype != QPNP_TM_SUBTYPE_GEN2)) { @@ -448,20 +445,15 @@ static int qpnp_tm_probe(struct platform_device *pdev) */ chip->tz_dev = devm_thermal_of_zone_register( &pdev->dev, 0, chip, &qpnp_tm_sensor_ops); - if (IS_ERR(chip->tz_dev)) { - dev_err(&pdev->dev, "failed to register sensor\n"); - return PTR_ERR(chip->tz_dev); - } + if (IS_ERR(chip->tz_dev)) + return dev_err_probe(&pdev->dev, PTR_ERR(chip->tz_dev), + "failed to register sensor\n"); ret = qpnp_tm_init(chip); - if (ret < 0) { - dev_err(&pdev->dev, "init failed\n"); - return ret; - } + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "init failed\n"); - if (devm_thermal_add_hwmon_sysfs(&pdev->dev, chip->tz_dev)) - dev_warn(&pdev->dev, - "Failed to add hwmon sysfs attributes\n"); + devm_thermal_add_hwmon_sysfs(&pdev->dev, chip->tz_dev); ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qpnp_tm_isr, IRQF_ONESHOT, node->name, chip); diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c index e89c6f39a3ae..a941b4241b0a 100644 --- a/drivers/thermal/qcom/tsens-v0_1.c +++ b/drivers/thermal/qcom/tsens-v0_1.c @@ -39,26 +39,6 @@ struct tsens_legacy_calibration_format tsens_8916_nvmem = { }, }; -struct tsens_legacy_calibration_format tsens_8939_nvmem = { - .base_len = 8, - .base_shift = 2, - .sp_len = 6, - .mode = { 12, 0 }, - .invalid = { 12, 2 }, - .base = { { 0, 0 }, { 1, 24 } }, - .sp = { - { { 12, 3 }, { 12, 9 } }, - { { 12, 15 }, { 12, 21 } }, - { { 12, 27 }, { 13, 1 } }, - { { 13, 7 }, { 13, 13 } }, - { { 13, 19 }, { 13, 25 } }, - { { 0, 8 }, { 0, 14 } }, - { { 0, 20 }, { 0, 26 } }, - { { 1, 0 }, { 1, 6 } }, - { { 1, 12 }, { 1, 18 } }, - }, -}; - struct tsens_legacy_calibration_format tsens_8974_nvmem = { .base_len = 8, .base_shift = 2, @@ -103,22 +83,6 @@ struct tsens_legacy_calibration_format 
tsens_8974_backup_nvmem = { }, }; -struct tsens_legacy_calibration_format tsens_9607_nvmem = { - .base_len = 8, - .base_shift = 2, - .sp_len = 6, - .mode = { 2, 20 }, - .invalid = { 2, 22 }, - .base = { { 0, 0 }, { 2, 12 } }, - .sp = { - { { 0, 8 }, { 0, 14 } }, - { { 0, 20 }, { 0, 26 } }, - { { 1, 0 }, { 1, 6 } }, - { { 1, 12 }, { 1, 18 } }, - { { 2, 0 }, { 2, 6 } }, - }, -}; - static int calibrate_8916(struct tsens_priv *priv) { u32 p1[5], p2[5]; @@ -243,6 +207,39 @@ static int calibrate_8974(struct tsens_priv *priv) return 0; } +static int __init init_8226(struct tsens_priv *priv) +{ + priv->sensor[0].slope = 2901; + priv->sensor[1].slope = 2846; + priv->sensor[2].slope = 3038; + priv->sensor[3].slope = 2955; + priv->sensor[4].slope = 2901; + priv->sensor[5].slope = 2846; + + return init_common(priv); +} + +static int __init init_8909(struct tsens_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_sensors; ++i) + priv->sensor[i].slope = 3000; + + priv->sensor[0].p1_calib_offset = 0; + priv->sensor[0].p2_calib_offset = 0; + priv->sensor[1].p1_calib_offset = -10; + priv->sensor[1].p2_calib_offset = -6; + priv->sensor[2].p1_calib_offset = 0; + priv->sensor[2].p2_calib_offset = 0; + priv->sensor[3].p1_calib_offset = -9; + priv->sensor[3].p2_calib_offset = -9; + priv->sensor[4].p1_calib_offset = -8; + priv->sensor[4].p2_calib_offset = -10; + + return init_common(priv); +} + static int __init init_8939(struct tsens_priv *priv) { priv->sensor[0].slope = 2911; priv->sensor[1].slope = 2789; @@ -258,7 +255,28 @@ static int __init init_8939(struct tsens_priv *priv) { return init_common(priv); } -/* v0.1: 8916, 8939, 8974, 9607 */ +static int __init init_9607(struct tsens_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_sensors; ++i) + priv->sensor[i].slope = 3000; + + priv->sensor[0].p1_calib_offset = 1; + priv->sensor[0].p2_calib_offset = 1; + priv->sensor[1].p1_calib_offset = -4; + priv->sensor[1].p2_calib_offset = -2; + priv->sensor[2].p1_calib_offset = 4; + priv->sensor[2].p2_calib_offset = 8; + priv->sensor[3].p1_calib_offset = -3; + priv->sensor[3].p2_calib_offset = -5; + priv->sensor[4].p1_calib_offset = -4; + priv->sensor[4].p2_calib_offset = -4; + + return init_common(priv); +} + +/* v0.1: 8226, 8909, 8916, 8939, 8974, 9607 */ static struct tsens_features tsens_v0_1_feat = { .ver_major = VER_0_1, @@ -313,6 +331,32 @@ static const struct tsens_ops ops_v0_1 = { .get_temp = get_temp_common, }; +static const struct tsens_ops ops_8226 = { + .init = init_8226, + .calibrate = tsens_calibrate_common, + .get_temp = get_temp_common, +}; + +struct tsens_plat_data data_8226 = { + .num_sensors = 6, + .ops = &ops_8226, + .feat = &tsens_v0_1_feat, + .fields = tsens_v0_1_regfields, +}; + +static const struct tsens_ops ops_8909 = { + .init = init_8909, + .calibrate = tsens_calibrate_common, + .get_temp = get_temp_common, +}; + +struct tsens_plat_data data_8909 = { + .num_sensors = 5, + .ops = &ops_8909, + .feat = &tsens_v0_1_feat, + .fields = tsens_v0_1_regfields, +}; + static const struct tsens_ops ops_8916 = { .init = init_common, .calibrate = calibrate_8916, @@ -356,9 +400,15 @@ struct tsens_plat_data data_8974 = { .fields = tsens_v0_1_regfields, }; +static const struct tsens_ops ops_9607 = { + .init = init_9607, + .calibrate = tsens_calibrate_common, + .get_temp = get_temp_common, +}; + struct tsens_plat_data data_9607 = { .num_sensors = 5, - .ops = &ops_v0_1, + .ops = &ops_9607, .feat = &tsens_v0_1_feat, .fields = tsens_v0_1_regfields, }; diff --git a/drivers/thermal/qcom/tsens-v1.c 
b/drivers/thermal/qcom/tsens-v1.c index b822a426066d..51322430f1fe 100644 --- a/drivers/thermal/qcom/tsens-v1.c +++ b/drivers/thermal/qcom/tsens-v1.c @@ -42,28 +42,6 @@ struct tsens_legacy_calibration_format tsens_qcs404_nvmem = { }, }; -struct tsens_legacy_calibration_format tsens_8976_nvmem = { - .base_len = 8, - .base_shift = 2, - .sp_len = 6, - .mode = { 4, 0 }, - .invalid = { 4, 2 }, - .base = { { 0, 0 }, { 2, 8 } }, - .sp = { - { { 0, 8 }, { 0, 14 } }, - { { 0, 20 }, { 0, 26 } }, - { { 1, 0 }, { 1, 6 } }, - { { 1, 12 }, { 1, 18 } }, - { { 2, 8 }, { 2, 14 } }, - { { 2, 20 }, { 2, 26 } }, - { { 3, 0 }, { 3, 6 } }, - { { 3, 12 }, { 3, 18 } }, - { { 4, 2 }, { 4, 9 } }, - { { 4, 14 }, { 4, 21 } }, - { { 4, 26 }, { 5, 1 } }, - }, -}; - static int calibrate_v1(struct tsens_priv *priv) { u32 p1[10], p2[10]; diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index d3218127e617..98c356acfe98 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -134,10 +134,12 @@ int tsens_read_calibration(struct tsens_priv *priv, int shift, u32 *p1, u32 *p2, p1[i] = p1[i] + (base1 << shift); break; case TWO_PT_CALIB: + case TWO_PT_CALIB_NO_OFFSET: for (i = 0; i < priv->num_sensors; i++) p2[i] = (p2[i] + base2) << shift; fallthrough; case ONE_PT_CALIB2: + case ONE_PT_CALIB2_NO_OFFSET: for (i = 0; i < priv->num_sensors; i++) p1[i] = (p1[i] + base1) << shift; break; @@ -149,6 +151,18 @@ int tsens_read_calibration(struct tsens_priv *priv, int shift, u32 *p1, u32 *p2, } } + /* Apply calibration offset workaround except for _NO_OFFSET modes */ + switch (mode) { + case TWO_PT_CALIB: + for (i = 0; i < priv->num_sensors; i++) + p2[i] += priv->sensor[i].p2_calib_offset; + fallthrough; + case ONE_PT_CALIB2: + for (i = 0; i < priv->num_sensors; i++) + p1[i] += priv->sensor[i].p1_calib_offset; + break; + } + return mode; } @@ -254,7 +268,7 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1, if (!priv->sensor[i].slope) priv->sensor[i].slope = SLOPE_DEFAULT; - if (mode == TWO_PT_CALIB) { + if (mode == TWO_PT_CALIB || mode == TWO_PT_CALIB_NO_OFFSET) { /* * slope (m) = adc_code2 - adc_code1 (y2 - y1)/ * temp_120_degc - temp_30_degc (x2 - x1) @@ -1096,6 +1110,12 @@ static const struct of_device_id tsens_table[] = { .compatible = "qcom,mdm9607-tsens", .data = &data_9607, }, { + .compatible = "qcom,msm8226-tsens", + .data = &data_8226, + }, { + .compatible = "qcom,msm8909-tsens", + .data = &data_8909, + }, { .compatible = "qcom,msm8916-tsens", .data = &data_8916, }, { @@ -1189,9 +1209,7 @@ static int tsens_register(struct tsens_priv *priv) if (priv->ops->enable) priv->ops->enable(priv, i); - if (devm_thermal_add_hwmon_sysfs(priv->dev, tzd)) - dev_warn(priv->dev, - "Failed to add hwmon sysfs attributes\n"); + devm_thermal_add_hwmon_sysfs(priv->dev, tzd); } /* VER_0 require to set MIN and MAX THRESH diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h index dba9cd38f637..2805de1c6827 100644 --- a/drivers/thermal/qcom/tsens.h +++ b/drivers/thermal/qcom/tsens.h @@ -10,6 +10,8 @@ #define ONE_PT_CALIB 0x1 #define ONE_PT_CALIB2 0x2 #define TWO_PT_CALIB 0x3 +#define ONE_PT_CALIB2_NO_OFFSET 0x6 +#define TWO_PT_CALIB_NO_OFFSET 0x7 #define CAL_DEGC_PT1 30 #define CAL_DEGC_PT2 120 #define SLOPE_FACTOR 1000 @@ -57,6 +59,8 @@ struct tsens_sensor { unsigned int hw_id; int slope; u32 status; + int p1_calib_offset; + int p2_calib_offset; }; /** @@ -635,7 +639,7 @@ int get_temp_common(const struct tsens_sensor *s, int *temp); extern struct tsens_plat_data data_8960; 
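The qcom tsens changes above bundle three related things: new per-sensor calibration offsets (p1_calib_offset/p2_calib_offset) applied as a workaround in tsens_read_calibration(), new ONE_PT_CALIB2_NO_OFFSET/TWO_PT_CALIB_NO_OFFSET modes that skip that workaround, and msm8226/msm8909 platform data supplying the slopes and offsets. Reduced to the two-point case, the arithmetic looks roughly like the sketch below; CAL_SHIFT, the mode names, and the function itself are illustrative stand-ins, not the driver's real fuse layout.

	/*
	 * Hypothetical sketch of two-point calibration with the per-sensor
	 * offset workaround; simplified to the two-point case only.
	 */
	#include <linux/types.h>

	#define CAL_SHIFT 2

	enum cal_mode { ONE_PT_2, TWO_PT, TWO_PT_NO_OFF };

	static void apply_calibration(enum cal_mode mode, int num_sensors,
				      u32 *p1, u32 *p2, u32 base1, u32 base2,
				      const int *off1, const int *off2)
	{
		int i;

		for (i = 0; i < num_sensors; i++) {
			/* Fold the fused base value in, then scale. */
			p1[i] = (p1[i] + base1) << CAL_SHIFT;
			if (mode != ONE_PT_2)
				p2[i] = (p2[i] + base2) << CAL_SHIFT;
		}

		/* The offset workaround is skipped for the _NO_OFFSET mode. */
		if (mode == TWO_PT) {
			for (i = 0; i < num_sensors; i++) {
				p1[i] += off1[i];
				p2[i] += off2[i];
			}
		}
	}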
/* TSENS v0.1 targets */ -extern struct tsens_plat_data data_8916, data_8939, data_8974, data_9607; +extern struct tsens_plat_data data_8226, data_8909, data_8916, data_8939, data_8974, data_9607; /* TSENS v1 targets */ extern struct tsens_plat_data data_tsens_v1, data_8976, data_8956; diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index e58756323457..ccc2eea7f9f5 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c @@ -31,7 +31,6 @@ #define TMR_DISABLE 0x0 #define TMR_ME 0x80000000 #define TMR_ALPF 0x0c000000 -#define TMR_MSITE_ALL GENMASK(15, 0) #define REGS_TMTMIR 0x008 /* Temperature measurement interval Register */ #define TMTMIR_DEFAULT 0x0000000f @@ -51,6 +50,7 @@ * Site Register */ #define TRITSR_V BIT(31) +#define TRITSR_TP5 BIT(9) #define REGS_V2_TMSAR(n) (0x304 + 16 * (n)) /* TMU monitoring * site adjustment register */ @@ -105,6 +105,11 @@ static int tmu_get_temp(struct thermal_zone_device *tz, int *temp) * within sensor range. TEMP is an 9 bit value representing * temperature in KelVin. */ + + regmap_read(qdata->regmap, REGS_TMR, &val); + if (!(val & TMR_ME)) + return -EAGAIN; + if (regmap_read_poll_timeout(qdata->regmap, REGS_TRITSR(qsensor->id), val, @@ -113,10 +118,15 @@ static int tmu_get_temp(struct thermal_zone_device *tz, int *temp) 10 * USEC_PER_MSEC)) return -ENODATA; - if (qdata->ver == TMU_VER1) + if (qdata->ver == TMU_VER1) { *temp = (val & GENMASK(7, 0)) * MILLIDEGREE_PER_DEGREE; - else - *temp = kelvin_to_millicelsius(val & GENMASK(8, 0)); + } else { + if (val & TRITSR_TP5) + *temp = milli_kelvin_to_millicelsius((val & GENMASK(8, 0)) * + MILLIDEGREE_PER_DEGREE + 500); + else + *temp = kelvin_to_millicelsius(val & GENMASK(8, 0)); + } return 0; } @@ -128,15 +138,7 @@ static const struct thermal_zone_device_ops tmu_tz_ops = { static int qoriq_tmu_register_tmu_zone(struct device *dev, struct qoriq_tmu_data *qdata) { - int id; - - if (qdata->ver == TMU_VER1) { - regmap_write(qdata->regmap, REGS_TMR, - TMR_MSITE_ALL | TMR_ME | TMR_ALPF); - } else { - regmap_write(qdata->regmap, REGS_V2_TMSR, TMR_MSITE_ALL); - regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF_V2); - } + int id, sites = 0; for (id = 0; id < SITES_MAX; id++) { struct thermal_zone_device *tzd; @@ -153,14 +155,24 @@ static int qoriq_tmu_register_tmu_zone(struct device *dev, if (ret == -ENODEV) continue; - regmap_write(qdata->regmap, REGS_TMR, TMR_DISABLE); return ret; } - if (devm_thermal_add_hwmon_sysfs(dev, tzd)) - dev_warn(dev, - "Failed to add hwmon sysfs attributes\n"); + if (qdata->ver == TMU_VER1) + sites |= 0x1 << (15 - id); + else + sites |= 0x1 << id; + + devm_thermal_add_hwmon_sysfs(dev, tzd); + } + if (sites) { + if (qdata->ver == TMU_VER1) { + regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF | sites); + } else { + regmap_write(qdata->regmap, REGS_V2_TMSR, sites); + regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF_V2); + } } return 0; @@ -208,8 +220,6 @@ static int qoriq_tmu_calibration(struct device *dev, static void qoriq_tmu_init_device(struct qoriq_tmu_data *data) { - int i; - /* Disable interrupt, using polling instead */ regmap_write(data->regmap, REGS_TIER, TIER_DISABLE); @@ -220,8 +230,6 @@ static void qoriq_tmu_init_device(struct qoriq_tmu_data *data) } else { regmap_write(data->regmap, REGS_V2_TMTMIR, TMTMIR_DEFAULT); regmap_write(data->regmap, REGS_V2_TEUMR(0), TEUMR0_V2); - for (i = 0; i < SITES_MAX; i++) - regmap_write(data->regmap, REGS_V2_TMSAR(i), TMSARA_V2); } /* Disable monitoring */ @@ -230,7 
+238,7 @@ static void qoriq_tmu_init_device(struct qoriq_tmu_data *data) static const struct regmap_range qoriq_yes_ranges[] = { regmap_reg_range(REGS_TMR, REGS_TSCFGR), - regmap_reg_range(REGS_TTRnCR(0), REGS_TTRnCR(3)), + regmap_reg_range(REGS_TTRnCR(0), REGS_TTRnCR(15)), regmap_reg_range(REGS_V2_TEUMR(0), REGS_V2_TEUMR(2)), regmap_reg_range(REGS_V2_TMSAR(0), REGS_V2_TMSAR(15)), regmap_reg_range(REGS_IPBRR(0), REGS_IPBRR(1)), diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 42a4724d3920..9029d01e029b 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -35,6 +35,12 @@ #define REG_GEN3_PTAT2 0x60 #define REG_GEN3_PTAT3 0x64 #define REG_GEN3_THSCP 0x68 +#define REG_GEN4_THSFMON00 0x180 +#define REG_GEN4_THSFMON01 0x184 +#define REG_GEN4_THSFMON02 0x188 +#define REG_GEN4_THSFMON15 0x1BC +#define REG_GEN4_THSFMON16 0x1C0 +#define REG_GEN4_THSFMON17 0x1C4 /* IRQ{STR,MSK,EN} bits */ #define IRQ_TEMP1 BIT(0) @@ -55,6 +61,7 @@ #define MCELSIUS(temp) ((temp) * 1000) #define GEN3_FUSE_MASK 0xFFF +#define GEN4_FUSE_MASK 0xFFF #define TSC_MAX_NUM 5 @@ -66,6 +73,13 @@ struct equation_coefs { int b2; }; +struct rcar_gen3_thermal_priv; + +struct rcar_thermal_info { + int ths_tj_1; + void (*read_fuses)(struct rcar_gen3_thermal_priv *priv); +}; + struct rcar_gen3_thermal_tsc { void __iomem *base; struct thermal_zone_device *zone; @@ -79,6 +93,7 @@ struct rcar_gen3_thermal_priv { struct thermal_zone_device_ops ops; unsigned int num_tscs; int ptat[3]; + const struct rcar_thermal_info *info; }; static inline u32 rcar_gen3_thermal_read(struct rcar_gen3_thermal_tsc *tsc, @@ -236,6 +251,62 @@ static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data) return IRQ_HANDLED; } +static void rcar_gen3_thermal_read_fuses_gen3(struct rcar_gen3_thermal_priv *priv) +{ + unsigned int i; + + /* + * Set the pseudo calibration points with fused values. + * PTAT is shared between all TSCs but only fused for the first + * TSC while THCODEs are fused for each TSC. + */ + priv->ptat[0] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT1) & + GEN3_FUSE_MASK; + priv->ptat[1] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT2) & + GEN3_FUSE_MASK; + priv->ptat[2] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT3) & + GEN3_FUSE_MASK; + + for (i = 0; i < priv->num_tscs; i++) { + struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i]; + + tsc->thcode[0] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE1) & + GEN3_FUSE_MASK; + tsc->thcode[1] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE2) & + GEN3_FUSE_MASK; + tsc->thcode[2] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE3) & + GEN3_FUSE_MASK; + } +} + +static void rcar_gen3_thermal_read_fuses_gen4(struct rcar_gen3_thermal_priv *priv) +{ + unsigned int i; + + /* + * Set the pseudo calibration points with fused values. + * PTAT is shared between all TSCs but only fused for the first + * TSC while THCODEs are fused for each TSC. 
+ */ + priv->ptat[0] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN4_THSFMON16) & + GEN4_FUSE_MASK; + priv->ptat[1] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN4_THSFMON17) & + GEN4_FUSE_MASK; + priv->ptat[2] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN4_THSFMON15) & + GEN4_FUSE_MASK; + + for (i = 0; i < priv->num_tscs; i++) { + struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i]; + + tsc->thcode[0] = rcar_gen3_thermal_read(tsc, REG_GEN4_THSFMON01) & + GEN4_FUSE_MASK; + tsc->thcode[1] = rcar_gen3_thermal_read(tsc, REG_GEN4_THSFMON02) & + GEN4_FUSE_MASK; + tsc->thcode[2] = rcar_gen3_thermal_read(tsc, REG_GEN4_THSFMON00) & + GEN4_FUSE_MASK; + } +} + static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv) { unsigned int i; @@ -243,7 +314,8 @@ static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv) /* If fuses are not set, fallback to pseudo values. */ thscp = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_THSCP); - if ((thscp & THSCP_COR_PARA_VLD) != THSCP_COR_PARA_VLD) { + if (!priv->info->read_fuses || + (thscp & THSCP_COR_PARA_VLD) != THSCP_COR_PARA_VLD) { /* Default THCODE values in case FUSEs are not set. */ static const int thcodes[TSC_MAX_NUM][3] = { { 3397, 2800, 2221 }, @@ -268,29 +340,7 @@ static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv) return false; } - /* - * Set the pseudo calibration points with fused values. - * PTAT is shared between all TSCs but only fused for the first - * TSC while THCODEs are fused for each TSC. - */ - priv->ptat[0] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT1) & - GEN3_FUSE_MASK; - priv->ptat[1] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT2) & - GEN3_FUSE_MASK; - priv->ptat[2] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT3) & - GEN3_FUSE_MASK; - - for (i = 0; i < priv->num_tscs; i++) { - struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i]; - - tsc->thcode[0] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE1) & - GEN3_FUSE_MASK; - tsc->thcode[1] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE2) & - GEN3_FUSE_MASK; - tsc->thcode[2] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE3) & - GEN3_FUSE_MASK; - } - + priv->info->read_fuses(priv); return true; } @@ -318,52 +368,65 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_priv *priv, usleep_range(1000, 2000); } -static const int rcar_gen3_ths_tj_1 = 126; -static const int rcar_gen3_ths_tj_1_m3_w = 116; +static const struct rcar_thermal_info rcar_m3w_thermal_info = { + .ths_tj_1 = 116, + .read_fuses = rcar_gen3_thermal_read_fuses_gen3, +}; + +static const struct rcar_thermal_info rcar_gen3_thermal_info = { + .ths_tj_1 = 126, + .read_fuses = rcar_gen3_thermal_read_fuses_gen3, +}; + +static const struct rcar_thermal_info rcar_gen4_thermal_info = { + .ths_tj_1 = 126, + .read_fuses = rcar_gen3_thermal_read_fuses_gen4, +}; + static const struct of_device_id rcar_gen3_thermal_dt_ids[] = { { .compatible = "renesas,r8a774a1-thermal", - .data = &rcar_gen3_ths_tj_1_m3_w, + .data = &rcar_m3w_thermal_info, }, { .compatible = "renesas,r8a774b1-thermal", - .data = &rcar_gen3_ths_tj_1, + .data = &rcar_gen3_thermal_info, }, { .compatible = "renesas,r8a774e1-thermal", - .data = &rcar_gen3_ths_tj_1, + .data = &rcar_gen3_thermal_info, }, { .compatible = "renesas,r8a7795-thermal", - .data = &rcar_gen3_ths_tj_1, + .data = &rcar_gen3_thermal_info, }, { .compatible = "renesas,r8a7796-thermal", - .data = &rcar_gen3_ths_tj_1_m3_w, + .data = &rcar_m3w_thermal_info, }, { .compatible = "renesas,r8a77961-thermal", - .data = 
&rcar_gen3_ths_tj_1_m3_w, + .data = &rcar_m3w_thermal_info, }, { .compatible = "renesas,r8a77965-thermal", - .data = &rcar_gen3_ths_tj_1, + .data = &rcar_gen3_thermal_info, }, { .compatible = "renesas,r8a77980-thermal", - .data = &rcar_gen3_ths_tj_1, + .data = &rcar_gen3_thermal_info, }, { .compatible = "renesas,r8a779a0-thermal", - .data = &rcar_gen3_ths_tj_1, + .data = &rcar_gen3_thermal_info, }, { .compatible = "renesas,r8a779f0-thermal", - .data = &rcar_gen3_ths_tj_1, + .data = &rcar_gen4_thermal_info, }, { .compatible = "renesas,r8a779g0-thermal", - .data = &rcar_gen3_ths_tj_1, + .data = &rcar_gen4_thermal_info, }, {}, }; @@ -418,7 +481,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev) { struct rcar_gen3_thermal_priv *priv; struct device *dev = &pdev->dev; - const int *ths_tj_1 = of_device_get_match_data(dev); struct resource *res; struct thermal_zone_device *zone; unsigned int i; @@ -430,6 +492,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev) priv->ops = rcar_gen3_tz_of_ops; + priv->info = of_device_get_match_data(dev); platform_set_drvdata(pdev, priv); if (rcar_gen3_thermal_request_irqs(priv, pdev)) @@ -469,7 +532,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev) struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i]; rcar_gen3_thermal_init(priv, tsc); - rcar_gen3_thermal_calc_coefs(priv, tsc, *ths_tj_1); + rcar_gen3_thermal_calc_coefs(priv, tsc, priv->info->ths_tj_1); zone = devm_thermal_of_zone_register(dev, i, tsc, &priv->ops); if (IS_ERR(zone)) { diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c index 2d3042098445..0d6249b36609 100644 --- a/drivers/thermal/st/st_thermal.c +++ b/drivers/thermal/st/st_thermal.c @@ -227,14 +227,12 @@ sensor_off: } EXPORT_SYMBOL_GPL(st_thermal_register); -int st_thermal_unregister(struct platform_device *pdev) +void st_thermal_unregister(struct platform_device *pdev) { struct st_thermal_sensor *sensor = platform_get_drvdata(pdev); st_thermal_sensor_off(sensor); thermal_zone_device_unregister(sensor->thermal_dev); - - return 0; } EXPORT_SYMBOL_GPL(st_thermal_unregister); diff --git a/drivers/thermal/st/st_thermal.h b/drivers/thermal/st/st_thermal.h index d661b2f2ef29..75a84e6ec6a7 100644 --- a/drivers/thermal/st/st_thermal.h +++ b/drivers/thermal/st/st_thermal.h @@ -94,7 +94,7 @@ struct st_thermal_sensor { extern int st_thermal_register(struct platform_device *pdev, const struct of_device_id *st_thermal_of_match); -extern int st_thermal_unregister(struct platform_device *pdev); +extern void st_thermal_unregister(struct platform_device *pdev); extern const struct dev_pm_ops st_thermal_pm_ops; #endif /* __STI_RESET_SYSCFG_H */ diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c index d68596c40be9..e8cfa83b724a 100644 --- a/drivers/thermal/st/st_thermal_memmap.c +++ b/drivers/thermal/st/st_thermal_memmap.c @@ -172,9 +172,9 @@ static int st_mmap_probe(struct platform_device *pdev) return st_thermal_register(pdev, st_mmap_thermal_of_match); } -static int st_mmap_remove(struct platform_device *pdev) +static void st_mmap_remove(struct platform_device *pdev) { - return st_thermal_unregister(pdev); + st_thermal_unregister(pdev); } static struct platform_driver st_mmap_thermal_driver = { @@ -184,7 +184,7 @@ static struct platform_driver st_mmap_thermal_driver = { .of_match_table = st_mmap_thermal_of_match, }, .probe = st_mmap_probe, - .remove = st_mmap_remove, + .remove_new = st_mmap_remove, }; 
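The st_thermal hunks above are part of the tree-wide conversion to void-returning platform remove callbacks: st_thermal_unregister() drops its always-zero return value, and the driver is registered through .remove_new instead of .remove. A minimal sketch of the resulting pattern, using a hypothetical foo driver:

	#include <linux/platform_device.h>

	struct foo_priv { int dummy; };

	static int foo_probe(struct platform_device *pdev)
	{
		static struct foo_priv priv;

		platform_set_drvdata(pdev, &priv);
		return 0;
	}

	/* Returns void: the platform core ignored the old int return anyway. */
	static void foo_remove(struct platform_device *pdev)
	{
		struct foo_priv *priv = platform_get_drvdata(pdev);

		(void)priv;	/* power the hardware off, free resources, ... */
	}

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo-thermal",
		},
		.probe = foo_probe,
		.remove_new = foo_remove,
	};
	module_platform_driver(foo_driver);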
module_platform_driver(st_mmap_thermal_driver); diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c index 793ddce72132..195f3c5d0b38 100644 --- a/drivers/thermal/sun8i_thermal.c +++ b/drivers/thermal/sun8i_thermal.c @@ -319,6 +319,11 @@ out: return ret; } +static void sun8i_ths_reset_control_assert(void *data) +{ + reset_control_assert(data); +} + static int sun8i_ths_resource_init(struct ths_device *tmdev) { struct device *dev = tmdev->dev; @@ -339,47 +344,35 @@ static int sun8i_ths_resource_init(struct ths_device *tmdev) if (IS_ERR(tmdev->reset)) return PTR_ERR(tmdev->reset); - tmdev->bus_clk = devm_clk_get(&pdev->dev, "bus"); + ret = reset_control_deassert(tmdev->reset); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, sun8i_ths_reset_control_assert, + tmdev->reset); + if (ret) + return ret; + + tmdev->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus"); if (IS_ERR(tmdev->bus_clk)) return PTR_ERR(tmdev->bus_clk); } if (tmdev->chip->has_mod_clk) { - tmdev->mod_clk = devm_clk_get(&pdev->dev, "mod"); + tmdev->mod_clk = devm_clk_get_enabled(&pdev->dev, "mod"); if (IS_ERR(tmdev->mod_clk)) return PTR_ERR(tmdev->mod_clk); } - ret = reset_control_deassert(tmdev->reset); - if (ret) - return ret; - - ret = clk_prepare_enable(tmdev->bus_clk); - if (ret) - goto assert_reset; - ret = clk_set_rate(tmdev->mod_clk, 24000000); if (ret) - goto bus_disable; - - ret = clk_prepare_enable(tmdev->mod_clk); - if (ret) - goto bus_disable; + return ret; ret = sun8i_ths_calibrate(tmdev); if (ret) - goto mod_disable; + return ret; return 0; - -mod_disable: - clk_disable_unprepare(tmdev->mod_clk); -bus_disable: - clk_disable_unprepare(tmdev->bus_clk); -assert_reset: - reset_control_assert(tmdev->reset); - - return ret; } static int sun8i_h3_thermal_init(struct ths_device *tmdev) @@ -475,9 +468,7 @@ static int sun8i_ths_register(struct ths_device *tmdev) if (IS_ERR(tmdev->sensor[i].tzd)) return PTR_ERR(tmdev->sensor[i].tzd); - if (devm_thermal_add_hwmon_sysfs(tmdev->dev, tmdev->sensor[i].tzd)) - dev_warn(tmdev->dev, - "Failed to add hwmon sysfs attributes\n"); + devm_thermal_add_hwmon_sysfs(tmdev->dev, tmdev->sensor[i].tzd); } return 0; @@ -530,17 +521,6 @@ static int sun8i_ths_probe(struct platform_device *pdev) return 0; } -static int sun8i_ths_remove(struct platform_device *pdev) -{ - struct ths_device *tmdev = platform_get_drvdata(pdev); - - clk_disable_unprepare(tmdev->mod_clk); - clk_disable_unprepare(tmdev->bus_clk); - reset_control_assert(tmdev->reset); - - return 0; -} - static const struct ths_thermal_chip sun8i_a83t_ths = { .sensor_num = 3, .scale = 705, @@ -642,7 +622,6 @@ MODULE_DEVICE_TABLE(of, of_ths_match); static struct platform_driver ths_driver = { .probe = sun8i_ths_probe, - .remove = sun8i_ths_remove, .driver = { .name = "sun8i-thermal", .of_match_table = of_ths_match, diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c index cb584a5735ed..c243e9d76d3c 100644 --- a/drivers/thermal/tegra/tegra30-tsensor.c +++ b/drivers/thermal/tegra/tegra30-tsensor.c @@ -523,8 +523,7 @@ static int tegra_tsensor_register_channel(struct tegra_tsensor *ts, return 0; } - if (devm_thermal_add_hwmon_sysfs(ts->dev, tsc->tzd)) - dev_warn(ts->dev, "failed to add hwmon sysfs attributes\n"); + devm_thermal_add_hwmon_sysfs(ts->dev, tsc->tzd); return 0; } diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c index 017b0ce52122..f4f1a04f8c0f 100644 --- a/drivers/thermal/thermal-generic-adc.c +++ 
b/drivers/thermal/thermal-generic-adc.c @@ -13,6 +13,8 @@ #include <linux/slab.h> #include <linux/thermal.h> +#include "thermal_hwmon.h" + struct gadc_thermal_info { struct device *dev; struct thermal_zone_device *tz_dev; @@ -153,6 +155,8 @@ static int gadc_thermal_probe(struct platform_device *pdev) return ret; } + devm_thermal_add_hwmon_sysfs(&pdev->dev, gti->tz_dev); + return 0; } diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 3d4a787c6b28..17c1bbed734d 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -23,6 +23,8 @@ #define DEFAULT_THERMAL_GOVERNOR "user_space" #elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR) #define DEFAULT_THERMAL_GOVERNOR "power_allocator" +#elif defined(CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG) +#define DEFAULT_THERMAL_GOVERNOR "bang_bang" #endif /* Initial state of a cooling device during binding */ diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index fbe55509e307..c3ae44659b81 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -271,11 +271,14 @@ int devm_thermal_add_hwmon_sysfs(struct device *dev, struct thermal_zone_device ptr = devres_alloc(devm_thermal_hwmon_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) + if (!ptr) { + dev_warn(dev, "Failed to allocate device resource data\n"); return -ENOMEM; + } ret = thermal_add_hwmon_sysfs(tz); if (ret) { + dev_warn(dev, "Failed to add hwmon sysfs attributes\n"); devres_free(ptr); return ret; } diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index 6a5335931f4d..d414a4b7a94a 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -182,8 +182,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, ti_bandgap_write_update_interval(bgp, data->sensor_id, TI_BANDGAP_UPDATE_INTERVAL_MS); - if (devm_thermal_add_hwmon_sysfs(bgp->dev, data->ti_thermal)) - dev_warn(bgp->dev, "failed to add hwmon sysfs attributes\n"); + devm_thermal_add_hwmon_sysfs(bgp->dev, data->ti_thermal); return 0; } diff --git a/drivers/thunderbolt/dma_test.c b/drivers/thunderbolt/dma_test.c index 3bedecb236e0..14bb6dec6c4b 100644 --- a/drivers/thunderbolt/dma_test.c +++ b/drivers/thunderbolt/dma_test.c @@ -192,9 +192,9 @@ static int dma_test_start_rings(struct dma_test *dt) } ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid, - dt->tx_ring ? dt->tx_ring->hop : 0, + dt->tx_ring ? dt->tx_ring->hop : -1, dt->rx_hopid, - dt->rx_ring ? dt->rx_ring->hop : 0); + dt->rx_ring ? dt->rx_ring->hop : -1); if (ret) { dma_test_free_rings(dt); return ret; @@ -218,9 +218,9 @@ static void dma_test_stop_rings(struct dma_test *dt) tb_ring_stop(dt->tx_ring); ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid, - dt->tx_ring ? dt->tx_ring->hop : 0, + dt->tx_ring ? dt->tx_ring->hop : -1, dt->rx_hopid, - dt->rx_ring ? dt->rx_ring->hop : 0); + dt->rx_ring ? 
dt->rx_ring->hop : -1); if (ret) dev_warn(&dt->svc->dev, "failed to disable DMA paths\n"); diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index c0aee5dc5237..e58beac44295 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -56,9 +56,14 @@ static int ring_interrupt_index(const struct tb_ring *ring) static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring) { - if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) - return; - iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring); + if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) { + u32 val; + + val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring); + iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring); + } else { + iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring); + } } static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring) diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 7bfbc9ca9ba4..c1af712ca728 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -737,6 +737,7 @@ static void tb_scan_port(struct tb_port *port) { struct tb_cm *tcm = tb_priv(port->sw->tb); struct tb_port *upstream_port; + bool discovery = false; struct tb_switch *sw; int ret; @@ -804,8 +805,10 @@ static void tb_scan_port(struct tb_port *port) * tunnels and know which switches were authorized already by * the boot firmware. */ - if (!tcm->hotplug_active) + if (!tcm->hotplug_active) { dev_set_uevent_suppress(&sw->dev, true); + discovery = true; + } /* * At the moment Thunderbolt 2 and beyond (devices with LC) we @@ -835,10 +838,14 @@ static void tb_scan_port(struct tb_port *port) * CL0s and CL1 are enabled and supported together. * Silently ignore CLx enabling in case CLx is not supported. */ - ret = tb_switch_enable_clx(sw, TB_CL1); - if (ret && ret != -EOPNOTSUPP) - tb_sw_warn(sw, "failed to enable %s on upstream port\n", - tb_switch_clx_name(TB_CL1)); + if (discovery) { + tb_sw_dbg(sw, "discovery, not touching CL states\n"); + } else { + ret = tb_switch_enable_clx(sw, TB_CL1); + if (ret && ret != -EOPNOTSUPP) + tb_sw_warn(sw, "failed to enable %s on upstream port\n", + tb_switch_clx_name(TB_CL1)); + } if (tb_switch_is_clx_enabled(sw, TB_CL1)) /* diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 9099ae73e78f..4f222673d651 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -526,7 +526,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) * Perform connection manager handshake between IN and OUT ports * before capabilities exchange can take place. */ - ret = tb_dp_cm_handshake(in, out, 1500); + ret = tb_dp_cm_handshake(in, out, 3000); if (ret) return ret; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 3e3fb377d90d..71a7a3e724ca 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -450,8 +450,8 @@ config SERIAL_SA1100 help If you have a machine based on a SA1100/SA1110 StrongARM(R) CPU you can enable its onboard serial port by enabling this option. - Please read <file:Documentation/arm/sa1100/serial_uart.rst> for further - info. + Please read <file:Documentation/arch/arm/sa1100/serial_uart.rst> for + further info. 
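In the thunderbolt NHI hunk above, nhi_mask_interrupt() stops being a no-op on controllers with QUIRK_AUTO_CLEAR_INT: such hardware has no MASK_CLEAR registers, so masking has to be done by clearing bits in the interrupt enable register itself. A standalone sketch of that control flow, with an illustrative quirk flag and register offsets rather than the real NHI register map:

	#include <linux/io.h>
	#include <linux/types.h>

	#define QUIRK_AUTO_CLEAR_INT	0x1
	#define REG_INT_ENABLE		0x38200	/* illustrative offset */
	#define REG_INT_MASK_CLEAR	0x38208	/* illustrative offset */

	static void mask_ring_interrupt(void __iomem *iobase, unsigned int quirks,
					u32 mask, int ring)
	{
		if (quirks & QUIRK_AUTO_CLEAR_INT) {
			/* No mask registers: read-modify-write the enable bits. */
			u32 val = ioread32(iobase + REG_INT_ENABLE + ring);

			iowrite32(val & ~mask, iobase + REG_INT_ENABLE + ring);
		} else {
			iowrite32(mask, iobase + REG_INT_MASK_CLEAR + ring);
		}
	}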
config SERIAL_SA1100_CONSOLE bool "Console on SA1100 serial port" diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 7486a2b8556c..7fd30fcc10c6 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -310,7 +310,7 @@ static const struct lpuart_soc_data ls1021a_data = { static const struct lpuart_soc_data ls1028a_data = { .devtype = LS1028A_LPUART, .iotype = UPIO_MEM32, - .rx_watermark = 1, + .rx_watermark = 0, }; static struct lpuart_soc_data imx7ulp_data = { diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c index a58e9277dfad..f1387f1024db 100644 --- a/drivers/tty/serial/lantiq.c +++ b/drivers/tty/serial/lantiq.c @@ -250,6 +250,7 @@ lqasc_err_int(int irq, void *_port) struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); spin_lock_irqsave(<q_port->lock, flags); + __raw_writel(ASC_IRNCR_EIR, port->membase + LTQ_ASC_IRNCR); /* clear any pending interrupts */ asc_update_bits(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE | ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index c84be40fb8df..4737a8f92c2e 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -466,7 +466,7 @@ static const struct file_operations tty_fops = { .llseek = no_llseek, .read_iter = tty_read, .write_iter = tty_write, - .splice_read = generic_file_splice_read, + .splice_read = copy_splice_read, .splice_write = iter_file_splice_write, .poll = tty_poll, .unlocked_ioctl = tty_ioctl, @@ -481,7 +481,7 @@ static const struct file_operations console_fops = { .llseek = no_llseek, .read_iter = tty_read, .write_iter = redirected_tty_write, - .splice_read = generic_file_splice_read, + .splice_read = copy_splice_read, .splice_write = iter_file_splice_write, .poll = tty_poll, .unlocked_ioctl = tty_ioctl, diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c index 268ccbec88f9..87230869e1fa 100644 --- a/drivers/usb/core/buffer.c +++ b/drivers/usb/core/buffer.c @@ -34,13 +34,13 @@ void __init usb_init_pool_max(void) { /* * The pool_max values must never be smaller than - * ARCH_KMALLOC_MINALIGN. + * ARCH_DMA_MINALIGN. */ - if (ARCH_KMALLOC_MINALIGN <= 32) + if (ARCH_DMA_MINALIGN <= 32) ; /* Original value is okay */ - else if (ARCH_KMALLOC_MINALIGN <= 64) + else if (ARCH_DMA_MINALIGN <= 64) pool_max[0] = 64; - else if (ARCH_KMALLOC_MINALIGN <= 128) + else if (ARCH_DMA_MINALIGN <= 128) pool_max[0] = 0; /* Don't use this pool */ else BUILD_BUG(); /* We don't allow this */ diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 7b2ce013cc5b..d68958e151a7 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1929,6 +1929,11 @@ static int dwc3_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); + /* + * HACK: Clear the driver data, which is currently accessed by parent + * glue drivers, before allowing the parent to suspend. + */ + platform_set_drvdata(pdev, NULL); pm_runtime_set_suspended(&pdev->dev); dwc3_free_event_buffers(dwc); diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 959fc925ca7c..79b22abf9727 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -308,7 +308,16 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom) /* Only usable in contexts where the role can not change. 
*/ static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom) { - struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); + struct dwc3 *dwc; + + /* + * FIXME: Fix this layering violation. + */ + dwc = platform_get_drvdata(qcom->dwc3); + + /* Core driver may not have probed yet. */ + if (!dwc) + return false; return dwc->xhci; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index d831f5acf7b5..b78599dd705c 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -198,6 +198,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, list_del(&req->list); req->remaining = 0; req->needs_extra_trb = false; + req->num_trbs = 0; if (req->request.status == -EINPROGRESS) req->request.status = status; diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 52e6d2e84e35..83fd1de14784 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -37,6 +37,14 @@ static const struct bus_type gadget_bus_type; * @vbus: for udcs who care about vbus status, this value is real vbus status; * for udcs who do not care about vbus status, this value is always true * @started: the UDC's started state. True if the UDC had started. + * @allow_connect: Indicates whether UDC is allowed to be pulled up. + * Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or + * unbound. + * @connect_lock: protects udc->started, gadget->connect, + * gadget->allow_connect and gadget->deactivate. The routines + * usb_gadget_connect_locked(), usb_gadget_disconnect_locked(), + * usb_udc_connect_control_locked(), usb_gadget_udc_start_locked() and + * usb_gadget_udc_stop_locked() are called with this lock held. * * This represents the internal data structure which is used by the UDC-class * to hold information about udc driver and gadget together. @@ -48,6 +56,9 @@ struct usb_udc { struct list_head list; bool vbus; bool started; + bool allow_connect; + struct work_struct vbus_work; + struct mutex connect_lock; }; static struct class *udc_class; @@ -687,17 +698,8 @@ out: } EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect); -/** - * usb_gadget_connect - software-controlled connect to USB host - * @gadget:the peripheral being connected - * - * Enables the D+ (or potentially D-) pullup. The host will start - * enumerating this gadget when the pullup is active and a VBUS session - * is active (the link is powered). - * - * Returns zero on success, else negative errno. - */ -int usb_gadget_connect(struct usb_gadget *gadget) +static int usb_gadget_connect_locked(struct usb_gadget *gadget) + __must_hold(&gadget->udc->connect_lock) { int ret = 0; @@ -706,10 +708,12 @@ int usb_gadget_connect(struct usb_gadget *gadget) goto out; } - if (gadget->deactivated) { + if (gadget->deactivated || !gadget->udc->allow_connect || !gadget->udc->started) { /* - * If gadget is deactivated we only save new state. - * Gadget will be connected automatically after activation. + * If the gadget isn't usable (because it is deactivated, + * unbound, or not yet started), we only save the new state. + * The gadget will be connected automatically when it is + * activated/bound/started. */ gadget->connected = true; goto out; @@ -724,22 +728,31 @@ out: return ret; } -EXPORT_SYMBOL_GPL(usb_gadget_connect); /** - * usb_gadget_disconnect - software-controlled disconnect from USB host - * @gadget:the peripheral being disconnected - * - * Disables the D+ (or potentially D-) pullup, which the host may see - * as a disconnect (when a VBUS session is active). 
Not all systems - * support software pullup controls. + * usb_gadget_connect - software-controlled connect to USB host + * @gadget:the peripheral being connected * - * Following a successful disconnect, invoke the ->disconnect() callback - * for the current gadget driver so that UDC drivers don't need to. + * Enables the D+ (or potentially D-) pullup. The host will start + * enumerating this gadget when the pullup is active and a VBUS session + * is active (the link is powered). * * Returns zero on success, else negative errno. */ -int usb_gadget_disconnect(struct usb_gadget *gadget) +int usb_gadget_connect(struct usb_gadget *gadget) +{ + int ret; + + mutex_lock(&gadget->udc->connect_lock); + ret = usb_gadget_connect_locked(gadget); + mutex_unlock(&gadget->udc->connect_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(usb_gadget_connect); + +static int usb_gadget_disconnect_locked(struct usb_gadget *gadget) + __must_hold(&gadget->udc->connect_lock) { int ret = 0; @@ -751,7 +764,7 @@ int usb_gadget_disconnect(struct usb_gadget *gadget) if (!gadget->connected) goto out; - if (gadget->deactivated) { + if (gadget->deactivated || !gadget->udc->started) { /* * If gadget is deactivated we only save new state. * Gadget will stay disconnected after activation. @@ -774,6 +787,30 @@ out: return ret; } + +/** + * usb_gadget_disconnect - software-controlled disconnect from USB host + * @gadget:the peripheral being disconnected + * + * Disables the D+ (or potentially D-) pullup, which the host may see + * as a disconnect (when a VBUS session is active). Not all systems + * support software pullup controls. + * + * Following a successful disconnect, invoke the ->disconnect() callback + * for the current gadget driver so that UDC drivers don't need to. + * + * Returns zero on success, else negative errno. + */ +int usb_gadget_disconnect(struct usb_gadget *gadget) +{ + int ret; + + mutex_lock(&gadget->udc->connect_lock); + ret = usb_gadget_disconnect_locked(gadget); + mutex_unlock(&gadget->udc->connect_lock); + + return ret; +} EXPORT_SYMBOL_GPL(usb_gadget_disconnect); /** @@ -791,13 +828,14 @@ int usb_gadget_deactivate(struct usb_gadget *gadget) { int ret = 0; + mutex_lock(&gadget->udc->connect_lock); if (gadget->deactivated) - goto out; + goto unlock; if (gadget->connected) { - ret = usb_gadget_disconnect(gadget); + ret = usb_gadget_disconnect_locked(gadget); if (ret) - goto out; + goto unlock; /* * If gadget was being connected before deactivation, we want @@ -807,7 +845,8 @@ int usb_gadget_deactivate(struct usb_gadget *gadget) } gadget->deactivated = true; -out: +unlock: + mutex_unlock(&gadget->udc->connect_lock); trace_usb_gadget_deactivate(gadget, ret); return ret; @@ -827,8 +866,9 @@ int usb_gadget_activate(struct usb_gadget *gadget) { int ret = 0; + mutex_lock(&gadget->udc->connect_lock); if (!gadget->deactivated) - goto out; + goto unlock; gadget->deactivated = false; @@ -837,9 +877,11 @@ int usb_gadget_activate(struct usb_gadget *gadget) * while it was being deactivated, we call usb_gadget_connect(). 
*/ if (gadget->connected) - ret = usb_gadget_connect(gadget); + ret = usb_gadget_connect_locked(gadget); + mutex_unlock(&gadget->udc->connect_lock); -out: +unlock: + mutex_unlock(&gadget->udc->connect_lock); trace_usb_gadget_activate(gadget, ret); return ret; @@ -1078,12 +1120,22 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state); /* ------------------------------------------------------------------------- */ -static void usb_udc_connect_control(struct usb_udc *udc) +/* Acquire connect_lock before calling this function. */ +static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock) { if (udc->vbus) - usb_gadget_connect(udc->gadget); + usb_gadget_connect_locked(udc->gadget); else - usb_gadget_disconnect(udc->gadget); + usb_gadget_disconnect_locked(udc->gadget); +} + +static void vbus_event_work(struct work_struct *work) +{ + struct usb_udc *udc = container_of(work, struct usb_udc, vbus_work); + + mutex_lock(&udc->connect_lock); + usb_udc_connect_control_locked(udc); + mutex_unlock(&udc->connect_lock); } /** @@ -1094,6 +1146,14 @@ static void usb_udc_connect_control(struct usb_udc *udc) * * The udc driver calls it when it wants to connect or disconnect gadget * according to vbus status. + * + * This function can be invoked from interrupt context by irq handlers of + * the gadget drivers, however, usb_udc_connect_control() has to run in + * non-atomic context due to the following: + * a. Some of the gadget driver implementations expect the ->pullup + * callback to be invoked in non-atomic context. + * b. usb_gadget_disconnect() acquires udc_lock which is a mutex. + * Hence offload invocation of usb_udc_connect_control() to workqueue. */ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status) { @@ -1101,7 +1161,7 @@ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status) if (udc) { udc->vbus = status; - usb_udc_connect_control(udc); + schedule_work(&udc->vbus_work); } } EXPORT_SYMBOL_GPL(usb_udc_vbus_handler); @@ -1124,7 +1184,7 @@ void usb_gadget_udc_reset(struct usb_gadget *gadget, EXPORT_SYMBOL_GPL(usb_gadget_udc_reset); /** - * usb_gadget_udc_start - tells usb device controller to start up + * usb_gadget_udc_start_locked - tells usb device controller to start up * @udc: The UDC to be started * * This call is issued by the UDC Class driver when it's about @@ -1135,8 +1195,11 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset); * necessary to have it powered on. * * Returns zero on success, else negative errno. + * + * Caller should acquire connect_lock before invoking this function. */ -static inline int usb_gadget_udc_start(struct usb_udc *udc) +static inline int usb_gadget_udc_start_locked(struct usb_udc *udc) + __must_hold(&udc->connect_lock) { int ret; @@ -1153,7 +1216,7 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc) } /** - * usb_gadget_udc_stop - tells usb device controller we don't need it anymore + * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore * @udc: The UDC to be stopped * * This call is issued by the UDC Class driver after calling @@ -1162,8 +1225,11 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc) * The details are implementation specific, but it can go as * far as powering off UDC completely and disable its data * line pullups. + * + * Caller should acquire connect lock before invoking this function. 
*/ -static inline void usb_gadget_udc_stop(struct usb_udc *udc) +static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc) + __must_hold(&udc->connect_lock) { if (!udc->started) { dev_err(&udc->dev, "UDC had already stopped\n"); @@ -1322,12 +1388,14 @@ int usb_add_gadget(struct usb_gadget *gadget) udc->gadget = gadget; gadget->udc = udc; + mutex_init(&udc->connect_lock); udc->started = false; mutex_lock(&udc_lock); list_add_tail(&udc->list, &udc_list); mutex_unlock(&udc_lock); + INIT_WORK(&udc->vbus_work, vbus_event_work); ret = device_add(&udc->dev); if (ret) @@ -1459,6 +1527,7 @@ void usb_del_gadget(struct usb_gadget *gadget) flush_work(&gadget->work); device_del(&gadget->dev); ida_free(&gadget_id_numbers, gadget->id_number); + cancel_work_sync(&udc->vbus_work); device_unregister(&udc->dev); } EXPORT_SYMBOL_GPL(usb_del_gadget); @@ -1523,11 +1592,16 @@ static int gadget_bind_driver(struct device *dev) if (ret) goto err_bind; - ret = usb_gadget_udc_start(udc); - if (ret) + mutex_lock(&udc->connect_lock); + ret = usb_gadget_udc_start_locked(udc); + if (ret) { + mutex_unlock(&udc->connect_lock); goto err_start; + } usb_gadget_enable_async_callbacks(udc); - usb_udc_connect_control(udc); + udc->allow_connect = true; + usb_udc_connect_control_locked(udc); + mutex_unlock(&udc->connect_lock); kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); return 0; @@ -1558,12 +1632,16 @@ static void gadget_unbind_driver(struct device *dev) kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); - usb_gadget_disconnect(gadget); + udc->allow_connect = false; + cancel_work_sync(&udc->vbus_work); + mutex_lock(&udc->connect_lock); + usb_gadget_disconnect_locked(gadget); usb_gadget_disable_async_callbacks(udc); if (gadget->irq) synchronize_irq(gadget->irq); udc->driver->unbind(gadget); - usb_gadget_udc_stop(udc); + usb_gadget_udc_stop_locked(udc); + mutex_unlock(&udc->connect_lock); mutex_lock(&udc_lock); driver->is_bound = false; @@ -1649,11 +1727,15 @@ static ssize_t soft_connect_store(struct device *dev, } if (sysfs_streq(buf, "connect")) { - usb_gadget_udc_start(udc); - usb_gadget_connect(udc->gadget); + mutex_lock(&udc->connect_lock); + usb_gadget_udc_start_locked(udc); + usb_gadget_connect_locked(udc->gadget); + mutex_unlock(&udc->connect_lock); } else if (sysfs_streq(buf, "disconnect")) { - usb_gadget_disconnect(udc->gadget); - usb_gadget_udc_stop(udc); + mutex_lock(&udc->connect_lock); + usb_gadget_disconnect_locked(udc->gadget); + usb_gadget_udc_stop_locked(udc); + mutex_unlock(&udc->connect_lock); } else { dev_err(dev, "unsupported command '%s'\n", buf); ret = -EINVAL; diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index aac8bc185afa..eb008e873361 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -2877,9 +2877,9 @@ static int renesas_usb3_probe(struct platform_device *pdev) struct rzv2m_usb3drd *ddata = dev_get_drvdata(pdev->dev.parent); usb3->drd_reg = ddata->reg; - ret = devm_request_irq(ddata->dev, ddata->drd_irq, + ret = devm_request_irq(&pdev->dev, ddata->drd_irq, renesas_usb3_otg_irq, 0, - dev_name(ddata->dev), usb3); + dev_name(&pdev->dev), usb3); if (ret < 0) return ret; } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 644a55447fd7..fd42e3a0bd18 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -248,6 +248,8 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_VENDOR_ID 0x2c7c /* These Quectel products use Quectel's vendor ID */ #define 
QUECTEL_PRODUCT_EC21 0x0121 +#define QUECTEL_PRODUCT_EM061K_LTA 0x0123 +#define QUECTEL_PRODUCT_EM061K_LMS 0x0124 #define QUECTEL_PRODUCT_EC25 0x0125 #define QUECTEL_PRODUCT_EG91 0x0191 #define QUECTEL_PRODUCT_EG95 0x0195 @@ -266,6 +268,8 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_RM520N 0x0801 #define QUECTEL_PRODUCT_EC200U 0x0901 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 +#define QUECTEL_PRODUCT_EM061K_LWW 0x6008 +#define QUECTEL_PRODUCT_EM061K_LCN 0x6009 #define QUECTEL_PRODUCT_EC200T 0x6026 #define QUECTEL_PRODUCT_RM500K 0x7001 @@ -1189,6 +1193,18 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c index 0bcde1ff4d39..8cc66e4467c4 100644 --- a/drivers/usb/typec/pd.c +++ b/drivers/usb/typec/pd.c @@ -95,7 +95,7 @@ peak_current_show(struct device *dev, struct device_attribute *attr, char *buf) static ssize_t fast_role_swap_current_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sysfs_emit(buf, "%u\n", to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3; + return sysfs_emit(buf, "%u\n", (to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3); } static DEVICE_ATTR_RO(fast_role_swap_current); diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 2b472ec01dc4..b664ecbb798b 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -132,10 +132,8 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) if (ret) return ret; - if (cci & UCSI_CCI_BUSY) { - ucsi->ops->async_write(ucsi, UCSI_CANCEL, NULL, 0); - return -EBUSY; - } + if (cmd != UCSI_CANCEL && cci & UCSI_CCI_BUSY) + return ucsi_exec_command(ucsi, UCSI_CANCEL); if (!(cci & UCSI_CCI_COMMAND_COMPLETE)) return -EIO; @@ -149,6 +147,11 @@ static 
int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) return ucsi_read_error(ucsi); } + if (cmd == UCSI_CANCEL && cci & UCSI_CCI_CANCEL_COMPLETE) { + ret = ucsi_acknowledge_command(ucsi); + return ret ? ret : -EBUSY; + } + return UCSI_CCI_LENGTH(cci); } diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index 5f5c21674fdc..4619b4a520ef 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -1052,7 +1052,7 @@ static int vduse_dev_reg_umem(struct vduse_dev *dev, goto out; pinned = pin_user_pages(uaddr, npages, FOLL_LONGTERM | FOLL_WRITE, - page_list, NULL); + page_list); if (pinned != npages) { ret = pinned < 0 ? pinned : -ENOMEM; goto out; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 0d2f805468e1..ebe0ad31d0b0 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -514,6 +514,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, bool write_fault) { pte_t *ptep; + pte_t pte; spinlock_t *ptl; int ret; @@ -536,10 +537,12 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, return ret; } - if (write_fault && !pte_write(*ptep)) + pte = ptep_get(ptep); + + if (write_fault && !pte_write(pte)) ret = -EFAULT; else - *pfn = pte_pfn(*ptep); + *pfn = pte_pfn(pte); pte_unmap_unlock(ptep, ptl); return ret; @@ -562,7 +565,7 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, mmap_read_lock(mm); ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, - pages, NULL, NULL); + pages, NULL); if (ret > 0) { int i; diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index bf77924d5b60..b43e8680eee8 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -1009,7 +1009,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v, while (npages) { sz2pin = min_t(unsigned long, npages, list_size); pinned = pin_user_pages(cur_base, sz2pin, - gup_flags, page_list, NULL); + gup_flags, page_list); if (sz2pin != pinned) { if (pinned < 0) { ret = pinned; diff --git a/drivers/virt/acrn/ioreq.c b/drivers/virt/acrn/ioreq.c index d75ab3f66da4..cecdc1c13af7 100644 --- a/drivers/virt/acrn/ioreq.c +++ b/drivers/virt/acrn/ioreq.c @@ -576,8 +576,8 @@ static void ioreq_resume(void) int acrn_ioreq_intr_setup(void) { acrn_setup_intr_handler(ioreq_intr_handler); - ioreq_wq = alloc_workqueue("ioreq_wq", - WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + ioreq_wq = alloc_ordered_workqueue("ioreq_wq", + WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!ioreq_wq) { dev_err(acrn_dev.this_device, "Failed to alloc workqueue!\n"); acrn_remove_intr_handler(); diff --git a/drivers/virt/coco/sev-guest/Kconfig b/drivers/virt/coco/sev-guest/Kconfig index f9db0799ae67..da2d7ca531f0 100644 --- a/drivers/virt/coco/sev-guest/Kconfig +++ b/drivers/virt/coco/sev-guest/Kconfig @@ -2,6 +2,7 @@ config SEV_GUEST tristate "AMD SEV Guest driver" default m depends on AMD_MEM_ENCRYPT + select CRYPTO select CRYPTO_AEAD2 select CRYPTO_GCM help diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index e2f580e30a86..f447cd37cc4c 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -949,7 +949,7 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) */ static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data) { - return pte_none(*pte) ? 0 : -EBUSY; + return pte_none(ptep_get(pte)) ? 
0 : -EBUSY; } static int privcmd_vma_range_is_mapped( diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 7beaf2c41fbb..d52593466a79 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -363,7 +363,7 @@ static struct sock_mapping *pvcalls_new_active_socket( map->data.in = map->bytes; map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order); - map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1); + map->ioworker.wq = alloc_ordered_workqueue("pvcalls_io", 0); if (!map->ioworker.wq) goto out; atomic_set(&map->io, 1); @@ -636,7 +636,7 @@ static int pvcalls_back_bind(struct xenbus_device *dev, INIT_WORK(&map->register_work, __pvcalls_back_accept); spin_lock_init(&map->copy_lock); - map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1); + map->wq = alloc_ordered_workqueue("pvcalls_wq", 0); if (!map->wq) { ret = -ENOMEM; goto out;
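Several conversions in this series (the acrn ioreq hunk earlier, and the xen pvcalls hunks here) replace alloc_workqueue(name, WQ_UNBOUND, 1) with alloc_ordered_workqueue(), which states the one-item-at-a-time, in-order execution guarantee explicitly instead of encoding it in max_active = 1. A minimal usage sketch with a hypothetical work handler:

	#include <linux/workqueue.h>

	static void my_io_work(struct work_struct *work)
	{
		/* One item runs at a time; items execute in queueing order. */
	}

	static DECLARE_WORK(io_work, my_io_work);

	static int setup_ordered_wq(void)
	{
		struct workqueue_struct *wq;

		/* Equivalent to the old alloc_workqueue(name, WQ_UNBOUND, 1). */
		wq = alloc_ordered_workqueue("my_io_wq", WQ_MEM_RECLAIM);
		if (!wq)
			return -ENOMEM;

		queue_work(wq, &io_work);
		return 0;
	}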