author    | Sujoy Ray <sujoy.ray@intel.com> | 2022-05-04 21:01:19 +0300
committer | Sujoy Ray <sujoy.ray@intel.com> | 2022-05-12 17:46:40 +0300
commit    | efe6d9649b1d6b85b50cef64745df2e6749a8a45 (patch)
tree      | e9aca55fa1fa29fea638ee52832fa9691fdd6f02 /drivers
parent    | ab95859fee776e58934d2b0cc1f4e93810e66508 (diff)
parent    | 49caedb668e476c100d727f2174724e0610a2b92 (diff)
download  | linux-efe6d9649b1d6b85b50cef64745df2e6749a8a45.tar.xz
Merge commit '49caedb668e476c100d727f2174724e0610a2b92' of https://github.com/openbmc/linux into openbmc/dev-5.15-intel-bump_v5.15.36
Signed-off-by: Sujoy Ray <sujoy.ray@intel.com>
Diffstat (limited to 'drivers')
1133 files changed, 16459 insertions, 7749 deletions
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index 915c2433463d..e7c30ce06e18 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -169,6 +169,9 @@ acpi_ns_walk_namespace(acpi_object_type type, if (start_node == ACPI_ROOT_OBJECT) { start_node = acpi_gbl_root_node; + if (!start_node) { + return_ACPI_STATUS(AE_NO_NAMESPACE); + } } /* Null child means "get first node" */ diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index 19e50fcbf4d6..598fd19b65fa 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -29,6 +29,7 @@ #undef pr_fmt #define pr_fmt(fmt) "BERT: " fmt +#define ACPI_BERT_PRINT_MAX_LEN 1024 static int bert_disable; @@ -58,8 +59,11 @@ static void __init bert_print_all(struct acpi_bert_region *region, } pr_info_once("Error records from previous boot:\n"); - - cper_estatus_print(KERN_INFO HW_ERR, estatus); + if (region_len < ACPI_BERT_PRINT_MAX_LEN) + cper_estatus_print(KERN_INFO HW_ERR, estatus); + else + pr_info_once("Max print length exceeded, table data is available at:\n" + "/sys/firmware/acpi/tables/data/BERT"); /* * Because the boot error source is "one-time polled" type, @@ -77,7 +81,7 @@ static int __init setup_bert_disable(char *str) { bert_disable = 1; - return 0; + return 1; } __setup("bert_disable", setup_bert_disable); diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 242f3c2d5533..698d67cee052 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(erst_clear); static int __init setup_erst_disable(char *str) { erst_disable = 1; - return 0; + return 1; } __setup("erst_disable", setup_erst_disable); diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 277f00b288d1..317bba602ad5 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -223,7 +223,7 @@ err: static int __init setup_hest_disable(char *str) { hest_disable = HEST_DISABLED; - return 0; + return 1; } __setup("hest_disable", setup_hest_disable); diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index ead0114f27c9..56db7b4da514 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -60,6 +60,10 @@ MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); static const struct acpi_device_id battery_device_ids[] = { {"PNP0C0A", 0}, + + /* Microsoft Surface Go 3 */ + {"MSHW0146", 0}, + {"", 0}, }; @@ -1177,6 +1181,14 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"), }, }, + { + /* Microsoft Surface Go 3 */ + .callback = battery_notification_delay_quirk, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), + }, + }, {}, }; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index dd535b4b9a16..3500744e6862 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -332,21 +332,32 @@ static void acpi_bus_osc_negotiate_platform_control(void) if (ACPI_FAILURE(acpi_run_osc(handle, &context))) return; - kfree(context.ret.pointer); + capbuf_ret = context.ret.pointer; + if (context.ret.length <= OSC_SUPPORT_DWORD) { + kfree(context.ret.pointer); + return; + } - /* Now run _OSC again with query flag clear */ + /* + * Now run _OSC again with query flag clear and with the caps + * supported by both the OS and the platform. 
+ */ capbuf[OSC_QUERY_DWORD] = 0; + capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD]; + kfree(context.ret.pointer); if (ACPI_FAILURE(acpi_run_osc(handle, &context))) return; capbuf_ret = context.ret.pointer; - osc_sb_apei_support_acked = - capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT; - osc_pc_lpi_support_confirmed = - capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT; - osc_sb_native_usb4_support_confirmed = - capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT; + if (context.ret.length > OSC_SUPPORT_DWORD) { + osc_sb_apei_support_acked = + capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT; + osc_pc_lpi_support_confirmed = + capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT; + osc_sb_native_usb4_support_confirmed = + capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT; + } kfree(context.ret.pointer); } diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 6fe28a2d387b..71b87c16f92d 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -703,6 +703,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) cpc_obj = &out_obj->package.elements[0]; if (cpc_obj->type == ACPI_TYPE_INTEGER) { num_ent = cpc_obj->integer.value; + if (num_ent <= 1) { + pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n", + num_ent, pr->id); + goto out_free; + } } else { pr_debug("Unexpected entry type(%d) for NumEntries\n", cpc_obj->type); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 98d178227544..9b859ff976e8 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -2052,16 +2052,6 @@ bool acpi_ec_dispatch_gpe(void) return true; /* - * Cancel the SCI wakeup and process all pending events in case there - * are any wakeup ones in there. - * - * Note that if any non-EC GPEs are active at this point, the SCI will - * retrigger after the rearming in acpi_s2idle_wake(), so no events - * should be missed by canceling the wakeup here. - */ - pm_system_cancel_wakeup(); - - /* * Dispatch the EC GPE in-band, but do not report wakeup in any case * to allow the caller to process events properly after that. 
*/ diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index f37fba9e5ba0..1fd6a4a34c15 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -95,6 +95,11 @@ static const struct dmi_system_id processor_power_dmi_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, (void *)1}, + /* T40 can not handle C3 idle state */ + { set_max_cstate, "IBM ThinkPad T40", { + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), + DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")}, + (void *)2}, {}, }; @@ -789,7 +794,8 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) state->enter = acpi_idle_enter; state->flags = 0; - if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) { + if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 || + cx->type == ACPI_STATE_C3) { state->enter_dead = acpi_idle_play_dead; drv->safe_state_index = count; } @@ -1075,6 +1081,11 @@ static int flatten_lpi_states(struct acpi_processor *pr, return 0; } +int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return -EOPNOTSUPP; +} + static int acpi_processor_get_lpi_info(struct acpi_processor *pr) { int ret, i; @@ -1083,6 +1094,11 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) struct acpi_device *d = NULL; struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; + /* make sure our architecture has support */ + ret = acpi_processor_ffh_lpi_probe(pr->id); + if (ret == -EOPNOTSUPP) + return ret; + if (!osc_pc_lpi_support_confirmed) return -EOPNOTSUPP; @@ -1134,11 +1150,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) return 0; } -int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) -{ - return -ENODEV; -} - int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) { return -ENODEV; diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 781e312f4534..11e4c930a981 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -685,7 +685,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, */ if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) { if (index) - return -EINVAL; + return -ENOENT; ret = acpi_bus_get_device(obj->reference.handle, &device); if (ret) diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 245a0fa979cb..7ae09e4b4592 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -739,15 +739,21 @@ bool acpi_s2idle_wake(void) return true; } - /* - * Check non-EC GPE wakeups and if there are none, cancel the - * SCI-related wakeup and dispatch the EC GPE. - */ + /* Check non-EC GPE wakeups and dispatch the EC GPE. */ if (acpi_ec_dispatch_gpe()) { pm_pr_dbg("ACPI non-EC GPE wakeup\n"); return true; } + /* + * Cancel the SCI wakeup and process all pending events in case + * there are any wakeup ones in there. + * + * Note that if any non-EC GPEs are active at this point, the + * SCI will retrigger after the rearming below, so no events + * should be missed by canceling the wakeup here. + */ + pm_system_cancel_wakeup(); acpi_os_wait_events_complete(); /* diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 33474fd96991..7b9793cb55c5 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -409,6 +409,81 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "GA503"), }, }, + /* + * Clevo NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2 have both a + * working native and video interface. 
However the default detection + * mechanism first registers the video interface before unregistering + * it again and switching to the native interface during boot. This + * results in a dangling SBIOS request for backlight change for some + * reason, causing the backlight to switch to ~2% once per boot on the + * first power cord connect or disconnect event. Setting the native + * interface explicitly circumvents this buggy behaviour, by avoiding + * the unregistering process. + */ + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, /* * Desktops which falsely report a backlight and which our heuristics diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index 1c48358b43ba..e0185e841b2a 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -424,15 +424,11 @@ static int lps0_device_attach(struct acpi_device *adev, mem_sleep_current = PM_SUSPEND_TO_IDLE; /* - * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't - * use intel-hid or intel-vbtn but require the EC GPE to be enabled while - * suspended for certain wakeup devices to work, so mark it as wakeup-capable. - * - * Only enable on !AMD as enabling this universally causes problems for a number - * of AMD based systems. + * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the + * EC GPE to be enabled while suspended for certain wakeup devices to + * work, so mark it as wakeup-capable. 
*/ - if (!acpi_s2idle_vendor_amd()) - acpi_ec_mark_gpe_for_wake(); + acpi_ec_mark_gpe_for_wake(); return 0; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 4d848cfc406f..a0343b7c9add 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -3999,6 +3999,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_NO_DMA_LOG | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | @@ -4014,6 +4017,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* devices that don't properly handle TRIM commands */ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, + { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, }, /* * As defined, the DRAT (Deterministic Read After Trim) and RZAT diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index f242157bc81b..9d371859e81e 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -920,6 +920,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) pci_write_config_byte(dev, 0x5a, irqmask); /* + * HPT371 chips physically have only one channel, the secondary one, + * but the primary channel registers do exist! Go figure... + * So, we manually disable the non-existing channel here + * (if the BIOS hasn't done this already). + */ + if (dev->device == PCI_DEVICE_ID_TTI_HPT371) { + u8 mcr1; + + pci_read_config_byte(dev, 0x50, &mcr1); + mcr1 &= ~0x04; + pci_write_config_byte(dev, 0x50, mcr1); + } + + /* * default to pci clock. make sure MA15/16 are set to output * to prevent drives having problems with 40-pin cables. Needed * for some drives such as IBM-DTLA which will not enter ready @@ -950,14 +964,14 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) if ((freq >> 12) != 0xABCDE) { int i; - u8 sr; + u16 sr; u32 total = 0; pr_warn("BIOS has not set timing clocks\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { - pci_read_config_byte(dev, 0x78, &sr); + pci_read_config_word(dev, 0x78, &sr); total += sr & 0x1FF; udelay(15); } diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index 361597d14c56..d45a75bfc016 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c @@ -83,6 +83,8 @@ static int marvell_cable_detect(struct ata_port *ap) switch(ap->port_no) { case 0: + if (!ap->ioaddr.bmdma_addr) + return ATA_CBL_PATA_UNK; if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1) return ATA_CBL_PATA40; return ATA_CBL_PATA80; diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index 338c2e50f759..29e2b0dfba30 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -145,7 +145,11 @@ struct sata_dwc_device { #endif }; -#define SATA_DWC_QCMD_MAX 32 +/* + * Allow one extra special slot for commands and DMA management + * to account for libata internal commands. 
+ */ +#define SATA_DWC_QCMD_MAX (ATA_MAX_QUEUE + 1) struct sata_dwc_device_port { struct sata_dwc_device *hsdev; diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 422753d52244..a31ffe16e626 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -1112,6 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags); skb_data3 = skb->data[3]; paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr)) + return enq_next; ENI_PRV_PADDR(skb) = paddr; /* prepare DMA queue entries */ j = 0; diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 3bc3c314a467..4f67404fe64c 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1676,6 +1676,8 @@ static int fs_init(struct fs_dev *dev) dev->hw_base = pci_resource_start(pci_dev, 0); dev->base = ioremap(dev->hw_base, 0x1000); + if (!dev->base) + return 1; reset_chip (dev); diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c index 38ba08628ccb..2578b2d45439 100644 --- a/drivers/auxdisplay/lcd2s.c +++ b/drivers/auxdisplay/lcd2s.c @@ -238,7 +238,7 @@ static int lcd2s_redefine_char(struct charlcd *lcd, char *esc) if (buf[1] > 7) return 1; - i = 0; + i = 2; shift = 0; value = 0; while (*esc && i < LCD2S_CHARACTER_SIZE + 2) { @@ -298,6 +298,10 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c, I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)) return -EIO; + lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL); + if (!lcd2s) + return -ENOMEM; + /* Test, if the display is responding */ err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF); if (err < 0) @@ -307,12 +311,6 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c, if (!lcd) return -ENOMEM; - lcd2s = kzalloc(sizeof(struct lcd2s_data), GFP_KERNEL); - if (!lcd2s) { - err = -ENOMEM; - goto fail1; - } - lcd->drvdata = lcd2s; lcd2s->i2c = i2c; lcd2s->charlcd = lcd; @@ -321,26 +319,24 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c, err = device_property_read_u32(&i2c->dev, "display-height-chars", &lcd->height); if (err) - goto fail2; + goto fail1; err = device_property_read_u32(&i2c->dev, "display-width-chars", &lcd->width); if (err) - goto fail2; + goto fail1; lcd->ops = &lcd2s_ops; err = charlcd_register(lcd2s->charlcd); if (err) - goto fail2; + goto fail1; i2c_set_clientdata(i2c, lcd2s); return 0; -fail2: - kfree(lcd2s); fail1: - kfree(lcd); + charlcd_free(lcd2s->charlcd); return err; } @@ -349,7 +345,7 @@ static int lcd2s_i2c_remove(struct i2c_client *i2c) struct lcd2s_data *lcd2s = i2c_get_clientdata(i2c); charlcd_unregister(lcd2s->charlcd); - kfree(lcd2s->charlcd); + charlcd_free(lcd2s->charlcd); return 0; } diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 68ea1f949daa..95ae347df137 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -296,6 +296,7 @@ int driver_deferred_probe_check_state(struct device *dev) return -EPROBE_DEFER; } +EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state); static void deferred_probe_timeout_work_func(struct work_struct *work) { @@ -629,6 +630,9 @@ re_probe: drv->remove(dev); devres_release_all(dev); + arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; driver_sysfs_remove(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); @@ -806,7 +810,7 @@ static int __init save_async_options(char *buf) pr_warn("Too long list of driver names for 'driver_async_probe'!\n"); strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN); - return 0; + return 1; } __setup("driver_async_probe=", 
save_async_options); @@ -1208,6 +1212,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) devres_release_all(dev); arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 365cd4a7f239..60c38f9cf1a7 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -663,14 +663,16 @@ static int init_memory_block(unsigned long block_id, unsigned long state, mem->nr_vmemmap_pages = nr_vmemmap_pages; INIT_LIST_HEAD(&mem->group_next); + ret = register_memory(mem); + if (ret) + return ret; + if (group) { mem->group = group; list_add(&mem->group_next, &group->memory_blocks); } - ret = register_memory(mem); - - return ret; + return 0; } static int add_memory_block(unsigned long base_section_nr) diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 5db704f02e71..7e8039d1884c 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2058,9 +2058,9 @@ static int genpd_remove(struct generic_pm_domain *genpd) kfree(link); } - genpd_debug_remove(genpd); list_del(&genpd->gpd_list_node); genpd_unlock(genpd); + genpd_debug_remove(genpd); cancel_work_sync(&genpd->power_off_work); if (genpd_is_cpu_domain(genpd)) free_cpumask_var(genpd->cpus); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 6bce40e2506e..8c4819fe73d4 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -2022,7 +2022,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops) void device_pm_check_callbacks(struct device *dev) { - spin_lock_irq(&dev->power.lock); + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); dev->power.no_pm_callbacks = (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && !dev->bus->suspend && !dev->bus->resume)) && @@ -2031,7 +2033,7 @@ void device_pm_check_callbacks(struct device *dev) (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && !dev->driver->suspend && !dev->driver->resume)); - spin_unlock_irq(&dev->power.lock); + spin_unlock_irqrestore(&dev->power.lock, flags); } bool dev_pm_skip_suspend(struct device *dev) diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 94665037f4a3..72b7a92337b1 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -120,7 +120,11 @@ static unsigned int read_magic_time(void) struct rtc_time time; unsigned int val; - mc146818_get_time(&time); + if (mc146818_get_time(&time) < 0) { + pr_err("Unable to read current time from RTC\n"); + return 0; + } + pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time); val = time.tm_year; /* 100 years */ if (val > 100) diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index d2656581a608..4a446259a184 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -189,11 +189,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) ret = regmap_write(map, reg, d->mask_buf[i]); if (d->chip->clear_ack) { if (d->chip->ack_invert && !ret) - ret = regmap_write(map, reg, - d->mask_buf[i]); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~d->mask_buf[i]); + ret = regmap_write(map, reg, 0); } if (ret != 0) dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", @@ -556,11 +554,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) 
data->status_buf[i]); if (chip->clear_ack) { if (chip->ack_invert && !ret) - ret = regmap_write(map, reg, - data->status_buf[i]); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~data->status_buf[i]); + ret = regmap_write(map, reg, 0); } if (ret != 0) dev_err(map->dev, "Failed to ack 0x%x: %d\n", @@ -817,13 +813,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->status_buf[i] & d->mask_buf[i]); if (chip->clear_ack) { if (chip->ack_invert && !ret) - ret = regmap_write(map, reg, - (d->status_buf[i] & - d->mask_buf[i])); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~(d->status_buf[i] & - d->mask_buf[i])); + ret = regmap_write(map, reg, 0); } if (ret != 0) { dev_err(map->dev, "Failed to ack 0x%x: %d\n", diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 8b1714021498..1ed557cb5ed2 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -61,6 +61,7 @@ #include <linux/hdreg.h> #include <linux/delay.h> #include <linux/init.h> +#include <linux/major.h> #include <linux/mutex.h> #include <linux/fs.h> #include <linux/blk-mq.h> diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index aab48b292a3b..82faaa458157 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -68,6 +68,7 @@ #include <linux/delay.h> #include <linux/init.h> #include <linux/blk-mq.h> +#include <linux/major.h> #include <linux/mutex.h> #include <linux/completion.h> #include <linux/wait.h> diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 5d9181382ce1..0a5766a2f161 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1642,22 +1642,22 @@ struct sib_info { }; void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib); -extern void notify_resource_state(struct sk_buff *, +extern int notify_resource_state(struct sk_buff *, unsigned int, struct drbd_resource *, struct resource_info *, enum drbd_notification_type); -extern void notify_device_state(struct sk_buff *, +extern int notify_device_state(struct sk_buff *, unsigned int, struct drbd_device *, struct device_info *, enum drbd_notification_type); -extern void notify_connection_state(struct sk_buff *, +extern int notify_connection_state(struct sk_buff *, unsigned int, struct drbd_connection *, struct connection_info *, enum drbd_notification_type); -extern void notify_peer_device_state(struct sk_buff *, +extern int notify_peer_device_state(struct sk_buff *, unsigned int, struct drbd_peer_device *, struct peer_device_info *, diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 55234a558e98..548e0dd53528 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2737,6 +2737,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig sprintf(disk->disk_name, "drbd%d", minor); disk->private_data = device; + blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue); blk_queue_write_cache(disk->queue, true, true); /* Setting the max_hw_sectors to an odd value of 8kibyte here This triggers a max_bio_size message upon first attach or connect */ diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 44ccf8b4f4b2..69184cf17b6a 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -4617,7 +4617,7 @@ static int nla_put_notification_header(struct sk_buff *msg, return drbd_notification_header_to_skb(msg, &nh, true); } -void 
notify_resource_state(struct sk_buff *skb, +int notify_resource_state(struct sk_buff *skb, unsigned int seq, struct drbd_resource *resource, struct resource_info *resource_info, @@ -4659,16 +4659,17 @@ void notify_resource_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } -void notify_device_state(struct sk_buff *skb, +int notify_device_state(struct sk_buff *skb, unsigned int seq, struct drbd_device *device, struct device_info *device_info, @@ -4708,16 +4709,17 @@ void notify_device_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } -void notify_connection_state(struct sk_buff *skb, +int notify_connection_state(struct sk_buff *skb, unsigned int seq, struct drbd_connection *connection, struct connection_info *connection_info, @@ -4757,16 +4759,17 @@ void notify_connection_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } -void notify_peer_device_state(struct sk_buff *skb, +int notify_peer_device_state(struct sk_buff *skb, unsigned int seq, struct drbd_peer_device *peer_device, struct peer_device_info *peer_device_info, @@ -4807,13 +4810,14 @@ void notify_peer_device_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } void notify_helper(enum drbd_notification_type type, @@ -4864,7 +4868,7 @@ fail: err, seq); } -static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq) +static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq) { struct drbd_genlmsghdr *dh; int err; @@ -4878,11 +4882,12 @@ static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq) if (nla_put_notification_header(skb, NOTIFY_EXISTS)) goto nla_put_failure; genlmsg_end(skb, dh); - return; + return 0; nla_put_failure: nlmsg_free(skb); pr_err("Error %d sending event. 
Event seq:%u\n", err, seq); + return err; } static void free_state_changes(struct list_head *list) @@ -4909,6 +4914,7 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) unsigned int seq = cb->args[2]; unsigned int n; enum drbd_notification_type flags = 0; + int err = 0; /* There is no need for taking notification_mutex here: it doesn't matter if the initial state events mix with later state chage @@ -4917,32 +4923,32 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) cb->args[5]--; if (cb->args[5] == 1) { - notify_initial_state_done(skb, seq); + err = notify_initial_state_done(skb, seq); goto out; } n = cb->args[4]++; if (cb->args[4] < cb->args[3]) flags |= NOTIFY_CONTINUES; if (n < 1) { - notify_resource_state_change(skb, seq, state_change->resource, + err = notify_resource_state_change(skb, seq, state_change->resource, NOTIFY_EXISTS | flags); goto next; } n--; if (n < state_change->n_connections) { - notify_connection_state_change(skb, seq, &state_change->connections[n], + err = notify_connection_state_change(skb, seq, &state_change->connections[n], NOTIFY_EXISTS | flags); goto next; } n -= state_change->n_connections; if (n < state_change->n_devices) { - notify_device_state_change(skb, seq, &state_change->devices[n], + err = notify_device_state_change(skb, seq, &state_change->devices[n], NOTIFY_EXISTS | flags); goto next; } n -= state_change->n_devices; if (n < state_change->n_devices * state_change->n_connections) { - notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n], + err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n], NOTIFY_EXISTS | flags); goto next; } @@ -4957,7 +4963,10 @@ next: cb->args[4] = 0; } out: - return skb->len; + if (err) + return err; + else + return skb->len; } int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 5ca233644d70..47e0d105b462 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -180,7 +180,8 @@ void start_new_tl_epoch(struct drbd_connection *connection) void complete_master_bio(struct drbd_device *device, struct bio_and_error *m) { - m->bio->bi_status = errno_to_blk_status(m->error); + if (unlikely(m->error)) + m->bio->bi_status = errno_to_blk_status(m->error); bio_endio(m->bio); dec_ap_bio(device); } diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index b8a27818ab3f..4ee11aef6672 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device, return rv; } -void notify_resource_state_change(struct sk_buff *skb, +int notify_resource_state_change(struct sk_buff *skb, unsigned int seq, struct drbd_resource_state_change *resource_state_change, enum drbd_notification_type type) @@ -1550,10 +1550,10 @@ void notify_resource_state_change(struct sk_buff *skb, .res_susp_fen = resource_state_change->susp_fen[NEW], }; - notify_resource_state(skb, seq, resource, &resource_info, type); + return notify_resource_state(skb, seq, resource, &resource_info, type); } -void notify_connection_state_change(struct sk_buff *skb, +int notify_connection_state_change(struct sk_buff *skb, unsigned int seq, struct drbd_connection_state_change *connection_state_change, enum drbd_notification_type type) @@ -1564,10 +1564,10 @@ void notify_connection_state_change(struct sk_buff *skb, .conn_role = 
connection_state_change->peer_role[NEW], }; - notify_connection_state(skb, seq, connection, &connection_info, type); + return notify_connection_state(skb, seq, connection, &connection_info, type); } -void notify_device_state_change(struct sk_buff *skb, +int notify_device_state_change(struct sk_buff *skb, unsigned int seq, struct drbd_device_state_change *device_state_change, enum drbd_notification_type type) @@ -1577,10 +1577,10 @@ void notify_device_state_change(struct sk_buff *skb, .dev_disk_state = device_state_change->disk_state[NEW], }; - notify_device_state(skb, seq, device, &device_info, type); + return notify_device_state(skb, seq, device, &device_info, type); } -void notify_peer_device_state_change(struct sk_buff *skb, +int notify_peer_device_state_change(struct sk_buff *skb, unsigned int seq, struct drbd_peer_device_state_change *p, enum drbd_notification_type type) @@ -1594,7 +1594,7 @@ void notify_peer_device_state_change(struct sk_buff *skb, .peer_resync_susp_dependency = p->resync_susp_dependency[NEW], }; - notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type); + return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type); } static void broadcast_state_change(struct drbd_state_change *state_change) @@ -1602,7 +1602,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change) struct drbd_resource_state_change *resource_state_change = &state_change->resource[0]; bool resource_state_has_changed; unsigned int n_device, n_connection, n_peer_device, n_peer_devices; - void (*last_func)(struct sk_buff *, unsigned int, void *, + int (*last_func)(struct sk_buff *, unsigned int, void *, enum drbd_notification_type) = NULL; void *last_arg = NULL; diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h index ba80f612d6ab..d5b0479bc9a6 100644 --- a/drivers/block/drbd/drbd_state_change.h +++ b/drivers/block/drbd/drbd_state_change.h @@ -44,19 +44,19 @@ extern struct drbd_state_change *remember_old_state(struct drbd_resource *, gfp_ extern void copy_old_to_new_state_change(struct drbd_state_change *); extern void forget_state_change(struct drbd_state_change *); -extern void notify_resource_state_change(struct sk_buff *, +extern int notify_resource_state_change(struct sk_buff *, unsigned int, struct drbd_resource_state_change *, enum drbd_notification_type type); -extern void notify_connection_state_change(struct sk_buff *, +extern int notify_connection_state_change(struct sk_buff *, unsigned int, struct drbd_connection_state_change *, enum drbd_notification_type type); -extern void notify_device_state_change(struct sk_buff *, +extern int notify_device_state_change(struct sk_buff *, unsigned int, struct drbd_device_state_change *, enum drbd_notification_type type); -extern void notify_peer_device_state_change(struct sk_buff *, +extern int notify_peer_device_state_change(struct sk_buff *, unsigned int, struct drbd_peer_device_state_change *, enum drbd_notification_type type); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 4a6a74177b3c..0f58594c5a4d 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -184,6 +184,7 @@ static int print_unex = 1; #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/init.h> +#include <linux/major.h> #include <linux/platform_device.h> #include <linux/mod_devicetable.h> #include <linux/mutex.h> diff --git a/drivers/block/loop.c b/drivers/block/loop.c index c00ae30fde89..8cba10aafadb 100644 --- a/drivers/block/loop.c +++ 
b/drivers/block/loop.c @@ -79,6 +79,7 @@ #include <linux/ioprio.h> #include <linux/blk-cgroup.h> #include <linux/sched/mm.h> +#include <linux/statfs.h> #include "loop.h" @@ -843,33 +844,33 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) { - return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset); + return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset); } static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) { - return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); + return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); } static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) { int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); - return sprintf(buf, "%s\n", autoclear ? "1" : "0"); + return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0"); } static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) { int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); - return sprintf(buf, "%s\n", partscan ? "1" : "0"); + return sysfs_emit(buf, "%s\n", partscan ? "1" : "0"); } static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf) { int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO); - return sprintf(buf, "%s\n", dio ? "1" : "0"); + return sysfs_emit(buf, "%s\n", dio ? "1" : "0"); } LOOP_ATTR_RO(backing_file); @@ -939,8 +940,13 @@ static void loop_config_discard(struct loop_device *lo) granularity = 0; } else { + struct kstatfs sbuf; + max_discard_sectors = UINT_MAX >> 9; - granularity = inode->i_sb->s_blocksize; + if (!vfs_statfs(&file->f_path, &sbuf)) + granularity = sbuf.f_bsize; + else + max_discard_sectors = 0; } if (max_discard_sectors) { diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 901855717cb5..ba61e72741ea 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev) "Completion workers still active!\n"); } - blk_set_queue_dying(dd->queue); + blk_mark_disk_dead(dd->disk); set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); /* Clean up the block layer. 
*/ diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c index 26798da661bd..bcaabf038947 100644 --- a/drivers/block/n64cart.c +++ b/drivers/block/n64cart.c @@ -88,7 +88,7 @@ static blk_qc_t n64cart_submit_bio(struct bio *bio) { struct bio_vec bvec; struct bvec_iter iter; - struct device *dev = bio->bi_disk->private_data; + struct device *dev = bio->bi_bdev->bd_disk->private_data; u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT; bio_for_each_segment(bvec, bio, iter) { diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 577c7dba5d78..582b23befb5c 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -254,7 +254,7 @@ static void nbd_dev_remove(struct nbd_device *nbd) mutex_lock(&nbd_index_mutex); idr_remove(&nbd_index_idr, nbd->index); mutex_unlock(&nbd_index_mutex); - + destroy_workqueue(nbd->recv_workq); kfree(nbd); } @@ -1260,10 +1260,6 @@ static void nbd_config_put(struct nbd_device *nbd) kfree(nbd->config); nbd->config = NULL; - if (nbd->recv_workq) - destroy_workqueue(nbd->recv_workq); - nbd->recv_workq = NULL; - nbd->tag_set.timeout = 0; nbd->disk->queue->limits.discard_granularity = 0; nbd->disk->queue->limits.discard_alignment = 0; @@ -1292,14 +1288,6 @@ static int nbd_start_device(struct nbd_device *nbd) return -EINVAL; } - nbd->recv_workq = alloc_workqueue("knbd%d-recv", - WQ_MEM_RECLAIM | WQ_HIGHPRI | - WQ_UNBOUND, 0, nbd->index); - if (!nbd->recv_workq) { - dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n"); - return -ENOMEM; - } - blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections); nbd->pid = task_pid_nr(current); @@ -1725,6 +1713,15 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs) } nbd->disk = disk; + nbd->recv_workq = alloc_workqueue("nbd%d-recv", + WQ_MEM_RECLAIM | WQ_HIGHPRI | + WQ_UNBOUND, 0, nbd->index); + if (!nbd->recv_workq) { + dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n"); + err = -ENOMEM; + goto out_err_disk; + } + /* * Tell the block layer that we are not a rotational device */ @@ -1755,14 +1752,16 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs) disk->first_minor = index << part_shift; if (disk->first_minor < index || disk->first_minor > MINORMASK) { err = -EINVAL; - goto out_free_idr; + goto out_free_work; } disk->minors = 1 << part_shift; disk->fops = &nbd_fops; disk->private_data = nbd; sprintf(disk->disk_name, "nbd%d", index); - add_disk(disk); + err = add_disk(disk); + if (err) + goto out_free_work; /* * Now publish the device. @@ -1771,6 +1770,10 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs) nbd_total_devices++; return nbd; +out_free_work: + destroy_workqueue(nbd->recv_workq); +out_err_disk: + blk_cleanup_disk(disk); out_free_idr: mutex_lock(&nbd_index_mutex); idr_remove(&nbd_index_idr, index); @@ -2024,13 +2027,10 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd) nbd_disconnect(nbd); sock_shutdown(nbd); /* - * Make sure recv thread has finished, so it does not drop the last - * config ref and try to destroy the workqueue from inside the work - * queue. And this also ensure that we can safely call nbd_clear_que() + * Make sure recv thread has finished, we can safely call nbd_clear_que() * to cancel the inflight I/Os. 
*/ - if (nbd->recv_workq) - flush_workqueue(nbd->recv_workq); + flush_workqueue(nbd->recv_workq); nbd_clear_que(nbd); nbd->task_setup = NULL; mutex_unlock(&nbd->config_lock); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index e65c9d706f6f..c4a52f33604d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -7182,7 +7182,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus, * IO to complete/fail. */ blk_mq_freeze_queue(rbd_dev->disk->queue); - blk_set_queue_dying(rbd_dev->disk->queue); + blk_mark_disk_dead(rbd_dev->disk); } del_gendisk(rbd_dev->disk); diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 7ccc8d2a41bc..3911d0833e1b 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -16,6 +16,7 @@ #include <linux/fd.h> #include <linux/slab.h> #include <linux/blk-mq.h> +#include <linux/major.h> #include <linux/mutex.h> #include <linux/hdreg.h> #include <linux/kernel.h> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 303caf2d17d0..c0b8a26892a5 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -815,9 +815,17 @@ static int virtblk_probe(struct virtio_device *vdev) err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, struct virtio_blk_config, blk_size, &blk_size); - if (!err) + if (!err) { + err = blk_validate_block_size(blk_size); + if (err) { + dev_err(&vdev->dev, + "virtio_blk: invalid block size: 0x%x\n", + blk_size); + goto out_cleanup_disk; + } + blk_queue_logical_block_size(q, blk_size); - else + } else blk_size = queue_logical_block_size(q); /* Use topology information if available */ @@ -859,9 +867,15 @@ static int virtblk_probe(struct virtio_device *vdev) virtio_cread(vdev, struct virtio_blk_config, max_discard_seg, &v); + + /* + * max_discard_seg == 0 is out of spec but we always + * handled it. + */ + if (!v) + v = sg_elems - 2; blk_queue_max_discard_segments(q, - min_not_zero(v, - MAX_DISCARD_SEGMENTS)); + min(v, MAX_DISCARD_SEGMENTS)); blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 4dbb71230d6e..390817cf1221 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -42,6 +42,7 @@ #include <linux/cdrom.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/major.h> #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/bitmap.h> @@ -1290,7 +1291,8 @@ free_shadow: rinfo->ring_ref[i] = GRANT_INVALID_REF; } } - free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE)); + free_pages_exact(rinfo->ring.sring, + info->nr_ring_pages * XEN_PAGE_SIZE); rinfo->ring.sring = NULL; if (rinfo->irq) @@ -1374,9 +1376,15 @@ static int blkif_get_final_status(enum blk_req_status s1, return BLKIF_RSP_OKAY; } -static bool blkif_completion(unsigned long *id, - struct blkfront_ring_info *rinfo, - struct blkif_response *bret) +/* + * Return values: + * 1 response processed. + * 0 missing further responses. + * -1 error while processing. + */ +static int blkif_completion(unsigned long *id, + struct blkfront_ring_info *rinfo, + struct blkif_response *bret) { int i = 0; struct scatterlist *sg; @@ -1399,7 +1407,7 @@ static bool blkif_completion(unsigned long *id, /* Wait the second response if not yet here. 
*/ if (s2->status < REQ_DONE) - return false; + return 0; bret->status = blkif_get_final_status(s->status, s2->status); @@ -1450,42 +1458,43 @@ static bool blkif_completion(unsigned long *id, } /* Add the persistent grant into the list of free grants */ for (i = 0; i < num_grant; i++) { - if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { + if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. */ - if (!info->feature_persistent) - pr_alert_ratelimited("backed has not unmapped grant: %u\n", - s->grants_used[i]->gref); + if (!info->feature_persistent) { + pr_alert("backed has not unmapped grant: %u\n", + s->grants_used[i]->gref); + return -1; + } list_add(&s->grants_used[i]->node, &rinfo->grants); rinfo->persistent_gnts_c++; } else { /* - * If the grant is not mapped by the backend we end the - * foreign access and add it to the tail of the list, - * so it will not be picked again unless we run out of - * persistent grants. + * If the grant is not mapped by the backend we add it + * to the tail of the list, so it will not be picked + * again unless we run out of persistent grants. */ - gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &rinfo->grants); } } if (s->req.operation == BLKIF_OP_INDIRECT) { for (i = 0; i < INDIRECT_GREFS(num_grant); i++) { - if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { - if (!info->feature_persistent) - pr_alert_ratelimited("backed has not unmapped grant: %u\n", - s->indirect_grants[i]->gref); + if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) { + if (!info->feature_persistent) { + pr_alert("backed has not unmapped grant: %u\n", + s->indirect_grants[i]->gref); + return -1; + } list_add(&s->indirect_grants[i]->node, &rinfo->grants); rinfo->persistent_gnts_c++; } else { struct page *indirect_page; - gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs. 
@@ -1500,7 +1509,7 @@ static bool blkif_completion(unsigned long *id, } } - return true; + return 1; } static irqreturn_t blkif_interrupt(int irq, void *dev_id) @@ -1566,12 +1575,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) } if (bret.operation != BLKIF_OP_DISCARD) { + int ret; + /* * We may need to wait for an extra response if the * I/O request is split in 2 */ - if (!blkif_completion(&id, rinfo, &bret)) + ret = blkif_completion(&id, rinfo, &bret); + if (!ret) continue; + if (unlikely(ret < 0)) + goto err; } if (add_id_to_freelist(rinfo, id)) { @@ -1678,8 +1692,7 @@ static int setup_blkring(struct xenbus_device *dev, for (i = 0; i < info->nr_ring_pages; i++) rinfo->ring_ref[i] = GRANT_INVALID_REF; - sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH, - get_order(ring_size)); + sring = alloc_pages_exact(ring_size, GFP_NOIO); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; @@ -1689,7 +1702,7 @@ static int setup_blkring(struct xenbus_device *dev, err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref); if (err < 0) { - free_pages((unsigned long)sring, get_order(ring_size)); + free_pages_exact(sring, ring_size); rinfo->ring.sring = NULL; goto fail; } @@ -2128,7 +2141,7 @@ static void blkfront_closing(struct blkfront_info *info) /* No more blkif_request(). */ blk_mq_stop_hw_queues(info->rq); - blk_set_queue_dying(info->rq); + blk_mark_disk_dead(info->gd); set_capacity(info->gd, 0); for_each_rinfo(info, rinfo, i) { @@ -2529,11 +2542,10 @@ static void purge_persistent_grants(struct blkfront_info *info) list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, node) { if (gnt_list_entry->gref == GRANT_INVALID_REF || - gnttab_query_foreign_access(gnt_list_entry->gref)) + !gnttab_try_end_foreign_access(gnt_list_entry->gref)) continue; list_del(&gnt_list_entry->node); - gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); rinfo->persistent_gnts_c--; gnt_list_entry->gref = GRANT_INVALID_REF; list_add_tail(&gnt_list_entry->node, &rinfo->grants); diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index e73d4c719b0a..d122cc973917 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -2263,10 +2263,15 @@ static int btintel_setup_combined(struct hci_dev *hdev) /* Apply the device specific HCI quirks * - * WBS for SdP - SdP and Stp have a same hw_varaint but - * different fw_variant + * WBS for SdP - For the Legacy ROM products, only SdP + * supports the WBS. But the version information is not + * enough to use here because the StP2 and SdP have same + * hw_variant and fw_variant. 
So, this flag is set by + * the transport driver (btusb) based on the HW info + * (idProduct) */ - if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22) + if (!btintel_test_flag(hdev, + INTEL_ROM_LEGACY_NO_WBS_SUPPORT)) set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index 704e3b7bcb77..2b85ebf63321 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -147,6 +147,7 @@ enum { INTEL_BROKEN_INITIAL_NCMD, INTEL_BROKEN_SHUTDOWN_LED, INTEL_ROM_LEGACY, + INTEL_ROM_LEGACY_NO_WBS_SUPPORT, __INTEL_NUM_FLAGS, }; diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 1cbdeca1fdc4..ff1f5dfbb6db 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -981,6 +981,8 @@ static int btmtksdio_probe(struct sdio_func *func, hdev->manufacturer = 70; set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); + sdio_set_drvdata(func, bdev); + err = hci_register_dev(hdev); if (err < 0) { dev_err(&func->dev, "Can't register HCI device\n"); @@ -988,8 +990,6 @@ static int btmtksdio_probe(struct sdio_func *func, return err; } - sdio_set_drvdata(func, bdev); - /* pm_runtime_enable would be done after the firmware is being * downloaded because the core layer probably already enables * runtime PM for this func such as the case host->caps & diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index ac90392cce33..a68edbc7be0f 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -61,6 +61,7 @@ static struct usb_driver btusb_driver; #define BTUSB_QCA_WCN6855 0x1000000 #define BTUSB_INTEL_BROKEN_SHUTDOWN_LED 0x2000000 #define BTUSB_INTEL_BROKEN_INITIAL_NCMD 0x4000000 +#define BTUSB_INTEL_NO_WBS_SUPPORT 0x8000000 static const struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ @@ -384,9 +385,11 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED | + BTUSB_INTEL_NO_WBS_SUPPORT | BTUSB_INTEL_BROKEN_INITIAL_NCMD | BTUSB_INTEL_BROKEN_SHUTDOWN_LED }, { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL_COMBINED | + BTUSB_INTEL_NO_WBS_SUPPORT | BTUSB_INTEL_BROKEN_SHUTDOWN_LED }, { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL_COMBINED | @@ -404,6 +407,8 @@ static const struct usb_device_id blacklist_table[] = { BTUSB_WIDEBAND_SPEECH }, /* Realtek 8852AE Bluetooth devices */ + { USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0bda, 0x385a), .driver_info = BTUSB_REALTEK | @@ -481,6 +486,8 @@ static const struct usb_device_id blacklist_table[] = { /* Additional Realtek 8761BU Bluetooth devices */ { USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x2550, 0x8761), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Additional Realtek 8821AE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, @@ -3859,6 +3866,9 @@ static int btusb_probe(struct usb_interface *intf, hdev->send = btusb_send_frame_intel; hdev->cmd_timeout = btusb_intel_cmd_timeout; + if (id->driver_info & BTUSB_INTEL_NO_WBS_SUPPORT) + btintel_set_flag(hdev, 
INTEL_ROM_LEGACY_NO_WBS_SUPPORT); + if (id->driver_info & BTUSB_INTEL_BROKEN_INITIAL_NCMD) btintel_set_flag(hdev, INTEL_BROKEN_INITIAL_NCMD); diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index d49a39d17d7d..e0ea9d25bb39 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -629,9 +629,11 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb) break; } - pm_runtime_get_sync(&hu->serdev->dev); - pm_runtime_mark_last_busy(&hu->serdev->dev); - pm_runtime_put_autosuspend(&hu->serdev->dev); + if (hu->serdev) { + pm_runtime_get_sync(&hu->serdev->dev); + pm_runtime_mark_last_busy(&hu->serdev->dev); + pm_runtime_put_autosuspend(&hu->serdev->dev); + } return 0; } diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index 3b00d82d36cf..4cda890ce647 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -305,6 +305,8 @@ int hci_uart_register_device(struct hci_uart *hu, if (err) return err; + percpu_init_rwsem(&hu->proto_lock); + err = p->open(hu); if (err) goto err_open; @@ -327,7 +329,6 @@ int hci_uart_register_device(struct hci_uart *hu, INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); - percpu_init_rwsem(&hu->proto_lock); /* Only when vendor specific setup callback is provided, consider * the manufacturer information valid. This avoids filling in the diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/core/debugfs.c index 858d7516410b..d818586c229d 100644 --- a/drivers/bus/mhi/core/debugfs.c +++ b/drivers/bus/mhi/core/debugfs.c @@ -60,16 +60,16 @@ static int mhi_debugfs_events_show(struct seq_file *m, void *d) } seq_printf(m, "Index: %d intmod count: %lu time: %lu", - i, (er_ctxt->intmod & EV_CTX_INTMODC_MASK) >> + i, (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODC_MASK) >> EV_CTX_INTMODC_SHIFT, - (er_ctxt->intmod & EV_CTX_INTMODT_MASK) >> + (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODT_MASK) >> EV_CTX_INTMODT_SHIFT); - seq_printf(m, " base: 0x%0llx len: 0x%llx", er_ctxt->rbase, - er_ctxt->rlen); + seq_printf(m, " base: 0x%0llx len: 0x%llx", le64_to_cpu(er_ctxt->rbase), + le64_to_cpu(er_ctxt->rlen)); - seq_printf(m, " rp: 0x%llx wp: 0x%llx", er_ctxt->rp, - er_ctxt->wp); + seq_printf(m, " rp: 0x%llx wp: 0x%llx", le64_to_cpu(er_ctxt->rp), + le64_to_cpu(er_ctxt->wp)); seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp, &mhi_event->db_cfg.db_val); @@ -106,18 +106,18 @@ static int mhi_debugfs_channels_show(struct seq_file *m, void *d) seq_printf(m, "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx", - mhi_chan->name, mhi_chan->chan, (chan_ctxt->chcfg & + mhi_chan->name, mhi_chan->chan, (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_CHSTATE_MASK) >> CHAN_CTX_CHSTATE_SHIFT, - (chan_ctxt->chcfg & CHAN_CTX_BRSTMODE_MASK) >> - CHAN_CTX_BRSTMODE_SHIFT, (chan_ctxt->chcfg & + (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_BRSTMODE_MASK) >> + CHAN_CTX_BRSTMODE_SHIFT, (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_POLLCFG_MASK) >> CHAN_CTX_POLLCFG_SHIFT); - seq_printf(m, " type: 0x%x event ring: %u", chan_ctxt->chtype, - chan_ctxt->erindex); + seq_printf(m, " type: 0x%x event ring: %u", le32_to_cpu(chan_ctxt->chtype), + le32_to_cpu(chan_ctxt->erindex)); seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx", - chan_ctxt->rbase, chan_ctxt->rlen, chan_ctxt->rp, - chan_ctxt->wp); + le64_to_cpu(chan_ctxt->rbase), le64_to_cpu(chan_ctxt->rlen), + le64_to_cpu(chan_ctxt->rp), le64_to_cpu(chan_ctxt->wp)); seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 
0x%pad\n", ring->rp, ring->wp, diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index f1ec34417592..4183945fc2c4 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -290,17 +290,17 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) if (mhi_chan->offload_ch) continue; - tmp = chan_ctxt->chcfg; + tmp = le32_to_cpu(chan_ctxt->chcfg); tmp &= ~CHAN_CTX_CHSTATE_MASK; tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); tmp &= ~CHAN_CTX_BRSTMODE_MASK; tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT); tmp &= ~CHAN_CTX_POLLCFG_MASK; tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT); - chan_ctxt->chcfg = tmp; + chan_ctxt->chcfg = cpu_to_le32(tmp); - chan_ctxt->chtype = mhi_chan->type; - chan_ctxt->erindex = mhi_chan->er_index; + chan_ctxt->chtype = cpu_to_le32(mhi_chan->type); + chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index); mhi_chan->ch_state = MHI_CH_STATE_DISABLED; mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp; @@ -325,14 +325,14 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) if (mhi_event->offload_ev) continue; - tmp = er_ctxt->intmod; + tmp = le32_to_cpu(er_ctxt->intmod); tmp &= ~EV_CTX_INTMODC_MASK; tmp &= ~EV_CTX_INTMODT_MASK; tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT); - er_ctxt->intmod = tmp; + er_ctxt->intmod = cpu_to_le32(tmp); - er_ctxt->ertype = MHI_ER_TYPE_VALID; - er_ctxt->msivec = mhi_event->irq; + er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID); + er_ctxt->msivec = cpu_to_le32(mhi_event->irq); mhi_event->db_cfg.db_mode = true; ring->el_size = sizeof(struct mhi_tre); @@ -346,9 +346,9 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) * ring is empty */ ring->rp = ring->wp = ring->base; - er_ctxt->rbase = ring->iommu_base; + er_ctxt->rbase = cpu_to_le64(ring->iommu_base); er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; - er_ctxt->rlen = ring->len; + er_ctxt->rlen = cpu_to_le64(ring->len); ring->ctxt_wp = &er_ctxt->wp; } @@ -375,9 +375,9 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) goto error_alloc_cmd; ring->rp = ring->wp = ring->base; - cmd_ctxt->rbase = ring->iommu_base; + cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base); cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; - cmd_ctxt->rlen = ring->len; + cmd_ctxt->rlen = cpu_to_le64(ring->len); ring->ctxt_wp = &cmd_ctxt->wp; } @@ -578,10 +578,10 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, chan_ctxt->rp = 0; chan_ctxt->wp = 0; - tmp = chan_ctxt->chcfg; + tmp = le32_to_cpu(chan_ctxt->chcfg); tmp &= ~CHAN_CTX_CHSTATE_MASK; tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); - chan_ctxt->chcfg = tmp; + chan_ctxt->chcfg = cpu_to_le32(tmp); /* Update to all cores */ smp_wmb(); @@ -615,14 +615,14 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, return -ENOMEM; } - tmp = chan_ctxt->chcfg; + tmp = le32_to_cpu(chan_ctxt->chcfg); tmp &= ~CHAN_CTX_CHSTATE_MASK; tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT); - chan_ctxt->chcfg = tmp; + chan_ctxt->chcfg = cpu_to_le32(tmp); - chan_ctxt->rbase = tre_ring->iommu_base; + chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base); chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; - chan_ctxt->rlen = tre_ring->len; + chan_ctxt->rlen = cpu_to_le64(tre_ring->len); tre_ring->ctxt_wp = &chan_ctxt->wp; tre_ring->rp = tre_ring->wp = tre_ring->base; diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index 3a732afaf73e..c02c4d48b744 100644 --- a/drivers/bus/mhi/core/internal.h +++ 
b/drivers/bus/mhi/core/internal.h @@ -209,14 +209,14 @@ extern struct bus_type mhi_bus_type; #define EV_CTX_INTMODT_MASK GENMASK(31, 16) #define EV_CTX_INTMODT_SHIFT 16 struct mhi_event_ctxt { - __u32 intmod; - __u32 ertype; - __u32 msivec; - - __u64 rbase __packed __aligned(4); - __u64 rlen __packed __aligned(4); - __u64 rp __packed __aligned(4); - __u64 wp __packed __aligned(4); + __le32 intmod; + __le32 ertype; + __le32 msivec; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); }; #define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) @@ -227,25 +227,25 @@ struct mhi_event_ctxt { #define CHAN_CTX_POLLCFG_SHIFT 10 #define CHAN_CTX_RESERVED_MASK GENMASK(31, 16) struct mhi_chan_ctxt { - __u32 chcfg; - __u32 chtype; - __u32 erindex; - - __u64 rbase __packed __aligned(4); - __u64 rlen __packed __aligned(4); - __u64 rp __packed __aligned(4); - __u64 wp __packed __aligned(4); + __le32 chcfg; + __le32 chtype; + __le32 erindex; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); }; struct mhi_cmd_ctxt { - __u32 reserved0; - __u32 reserved1; - __u32 reserved2; - - __u64 rbase __packed __aligned(4); - __u64 rlen __packed __aligned(4); - __u64 rp __packed __aligned(4); - __u64 wp __packed __aligned(4); + __le32 reserved0; + __le32 reserved1; + __le32 reserved2; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); }; struct mhi_ctxt { @@ -258,8 +258,8 @@ struct mhi_ctxt { }; struct mhi_tre { - u64 ptr; - u32 dword[2]; + __le64 ptr; + __le32 dword[2]; }; struct bhi_vec_entry { @@ -277,57 +277,58 @@ enum mhi_cmd_type { /* No operation command */ #define MHI_TRE_CMD_NOOP_PTR (0) #define MHI_TRE_CMD_NOOP_DWORD0 (0) -#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16) +#define MHI_TRE_CMD_NOOP_DWORD1 (cpu_to_le32(MHI_CMD_NOP << 16)) /* Channel reset command */ #define MHI_TRE_CMD_RESET_PTR (0) #define MHI_TRE_CMD_RESET_DWORD0 (0) -#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \ - (MHI_CMD_RESET_CHAN << 16)) +#define MHI_TRE_CMD_RESET_DWORD1(chid) (cpu_to_le32((chid << 24) | \ + (MHI_CMD_RESET_CHAN << 16))) /* Channel stop command */ #define MHI_TRE_CMD_STOP_PTR (0) #define MHI_TRE_CMD_STOP_DWORD0 (0) -#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \ - (MHI_CMD_STOP_CHAN << 16)) +#define MHI_TRE_CMD_STOP_DWORD1(chid) (cpu_to_le32((chid << 24) | \ + (MHI_CMD_STOP_CHAN << 16))) /* Channel start command */ #define MHI_TRE_CMD_START_PTR (0) #define MHI_TRE_CMD_START_DWORD0 (0) -#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \ - (MHI_CMD_START_CHAN << 16)) +#define MHI_TRE_CMD_START_DWORD1(chid) (cpu_to_le32((chid << 24) | \ + (MHI_CMD_START_CHAN << 16))) -#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_GET_DWORD(tre, word) (le32_to_cpu((tre)->dword[(word)])) +#define MHI_TRE_GET_CMD_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) /* Event descriptor macros */ -#define MHI_TRE_EV_PTR(ptr) (ptr) -#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) -#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) -#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) -#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) 
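The internal.h change above converts every shared MHI context and TRE field from native-endian __u32/__u64 to explicit __le32/__le64, so each CPU-side access now goes through le32_to_cpu()/cpu_to_le32() (or the new MHI_TRE_GET_DWORD() wrapper) instead of touching the raw field. A minimal kernel-style sketch of that read-modify-write pattern follows; the struct and function names are illustrative only and are not part of the driver:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Device-visible context lives in shared memory in little-endian layout. */
struct example_chan_ctxt {
	__le32 chcfg;
	__le64 rbase;
};

static void example_set_chan_state(struct example_chan_ctxt *ctxt,
				   u32 state_mask, u32 state_val)
{
	u32 tmp;

	tmp = le32_to_cpu(ctxt->chcfg);	/* convert to CPU byte order first */
	tmp &= ~state_mask;
	tmp |= state_val;
	ctxt->chcfg = cpu_to_le32(tmp);	/* convert back before the device reads it */
}

On little-endian hosts the conversions compile away; on big-endian hosts they are what keeps the device-visible layout correct, which is the point of the __le32/__le64 annotations in the hunks above and below.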
-#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) -#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) -#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) -#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) -#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr) -#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF) -#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF) +#define MHI_TRE_EV_PTR(ptr) (cpu_to_le64(ptr)) +#define MHI_TRE_EV_DWORD0(code, len) (cpu_to_le32((code << 24) | len)) +#define MHI_TRE_EV_DWORD1(chid, type) (cpu_to_le32((chid << 24) | (type << 16))) +#define MHI_TRE_GET_EV_PTR(tre) (le64_to_cpu((tre)->ptr)) +#define MHI_TRE_GET_EV_CODE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LEN(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0) +#define MHI_TRE_GET_EV_TIME(tre) (MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_VEID(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_LINKSPEED(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LINKWIDTH(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFF) /* Transfer descriptor macros */ -#define MHI_TRE_DATA_PTR(ptr) (ptr) -#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) -#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \ - | (ieot << 9) | (ieob << 8) | chain) +#define MHI_TRE_DATA_PTR(ptr) (cpu_to_le64(ptr)) +#define MHI_TRE_DATA_DWORD0(len) (cpu_to_le32(len & MHI_MAX_MTU)) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) (cpu_to_le32((2 << 16) | (bei << 10) \ + | (ieot << 9) | (ieob << 8) | chain)) /* RSC transfer descriptor macros */ -#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr) -#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie) -#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16) +#define MHI_RSCTRE_DATA_PTR(ptr, len) (cpu_to_le64(((u64)len << 48) | ptr)) +#define MHI_RSCTRE_DATA_DWORD0(cookie) (cpu_to_le32(cookie)) +#define MHI_RSCTRE_DATA_DWORD1 (cpu_to_le32(MHI_PKT_TYPE_COALESCING << 16)) enum mhi_pkt_type { MHI_PKT_TYPE_INVALID = 0x0, @@ -499,7 +500,7 @@ struct state_transition { struct mhi_ring { dma_addr_t dma_handle; dma_addr_t iommu_base; - u64 *ctxt_wp; /* point to ctxt wp */ + __le64 *ctxt_wp; /* point to ctxt wp */ void *pre_aligned; void *base; void *rp; diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index b15c5bc37dd4..9a94b8d66f57 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -114,7 +114,7 @@ void mhi_ring_er_db(struct mhi_event *mhi_event) struct mhi_ring *ring = &mhi_event->ring; mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, - ring->db_addr, *ring->ctxt_wp); + ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); } void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct 
mhi_cmd *mhi_cmd) @@ -123,7 +123,7 @@ void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) struct mhi_ring *ring = &mhi_cmd->ring; db = ring->iommu_base + (ring->wp - ring->base); - *ring->ctxt_wp = db; + *ring->ctxt_wp = cpu_to_le64(db); mhi_write_db(mhi_cntrl, ring->db_addr, db); } @@ -140,7 +140,7 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, * before letting h/w know there is new element to fetch. */ dma_wmb(); - *ring->ctxt_wp = db; + *ring->ctxt_wp = cpu_to_le64(db); mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr, db); @@ -432,7 +432,7 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev) struct mhi_event_ctxt *er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; struct mhi_ring *ev_ring = &mhi_event->ring; - dma_addr_t ptr = er_ctxt->rp; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); void *dev_rp; if (!is_valid_ring_ptr(ev_ring, ptr)) { @@ -537,14 +537,14 @@ static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, /* Update the WP */ ring->wp += ring->el_size; - ctxt_wp = *ring->ctxt_wp + ring->el_size; + ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size; if (ring->wp >= (ring->base + ring->len)) { ring->wp = ring->base; ctxt_wp = ring->iommu_base; } - *ring->ctxt_wp = ctxt_wp; + *ring->ctxt_wp = cpu_to_le64(ctxt_wp); /* Update the RP */ ring->rp += ring->el_size; @@ -801,7 +801,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 chan; int count = 0; - dma_addr_t ptr = er_ctxt->rp; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); /* * This is a quick check to avoid unnecessary event processing @@ -940,7 +940,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp; - ptr = er_ctxt->rp; + ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n"); @@ -970,7 +970,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, int count = 0; u32 chan; struct mhi_chan *mhi_chan; - dma_addr_t ptr = er_ctxt->rp; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) return -EIO; @@ -1011,7 +1011,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp; - ptr = er_ctxt->rp; + ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n"); @@ -1529,7 +1529,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, /* mark all stale events related to channel as STALE event */ spin_lock_irqsave(&mhi_event->lock, flags); - ptr = er_ctxt->rp; + ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n"); diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index bb9a2043f3a2..1020268a075a 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -218,7 +218,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) continue; ring->wp = ring->base + ring->len - ring->el_size; - *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); /* Update all cores */ smp_wmb(); @@ -420,7 +420,7 @@ static 
int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) continue; ring->wp = ring->base + ring->len - ring->el_size; - *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); /* Update to all cores */ smp_wmb(); diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c index d340d6864e13..d243526b23d8 100644 --- a/drivers/bus/mhi/pci_generic.c +++ b/drivers/bus/mhi/pci_generic.c @@ -327,6 +327,7 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = { .config = &modem_quectel_em1xx_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, + .mru_default = 32768, .sideband_wake = true, }; diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c index 626dedd110cb..fca0d0669aa9 100644 --- a/drivers/bus/mips_cdmm.c +++ b/drivers/bus/mips_cdmm.c @@ -351,6 +351,7 @@ phys_addr_t __weak mips_cdmm_phys_base(void) np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm"); if (np) { err = of_address_to_resource(np, 0, &res); + of_node_put(np); if (!err) return res.start; } diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 239eca4d6805..650c7d918080 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -414,7 +414,7 @@ config HW_RANDOM_MESON config HW_RANDOM_CAVIUM tristate "Cavium ThunderX Random Number Generator support" - depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT)) + depends on HW_RANDOM && PCI && ARCH_THUNDER default HW_RANDOM help This driver provides kernel-side support for the Random Number diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index ecb71c4317a5..8cf0ef501341 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c @@ -114,6 +114,7 @@ static int atmel_trng_probe(struct platform_device *pdev) err_register: clk_disable_unprepare(trng->clk); + atmel_trng_disable(trng); return ret; } diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c index 3de4a6a443ef..6f66919652bf 100644 --- a/drivers/char/hw_random/cavium-rng-vf.c +++ b/drivers/char/hw_random/cavium-rng-vf.c @@ -1,10 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * Hardware Random Number Generator support for Cavium, Inc. - * Thunder processor family. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. + * Hardware Random Number Generator support. + * Cavium Thunder, Marvell OcteonTx/Tx2 processor families. * * Copyright (C) 2016 Cavium, Inc. 
*/ @@ -15,16 +12,146 @@ #include <linux/pci.h> #include <linux/pci_ids.h> +#include <asm/arch_timer.h> + +/* PCI device IDs */ +#define PCI_DEVID_CAVIUM_RNG_PF 0xA018 +#define PCI_DEVID_CAVIUM_RNG_VF 0xA033 + +#define HEALTH_STATUS_REG 0x38 + +/* RST device info */ +#define PCI_DEVICE_ID_RST_OTX2 0xA085 +#define RST_BOOT_REG 0x1600ULL +#define CLOCK_BASE_RATE 50000000ULL +#define MSEC_TO_NSEC(x) (x * 1000000) + struct cavium_rng { struct hwrng ops; void __iomem *result; + void __iomem *pf_regbase; + struct pci_dev *pdev; + u64 clock_rate; + u64 prev_error; + u64 prev_time; }; +static inline bool is_octeontx(struct pci_dev *pdev) +{ + if (midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_83XX, + MIDR_CPU_VAR_REV(0, 0), + MIDR_CPU_VAR_REV(3, 0)) || + midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_81XX, + MIDR_CPU_VAR_REV(0, 0), + MIDR_CPU_VAR_REV(3, 0)) || + midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX, + MIDR_CPU_VAR_REV(0, 0), + MIDR_CPU_VAR_REV(3, 0))) + return true; + + return false; +} + +static u64 rng_get_coprocessor_clkrate(void) +{ + u64 ret = CLOCK_BASE_RATE * 16; /* Assume 800Mhz as default */ + struct pci_dev *pdev; + void __iomem *base; + + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_RST_OTX2, NULL); + if (!pdev) + goto error; + + base = pci_ioremap_bar(pdev, 0); + if (!base) + goto error_put_pdev; + + /* RST: PNR_MUL * 50Mhz gives clockrate */ + ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT_REG) >> 33) & 0x3F); + + iounmap(base); + +error_put_pdev: + pci_dev_put(pdev); + +error: + return ret; +} + +static int check_rng_health(struct cavium_rng *rng) +{ + u64 cur_err, cur_time; + u64 status, cycles; + u64 time_elapsed; + + + /* Skip checking health for OcteonTx */ + if (!rng->pf_regbase) + return 0; + + status = readq(rng->pf_regbase + HEALTH_STATUS_REG); + if (status & BIT_ULL(0)) { + dev_err(&rng->pdev->dev, "HWRNG: Startup health test failed\n"); + return -EIO; + } + + cycles = status >> 1; + if (!cycles) + return 0; + + cur_time = arch_timer_read_counter(); + + /* RNM_HEALTH_STATUS[CYCLES_SINCE_HEALTH_FAILURE] + * Number of coprocessor cycles times 2 since the last failure. + * This field doesn't get cleared/updated until another failure. + */ + cycles = cycles / 2; + cur_err = (cycles * 1000000000) / rng->clock_rate; /* In nanosec */ + + /* Ignore errors that happenned a long time ago, these + * are most likely false positive errors. + */ + if (cur_err > MSEC_TO_NSEC(10)) { + rng->prev_error = 0; + rng->prev_time = 0; + return 0; + } + + if (rng->prev_error) { + /* Calculate time elapsed since last error + * '1' tick of CNTVCT is 10ns, since it runs at 100Mhz. + */ + time_elapsed = (cur_time - rng->prev_time) * 10; + time_elapsed += rng->prev_error; + + /* Check if current error is a new one or the old one itself. + * If error is a new one then consider there is a persistent + * issue with entropy, declare hardware failure. 
+ */ + if (cur_err < time_elapsed) { + dev_err(&rng->pdev->dev, "HWRNG failure detected\n"); + rng->prev_error = cur_err; + rng->prev_time = cur_time; + return -EIO; + } + } + + rng->prev_error = cur_err; + rng->prev_time = cur_time; + return 0; +} + /* Read data from the RNG unit */ static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait) { struct cavium_rng *p = container_of(rng, struct cavium_rng, ops); unsigned int size = max; + int err = 0; + + err = check_rng_health(p); + if (err) + return err; while (size >= 8) { *((u64 *)dat) = readq(p->result); @@ -39,6 +166,39 @@ static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait) return max; } +static int cavium_map_pf_regs(struct cavium_rng *rng) +{ + struct pci_dev *pdev; + + /* Health status is not supported on 83xx, skip mapping PF CSRs */ + if (is_octeontx(rng->pdev)) { + rng->pf_regbase = NULL; + return 0; + } + + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_CAVIUM_RNG_PF, NULL); + if (!pdev) { + dev_err(&pdev->dev, "Cannot find RNG PF device\n"); + return -EIO; + } + + rng->pf_regbase = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!rng->pf_regbase) { + dev_err(&pdev->dev, "Failed to map PF CSR region\n"); + pci_dev_put(pdev); + return -ENOMEM; + } + + pci_dev_put(pdev); + + /* Get co-processor clock rate */ + rng->clock_rate = rng_get_coprocessor_clkrate(); + + return 0; +} + /* Map Cavium RNG to an HWRNG object */ static int cavium_rng_probe_vf(struct pci_dev *pdev, const struct pci_device_id *id) @@ -50,6 +210,8 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev, if (!rng) return -ENOMEM; + rng->pdev = pdev; + /* Map the RNG result */ rng->result = pcim_iomap(pdev, 0, 0); if (!rng->result) { @@ -67,6 +229,11 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev, pci_set_drvdata(pdev, rng); + /* Health status is available only at PF, hence map PF registers. */ + ret = cavium_map_pf_regs(rng); + if (ret) + return ret; + ret = devm_hwrng_register(&pdev->dev, &rng->ops); if (ret) { dev_err(&pdev->dev, "Error registering device as HWRNG.\n"); @@ -76,10 +243,18 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev, return 0; } +/* Remove the VF */ +static void cavium_rng_remove_vf(struct pci_dev *pdev) +{ + struct cavium_rng *rng; + + rng = pci_get_drvdata(pdev); + iounmap(rng->pf_regbase); +} static const struct pci_device_id cavium_rng_vf_id_table[] = { - { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0}, - {0,}, + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CAVIUM_RNG_VF) }, + { 0, } }; MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table); @@ -87,8 +262,9 @@ static struct pci_driver cavium_rng_vf_driver = { .name = "cavium_rng_vf", .id_table = cavium_rng_vf_id_table, .probe = cavium_rng_probe_vf, + .remove = cavium_rng_remove_vf, }; module_pci_driver(cavium_rng_vf_driver); MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c index 63d6e68c24d2..b96579222408 100644 --- a/drivers/char/hw_random/cavium-rng.c +++ b/drivers/char/hw_random/cavium-rng.c @@ -1,10 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * Hardware Random Number Generator support for Cavium Inc. - * Thunder processor family. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
+ * Hardware Random Number Generator support. + * Cavium Thunder, Marvell OcteonTx/Tx2 processor families. * * Copyright (C) 2016 Cavium, Inc. */ @@ -91,4 +88,4 @@ static struct pci_driver cavium_rng_pf_driver = { module_pci_driver(cavium_rng_pf_driver); MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index 67947a19aa22..e8f9621e7954 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -65,14 +65,14 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) out_release: amba_release_regions(dev); out_clk: - clk_disable(rng_clk); + clk_disable_unprepare(rng_clk); return ret; } static void nmk_rng_remove(struct amba_device *dev) { amba_release_regions(dev); - clk_disable(rng_clk); + clk_disable_unprepare(rng_clk); } static const struct amba_id nmk_rng_ids[] = { diff --git a/drivers/char/random.c b/drivers/char/random.c index a27ae3999ff3..ebe86de9d0ac 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1963,7 +1963,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; - input_pool.entropy_count = 0; + if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) { + wake_up_interruptible(&random_write_wait); + kill_fasync(&fasync, SIGIO, POLL_OUT); + } return 0; case RNDRESEEDCRNG: if (!capable(CAP_SYS_ADMIN)) diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index df37e7b6a10a..65d800ecc996 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -274,14 +274,6 @@ static void tpm_dev_release(struct device *dev) kfree(chip); } -static void tpm_devs_release(struct device *dev) -{ - struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); - - /* release the master device reference */ - put_device(&chip->dev); -} - /** * tpm_class_shutdown() - prepare the TPM device for loss of power. * @dev: device to which the chip is associated. @@ -344,7 +336,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->dev_num = rc; device_initialize(&chip->dev); - device_initialize(&chip->devs); chip->dev.class = tpm_class; chip->dev.class->shutdown_pre = tpm_class_shutdown; @@ -352,39 +343,20 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->dev.parent = pdev; chip->dev.groups = chip->groups; - chip->devs.parent = pdev; - chip->devs.class = tpmrm_class; - chip->devs.release = tpm_devs_release; - /* get extra reference on main device to hold on - * behalf of devs. This holds the chip structure - * while cdevs is in use. 
The corresponding put - * is in the tpm_devs_release (TPM2 only) - */ - if (chip->flags & TPM_CHIP_FLAG_TPM2) - get_device(&chip->dev); - if (chip->dev_num == 0) chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR); else chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num); - chip->devs.devt = - MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); - rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num); if (rc) goto out; - rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); - if (rc) - goto out; if (!pdev) chip->flags |= TPM_CHIP_FLAG_VIRTUAL; cdev_init(&chip->cdev, &tpm_fops); - cdev_init(&chip->cdevs, &tpmrm_fops); chip->cdev.owner = THIS_MODULE; - chip->cdevs.owner = THIS_MODULE; rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); if (rc) { @@ -396,7 +368,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, return chip; out: - put_device(&chip->devs); put_device(&chip->dev); return ERR_PTR(rc); } @@ -445,14 +416,9 @@ static int tpm_add_char_device(struct tpm_chip *chip) } if (chip->flags & TPM_CHIP_FLAG_TPM2) { - rc = cdev_device_add(&chip->cdevs, &chip->devs); - if (rc) { - dev_err(&chip->devs, - "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", - dev_name(&chip->devs), MAJOR(chip->devs.devt), - MINOR(chip->devs.devt), rc); - return rc; - } + rc = tpm_devs_add(chip); + if (rc) + goto err_del_cdev; } /* Make the chip available. */ @@ -460,6 +426,10 @@ static int tpm_add_char_device(struct tpm_chip *chip) idr_replace(&dev_nums_idr, chip, chip->dev_num); mutex_unlock(&idr_lock); + return 0; + +err_del_cdev: + cdev_device_del(&chip->cdev, &chip->dev); return rc; } @@ -649,7 +619,7 @@ void tpm_chip_unregister(struct tpm_chip *chip) hwrng_unregister(&chip->hwrng); tpm_bios_log_teardown(chip); if (chip->flags & TPM_CHIP_FLAG_TPM2) - cdev_device_del(&chip->cdevs, &chip->devs); + tpm_devs_remove(chip); tpm_del_char_device(chip); } EXPORT_SYMBOL_GPL(tpm_chip_unregister); diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index c08cbb306636..dc4c0a0a5129 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -69,7 +69,13 @@ static void tpm_dev_async_work(struct work_struct *work) ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); - if (ret > 0) { + + /* + * If ret is > 0 then tpm_dev_transmit returned the size of the + * response. If ret is < 0 then tpm_dev_transmit failed and + * returned an error code. 
+ */ + if (ret != 0) { priv->response_length = ret; mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); } diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 283f78211c3a..2163c6ee0d36 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -234,6 +234,8 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, size_t cmdsiz); int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf, size_t *bufsiz); +int tpm_devs_add(struct tpm_chip *chip); +void tpm_devs_remove(struct tpm_chip *chip); void tpm_bios_log_setup(struct tpm_chip *chip); void tpm_bios_log_teardown(struct tpm_chip *chip); diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index 97e916856cf3..ffb35f0154c1 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -58,12 +58,12 @@ int tpm2_init_space(struct tpm_space *space, unsigned int buf_size) void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space) { - mutex_lock(&chip->tpm_mutex); - if (!tpm_chip_start(chip)) { + + if (tpm_try_get_ops(chip) == 0) { tpm2_flush_sessions(chip, space); - tpm_chip_stop(chip); + tpm_put_ops(chip); } - mutex_unlock(&chip->tpm_mutex); + kfree(space->context_buf); kfree(space->session_buf); } @@ -574,3 +574,68 @@ out: dev_err(&chip->dev, "%s: error %d\n", __func__, rc); return rc; } + +/* + * Put the reference to the main device. + */ +static void tpm_devs_release(struct device *dev) +{ + struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); + + /* release the master device reference */ + put_device(&chip->dev); +} + +/* + * Remove the device file for exposed TPM spaces and release the device + * reference. This may also release the reference to the master device. + */ +void tpm_devs_remove(struct tpm_chip *chip) +{ + cdev_device_del(&chip->cdevs, &chip->devs); + put_device(&chip->devs); +} + +/* + * Add a device file to expose TPM spaces. Also take a reference to the + * main device. + */ +int tpm_devs_add(struct tpm_chip *chip) +{ + int rc; + + device_initialize(&chip->devs); + chip->devs.parent = chip->dev.parent; + chip->devs.class = tpmrm_class; + + /* + * Get extra reference on main device to hold on behalf of devs. + * This holds the chip structure while cdevs is in use. The + * corresponding put is in the tpm_devs_release. 
+ */ + get_device(&chip->dev); + chip->devs.release = tpm_devs_release; + chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); + cdev_init(&chip->cdevs, &tpmrm_fops); + chip->cdevs.owner = THIS_MODULE; + + rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); + if (rc) + goto err_put_devs; + + rc = cdev_device_add(&chip->cdevs, &chip->devs); + if (rc) { + dev_err(&chip->devs, + "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", + dev_name(&chip->devs), MAJOR(chip->devs.devt), + MINOR(chip->devs.devt), rc); + goto err_put_devs; + } + + return 0; + +err_put_devs: + put_device(&chip->devs); + + return rc; +} diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 7eaf303a7a86..77bc993d7513 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1956,6 +1956,13 @@ static void virtcons_remove(struct virtio_device *vdev) list_del(&portdev->list); spin_unlock_irq(&pdrvdata_lock); + /* Device is going away, exit any polling for buffers */ + virtio_break_device(vdev); + if (use_multiport(portdev)) + flush_work(&portdev->control_work); + else + flush_work(&portdev->config_work); + /* Disable interrupts for vqs */ vdev->config->reset(vdev); /* Finish up work that's lined up */ @@ -2229,7 +2236,7 @@ static struct virtio_driver virtio_rproc_serial = { .remove = virtcons_remove, }; -static int __init init(void) +static int __init virtio_console_init(void) { int err; @@ -2264,7 +2271,7 @@ free: return err; } -static void __exit fini(void) +static void __exit virtio_console_fini(void) { reclaim_dma_bufs(); @@ -2274,8 +2281,8 @@ static void __exit fini(void) class_destroy(pdrvdata.class); debugfs_remove_recursive(pdrvdata.debugfs_dir); } -module_init(init); -module_exit(fini); +module_init(virtio_console_init); +module_exit(virtio_console_fini); MODULE_DESCRIPTION("Virtio console driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/actions/owl-s700.c b/drivers/clk/actions/owl-s700.c index a2f34d13fb54..6ea7da1d6d75 100644 --- a/drivers/clk/actions/owl-s700.c +++ b/drivers/clk/actions/owl-s700.c @@ -162,6 +162,7 @@ static struct clk_div_table hdmia_div_table[] = { static struct clk_div_table rmii_div_table[] = { {0, 4}, {1, 10}, + {0, 0} }; /* divider clocks */ diff --git a/drivers/clk/actions/owl-s900.c b/drivers/clk/actions/owl-s900.c index 790890978424..5144ada2c7e1 100644 --- a/drivers/clk/actions/owl-s900.c +++ b/drivers/clk/actions/owl-s900.c @@ -140,7 +140,7 @@ static struct clk_div_table rmii_ref_div_table[] = { static struct clk_div_table usb3_mac_div_table[] = { { 1, 2 }, { 2, 3 }, { 3, 4 }, - { 0, 8 }, + { 0, 0 } }; static struct clk_div_table i2s_div_table[] = { diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c index 019e712f90d6..9eed97a299d0 100644 --- a/drivers/clk/at91/sama7g5.c +++ b/drivers/clk/at91/sama7g5.c @@ -687,16 +687,16 @@ static const struct { { .n = "pdmc0_gclk", .id = 68, .r = { .max = 50000000 }, - .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, - .pp_mux_table = { 5, 8, }, + .pp = { "syspll_divpmcck", "audiopll_divpmcck", }, + .pp_mux_table = { 5, 9, }, .pp_count = 2, .pp_chg_id = INT_MIN, }, { .n = "pdmc1_gclk", .id = 69, .r = { .max = 50000000, }, - .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, - .pp_mux_table = { 5, 8, }, + .pp = { "syspll_divpmcck", "audiopll_divpmcck", }, + .pp_mux_table = { 5, 9, }, .pp_count = 2, .pp_chg_id = INT_MIN, }, diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c index a2c6486ef170..f8417ee2961a 100644 --- 
a/drivers/clk/clk-clps711x.c +++ b/drivers/clk/clk-clps711x.c @@ -28,11 +28,13 @@ static const struct clk_div_table spi_div_table[] = { { .val = 1, .div = 8, }, { .val = 2, .div = 2, }, { .val = 3, .div = 1, }, + { /* sentinel */ } }; static const struct clk_div_table timer_div_table[] = { { .val = 0, .div = 256, }, { .val = 1, .div = 1, }, + { /* sentinel */ } }; struct clps711x_clk { diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c index f7b41366666e..4de098b6b0d4 100644 --- a/drivers/clk/clk-si5341.c +++ b/drivers/clk/clk-si5341.c @@ -798,6 +798,15 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw, u32 r_divider; u8 r[3]; + err = regmap_read(output->data->regmap, + SI5341_OUT_CONFIG(output), &val); + if (err < 0) + return err; + + /* If SI5341_OUT_CFG_RDIV_FORCE2 is set, r_divider is 2 */ + if (val & SI5341_OUT_CFG_RDIV_FORCE2) + return parent_rate / 2; + err = regmap_bulk_read(output->data->regmap, SI5341_OUT_R_REG(output), r, 3); if (err < 0) @@ -814,13 +823,6 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw, r_divider += 1; r_divider <<= 1; - err = regmap_read(output->data->regmap, - SI5341_OUT_CONFIG(output), &val); - if (err < 0) - return err; - - if (val & SI5341_OUT_CFG_RDIV_FORCE2) - r_divider = 2; return parent_rate / r_divider; } diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index ac11cefc3191..d6dc58bd07b3 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -631,6 +631,24 @@ static void clk_core_get_boundaries(struct clk_core *core, *max_rate = min(*max_rate, clk_user->max_rate); } +static bool clk_core_check_boundaries(struct clk_core *core, + unsigned long min_rate, + unsigned long max_rate) +{ + struct clk *user; + + lockdep_assert_held(&prepare_lock); + + if (min_rate > core->max_rate || max_rate < core->min_rate) + return false; + + hlist_for_each_entry(user, &core->clks, clks_node) + if (min_rate > user->max_rate || max_rate < user->min_rate) + return false; + + return true; +} + void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, unsigned long max_rate) { @@ -2347,6 +2365,11 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) clk->min_rate = min; clk->max_rate = max; + if (!clk_core_check_boundaries(clk->core, min, max)) { + ret = -EINVAL; + goto out; + } + rate = clk_core_get_rate_nolock(clk->core); if (rate < min || rate > max) { /* @@ -2375,6 +2398,7 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) } } +out: if (clk->exclusive_count) clk_core_rate_protect(clk->core); @@ -3410,6 +3434,19 @@ static void clk_core_reparent_orphans_nolock(void) __clk_set_parent_after(orphan, parent, NULL); __clk_recalc_accuracies(orphan); __clk_recalc_rates(orphan, 0); + + /* + * __clk_init_parent() will set the initial req_rate to + * 0 if the clock doesn't have clk_ops::recalc_rate and + * is an orphan when it's registered. + * + * 'req_rate' is used by clk_set_rate_range() and + * clk_put() to trigger a clk_set_rate() call whenever + * the boundaries are modified. Let's make sure + * 'req_rate' is set to something non-zero so that + * clk_set_rate_range() doesn't drop the frequency. + */ + orphan->req_rate = orphan->rate; } } } @@ -3730,8 +3767,9 @@ struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id) { struct device *dev = hw->core->dev; + const char *name = dev ? 
dev_name(dev) : NULL; - return clk_hw_create_clk(dev, hw, dev_name(dev), con_id); + return clk_hw_create_clk(dev, hw, name, con_id); } EXPORT_SYMBOL(clk_hw_get_clk); diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c index 56012a3d0219..9ea1a80acbe8 100644 --- a/drivers/clk/hisilicon/clk-hi3559a.c +++ b/drivers/clk/hisilicon/clk-hi3559a.c @@ -611,8 +611,8 @@ static struct hisi_mux_clock hi3559av100_shub_mux_clks[] = { /* shub div clk */ -static struct clk_div_table shub_spi_clk_table[] = {{0, 8}, {1, 4}, {2, 2}}; -static struct clk_div_table shub_uart_div_clk_table[] = {{1, 8}, {2, 4}}; +static struct clk_div_table shub_spi_clk_table[] = {{0, 8}, {1, 4}, {2, 2}, {/*sentinel*/}}; +static struct clk_div_table shub_uart_div_clk_table[] = {{1, 8}, {2, 4}, {/*sentinel*/}}; static struct hisi_divider_clock hi3559av100_shub_div_clks[] = { { HI3559AV100_SHUB_SPI_SOURCE_CLK, "clk_spi_clk", "shub_clk", 0, 0x20, 24, 2, diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index c4e0f1c07192..3f6fd7ef2a68 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -849,7 +849,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) hws[IMX7D_WDOG4_ROOT_CLK] = imx_clk_hw_gate4("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0); hws[IMX7D_KPP_ROOT_CLK] = imx_clk_hw_gate4("kpp_root_clk", "ipg_root_clk", base + 0x4aa0, 0); hws[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_hw_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0); - hws[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_hw_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0); hws[IMX7D_WRCLK_ROOT_CLK] = imx_clk_hw_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0); hws[IMX7D_USB_CTRL_CLK] = imx_clk_hw_gate4("usb_ctrl_clk", "ahb_root_clk", base + 0x4680, 0); hws[IMX7D_USB_PHY1_CLK] = imx_clk_hw_gate4("usb_phy1_clk", "pll_usb1_main_clk", base + 0x46a0, 0); diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c index b23758083ce5..5e31a6a24b3a 100644 --- a/drivers/clk/imx/clk-imx8qxp-lpcg.c +++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c @@ -248,7 +248,7 @@ static int imx_lpcg_parse_clks_from_dt(struct platform_device *pdev, for (i = 0; i < count; i++) { idx = bit_offset[i] / 4; - if (idx > IMX_LPCG_MAX_CLKS) { + if (idx >= IMX_LPCG_MAX_CLKS) { dev_warn(&pdev->dev, "invalid bit offset of clock %d\n", i); ret = -EINVAL; diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c index 5154b0cf8ad6..66ff141da0a4 100644 --- a/drivers/clk/ingenic/jz4725b-cgu.c +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { }, [JZ4725B_CLK_I2S] = { - "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + "i2s", CGU_CLK_MUX | CGU_CLK_DIV, .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 }, .mux = { CGU_REG_CPCCR, 31, 1 }, .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 }, - .gate = { CGU_REG_CLKGR, 6 }, }, [JZ4725B_CLK_SPI] = { diff --git a/drivers/clk/loongson1/clk-loongson1c.c b/drivers/clk/loongson1/clk-loongson1c.c index 703f87622cf5..1ebf740380ef 100644 --- a/drivers/clk/loongson1/clk-loongson1c.c +++ b/drivers/clk/loongson1/clk-loongson1c.c @@ -37,6 +37,7 @@ static const struct clk_div_table ahb_div_table[] = { [1] = { .val = 1, .div = 4 }, [2] = { .val = 2, .div = 3 }, [3] = { .val = 3, .div = 3 }, + [4] = { /* sentinel */ } }; void __init ls1x_clk_init(void) diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c 
index e1b1b426fae4..f675fd969c4d 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -264,7 +264,7 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw, static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) { - u32 cfg, mask; + u32 cfg, mask, d_val, not2d_val, n_minus_m; struct clk_hw *hw = &rcg->clkr.hw; int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src); @@ -283,8 +283,17 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) if (ret) return ret; + /* Calculate 2d value */ + d_val = f->n; + + n_minus_m = f->n - f->m; + n_minus_m *= 2; + + d_val = clamp_t(u32, d_val, f->m, n_minus_m); + not2d_val = ~d_val & mask; + ret = regmap_update_bits(rcg->clkr.regmap, - RCG_D_OFFSET(rcg), mask, ~f->n); + RCG_D_OFFSET(rcg), mask, not2d_val); if (ret) return ret; } @@ -720,6 +729,7 @@ static const struct frac_entry frac_table_pixel[] = { { 2, 9 }, { 4, 9 }, { 1, 1 }, + { 2, 3 }, { } }; diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c index 538e4963c915..5d2ae297e741 100644 --- a/drivers/clk/qcom/dispcc-sc7180.c +++ b/drivers/clk/qcom/dispcc-sc7180.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019, 2022, The Linux Foundation. All rights reserved. */ #include <linux/clk-provider.h> @@ -625,6 +625,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = { static struct gdsc mdss_gdsc = { .gdscr = 0x3000, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "mdss_gdsc", }, diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c index 4ef4ae231794..ad596d567f6a 100644 --- a/drivers/clk/qcom/dispcc-sc7280.c +++ b/drivers/clk/qcom/dispcc-sc7280.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022, The Linux Foundation. All rights reserved. */ #include <linux/clk-provider.h> @@ -787,6 +787,9 @@ static struct clk_branch disp_cc_sleep_clk = { static struct gdsc disp_cc_mdss_core_gdsc = { .gdscr = 0x1004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "disp_cc_mdss_core_gdsc", }, diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c index bf9ffe1a1cf4..73c5feea9818 100644 --- a/drivers/clk/qcom/dispcc-sm8250.c +++ b/drivers/clk/qcom/dispcc-sm8250.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2020, 2022, The Linux Foundation. All rights reserved. 
*/ #include <linux/clk-provider.h> @@ -1125,6 +1125,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = { static struct gdsc mdss_gdsc = { .gdscr = 0x3000, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "mdss_gdsc", }, diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index 108fe27bee10..541016db3c4b 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c @@ -60,11 +60,6 @@ static const struct parent_map gcc_xo_gpll0_gpll0_out_main_div2_map[] = { { P_GPLL0_DIV2, 4 }, }; -static const char * const gcc_xo_gpll0[] = { - "xo", - "gpll0", -}; - static const struct parent_map gcc_xo_gpll0_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, @@ -956,6 +951,11 @@ static struct clk_rcg2 blsp1_uart6_apps_clk_src = { }, }; +static const struct clk_parent_data gcc_xo_gpll0[] = { + { .fw_name = "xo" }, + { .hw = &gpll0.clkr.hw }, +}; + static const struct freq_tbl ftbl_pcie_axi_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(200000000, P_GPLL0, 4, 0, 0), @@ -969,7 +969,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = { .parent_map = gcc_xo_gpll0_map, .clkr.hw.init = &(struct clk_init_data){ .name = "pcie0_axi_clk_src", - .parent_names = gcc_xo_gpll0, + .parent_data = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, @@ -1016,7 +1016,7 @@ static struct clk_rcg2 pcie1_axi_clk_src = { .parent_map = gcc_xo_gpll0_map, .clkr.hw.init = &(struct clk_init_data){ .name = "pcie1_axi_clk_src", - .parent_names = gcc_xo_gpll0, + .parent_data = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, @@ -1074,7 +1074,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { .name = "sdcc1_apps_clk_src", .parent_names = gcc_xo_gpll0_gpll2_gpll0_out_main_div2, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1330,7 +1330,7 @@ static struct clk_rcg2 nss_ce_clk_src = { .parent_map = gcc_xo_gpll0_map, .clkr.hw.init = &(struct clk_init_data){ .name = "nss_ce_clk_src", - .parent_names = gcc_xo_gpll0, + .parent_data = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, @@ -4329,8 +4329,7 @@ static struct clk_rcg2 pcie0_rchng_clk_src = { .parent_map = gcc_xo_gpll0_map, .clkr.hw.init = &(struct clk_init_data){ .name = "pcie0_rchng_clk_src", - .parent_hws = (const struct clk_hw *[]) { - &gpll0.clkr.hw }, + .parent_data = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c index 144d2ba7a9be..463a444c8a7e 100644 --- a/drivers/clk/qcom/gcc-msm8994.c +++ b/drivers/clk/qcom/gcc-msm8994.c @@ -108,6 +108,7 @@ static struct clk_alpha_pll gpll4_early = { static struct clk_alpha_pll_postdiv gpll4 = { .offset = 0x1dc0, + .width = 4, .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], .clkr.hw.init = &(struct clk_init_data) { diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index 4ece326ea233..cf23cfd7e467 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. */ #include <linux/bitops.h> @@ -34,9 +34,14 @@ #define CFG_GDSCR_OFFSET 0x4 /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). 
*/ -#define EN_REST_WAIT_VAL (0x2 << 20) -#define EN_FEW_WAIT_VAL (0x8 << 16) -#define CLK_DIS_WAIT_VAL (0x2 << 12) +#define EN_REST_WAIT_VAL 0x2 +#define EN_FEW_WAIT_VAL 0x8 +#define CLK_DIS_WAIT_VAL 0x2 + +/* Transition delay shifts */ +#define EN_REST_WAIT_SHIFT 20 +#define EN_FEW_WAIT_SHIFT 16 +#define CLK_DIS_WAIT_SHIFT 12 #define RETAIN_MEM BIT(14) #define RETAIN_PERIPH BIT(13) @@ -341,7 +346,18 @@ static int gdsc_init(struct gdsc *sc) */ mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK | EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK; - val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL; + + if (!sc->en_rest_wait_val) + sc->en_rest_wait_val = EN_REST_WAIT_VAL; + if (!sc->en_few_wait_val) + sc->en_few_wait_val = EN_FEW_WAIT_VAL; + if (!sc->clk_dis_wait_val) + sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL; + + val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT | + sc->en_few_wait_val << EN_FEW_WAIT_SHIFT | + sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT; + ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val); if (ret) return ret; diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h index 5bb396b344d1..762f1b5e1ec5 100644 --- a/drivers/clk/qcom/gdsc.h +++ b/drivers/clk/qcom/gdsc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. */ #ifndef __QCOM_GDSC_H__ @@ -22,6 +22,9 @@ struct reset_controller_dev; * @cxcs: offsets of branch registers to toggle mem/periph bits in * @cxc_count: number of @cxcs * @pwrsts: Possible powerdomain power states + * @en_rest_wait_val: transition delay value for receiving enr ack signal + * @en_few_wait_val: transition delay value for receiving enf ack signal + * @clk_dis_wait_val: transition delay value for halting clock * @resets: ids of resets associated with this gdsc * @reset_count: number of @resets * @rcdev: reset controller @@ -35,6 +38,9 @@ struct gdsc { unsigned int clamp_io_ctrl; unsigned int *cxcs; unsigned int cxc_count; + unsigned int en_rest_wait_val; + unsigned int en_few_wait_val; + unsigned int clk_dis_wait_val; const u8 pwrsts; /* Powerdomain allowable state bitfields */ #define PWRSTS_OFF BIT(0) diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c index 75ca855e720d..6e5440841d1e 100644 --- a/drivers/clk/rockchip/clk-rk3568.c +++ b/drivers/clk/rockchip/clk-rk3568.c @@ -1038,13 +1038,13 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = { RK3568_CLKGATE_CON(20), 8, GFLAGS), GATE(HCLK_VOP, "hclk_vop", "hclk_vo", 0, RK3568_CLKGATE_CON(20), 9, GFLAGS), - COMPOSITE(DCLK_VOP0, "dclk_vop0", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + COMPOSITE(DCLK_VOP0, "dclk_vop0", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT, RK3568_CLKSEL_CON(39), 10, 2, MFLAGS, 0, 8, DFLAGS, RK3568_CLKGATE_CON(20), 10, GFLAGS), - COMPOSITE(DCLK_VOP1, "dclk_vop1", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + COMPOSITE(DCLK_VOP1, "dclk_vop1", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT, RK3568_CLKSEL_CON(40), 10, 2, MFLAGS, 0, 8, DFLAGS, RK3568_CLKGATE_CON(20), 11, GFLAGS), - COMPOSITE(DCLK_VOP2, "dclk_vop2", hpll_vpll_gpll_cpll_p, 0, + COMPOSITE(DCLK_VOP2, "dclk_vop2", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT, RK3568_CLKSEL_CON(41), 10, 2, MFLAGS, 0, 8, DFLAGS, RK3568_CLKGATE_CON(20), 12, GFLAGS), GATE(CLK_VOP_PWM, "clk_vop_pwm", "xin24m", 0, diff --git 
a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c index b7be7e11b0df..bb8a844309bf 100644 --- a/drivers/clk/rockchip/clk.c +++ b/drivers/clk/rockchip/clk.c @@ -180,6 +180,7 @@ static void rockchip_fractional_approximation(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate, unsigned long *m, unsigned long *n) { + struct clk_fractional_divider *fd = to_clk_fd(hw); unsigned long p_rate, p_parent_rate; struct clk_hw *p_parent; @@ -190,6 +191,8 @@ static void rockchip_fractional_approximation(struct clk_hw *hw, *parent_rate = p_parent_rate; } + fd->flags |= CLK_FRAC_DIVIDER_POWER_OF_TWO_PS; + clk_fractional_divider_general_approximation(hw, rate, parent_rate, m, n); } diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c index 74c1d894cca8..219c80653dbd 100644 --- a/drivers/clk/tegra/clk-tegra124-emc.c +++ b/drivers/clk/tegra/clk-tegra124-emc.c @@ -198,6 +198,7 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra) tegra->emc = platform_get_drvdata(pdev); if (!tegra->emc) { + put_device(&pdev->dev); pr_err("%s: cannot find EMC driver\n", __func__); return NULL; } diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 3da33c786d77..29eafab4353e 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -131,7 +131,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops) void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) { struct ti_dt_clk *c; - struct device_node *node, *parent; + struct device_node *node, *parent, *child; struct clk *clk; struct of_phandle_args clkspec; char buf[64]; @@ -171,10 +171,13 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) node = of_find_node_by_name(NULL, buf); if (num_args && compat_mode) { parent = node; - node = of_get_child_by_name(parent, "clock"); - if (!node) - node = of_get_child_by_name(parent, "clk"); - of_node_put(parent); + child = of_get_child_by_name(parent, "clock"); + if (!child) + child = of_get_child_by_name(parent, "clk"); + if (child) { + of_node_put(parent); + node = child; + } } clkspec.np = node; diff --git a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c index 5319cd380480..3bc55ab75314 100644 --- a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c +++ b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c @@ -24,6 +24,7 @@ struct clk_hw *uniphier_clk_register_fixed_rate(struct device *dev, init.name = name; init.ops = &clk_fixed_rate_ops; + init.flags = 0; init.parent_names = NULL; init.num_parents = 0; diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index eb596ff9e7bb..279ddff81ab4 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c @@ -229,8 +229,10 @@ static int __init parse_pmtmr(char *arg) int ret; ret = kstrtouint(arg, 16, &base); - if (ret) - return ret; + if (ret) { + pr_warn("PMTMR: invalid 'pmtmr=' value: '%s'\n", arg); + return 1; + } pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport, base); diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 5e3e96d3d1b9..cc2a961ddd3b 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -504,11 +504,14 @@ static int exynos4_mct_dying_cpu(unsigned int cpu) return 0; } -static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base) +static int __init exynos4_timer_resources(struct device_node *np) { - int err, cpu; struct clk *mct_clk, *tick_clk; + reg_base = of_iomap(np, 0); + if 
(!reg_base) + panic("%s: unable to ioremap mct address space\n", __func__); + tick_clk = of_clk_get_by_name(np, "fin_pll"); if (IS_ERR(tick_clk)) panic("%s: unable to determine tick clock rate\n", __func__); @@ -519,9 +522,32 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * panic("%s: unable to retrieve mct clock instance\n", __func__); clk_prepare_enable(mct_clk); - reg_base = base; - if (!reg_base) - panic("%s: unable to ioremap mct address space\n", __func__); + return 0; +} + +static int __init exynos4_timer_interrupts(struct device_node *np, + unsigned int int_type) +{ + int nr_irqs, i, err, cpu; + + mct_int_type = int_type; + + /* This driver uses only one global timer interrupt */ + mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); + + /* + * Find out the number of local irqs specified. The local + * timer irqs are specified after the four global timer + * irqs are specified. + */ + nr_irqs = of_irq_count(np); + if (nr_irqs > ARRAY_SIZE(mct_irqs)) { + pr_err("exynos-mct: too many (%d) interrupts configured in DT\n", + nr_irqs); + nr_irqs = ARRAY_SIZE(mct_irqs); + } + for (i = MCT_L0_IRQ; i < nr_irqs; i++) + mct_irqs[i] = irq_of_parse_and_map(np, i); if (mct_int_type == MCT_INT_PPI) { @@ -532,11 +558,14 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * mct_irqs[MCT_L0_IRQ], err); } else { for_each_possible_cpu(cpu) { - int mct_irq = mct_irqs[MCT_L0_IRQ + cpu]; + int mct_irq; struct mct_clock_event_device *pcpu_mevt = per_cpu_ptr(&percpu_mct_tick, cpu); pcpu_mevt->evt.irq = -1; + if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs)) + break; + mct_irq = mct_irqs[MCT_L0_IRQ + cpu]; irq_set_status_flags(mct_irq, IRQ_NOAUTOEN); if (request_irq(mct_irq, @@ -581,24 +610,13 @@ out_irq: static int __init mct_init_dt(struct device_node *np, unsigned int int_type) { - u32 nr_irqs, i; int ret; - mct_int_type = int_type; - - /* This driver uses only one global timer interrupt */ - mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); - - /* - * Find out the number of local irqs specified. The local - * timer irqs are specified after the four global timer - * irqs are specified. - */ - nr_irqs = of_irq_count(np); - for (i = MCT_L0_IRQ; i < nr_irqs; i++) - mct_irqs[i] = irq_of_parse_and_map(np, i); + ret = exynos4_timer_resources(np); + if (ret) + return ret; - ret = exynos4_timer_resources(np, of_iomap(np, 0)); + ret = exynos4_timer_interrupts(np, int_type); if (ret) return ret; diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c index cfa4ec7ef396..790d2c9b42a7 100644 --- a/drivers/clocksource/timer-microchip-pit64b.c +++ b/drivers/clocksource/timer-microchip-pit64b.c @@ -165,7 +165,7 @@ static u64 mchp_pit64b_clksrc_read(struct clocksource *cs) return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); } -static u64 mchp_pit64b_sched_read_clk(void) +static u64 notrace mchp_pit64b_sched_read_clk(void) { return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); } diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c index 529cc6a51cdb..c3f54d9912be 100644 --- a/drivers/clocksource/timer-of.c +++ b/drivers/clocksource/timer-of.c @@ -157,9 +157,9 @@ static __init int timer_of_base_init(struct device_node *np, of_base->base = of_base->name ? 
of_io_request_and_map(np, of_base->index, of_base->name) : of_iomap(np, of_base->index); - if (IS_ERR(of_base->base)) { - pr_err("Failed to iomap (%s)\n", of_base->name); - return PTR_ERR(of_base->base); + if (IS_ERR_OR_NULL(of_base->base)) { + pr_err("Failed to iomap (%s:%s)\n", np->name, of_base->name); + return of_base->base ? PTR_ERR(of_base->base) : -ENOMEM; } return 0; diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index 5c40ca1d4740..2737407ff069 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -241,8 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void) bool quirk_unreliable_oscillator = false; /* Quirk unreliable 32 KiHz oscillator with incomplete dts */ - if (of_machine_is_compatible("ti,omap3-beagle-ab4") || - of_machine_is_compatible("timll,omap3-devkit8000")) { + if (of_machine_is_compatible("ti,omap3-beagle-ab4")) { quirk_unreliable_oscillator = true; counter_32k = -ENODEV; } @@ -695,9 +694,9 @@ static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa) return 0; } - if (pa == 0x48034000) /* dra7 dmtimer3 */ + if (pa == 0x4882c000) /* dra7 dmtimer15 */ return dmtimer_percpu_timer_init(np, 0); - else if (pa == 0x48036000) /* dra7 dmtimer4 */ + else if (pa == 0x4882e000) /* dra7 dmtimer16 */ return dmtimer_percpu_timer_init(np, 1); return 0; diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index d4c27022b9c9..e0ff09d66c96 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -303,52 +303,48 @@ static u64 cppc_get_dmi_max_khz(void) /* * If CPPC lowest_freq and nominal_freq registers are exposed then we can - * use them to convert perf to freq and vice versa - * - * If the perf/freq point lies between Nominal and Lowest, we can treat - * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line - * and extrapolate the rest - * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion + * use them to convert perf to freq and vice versa. 
The conversion is + * extrapolated as an affine function passing by the 2 points: + * - (Low perf, Low freq) + * - (Nominal perf, Nominal perf) */ static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data, unsigned int perf) { struct cppc_perf_caps *caps = &cpu_data->perf_caps; + s64 retval, offset = 0; static u64 max_khz; u64 mul, div; if (caps->lowest_freq && caps->nominal_freq) { - if (perf >= caps->nominal_perf) { - mul = caps->nominal_freq; - div = caps->nominal_perf; - } else { - mul = caps->nominal_freq - caps->lowest_freq; - div = caps->nominal_perf - caps->lowest_perf; - } + mul = caps->nominal_freq - caps->lowest_freq; + div = caps->nominal_perf - caps->lowest_perf; + offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div); } else { if (!max_khz) max_khz = cppc_get_dmi_max_khz(); mul = max_khz; div = caps->highest_perf; } - return (u64)perf * mul / div; + + retval = offset + div64_u64(perf * mul, div); + if (retval >= 0) + return retval; + return 0; } static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data, unsigned int freq) { struct cppc_perf_caps *caps = &cpu_data->perf_caps; + s64 retval, offset = 0; static u64 max_khz; u64 mul, div; if (caps->lowest_freq && caps->nominal_freq) { - if (freq >= caps->nominal_freq) { - mul = caps->nominal_perf; - div = caps->nominal_freq; - } else { - mul = caps->lowest_perf; - div = caps->lowest_freq; - } + mul = caps->nominal_perf - caps->lowest_perf; + div = caps->nominal_freq - caps->lowest_freq; + offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div); } else { if (!max_khz) max_khz = cppc_get_dmi_max_khz(); @@ -356,7 +352,10 @@ static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data, div = max_khz; } - return (u64)freq * mul / div; + retval = offset + div64_u64(freq * mul, div); + if (retval >= 0) + return retval; + return 0; } static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index e15c3bc17a55..8a2c6b58b652 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -335,6 +335,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work) static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn); +#define CPPC_MAX_PERF U8_MAX + static void intel_pstate_set_itmt_prio(int cpu) { struct cppc_perf_caps cppc_perf; @@ -346,6 +348,14 @@ static void intel_pstate_set_itmt_prio(int cpu) return; /* + * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff. + * In this case we can't use CPPC.highest_perf to enable ITMT. + * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide. + */ + if (cppc_perf.highest_perf == CPPC_MAX_PERF) + cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached)); + + /* * The priorities can be set regardless of whether or not * sched_set_itmt_support(true) has been called and it is valid to * update them at any time after it has been called. 
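The cppc_cpufreq change above replaces the old two-segment perf/frequency scaling with a single affine mapping through the two points (lowest_perf, lowest_freq) and (nominal_perf, nominal_freq), clamping negative results to zero. A small standalone sketch of that arithmetic with made-up numbers; the helper below is illustrative, not the driver function:

#include <stdio.h>
#include <stdint.h>

static uint64_t perf_to_khz(uint64_t perf,
			    uint64_t lowest_perf, uint64_t lowest_freq,
			    uint64_t nominal_perf, uint64_t nominal_freq)
{
	uint64_t mul = nominal_freq - lowest_freq;
	uint64_t div = nominal_perf - lowest_perf;
	/* offset is where the line crosses perf = 0; it can be negative */
	int64_t offset = (int64_t)nominal_freq - (int64_t)((nominal_perf * mul) / div);
	int64_t khz = offset + (int64_t)((perf * mul) / div);

	return khz > 0 ? (uint64_t)khz : 0;
}

int main(void)
{
	/* e.g. lowest: perf 10 -> 400000 kHz, nominal: perf 40 -> 1600000 kHz */
	printf("%llu kHz\n",
	       (unsigned long long)perf_to_khz(25, 10, 400000, 40, 1600000));
	return 0;
}

With these sample capabilities the line passes through the origin, so perf 25 maps to 1000000 kHz; with capabilities whose line does not pass through zero, the offset term is what the old per-segment code effectively got wrong near the nominal point.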
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index d1744b5d9619..6dfa86971a75 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -130,7 +130,7 @@ static void get_krait_bin_format_b(struct device *cpu_dev, } /* Check PVS_BLOW_STATUS */ - pte_efuse = *(((u32 *)buf) + 4); + pte_efuse = *(((u32 *)buf) + 1); pte_efuse &= BIT(21); if (pte_efuse) { dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs); diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 54ae8d16e493..35e3cadccac2 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -11,6 +11,7 @@ * You could find a link for the datasheet in Documentation/arm/sunxi.rst */ +#include <linux/bottom_half.h> #include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/io.h> @@ -283,7 +284,9 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq) flow = rctx->flow; err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm)); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 88194718a806..859b7522faaa 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -9,6 +9,7 @@ * * You could find the datasheet in Documentation/arm/sunxi.rst */ +#include <linux/bottom_half.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/scatterlist.h> @@ -414,6 +415,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) theend: kfree(buf); kfree(result); + local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 9ef1c85c4aaa..554e400d41ca 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -11,6 +11,7 @@ * You could find a link for the datasheet in Documentation/arm/sunxi.rst */ +#include <linux/bottom_half.h> #include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/io.h> @@ -274,7 +275,9 @@ static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *ar struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); err = sun8i_ss_cipher(breq); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 80e89066dbd1..319fe3279a71 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -30,6 +30,8 @@ static const struct ss_variant ss_a80_variant = { .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, }, + .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, + }, .op_mode = { SS_OP_ECB, SS_OP_CBC, }, .ss_clks = { diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 3c073eb3db03..1a71ed49d233 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -9,6 +9,7 @@ * * 
You could find the datasheet in Documentation/arm/sunxi.rst */ +#include <linux/bottom_half.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/scatterlist.h> @@ -442,6 +443,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) theend: kfree(pad); kfree(result); + local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index c6865cbd334b..e79514fce731 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -265,7 +265,9 @@ static int meson_handle_cipher_request(struct crypto_engine *engine, struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); err = meson_cipher(breq); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index d718db224be4..7d4b4ad1db1f 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -632,6 +632,20 @@ static int ccp_terminate_all(struct dma_chan *dma_chan) return 0; } +static void ccp_dma_release(struct ccp_device *ccp) +{ + struct ccp_dma_chan *chan; + struct dma_chan *dma_chan; + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) { + chan = ccp->ccp_dma_chan + i; + dma_chan = &chan->dma_chan; + tasklet_kill(&chan->cleanup_tasklet); + list_del_rcu(&dma_chan->device_node); + } +} + int ccp_dmaengine_register(struct ccp_device *ccp) { struct ccp_dma_chan *chan; @@ -736,6 +750,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp) return 0; err_reg: + ccp_dma_release(ccp); kmem_cache_destroy(ccp->dma_desc_cache); err_cache: @@ -752,6 +767,7 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp) return; dma_async_device_unregister(dma_dev); + ccp_dma_release(ccp); kmem_cache_destroy(ccp->dma_desc_cache); kmem_cache_destroy(ccp->dma_cmd_cache); diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index e2806ca3300a..5040726fc119 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -241,7 +241,7 @@ static int __sev_platform_init_locked(int *error) struct psp_device *psp = psp_master; struct sev_data_init data; struct sev_device *sev; - int psp_ret, rc = 0; + int psp_ret = -1, rc = 0; if (!psp || !psp->sev_data) return -ENODEV; diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index a5e041d9d2cf..11e0278c8631 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -258,6 +258,13 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg, { int ret = 0; + if (!nbytes) { + *mapped_nents = 0; + *lbytes = 0; + *nents = 0; + return 0; + } + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); if (*nents > max_sg_nents) { *nents = 0; diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 78833491f534..309da6334a0a 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -257,8 +257,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm) &ctx_p->user.key_dma_addr); /* Free key buffer in context */ - kfree_sensitive(ctx_p->user.key); dev_dbg(dev, "Free key buffer in context. 
key=@%p\n", ctx_p->user.key); + kfree_sensitive(ctx_p->user.key); } struct tdes_keys { diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c index c1c2b1d86663..f2be0a7d7f7a 100644 --- a/drivers/crypto/gemini/sl3516-ce-cipher.c +++ b/drivers/crypto/gemini/sl3516-ce-cipher.c @@ -264,7 +264,9 @@ static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *a struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); err = sl3516_ce_cipher(breq); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index ff1122153fbe..b616d2d8e773 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -4107,7 +4107,7 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) static int qm_vf_read_qos(struct hisi_qm *qm) { int cnt = 0; - int ret; + int ret = -EINVAL; /* reset mailbox qos val */ qm->mb_qos = 0; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 6a45bd23b363..090920ed50c8 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -2284,9 +2284,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, struct aead_request *aead_req, bool encrypt) { - struct aead_request *subreq = aead_request_ctx(aead_req); struct sec_auth_ctx *a_ctx = &ctx->a_ctx; struct device *dev = ctx->dev; + struct aead_request *subreq; + int ret; /* Kunpeng920 aead mode not support input 0 size */ if (!a_ctx->fallback_aead_tfm) { @@ -2294,6 +2295,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, return -EINVAL; } + subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL); + if (!subreq) + return -ENOMEM; + aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm); aead_request_set_callback(subreq, aead_req->base.flags, aead_req->base.complete, aead_req->base.data); @@ -2301,8 +2306,13 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, aead_req->cryptlen, aead_req->iv); aead_request_set_ad(subreq, aead_req->assoclen); - return encrypt ? 
crypto_aead_encrypt(subreq) : - crypto_aead_decrypt(subreq); + if (encrypt) + ret = crypto_aead_encrypt(subreq); + else + ret = crypto_aead_decrypt(subreq); + aead_request_free(subreq); + + return ret; } static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 90551bf38b52..03d239cfdf8c 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -443,9 +443,11 @@ static int sec_engine_init(struct hisi_qm *qm) writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); - /* Enable sm4 extra mode, as ctr/ecb */ - writel_relaxed(SEC_BD_ERR_CHK_EN0, - qm->io_base + SEC_BD_ERR_CHK_EN_REG0); + /* HW V2 enable sm4 extra mode, as ctr/ecb */ + if (qm->ver < QM_HW_V3) + writel_relaxed(SEC_BD_ERR_CHK_EN0, + qm->io_base + SEC_BD_ERR_CHK_EN_REG0); + /* Enable sm4 xts mode multiple iv */ writel_relaxed(SEC_BD_ERR_CHK_EN1, qm->io_base + SEC_BD_ERR_CHK_EN_REG1); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c index 877a948469bd..570074e23b60 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c @@ -1634,16 +1634,13 @@ static inline int cpt_register_algs(void) { int i, err = 0; - if (!IS_ENABLED(CONFIG_DM_CRYPT)) { - for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++) - otx2_cpt_skciphers[i].base.cra_flags &= - ~CRYPTO_ALG_DEAD; - - err = crypto_register_skciphers(otx2_cpt_skciphers, - ARRAY_SIZE(otx2_cpt_skciphers)); - if (err) - return err; - } + for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++) + otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD; + + err = crypto_register_skciphers(otx2_cpt_skciphers, + ARRAY_SIZE(otx2_cpt_skciphers)); + if (err) + return err; for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++) otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD; diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index d19e5ffb5104..d6f9e2fe863d 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -331,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128); } - for_each_sg(req->src, src, sg_nents(src), i) { + for_each_sg(req->src, src, sg_nents(req->src), i) { src_buf = sg_virt(src); len = sg_dma_len(src); tlen += len; diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c index 359fb7989dfb..8fd44703115f 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c @@ -52,6 +52,13 @@ static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err; + /* Temporarily set the number of crypto instances to zero to avoid + * registering the crypto algorithms. + * This will be removed when the algorithms will support the + * CRYPTO_TFM_REQ_MAY_BACKLOG flag + */ + instances = 0; + for (i = 0; i < instances; i++) { val = i; bank = i * 2; diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index ece6776fbd53..3efbb3883601 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -136,6 +136,13 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err; + /* Temporarily set the number of crypto instances to zero to avoid + * registering the crypto algorithms. 
+ * This will be removed when the algorithms will support the + * CRYPTO_TFM_REQ_MAY_BACKLOG flag + */ + instances = 0; + for (i = 0; i < instances; i++) { val = i; snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index 99ba8d51d102..11f30fd48c14 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -8,6 +8,7 @@ #include <linux/clk.h> #include <linux/crypto.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -43,16 +44,19 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) { unsigned int currsize = 0; u32 val; + int ret; /* read random data from hardware */ do { - val = readl_relaxed(rng->base + PRNG_STATUS); - if (!(val & PRNG_STATUS_DATA_AVAIL)) - break; + ret = readl_poll_timeout(rng->base + PRNG_STATUS, val, + val & PRNG_STATUS_DATA_AVAIL, + 200, 10000); + if (ret) + return ret; val = readl_relaxed(rng->base + PRNG_DATA_OUT); if (!val) - break; + return -EINVAL; if ((max - currsize) >= WORD_SZ) { memcpy(data, &val, WORD_SZ); @@ -61,11 +65,10 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) } else { /* copy only remaining bytes */ memcpy(data, &val, max - currsize); - break; } } while (currsize < max); - return currsize; + return 0; } static int qcom_rng_generate(struct crypto_rng *tfm, @@ -87,7 +90,7 @@ static int qcom_rng_generate(struct crypto_rng *tfm, mutex_unlock(&rng->lock); clk_disable_unprepare(rng->clk); - return 0; + return ret; } static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed, diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c index 1cece1a7d3f0..5bbf0d2722e1 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c @@ -506,7 +506,6 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { .exit = rk_ablk_exit_tfm, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, .setkey = rk_tdes_setkey, .encrypt = rk_des3_ede_ecb_encrypt, .decrypt = rk_des3_ede_ecb_decrypt, diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig index c85fab7ef0bd..b2c28b87f14b 100644 --- a/drivers/crypto/vmx/Kconfig +++ b/drivers/crypto/vmx/Kconfig @@ -2,7 +2,11 @@ config CRYPTO_DEV_VMX_ENCRYPT tristate "Encryption acceleration support on P8 CPU" depends on CRYPTO_DEV_VMX + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_CTR select CRYPTO_GHASH + select CRYPTO_XTS default m help Support for VMX cryptographic acceleration instructions on Power8 CPU. 
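The qcom-rng hunk above swaps an open-coded status check for readl_poll_timeout() from <linux/iopoll.h>, so an empty FIFO now surfaces as -ETIMEDOUT instead of quietly returning fewer bytes than requested. As a rough mental model (simplified; the real macro also has atomic and relaxed variants and tracks elapsed time with ktime), the call behaves like the bounded polling loop sketched below in plain user-space C with a faked register read:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PRNG_STATUS_DATA_AVAIL 0x1	/* bit meaning taken from the driver */

/* Stand-in for readl_relaxed() on PRNG_STATUS: pretend data shows up
 * after a few polls. */
static uint32_t fake_read_status(void)
{
	static int calls;

	return ++calls > 3 ? PRNG_STATUS_DATA_AVAIL : 0;
}

/* Shape of readl_poll_timeout(addr, val, cond, delay_us, timeout_us):
 * re-read until the condition holds, fail with -ETIMEDOUT otherwise. */
static int wait_for_prng_data(unsigned long delay_us, unsigned long timeout_us)
{
	unsigned long waited = 0;

	for (;;) {
		uint32_t val = fake_read_status();

		if (val & PRNG_STATUS_DATA_AVAIL)
			return 0;
		if (waited >= timeout_us)
			return -ETIMEDOUT;	/* what qcom_rng_read() now propagates */
		usleep(delay_us);
		waited += delay_us;
	}
}

int main(void)
{
	/* Same parameters as the driver: poll every 200us, give up after 10ms. */
	printf("%d\n", wait_for_prng_data(200, 10000));
	return 0;
}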
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index 41de4a136ecd..2e7027a3fef3 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -35,7 +35,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, struct cxl_component_reg_map *map) { int cap, cap_count; - u64 cap_array; + u32 cap_array; *map = (struct cxl_component_reg_map) { 0 }; @@ -45,11 +45,11 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, */ base += CXL_CM_OFFSET; - cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET); + cap_array = readl(base + CXL_CM_CAP_HDR_OFFSET); if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) { dev_err(dev, - "Couldn't locate the CXL.cache and CXL.mem capability array header./n"); + "Couldn't locate the CXL.cache and CXL.mem capability array header.\n"); return; } diff --git a/drivers/dax/super.c b/drivers/dax/super.c index fc89e91beea7..7610e4a9ac4e 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -678,6 +678,7 @@ static int dax_fs_init(void) static void dax_fs_exit(void) { kern_unmount(dax_mnt); + rcu_barrier(); kmem_cache_destroy(dax_cache); } diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index 0c05b79870f9..83f02bd51dda 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -124,10 +124,11 @@ static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, struct cma_heap_buffer *buffer = dmabuf->priv; struct dma_heap_attachment *a; + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); - mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { if (!a->mapped) continue; @@ -144,10 +145,11 @@ static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, struct cma_heap_buffer *buffer = dmabuf->priv; struct dma_heap_attachment *a; + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) flush_kernel_vmap_range(buffer->vaddr, buffer->len); - mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { if (!a->mapped) continue; diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index c57a609db75b..e7330684d3b8 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -190,6 +190,10 @@ static long udmabuf_create(struct miscdevice *device, if (ubuf->pagecount > pglimit) goto err; } + + if (!ubuf->pagecount) + goto err; + ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages), GFP_KERNEL); if (!ubuf->pages) { diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 8177aed16006..177a537971a1 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1450,7 +1450,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); - struct at_xdmac_desc *desc, *_desc; + struct at_xdmac_desc *desc, *_desc, *iter; struct list_head *descs_list; enum dma_status ret; int residue, retry; @@ -1565,11 +1565,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, * microblock. 
*/ descs_list = &desc->descs_list; - list_for_each_entry_safe(desc, _desc, descs_list, desc_node) { - dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); - residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; - if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) + list_for_each_entry_safe(iter, _desc, descs_list, desc_node) { + dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg); + residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth; + if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) { + desc = iter; break; + } } residue += cur_ubc << dwidth; diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c index 329fc2e57b70..b5b8f8181e77 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-core.c +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c @@ -415,8 +415,11 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); /* Linked list */ #ifdef CONFIG_64BIT - SET_CH_64(dw, chan->dir, chan->id, llp.reg, - chunk->ll_region.paddr); + /* llp is not aligned on 64bit -> keep 32bit accesses */ + SET_CH_32(dw, chan->dir, chan->id, llp.lsb, + lower_32_bits(chunk->ll_region.paddr)); + SET_CH_32(dw, chan->dir, chan->id, llp.msb, + upper_32_bits(chunk->ll_region.paddr)); #else /* CONFIG_64BIT */ SET_CH_32(dw, chan->dir, chan->id, llp.lsb, lower_32_bits(chunk->ll_region.paddr)); diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c index c855a0e4f9ff..f680e9b40bf7 100644 --- a/drivers/dma/hisi_dma.c +++ b/drivers/dma/hisi_dma.c @@ -30,7 +30,7 @@ #define HISI_DMA_MODE 0x217c #define HISI_DMA_OFFSET 0x100 -#define HISI_DMA_MSI_NUM 30 +#define HISI_DMA_MSI_NUM 32 #define HISI_DMA_CHAN_NUM 30 #define HISI_DMA_Q_DEPTH_VAL 1024 diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index b468ca36d3a0..e622245c9380 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -406,7 +406,6 @@ static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq) { lockdep_assert_held(&wq->wq_lock); - idxd_wq_disable_cleanup(wq); wq->size = 0; wq->group = NULL; } @@ -700,11 +699,16 @@ static void idxd_groups_clear_state(struct idxd_device *idxd) memset(&group->grpcfg, 0, sizeof(group->grpcfg)); group->num_engines = 0; group->num_wqs = 0; - group->use_token_limit = false; - group->tokens_allowed = 0; - group->tokens_reserved = 0; - group->tc_a = -1; - group->tc_b = -1; + group->use_rdbuf_limit = false; + group->rdbufs_allowed = 0; + group->rdbufs_reserved = 0; + if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) { + group->tc_a = 1; + group->tc_b = 1; + } else { + group->tc_a = -1; + group->tc_b = -1; + } } } @@ -718,14 +722,17 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd) if (wq->state == IDXD_WQ_ENABLED) { idxd_wq_disable_cleanup(wq); - idxd_wq_device_reset_cleanup(wq); wq->state = IDXD_WQ_DISABLED; } + idxd_wq_device_reset_cleanup(wq); } } void idxd_device_clear_state(struct idxd_device *idxd) { + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return; + idxd_groups_clear_state(idxd); idxd_engines_clear_state(idxd); idxd_device_wqs_clear_state(idxd); @@ -800,10 +807,10 @@ static int idxd_groups_config_write(struct idxd_device *idxd) int i; struct device *dev = &idxd->pdev->dev; - /* Setup bandwidth token limit */ - if (idxd->token_limit) { + /* Setup bandwidth rdbuf limit */ + if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) { reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); - reg.token_limit = idxd->token_limit; + reg.rdbuf_limit = idxd->rdbuf_limit; iowrite32(reg.bits, idxd->reg_base + 
IDXD_GENCFG_OFFSET); } @@ -944,13 +951,12 @@ static void idxd_group_flags_setup(struct idxd_device *idxd) group->tc_b = group->grpcfg.flags.tc_b = 1; else group->grpcfg.flags.tc_b = group->tc_b; - group->grpcfg.flags.use_token_limit = group->use_token_limit; - group->grpcfg.flags.tokens_reserved = group->tokens_reserved; - if (group->tokens_allowed) - group->grpcfg.flags.tokens_allowed = - group->tokens_allowed; + group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit; + group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved; + if (group->rdbufs_allowed) + group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed; else - group->grpcfg.flags.tokens_allowed = idxd->max_tokens; + group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs; } } @@ -1145,7 +1151,7 @@ int idxd_device_load_config(struct idxd_device *idxd) int i, rc; reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); - idxd->token_limit = reg.token_limit; + idxd->rdbuf_limit = reg.rdbuf_limit; for (i = 0; i < idxd->max_groups; i++) { struct idxd_group *group = idxd->groups[i]; diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index bfcb03329f77..833af18a99ee 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -84,9 +84,9 @@ struct idxd_group { int id; int num_engines; int num_wqs; - bool use_token_limit; - u8 tokens_allowed; - u8 tokens_reserved; + bool use_rdbuf_limit; + u8 rdbufs_allowed; + u8 rdbufs_reserved; int tc_a; int tc_b; }; @@ -278,11 +278,11 @@ struct idxd_device { u32 max_batch_size; int max_groups; int max_engines; - int max_tokens; + int max_rdbufs; int max_wqs; int max_wq_size; - int token_limit; - int nr_tokens; /* non-reserved tokens */ + int rdbuf_limit; + int nr_rdbufs; /* non-reserved read buffers */ unsigned int wqcfg_size; union sw_err_reg sw_err; diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 7bf03f371ce1..6263d9825250 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -464,9 +464,9 @@ static void idxd_read_caps(struct idxd_device *idxd) dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits); idxd->max_groups = idxd->hw.group_cap.num_groups; dev_dbg(dev, "max groups: %u\n", idxd->max_groups); - idxd->max_tokens = idxd->hw.group_cap.total_tokens; - dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens); - idxd->nr_tokens = idxd->max_tokens; + idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs; + dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs); + idxd->nr_rdbufs = idxd->max_rdbufs; /* read engine capabilities */ idxd->hw.engine_cap.bits = diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index 97ffb06de9b0..c0961c1ac161 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -65,9 +65,9 @@ union wq_cap_reg { union group_cap_reg { struct { u64 num_groups:8; - u64 total_tokens:8; - u64 token_en:1; - u64 token_limit:1; + u64 total_rdbufs:8; /* formerly total_tokens */ + u64 rdbuf_ctrl:1; /* formerly token_en */ + u64 rdbuf_limit:1; /* formerly token_limit */ u64 rsvd:46; }; u64 bits; @@ -111,7 +111,7 @@ union offsets_reg { #define IDXD_GENCFG_OFFSET 0x80 union gencfg_reg { struct { - u32 token_limit:8; + u32 rdbuf_limit:8; u32 rsvd:4; u32 user_int_en:1; u32 rsvd2:19; @@ -288,10 +288,10 @@ union group_flags { u32 tc_a:3; u32 tc_b:3; u32 rsvd:1; - u32 use_token_limit:1; - u32 tokens_reserved:8; + u32 use_rdbuf_limit:1; + u32 rdbufs_reserved:8; u32 rsvd2:4; - u32 tokens_allowed:8; + u32 rdbufs_allowed:8; u32 rsvd3:4; }; u32 bits; diff --git a/drivers/dma/idxd/sysfs.c 
b/drivers/dma/idxd/sysfs.c index a9025be940db..33d94c67fedb 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -99,17 +99,17 @@ struct device_type idxd_engine_device_type = { /* Group attributes */ -static void idxd_set_free_tokens(struct idxd_device *idxd) +static void idxd_set_free_rdbufs(struct idxd_device *idxd) { - int i, tokens; + int i, rdbufs; - for (i = 0, tokens = 0; i < idxd->max_groups; i++) { + for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) { struct idxd_group *g = idxd->groups[i]; - tokens += g->tokens_reserved; + rdbufs += g->rdbufs_reserved; } - idxd->nr_tokens = idxd->max_tokens - tokens; + idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs; } static ssize_t group_tokens_reserved_show(struct device *dev, @@ -118,7 +118,7 @@ static ssize_t group_tokens_reserved_show(struct device *dev, { struct idxd_group *group = confdev_to_group(dev); - return sysfs_emit(buf, "%u\n", group->tokens_reserved); + return sysfs_emit(buf, "%u\n", group->rdbufs_reserved); } static ssize_t group_tokens_reserved_store(struct device *dev, @@ -143,14 +143,14 @@ static ssize_t group_tokens_reserved_store(struct device *dev, if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; - if (val > idxd->max_tokens) + if (val > idxd->max_rdbufs) return -EINVAL; - if (val > idxd->nr_tokens + group->tokens_reserved) + if (val > idxd->nr_rdbufs + group->rdbufs_reserved) return -EINVAL; - group->tokens_reserved = val; - idxd_set_free_tokens(idxd); + group->rdbufs_reserved = val; + idxd_set_free_rdbufs(idxd); return count; } @@ -164,7 +164,7 @@ static ssize_t group_tokens_allowed_show(struct device *dev, { struct idxd_group *group = confdev_to_group(dev); - return sysfs_emit(buf, "%u\n", group->tokens_allowed); + return sysfs_emit(buf, "%u\n", group->rdbufs_allowed); } static ssize_t group_tokens_allowed_store(struct device *dev, @@ -190,10 +190,10 @@ static ssize_t group_tokens_allowed_store(struct device *dev, return -EPERM; if (val < 4 * group->num_engines || - val > group->tokens_reserved + idxd->nr_tokens) + val > group->rdbufs_reserved + idxd->nr_rdbufs) return -EINVAL; - group->tokens_allowed = val; + group->rdbufs_allowed = val; return count; } @@ -207,7 +207,7 @@ static ssize_t group_use_token_limit_show(struct device *dev, { struct idxd_group *group = confdev_to_group(dev); - return sysfs_emit(buf, "%u\n", group->use_token_limit); + return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit); } static ssize_t group_use_token_limit_store(struct device *dev, @@ -232,10 +232,10 @@ static ssize_t group_use_token_limit_store(struct device *dev, if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; - if (idxd->token_limit == 0) + if (idxd->rdbuf_limit == 0) return -EPERM; - group->use_token_limit = !!val; + group->use_rdbuf_limit = !!val; return count; } @@ -842,6 +842,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr u64 xfer_size; int rc; + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + if (wq->state != IDXD_WQ_DISABLED) return -EPERM; @@ -876,6 +879,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu u64 batch_size; int rc; + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + if (wq->state != IDXD_WQ_DISABLED) return -EPERM; @@ -1161,7 +1167,7 @@ static ssize_t max_tokens_show(struct device *dev, { struct idxd_device *idxd = confdev_to_idxd(dev); - return sysfs_emit(buf, "%u\n", idxd->max_tokens); + return sysfs_emit(buf, "%u\n", idxd->max_rdbufs); } static DEVICE_ATTR_RO(max_tokens); 
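The idxd sysfs changes above are largely a token-to-read-buffer rename, but the reservation arithmetic they carry over is worth spelling out: the free pool is the device total minus every group's reservation, and a group may only grow its reservation by what is free plus what it already holds. A stand-alone sketch of that invariant, using hypothetical struct names rather than the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct idxd_group / struct idxd_device. */
struct grp { uint32_t rdbufs_reserved; };
struct dev { uint32_t max_rdbufs; uint32_t nr_rdbufs; /* free, non-reserved */ };

/* Mirrors idxd_set_free_rdbufs(): free = total - sum of all reservations. */
static void recompute_free(struct dev *d, const struct grp *g, int ngroups)
{
	uint32_t reserved = 0;

	for (int i = 0; i < ngroups; i++)
		reserved += g[i].rdbufs_reserved;
	d->nr_rdbufs = d->max_rdbufs - reserved;
}

/* Mirrors the sysfs store check: a group may claim at most free + own. */
static bool new_reservation_ok(const struct dev *d, const struct grp *g, uint32_t val)
{
	return val <= d->max_rdbufs && val <= d->nr_rdbufs + g->rdbufs_reserved;
}

int main(void)
{
	struct grp groups[2] = { { .rdbufs_reserved = 8 }, { .rdbufs_reserved = 16 } };
	struct dev d = { .max_rdbufs = 32 };

	recompute_free(&d, groups, 2);	/* free pool is 32 - 24 = 8 */
	printf("%d\n", new_reservation_ok(&d, &groups[0], 16));	/* 1: 16 <= 8 + 8 */
	printf("%d\n", new_reservation_ok(&d, &groups[0], 20));	/* 0: exceeds free + own */
	return 0;
}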
@@ -1170,7 +1176,7 @@ static ssize_t token_limit_show(struct device *dev, { struct idxd_device *idxd = confdev_to_idxd(dev); - return sysfs_emit(buf, "%u\n", idxd->token_limit); + return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit); } static ssize_t token_limit_store(struct device *dev, @@ -1191,13 +1197,13 @@ static ssize_t token_limit_store(struct device *dev, if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; - if (!idxd->hw.group_cap.token_limit) + if (!idxd->hw.group_cap.rdbuf_limit) return -EPERM; - if (val > idxd->hw.group_cap.total_tokens) + if (val > idxd->hw.group_cap.total_rdbufs) return -EINVAL; - idxd->token_limit = val; + idxd->rdbuf_limit = val; return count; } static DEVICE_ATTR_RW(token_limit); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index cacc725ca545..2300d965a3f4 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -198,12 +198,12 @@ struct sdma_script_start_addrs { s32 per_2_firi_addr; s32 mcu_2_firi_addr; s32 uart_2_per_addr; - s32 uart_2_mcu_ram_addr; + s32 uart_2_mcu_addr; s32 per_2_app_addr; s32 mcu_2_app_addr; s32 per_2_per_addr; s32 uartsh_2_per_addr; - s32 uartsh_2_mcu_ram_addr; + s32 uartsh_2_mcu_addr; s32 per_2_shp_addr; s32 mcu_2_shp_addr; s32 ata_2_mcu_addr; @@ -232,8 +232,8 @@ struct sdma_script_start_addrs { s32 mcu_2_ecspi_addr; s32 mcu_2_sai_addr; s32 sai_2_mcu_addr; - s32 uart_2_mcu_addr; - s32 uartsh_2_mcu_addr; + s32 uart_2_mcu_rom_addr; + s32 uartsh_2_mcu_rom_addr; /* End of v3 array */ s32 mcu_2_zqspi_addr; /* End of v4 array */ @@ -1780,17 +1780,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma, saddr_arr[i] = addr_arr[i]; /* - * get uart_2_mcu_addr/uartsh_2_mcu_addr rom script specially because - * they are now replaced by uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr - * to be compatible with legacy freescale/nxp sdma firmware, and they - * are located in the bottom part of sdma_script_start_addrs which are - * beyond the SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1. + * For compatibility with NXP internal legacy kernel before 4.19 which + * is based on uart ram script and mainline kernel based on uart rom + * script, both uart ram/rom scripts are present in newer sdma + * firmware. Use the rom versions if they are present (V3 or newer). 
*/ - if (addr->uart_2_mcu_addr) - sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr; - if (addr->uartsh_2_mcu_addr) - sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr; - + if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) { + if (addr->uart_2_mcu_rom_addr) + sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr; + if (addr->uartsh_2_mcu_rom_addr) + sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr; + } } static void sdma_load_firmware(const struct firmware *fw, void *context) @@ -1869,7 +1869,7 @@ static int sdma_event_remap(struct sdma_engine *sdma) u32 reg, val, shift, num_map, i; int ret = 0; - if (IS_ERR(np) || IS_ERR(gpr_np)) + if (IS_ERR(np) || !gpr_np) goto out; event_remap = of_find_property(np, propname, NULL); @@ -1917,7 +1917,7 @@ static int sdma_event_remap(struct sdma_engine *sdma) } out: - if (!IS_ERR(gpr_np)) + if (gpr_np) of_node_put(gpr_np); return ret; diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index 375e7e647df6..a1517ef1f4a0 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) unsigned int status; int ret; - ret = pm_runtime_get_sync(mtkd->ddev.dev); + ret = pm_runtime_resume_and_get(mtkd->ddev.dev); if (ret < 0) { pm_runtime_put_noidle(chan->device->dev); return ret; @@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) ret = readx_poll_timeout(readl, c->base + VFF_EN, status, !status, 10, 100); if (ret) - return ret; + goto err_pm; ret = request_irq(c->irq, mtk_uart_apdma_irq_handler, IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan); if (ret < 0) { dev_err(chan->device->dev, "Can't request dma IRQ\n"); - return -EINVAL; + ret = -EINVAL; + goto err_pm; } if (mtkd->support_33bits) mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B); +err_pm: + pm_runtime_put_noidle(mtkd->ddev.dev); return ret; } diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c index 8a6bf291a73f..daafea5bc35d 100644 --- a/drivers/dma/ptdma/ptdma-dev.c +++ b/drivers/dma/ptdma/ptdma-dev.c @@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt) if (!cmd_q->qbase) { dev_err(dev, "unable to allocate command queue\n"); ret = -ENOMEM; - goto e_dma_alloc; + goto e_destroy_pool; } cmd_q->qidx = 0; @@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt) /* Request an irq */ ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt); - if (ret) - goto e_pool; + if (ret) { + dev_err(dev, "unable to allocate an IRQ\n"); + goto e_free_dma; + } /* Update the device registers with queue information. 
*/ cmd_q->qcontrol &= ~CMD_Q_SIZE; @@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt) /* Register the DMA engine support */ ret = pt_dmaengine_register(pt); if (ret) - goto e_dmaengine; + goto e_free_irq; /* Set up debugfs entries */ ptdma_debugfs_setup(pt); return 0; -e_dmaengine: +e_free_irq: free_irq(pt->pt_irq, pt); -e_dma_alloc: +e_free_dma: dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma); -e_pool: - dev_err(dev, "unable to allocate an IRQ\n"); +e_destroy_pool: dma_pool_destroy(pt->cmd_q.dma_pool); return ret; diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 6885b3dcd7a9..f4c46b3b6d9d 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev) dmac->dev = &pdev->dev; platform_set_drvdata(pdev, dmac); - dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); - dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); + ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); + if (ret) + return ret; + + ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); + if (ret) + return ret; ret = rcar_dmac_parse_of(&pdev->dev, dmac); if (ret < 0) diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index a42164389ebc..d5d55732adba 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev) ret = of_dma_router_register(node, stm32_dmamux_route_allocate, &stm32_dmamux->dmarouter); if (ret) - goto err_clk; + goto pm_disable; return 0; +pm_disable: + pm_runtime_disable(&pdev->dev); err_clk: clk_disable_unprepare(stm32_dmamux->clk); diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 2c5975674723..a859ddd9d4a1 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -215,7 +215,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems) else return (char *)ptr; - r = (unsigned long)p % align; + r = (unsigned long)ptr % align; if (r == 0) return (char *)ptr; diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index a5486d86fdd2..8557781bb8dc 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -163,6 +163,11 @@ #define ECC_STAT_CECNT_SHIFT 8 #define ECC_STAT_BITNUM_MASK 0x7F +/* ECC error count register definitions */ +#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000 +#define ECC_ERRCNT_UECNT_SHIFT 16 +#define ECC_ERRCNT_CECNT_MASK 0xFFFF + /* DDR QOS Interrupt register definitions */ #define DDR_QOS_IRQ_STAT_OFST 0x20200 #define DDR_QOSUE_MASK 0x4 @@ -418,15 +423,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv) base = priv->baseaddr; p = &priv->stat; + regval = readl(base + ECC_ERRCNT_OFST); + p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK; + p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT; + if (!p->ce_cnt) + goto ue_err; + regval = readl(base + ECC_STAT_OFST); if (!regval) return 1; - p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT; - p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT; - if (!p->ce_cnt) - goto ue_err; - p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK); regval = readl(base + ECC_CEADDR0_OFST); diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 35b56c8ba0c0..492f3a9197ec 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -204,7 +204,8 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle 
*ph, u32 clk_id, if (rate_discrete && rate) { clk->list.num_rates = tot_rate_cnt; - sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL); + sort(clk->list.rates, tot_rate_cnt, sizeof(*rate), + rate_cmp_func, NULL); } clk->rate_discrete = rate_discrete; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index b406b3f78f46..e815b8f98739 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -652,7 +652,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, xfer = scmi_xfer_command_acquire(cinfo, msg_hdr); if (IS_ERR(xfer)) { - scmi_clear_channel(info, cinfo); + if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP) + scmi_clear_channel(info, cinfo); return; } @@ -2112,7 +2113,7 @@ static void __exit scmi_driver_exit(void) } module_exit(scmi_driver_exit); -MODULE_ALIAS("platform: arm-scmi"); +MODULE_ALIAS("platform:arm-scmi"); MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); MODULE_DESCRIPTION("ARM SCMI protocol driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c index 4c3201e290e2..ea84108035eb 100644 --- a/drivers/firmware/efi/apple-properties.c +++ b/drivers/firmware/efi/apple-properties.c @@ -24,7 +24,7 @@ static bool dump_properties __initdata; static int __init dump_properties_enable(char *arg) { dump_properties = true; - return 0; + return 1; } __setup("dump_apple_properties", dump_properties_enable); diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 0ef086e43090..7e771c56c13c 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -266,7 +266,7 @@ static int efi_pstore_write(struct pstore_record *record) efi_name[i] = name[i]; ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES, - preemptible(), record->size, record->psi->buf); + false, record->size, record->psi->buf); if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE)) if (!schedule_work(&efivar_work)) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 9fa86288b78a..e3df82d5d37a 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -209,7 +209,7 @@ static int __init efivar_ssdt_setup(char *str) memcpy(efivar_ssdt, str, strlen(str)); else pr_warn("efivar_ssdt: name too long: %s\n", str); - return 0; + return 1; } __setup("efivar_ssdt=", efivar_ssdt_setup); diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c index 380e4e251399..9c460843442f 100644 --- a/drivers/firmware/efi/libstub/riscv-stub.c +++ b/drivers/firmware/efi/libstub/riscv-stub.c @@ -25,7 +25,7 @@ typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long); static u32 hartid; -static u32 get_boot_hartid_from_fdt(void) +static int get_boot_hartid_from_fdt(void) { const void *fdt; int chosen_node, len; @@ -33,23 +33,26 @@ static u32 get_boot_hartid_from_fdt(void) fdt = get_efi_config_table(DEVICE_TREE_GUID); if (!fdt) - return U32_MAX; + return -EINVAL; chosen_node = fdt_path_offset(fdt, "/chosen"); if (chosen_node < 0) - return U32_MAX; + return -EINVAL; prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len); if (!prop || len != sizeof(u32)) - return U32_MAX; + return -EINVAL; - return fdt32_to_cpu(*prop); + hartid = fdt32_to_cpu(*prop); + return 0; } efi_status_t check_platform_features(void) { - hartid = get_boot_hartid_from_fdt(); - if (hartid == U32_MAX) { + int ret; + + ret = 
get_boot_hartid_from_fdt(); + if (ret) { efi_err("/chosen/boot-hartid missing or invalid!\n"); return EFI_UNSUPPORTED; } diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index abdc8a6a3963..cae590bd08f2 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -742,6 +742,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, { const struct efivar_operations *ops; efi_status_t status; + unsigned long varsize; if (!__efivars) return -EINVAL; @@ -764,15 +765,17 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, return efivar_entry_set_nonblocking(name, vendor, attributes, size, data); + varsize = size + ucs2_strsize(name, 1024); if (!block) { if (down_trylock(&efivars_lock)) return -EBUSY; + status = check_var_size_nonblocking(attributes, varsize); } else { if (down_interruptible(&efivars_lock)) return -EINTR; + status = check_var_size(attributes, varsize); } - status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); if (status != EFI_SUCCESS) { up(&efivars_lock); return -ENOSPC; diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig index 931544c9f63d..983e07dc022e 100644 --- a/drivers/firmware/google/Kconfig +++ b/drivers/firmware/google/Kconfig @@ -21,7 +21,7 @@ config GOOGLE_SMI config GOOGLE_COREBOOT_TABLE tristate "Coreboot Table Access" - depends on ACPI || OF + depends on HAS_IOMEM && (ACPI || OF) help This option enables the coreboot_table module, which provides other firmware modules access to the coreboot table. The coreboot table diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 27a64de91981..2b5214d5c0da 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -749,12 +749,6 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) }; int ret; - desc.args[0] = addr; - desc.args[1] = size; - desc.args[2] = spare; - desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, - QCOM_SCM_VAL); - ret = qcom_scm_call(__scm->dev, &desc, NULL); /* the pg table has been initialized already, ignore the error */ diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 2a7687911c09..53c7e3f8cfde 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -477,7 +477,7 @@ static int svc_normal_to_secure_thread(void *data) case INTEL_SIP_SMC_RSU_ERROR: pr_err("%s: STATUS_ERROR\n", __func__); cbdata->status = BIT(SVC_STATUS_ERROR); - cbdata->kaddr1 = NULL; + cbdata->kaddr1 = &res.a1; cbdata->kaddr2 = NULL; cbdata->kaddr3 = NULL; pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c index 303a491e520d..757cc8b9f3de 100644 --- a/drivers/firmware/sysfb_simplefb.c +++ b/drivers/firmware/sysfb_simplefb.c @@ -113,16 +113,21 @@ __init int sysfb_create_simplefb(const struct screen_info *si, sysfb_apply_efi_quirks(pd); ret = platform_device_add_resources(pd, &res, 1); - if (ret) { - platform_device_put(pd); - return ret; - } + if (ret) + goto err_put_device; ret = platform_device_add_data(pd, mode, sizeof(*mode)); - if (ret) { - platform_device_put(pd); - return ret; - } + if (ret) + goto err_put_device; + + ret = platform_device_add(pd); + if (ret) + goto err_put_device; + + return 0; + +err_put_device: + platform_device_put(pd); - return platform_device_add(pd); + return ret; } diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c index 
92e6eebd1851..5858e6339a10 100644 --- a/drivers/fsi/fsi-core.c +++ b/drivers/fsi/fsi-core.c @@ -24,9 +24,6 @@ #include "fsi-master.h" -#define CREATE_TRACE_POINTS -#include <trace/events/fsi.h> - #define FSI_SLAVE_CONF_NEXT_MASK GENMASK(31, 31) #define FSI_SLAVE_CONF_SLOTS_MASK GENMASK(23, 16) #define FSI_SLAVE_CONF_SLOTS_SHIFT 16 @@ -95,6 +92,9 @@ struct fsi_slave { u8 t_echo_delay; }; +#define CREATE_TRACE_POINTS +#include <trace/events/fsi.h> + #define to_fsi_master(d) container_of(d, struct fsi_master, dev) #define to_fsi_slave(d) container_of(d, struct fsi_slave, dev) @@ -524,6 +524,8 @@ static int fsi_slave_scan(struct fsi_slave *slave) dev->addr = engine_addr; dev->size = slots * engine_page_size; + trace_fsi_dev_init(dev); + dev_dbg(&slave->dev, "engine[%i]: type %x, version %x, addr %x size %x\n", dev->unit, dev->engine_type, version, @@ -1006,6 +1008,7 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id) crc = crc4(0, cfam_id, 32); if (crc) { + trace_fsi_slave_invalid_cfam(master, link, cfam_id); dev_warn(&master->dev, "slave %02x:%02x invalid cfam id CRC!\n", link, id); return -EIO; @@ -1080,6 +1083,8 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id) if (rc) goto err_free; + trace_fsi_slave_init(slave); + /* Create chardev for userspace access */ cdev_init(&slave->cdev, &cfam_fops); rc = cdev_device_add(&slave->cdev, &slave->dev); diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c index 8606e55c1721..7cec1772820d 100644 --- a/drivers/fsi/fsi-master-aspeed.c +++ b/drivers/fsi/fsi-master-aspeed.c @@ -449,11 +449,13 @@ static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *att { struct fsi_master_aspeed *aspeed = dev_get_drvdata(dev); + trace_fsi_master_aspeed_cfam_reset(true); mutex_lock(&aspeed->lock); gpiod_set_value(aspeed->cfam_reset_gpio, 1); usleep_range(900, 1000); gpiod_set_value(aspeed->cfam_reset_gpio, 0); mutex_unlock(&aspeed->lock); + trace_fsi_master_aspeed_cfam_reset(false); return count; } @@ -542,25 +544,28 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) return rc; } - aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL); + aspeed = kzalloc(sizeof(*aspeed), GFP_KERNEL); if (!aspeed) return -ENOMEM; aspeed->dev = &pdev->dev; aspeed->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(aspeed->base)) - return PTR_ERR(aspeed->base); + if (IS_ERR(aspeed->base)) { + rc = PTR_ERR(aspeed->base); + goto err_free_aspeed; + } aspeed->clk = devm_clk_get(aspeed->dev, NULL); if (IS_ERR(aspeed->clk)) { dev_err(aspeed->dev, "couldn't get clock\n"); - return PTR_ERR(aspeed->clk); + rc = PTR_ERR(aspeed->clk); + goto err_free_aspeed; } rc = clk_prepare_enable(aspeed->clk); if (rc) { dev_err(aspeed->dev, "couldn't enable clock\n"); - return rc; + goto err_free_aspeed; } rc = setup_cfam_reset(aspeed); @@ -595,7 +600,7 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) rc = opb_readl(aspeed, ctrl_base + FSI_MVER, &raw); if (rc) { dev_err(&pdev->dev, "failed to read hub version\n"); - return rc; + goto err_release; } reg = be32_to_cpu(raw); @@ -634,6 +639,8 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) err_release: clk_disable_unprepare(aspeed->clk); +err_free_aspeed: + kfree(aspeed); return rc; } diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c index 7eaab1be0aa4..3d04e8baecbb 100644 --- a/drivers/fsi/fsi-occ.c +++ b/drivers/fsi/fsi-occ.c @@ -246,7 +246,7 @@ static int occ_verify_checksum(struct occ 
*occ, struct occ_response *resp, if (checksum != checksum_resp) { dev_err(occ->dev, "Bad checksum: %04x!=%04x\n", checksum, checksum_resp); - return -EBADMSG; + return -EBADE; } return 0; @@ -451,6 +451,14 @@ static int occ_trigger_attn(struct occ *occ) return rc; } +static bool fsi_occ_response_not_ready(struct occ_response *resp, u8 seq_no, + u8 cmd_type) +{ + return resp->return_status == OCC_RESP_CMD_IN_PRG || + resp->return_status == OCC_RESP_CRIT_INIT || + resp->seq_no != seq_no || resp->cmd_type != cmd_type; +} + int fsi_occ_submit(struct device *dev, const void *request, size_t req_len, void *response, size_t *resp_len) { @@ -461,10 +469,11 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len, struct occ_response *resp = response; size_t user_resp_len = *resp_len; u8 seq_no; + u8 cmd_type; u16 checksum = 0; u16 resp_data_length; const u8 *byte_request = (const u8 *)request; - unsigned long start; + unsigned long end; int rc; size_t i; @@ -478,6 +487,8 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len, return -EINVAL; } + cmd_type = byte_request[1]; + /* Checksum the request, ignoring first byte (sequence number). */ for (i = 1; i < req_len - 2; ++i) checksum += byte_request[i]; @@ -509,53 +520,66 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len, if (rc) goto done; - /* Read occ response header */ - start = jiffies; - do { + end = jiffies + timeout; + while (true) { + /* Read occ response header */ rc = occ_getsram(occ, 0, resp, 8); if (rc) goto done; - if (resp->return_status == OCC_RESP_CMD_IN_PRG || - resp->return_status == OCC_RESP_CRIT_INIT || - resp->seq_no != seq_no) { - rc = -ETIMEDOUT; - - if (time_after(jiffies, start + timeout)) { - dev_err(occ->dev, "resp timeout status=%02x " - "resp seq_no=%d our seq_no=%d\n", + if (fsi_occ_response_not_ready(resp, seq_no, cmd_type)) { + if (time_after(jiffies, end)) { + dev_err(occ->dev, + "resp timeout status=%02x seq=%d cmd=%d, our seq=%d cmd=%d\n", resp->return_status, resp->seq_no, - seq_no); + resp->cmd_type, seq_no, cmd_type); + rc = -ETIMEDOUT; goto done; } set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(wait_time); - } - } while (rc); - - /* Extract size of response data */ - resp_data_length = get_unaligned_be16(&resp->data_length); + } else { + /* Extract size of response data */ + resp_data_length = + get_unaligned_be16(&resp->data_length); + + /* + * Message size is data length + 5 bytes header + 2 + * bytes checksum + */ + if ((resp_data_length + 7) > user_resp_len) { + rc = -EMSGSIZE; + goto done; + } - /* Message size is data length + 5 bytes header + 2 bytes checksum */ - if ((resp_data_length + 7) > user_resp_len) { - rc = -EMSGSIZE; - goto done; + /* + * Get the entire response including the header again, + * in case it changed + */ + if (resp_data_length > 1) { + rc = occ_getsram(occ, 0, resp, + resp_data_length + 7); + if (rc) + goto done; + + if (!fsi_occ_response_not_ready(resp, seq_no, + cmd_type)) + break; + } else { + break; + } + } } dev_dbg(dev, "resp_status=%02x resp_data_len=%d\n", resp->return_status, resp_data_length); - /* Grab the rest */ - if (resp_data_length > 1) { - /* already got 3 bytes resp, also need 2 bytes checksum */ - rc = occ_getsram(occ, 8, &resp->data[3], resp_data_length - 1); - if (rc) - goto done; - } + rc = occ_verify_checksum(occ, resp, resp_data_length); + if (rc) + goto done; occ->client_response_size = resp_data_length + 7; - rc = occ_verify_checksum(occ, resp, resp_data_length); done: 
*resp_len = occ->client_response_size; @@ -598,7 +622,11 @@ static int occ_probe(struct platform_device *pdev) occ->version = (uintptr_t)of_device_get_match_data(dev); occ->dev = dev; occ->sbefifo = dev->parent; - occ->sequence_number = 1; + /* + * Quickly derive a pseudo-random number from jiffies so that + * re-probing the driver doesn't accidentally overlap sequence numbers. + */ + occ->sequence_number = (u8)((jiffies % 0xff) + 1); mutex_init(&occ->occ_lock); if (dev->of_node) { diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c index 52328adef643..5f93a53846aa 100644 --- a/drivers/fsi/fsi-sbefifo.c +++ b/drivers/fsi/fsi-sbefifo.c @@ -32,6 +32,8 @@ #include <linux/vmalloc.h> #include <linux/mm.h> +#include <uapi/linux/fsi.h> + /* * The SBEFIFO is a pipe-like FSI device for communicating with * the self boot engine on POWER processors. @@ -125,6 +127,7 @@ struct sbefifo { bool dead; bool async_ffdc; bool timed_out; + u32 timeout_start_rsp_ms; }; struct sbefifo_user { @@ -133,6 +136,7 @@ struct sbefifo_user { void *cmd_page; void *pending_cmd; size_t pending_len; + u32 read_timeout_ms; }; static DEFINE_MUTEX(sbefifo_ffdc_mutex); @@ -473,7 +477,8 @@ static int sbefifo_wait(struct sbefifo *sbefifo, bool up, if (!ready) { sysfs_notify(&sbefifo->dev.kobj, NULL, dev_attr_timeout.attr.name); sbefifo->timed_out = true; - dev_err(dev, "%s FIFO Timeout ! status=%08x\n", up ? "UP" : "DOWN", sts); + dev_err(dev, "%s FIFO Timeout (%u ms)! status=%08x\n", + up ? "UP" : "DOWN", jiffies_to_msecs(timeout), sts); return -ETIMEDOUT; } dev_vdbg(dev, "End of wait status: %08x\n", sts); @@ -493,8 +498,8 @@ static int sbefifo_send_command(struct sbefifo *sbefifo, u32 status; int rc; - dev_vdbg(dev, "sending command (%zd words, cmd=%04x)\n", - cmd_len, be32_to_cpu(command[1])); + dev_dbg(dev, "sending command (%zd words, cmd=%04x)\n", + cmd_len, be32_to_cpu(command[1])); /* As long as there's something to send */ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_CMD); @@ -547,21 +552,23 @@ static int sbefifo_read_response(struct sbefifo *sbefifo, struct iov_iter *respo size_t len; int rc; - dev_vdbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response)); + dev_dbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response)); - timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_RSP); + timeout = msecs_to_jiffies(sbefifo->timeout_start_rsp_ms); for (;;) { /* Grab FIFO status (this will handle parity errors) */ rc = sbefifo_wait(sbefifo, false, &status, timeout); - if (rc < 0) + if (rc < 0) { + dev_dbg(dev, "timeout waiting (%u ms)\n", jiffies_to_msecs(timeout)); return rc; + } timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_RSP); /* Decode status */ len = sbefifo_populated(status); eot_set = sbefifo_eot_set(status); - dev_vdbg(dev, " chunk size %zd eot_set=0x%x\n", len, eot_set); + dev_dbg(dev, " chunk size %zd eot_set=0x%x\n", len, eot_set); /* Go through the chunk */ while(len--) { @@ -795,6 +802,7 @@ static int sbefifo_user_open(struct inode *inode, struct file *file) return -ENOMEM; } mutex_init(&user->file_lock); + user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP; return 0; } @@ -837,7 +845,9 @@ static ssize_t sbefifo_user_read(struct file *file, char __user *buf, rc = mutex_lock_interruptible(&sbefifo->lock); if (rc) goto bail; + sbefifo->timeout_start_rsp_ms = user->read_timeout_ms; rc = __sbefifo_submit(sbefifo, user->pending_cmd, cmd_len, &resp_iter); + sbefifo->timeout_start_rsp_ms = SBEFIFO_TIMEOUT_START_RSP; mutex_unlock(&sbefifo->lock); if (rc < 0) goto bail; @@ -927,12 
+937,55 @@ static int sbefifo_user_release(struct inode *inode, struct file *file) return 0; } +static int sbefifo_read_timeout(struct sbefifo_user *user, void __user *argp) +{ + struct device *dev = &user->sbefifo->dev; + u32 timeout; + + if (get_user(timeout, (__u32 __user *)argp)) + return -EFAULT; + + if (timeout == 0) { + user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP; + dev_dbg(dev, "Timeout reset to %d\n", user->read_timeout_ms); + return 0; + } + + if (timeout < 10 || timeout > 120) + return -EINVAL; + + user->read_timeout_ms = timeout * 1000; /* user timeout is in sec */ + + dev_dbg(dev, "Timeout set to %d\n", user->read_timeout_ms); + + return 0; +} + +static long sbefifo_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct sbefifo_user *user = file->private_data; + int rc = -ENOTTY; + + if (!user) + return -EINVAL; + + mutex_lock(&user->file_lock); + switch (cmd) { + case FSI_SBEFIFO_READ_TIMEOUT_SECONDS: + rc = sbefifo_read_timeout(user, (void __user *)arg); + break; + } + mutex_unlock(&user->file_lock); + return rc; +} + static const struct file_operations sbefifo_fops = { .owner = THIS_MODULE, .open = sbefifo_user_open, .read = sbefifo_user_read, .write = sbefifo_user_write, .release = sbefifo_user_release, + .unlocked_ioctl = sbefifo_user_ioctl, }; static void sbefifo_free(struct device *dev) @@ -972,6 +1025,7 @@ static int sbefifo_probe(struct device *dev) sbefifo->fsi_dev = fsi_dev; dev_set_drvdata(dev, sbefifo); mutex_init(&sbefifo->lock); + sbefifo->timeout_start_rsp_ms = SBEFIFO_TIMEOUT_START_RSP; /* * Try cleaning up the FIFO. If this fails, we still register the diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c index ce63cbd14d69..24155c038f6d 100644 --- a/drivers/gpio/gpio-rockchip.c +++ b/drivers/gpio/gpio-rockchip.c @@ -410,10 +410,8 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type); polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity); - switch (type) { - case IRQ_TYPE_EDGE_BOTH: + if (type == IRQ_TYPE_EDGE_BOTH) { if (bank->gpio_type == GPIO_TYPE_V2) { - bank->toggle_edge_mode &= ~mask; rockchip_gpio_writel_bit(bank, d->hwirq, 1, bank->gpio_regs->int_bothedge); goto out; @@ -431,30 +429,34 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) else polarity |= mask; } - break; - case IRQ_TYPE_EDGE_RISING: - bank->toggle_edge_mode &= ~mask; - level |= mask; - polarity |= mask; - break; - case IRQ_TYPE_EDGE_FALLING: - bank->toggle_edge_mode &= ~mask; - level |= mask; - polarity &= ~mask; - break; - case IRQ_TYPE_LEVEL_HIGH: - bank->toggle_edge_mode &= ~mask; - level &= ~mask; - polarity |= mask; - break; - case IRQ_TYPE_LEVEL_LOW: - bank->toggle_edge_mode &= ~mask; - level &= ~mask; - polarity &= ~mask; - break; - default: - ret = -EINVAL; - goto out; + } else { + if (bank->gpio_type == GPIO_TYPE_V2) { + rockchip_gpio_writel_bit(bank, d->hwirq, 0, + bank->gpio_regs->int_bothedge); + } else { + bank->toggle_edge_mode &= ~mask; + } + switch (type) { + case IRQ_TYPE_EDGE_RISING: + level |= mask; + polarity |= mask; + break; + case IRQ_TYPE_EDGE_FALLING: + level |= mask; + polarity &= ~mask; + break; + case IRQ_TYPE_LEVEL_HIGH: + level &= ~mask; + polarity |= mask; + break; + case IRQ_TYPE_LEVEL_LOW: + level &= ~mask; + polarity &= ~mask; + break; + default: + ret = -EINVAL; + goto out; + } } rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type); diff --git a/drivers/gpio/gpio-tegra186.c 
b/drivers/gpio/gpio-tegra186.c index c99858f40a27..00762de3d409 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -337,9 +337,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip, return offset + pin; } +#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio) + static void tegra186_irq_ack(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; base = tegra186_gpio_get_base(gpio, data->hwirq); @@ -351,7 +354,8 @@ static void tegra186_irq_ack(struct irq_data *data) static void tegra186_irq_mask(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -366,7 +370,8 @@ static void tegra186_irq_mask(struct irq_data *data) static void tegra186_irq_unmask(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -381,7 +386,8 @@ static void tegra186_irq_unmask(struct irq_data *data) static int tegra186_irq_set_type(struct irq_data *data, unsigned int type) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c index d885032cf814..d918d2df4de2 100644 --- a/drivers/gpio/gpio-ts4900.c +++ b/drivers/gpio/gpio-ts4900.c @@ -1,7 +1,7 @@ /* * Digital I/O driver for Technologic Systems I2C FPGA Core * - * Copyright (C) 2015 Technologic Systems + * Copyright (C) 2015, 2018 Technologic Systems * Copyright (C) 2016 Savoir-Faire Linux * * This program is free software; you can redistribute it and/or @@ -55,19 +55,33 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip, { struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); - /* - * This will clear the output enable bit, the other bits are - * dontcare when this is cleared + /* Only clear the OE bit here, requires a RMW. Prevents potential issue + * with OE and data getting to the physical pin at different times. */ - return regmap_write(priv->regmap, offset, 0); + return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0); } static int ts4900_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); + unsigned int reg; int ret; + /* If changing from an input to an output, we need to first set the + * proper data bit to what is requested and then set OE bit. 
This + * prevents a glitch that can occur on the IO line + */ + regmap_read(priv->regmap, offset, &reg); + if (!(reg & TS4900_GPIO_OE)) { + if (value) + reg = TS4900_GPIO_OUT; + else + reg &= ~TS4900_GPIO_OUT; + + regmap_write(priv->regmap, offset, reg); + } + if (value) ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE | TS4900_GPIO_OUT); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index d040c72fea58..53be0bdf2bc3 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -311,7 +311,8 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip, if (IS_ERR(desc)) return desc; - ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout); + /* ACPI uses hundredths of milliseconds units */ + ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10); if (ret) dev_warn(chip->parent, "Failed to set debounce-timeout for pin 0x%04X, err %d\n", @@ -391,8 +392,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, pin = agpio->pin_table[0]; if (pin <= 255) { - char ev_name[5]; - sprintf(ev_name, "_%c%02hhX", + char ev_name[8]; + sprintf(ev_name, "_%c%02X", agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', pin); if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) @@ -1052,7 +1053,8 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind if (ret < 0) return ret; - ret = gpio_set_debounce_timeout(desc, info.debounce); + /* ACPI uses hundredths of milliseconds units */ + ret = gpio_set_debounce_timeout(desc, info.debounce * 10); if (ret) return ret; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index d1b9b721218f..320baed949ee 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1368,6 +1368,16 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset) { struct irq_domain *domain = gc->irq.domain; +#ifdef CONFIG_GPIOLIB_IRQCHIP + /* + * Avoid race condition with other code, which tries to lookup + * an IRQ before the irqchip has been properly registered, + * i.e. while gpiochip is still being brought up. + */ + if (!gc->irq.initialized) + return -EPROBE_DEFER; +#endif + if (!gpiochip_irqchip_irq_valid(gc, offset)) return -ENXIO; @@ -1550,6 +1560,15 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc, gpiochip_set_irq_hooks(gc); + /* + * Using barrier() here to prevent compiler from reordering + * gc->irq.initialized before initialization of above + * GPIO chip irq members. + */ + barrier(); + + gc->irq.initialized = true; + acpi_gpiochip_request_interrupts(gc); return 0; @@ -2186,6 +2205,16 @@ static int gpio_set_bias(struct gpio_desc *desc) return gpio_set_config_with_argument_optional(desc, bias, arg); } +/** + * gpio_set_debounce_timeout() - Set debounce timeout + * @desc: GPIO descriptor to set the debounce timeout + * @debounce: Debounce timeout in microseconds + * + * The function calls the certain GPIO driver to set debounce timeout + * in the hardware. + * + * Returns 0 on success, or negative error code otherwise. + */ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce) { return gpio_set_config_with_argument_optional(desc, @@ -3106,6 +3135,16 @@ int gpiod_to_irq(const struct gpio_desc *desc) return retirq; } +#ifdef CONFIG_GPIOLIB_IRQCHIP + if (gc->irq.chip) { + /* + * Avoid race condition with other code, which tries to lookup + * an IRQ before the irqchip has been properly registered, + * i.e. while gpiochip is still being brought up. 
+ */ + return -EPROBE_DEFER; + } +#endif return -ENXIO; } EXPORT_SYMBOL_GPL(gpiod_to_irq); diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h index 5b393622f592..a0f0a17e224f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h +++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h @@ -119,6 +119,7 @@ #define CONNECTOR_OBJECT_ID_eDP 0x14 #define CONNECTOR_OBJECT_ID_MXM 0x15 #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 +#define CONNECTOR_OBJECT_ID_USBC 0x17 /* deleted */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f428f94b43c0..7e73ac6fb21d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1397,12 +1397,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); -bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); void amdgpu_acpi_detect(void); #else static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } -static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } static inline void amdgpu_acpi_detect(void) { } static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, @@ -1411,6 +1409,14 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state) { return 0; } #endif +#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) +bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev); +bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); +#else +static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } +static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } +#endif + int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo, struct amdgpu_bo_va_mapping **mapping); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 4811b0faafd9..0e12315fa0cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void) } } +#if IS_ENABLED(CONFIG_SUSPEND) +/** + * amdgpu_acpi_is_s3_active + * + * @adev: amdgpu_device_pointer + * + * returns true if supported, false if not. 
+ */ +bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) +{ + return !(adev->flags & AMD_IS_APU) || + (pm_suspend_target_state == PM_SUSPEND_MEM); +} + /** * amdgpu_acpi_is_s0ix_active * @@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void) */ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { -#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND) - if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) { - if (adev->flags & AMD_IS_APU) - return pm_suspend_target_state == PM_SUSPEND_TO_IDLE; + if (!(adev->flags & AMD_IS_APU) || + (pm_suspend_target_state != PM_SUSPEND_TO_IDLE)) + return false; + + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) { + dev_warn_once(adev->dev, + "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n" + "To use suspend-to-idle change the sleep mode in BIOS setup.\n"); + return false; } -#endif + +#if !IS_ENABLED(CONFIG_AMD_PMC) + dev_warn_once(adev->dev, + "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n"); return false; +#else + return true; +#endif /* CONFIG_AMD_PMC */ } + +#endif /* CONFIG_SUSPEND */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index df1f9b88a53f..a09876bb7ec8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -175,7 +175,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) /* Check if bpc is within clock limit. Try to degrade gracefully otherwise */ if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) { - if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) && + if ((connector->display_info.edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30) && (mode_clock * 5/4 <= max_tmds_clock)) bpc = 10; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 913f9eaa9cd6..aa823f154199 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1508,6 +1508,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, return 0; default: + dma_fence_put(fence); return -EINVAL; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1545884dc703..2f2ae26a8068 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -30,6 +30,7 @@ #include <linux/module.h> #include <linux/console.h> #include <linux/slab.h> +#include <linux/pci.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_probe_helper.h> @@ -2069,6 +2070,8 @@ out: */ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) { + struct drm_device *dev = adev_to_drm(adev); + struct pci_dev *parent; int i, r; amdgpu_device_enable_virtual_display(adev); @@ -2168,6 +2171,18 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) return -EINVAL; } + if (amdgpu_has_atpx() && + (amdgpu_is_atpx_hybrid() || + amdgpu_has_atpx_dgpu_power_cntl()) && + ((adev->flags & AMD_IS_APU) == 0) && + !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) + adev->flags |= AMD_IS_PX; + + if (!(adev->flags & AMD_IS_APU)) { + parent = pci_upstream_bridge(adev->pdev); + adev->has_pr3 = parent ? 
pci_pr3_present(parent) : false; + } + amdgpu_amdkfd_device_probe(adev); adev->pm.pp_feature = amdgpu_pp_feature_mask; @@ -5610,7 +5625,7 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) return; #endif if (adev->gmc.xgmi.connected_to_cpu) @@ -5626,7 +5641,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) return; #endif if (adev->gmc.xgmi.connected_to_cpu) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index dc50c05f23fc..5c08047adb59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1145,7 +1145,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, if (ret) return ret; - if (!dev->mode_config.allow_fb_modifiers) { + if (!dev->mode_config.allow_fb_modifiers && !adev->enable_virtual_display) { drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI, "GFX9+ requires FB check based on format modifier\n"); ret = check_tiling_flags_gfx6(rfb); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 30059b7db0b2..b517b76e96a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -680,7 +680,7 @@ MODULE_PARM_DESC(sched_policy, * Maximum number of processes that HWS can schedule concurrently. The maximum is the * number of VMIDs assigned to the HWS, which is also the default. */ -int hws_max_conc_proc = 8; +int hws_max_conc_proc = -1; module_param(hws_max_conc_proc, int, 0444); MODULE_PARM_DESC(hws_max_conc_proc, "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))"); @@ -891,6 +891,717 @@ MODULE_PARM_DESC(smu_pptable_id, "specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)"); module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444); +/* These devices are not supported by amdgpu. 
+ * They are supported by the mach64, r128, radeon drivers + */ +static const u16 amdgpu_unsupported_pciidlist[] = { + /* mach64 */ + 0x4354, + 0x4358, + 0x4554, + 0x4742, + 0x4744, + 0x4749, + 0x474C, + 0x474D, + 0x474E, + 0x474F, + 0x4750, + 0x4751, + 0x4752, + 0x4753, + 0x4754, + 0x4755, + 0x4756, + 0x4757, + 0x4758, + 0x4759, + 0x475A, + 0x4C42, + 0x4C44, + 0x4C47, + 0x4C49, + 0x4C4D, + 0x4C4E, + 0x4C50, + 0x4C51, + 0x4C52, + 0x4C53, + 0x5654, + 0x5655, + 0x5656, + /* r128 */ + 0x4c45, + 0x4c46, + 0x4d46, + 0x4d4c, + 0x5041, + 0x5042, + 0x5043, + 0x5044, + 0x5045, + 0x5046, + 0x5047, + 0x5048, + 0x5049, + 0x504A, + 0x504B, + 0x504C, + 0x504D, + 0x504E, + 0x504F, + 0x5050, + 0x5051, + 0x5052, + 0x5053, + 0x5054, + 0x5055, + 0x5056, + 0x5057, + 0x5058, + 0x5245, + 0x5246, + 0x5247, + 0x524b, + 0x524c, + 0x534d, + 0x5446, + 0x544C, + 0x5452, + /* radeon */ + 0x3150, + 0x3151, + 0x3152, + 0x3154, + 0x3155, + 0x3E50, + 0x3E54, + 0x4136, + 0x4137, + 0x4144, + 0x4145, + 0x4146, + 0x4147, + 0x4148, + 0x4149, + 0x414A, + 0x414B, + 0x4150, + 0x4151, + 0x4152, + 0x4153, + 0x4154, + 0x4155, + 0x4156, + 0x4237, + 0x4242, + 0x4336, + 0x4337, + 0x4437, + 0x4966, + 0x4967, + 0x4A48, + 0x4A49, + 0x4A4A, + 0x4A4B, + 0x4A4C, + 0x4A4D, + 0x4A4E, + 0x4A4F, + 0x4A50, + 0x4A54, + 0x4B48, + 0x4B49, + 0x4B4A, + 0x4B4B, + 0x4B4C, + 0x4C57, + 0x4C58, + 0x4C59, + 0x4C5A, + 0x4C64, + 0x4C66, + 0x4C67, + 0x4E44, + 0x4E45, + 0x4E46, + 0x4E47, + 0x4E48, + 0x4E49, + 0x4E4A, + 0x4E4B, + 0x4E50, + 0x4E51, + 0x4E52, + 0x4E53, + 0x4E54, + 0x4E56, + 0x5144, + 0x5145, + 0x5146, + 0x5147, + 0x5148, + 0x514C, + 0x514D, + 0x5157, + 0x5158, + 0x5159, + 0x515A, + 0x515E, + 0x5460, + 0x5462, + 0x5464, + 0x5548, + 0x5549, + 0x554A, + 0x554B, + 0x554C, + 0x554D, + 0x554E, + 0x554F, + 0x5550, + 0x5551, + 0x5552, + 0x5554, + 0x564A, + 0x564B, + 0x564F, + 0x5652, + 0x5653, + 0x5657, + 0x5834, + 0x5835, + 0x5954, + 0x5955, + 0x5974, + 0x5975, + 0x5960, + 0x5961, + 0x5962, + 0x5964, + 0x5965, + 0x5969, + 0x5a41, + 0x5a42, + 0x5a61, + 0x5a62, + 0x5b60, + 0x5b62, + 0x5b63, + 0x5b64, + 0x5b65, + 0x5c61, + 0x5c63, + 0x5d48, + 0x5d49, + 0x5d4a, + 0x5d4c, + 0x5d4d, + 0x5d4e, + 0x5d4f, + 0x5d50, + 0x5d52, + 0x5d57, + 0x5e48, + 0x5e4a, + 0x5e4b, + 0x5e4c, + 0x5e4d, + 0x5e4f, + 0x6700, + 0x6701, + 0x6702, + 0x6703, + 0x6704, + 0x6705, + 0x6706, + 0x6707, + 0x6708, + 0x6709, + 0x6718, + 0x6719, + 0x671c, + 0x671d, + 0x671f, + 0x6720, + 0x6721, + 0x6722, + 0x6723, + 0x6724, + 0x6725, + 0x6726, + 0x6727, + 0x6728, + 0x6729, + 0x6738, + 0x6739, + 0x673e, + 0x6740, + 0x6741, + 0x6742, + 0x6743, + 0x6744, + 0x6745, + 0x6746, + 0x6747, + 0x6748, + 0x6749, + 0x674A, + 0x6750, + 0x6751, + 0x6758, + 0x6759, + 0x675B, + 0x675D, + 0x675F, + 0x6760, + 0x6761, + 0x6762, + 0x6763, + 0x6764, + 0x6765, + 0x6766, + 0x6767, + 0x6768, + 0x6770, + 0x6771, + 0x6772, + 0x6778, + 0x6779, + 0x677B, + 0x6840, + 0x6841, + 0x6842, + 0x6843, + 0x6849, + 0x684C, + 0x6850, + 0x6858, + 0x6859, + 0x6880, + 0x6888, + 0x6889, + 0x688A, + 0x688C, + 0x688D, + 0x6898, + 0x6899, + 0x689b, + 0x689c, + 0x689d, + 0x689e, + 0x68a0, + 0x68a1, + 0x68a8, + 0x68a9, + 0x68b0, + 0x68b8, + 0x68b9, + 0x68ba, + 0x68be, + 0x68bf, + 0x68c0, + 0x68c1, + 0x68c7, + 0x68c8, + 0x68c9, + 0x68d8, + 0x68d9, + 0x68da, + 0x68de, + 0x68e0, + 0x68e1, + 0x68e4, + 0x68e5, + 0x68e8, + 0x68e9, + 0x68f1, + 0x68f2, + 0x68f8, + 0x68f9, + 0x68fa, + 0x68fe, + 0x7100, + 0x7101, + 0x7102, + 0x7103, + 0x7104, + 0x7105, + 0x7106, + 0x7108, + 0x7109, + 0x710A, + 0x710B, + 0x710C, + 0x710E, + 0x710F, + 0x7140, + 0x7141, + 
0x7142, + 0x7143, + 0x7144, + 0x7145, + 0x7146, + 0x7147, + 0x7149, + 0x714A, + 0x714B, + 0x714C, + 0x714D, + 0x714E, + 0x714F, + 0x7151, + 0x7152, + 0x7153, + 0x715E, + 0x715F, + 0x7180, + 0x7181, + 0x7183, + 0x7186, + 0x7187, + 0x7188, + 0x718A, + 0x718B, + 0x718C, + 0x718D, + 0x718F, + 0x7193, + 0x7196, + 0x719B, + 0x719F, + 0x71C0, + 0x71C1, + 0x71C2, + 0x71C3, + 0x71C4, + 0x71C5, + 0x71C6, + 0x71C7, + 0x71CD, + 0x71CE, + 0x71D2, + 0x71D4, + 0x71D5, + 0x71D6, + 0x71DA, + 0x71DE, + 0x7200, + 0x7210, + 0x7211, + 0x7240, + 0x7243, + 0x7244, + 0x7245, + 0x7246, + 0x7247, + 0x7248, + 0x7249, + 0x724A, + 0x724B, + 0x724C, + 0x724D, + 0x724E, + 0x724F, + 0x7280, + 0x7281, + 0x7283, + 0x7284, + 0x7287, + 0x7288, + 0x7289, + 0x728B, + 0x728C, + 0x7290, + 0x7291, + 0x7293, + 0x7297, + 0x7834, + 0x7835, + 0x791e, + 0x791f, + 0x793f, + 0x7941, + 0x7942, + 0x796c, + 0x796d, + 0x796e, + 0x796f, + 0x9400, + 0x9401, + 0x9402, + 0x9403, + 0x9405, + 0x940A, + 0x940B, + 0x940F, + 0x94A0, + 0x94A1, + 0x94A3, + 0x94B1, + 0x94B3, + 0x94B4, + 0x94B5, + 0x94B9, + 0x9440, + 0x9441, + 0x9442, + 0x9443, + 0x9444, + 0x9446, + 0x944A, + 0x944B, + 0x944C, + 0x944E, + 0x9450, + 0x9452, + 0x9456, + 0x945A, + 0x945B, + 0x945E, + 0x9460, + 0x9462, + 0x946A, + 0x946B, + 0x947A, + 0x947B, + 0x9480, + 0x9487, + 0x9488, + 0x9489, + 0x948A, + 0x948F, + 0x9490, + 0x9491, + 0x9495, + 0x9498, + 0x949C, + 0x949E, + 0x949F, + 0x94C0, + 0x94C1, + 0x94C3, + 0x94C4, + 0x94C5, + 0x94C6, + 0x94C7, + 0x94C8, + 0x94C9, + 0x94CB, + 0x94CC, + 0x94CD, + 0x9500, + 0x9501, + 0x9504, + 0x9505, + 0x9506, + 0x9507, + 0x9508, + 0x9509, + 0x950F, + 0x9511, + 0x9515, + 0x9517, + 0x9519, + 0x9540, + 0x9541, + 0x9542, + 0x954E, + 0x954F, + 0x9552, + 0x9553, + 0x9555, + 0x9557, + 0x955f, + 0x9580, + 0x9581, + 0x9583, + 0x9586, + 0x9587, + 0x9588, + 0x9589, + 0x958A, + 0x958B, + 0x958C, + 0x958D, + 0x958E, + 0x958F, + 0x9590, + 0x9591, + 0x9593, + 0x9595, + 0x9596, + 0x9597, + 0x9598, + 0x9599, + 0x959B, + 0x95C0, + 0x95C2, + 0x95C4, + 0x95C5, + 0x95C6, + 0x95C7, + 0x95C9, + 0x95CC, + 0x95CD, + 0x95CE, + 0x95CF, + 0x9610, + 0x9611, + 0x9612, + 0x9613, + 0x9614, + 0x9615, + 0x9616, + 0x9640, + 0x9641, + 0x9642, + 0x9643, + 0x9644, + 0x9645, + 0x9647, + 0x9648, + 0x9649, + 0x964a, + 0x964b, + 0x964c, + 0x964e, + 0x964f, + 0x9710, + 0x9711, + 0x9712, + 0x9713, + 0x9714, + 0x9715, + 0x9802, + 0x9803, + 0x9804, + 0x9805, + 0x9806, + 0x9807, + 0x9808, + 0x9809, + 0x980A, + 0x9900, + 0x9901, + 0x9903, + 0x9904, + 0x9905, + 0x9906, + 0x9907, + 0x9908, + 0x9909, + 0x990A, + 0x990B, + 0x990C, + 0x990D, + 0x990E, + 0x990F, + 0x9910, + 0x9913, + 0x9917, + 0x9918, + 0x9919, + 0x9990, + 0x9991, + 0x9992, + 0x9993, + 0x9994, + 0x9995, + 0x9996, + 0x9997, + 0x9998, + 0x9999, + 0x999A, + 0x999B, + 0x999C, + 0x999D, + 0x99A0, + 0x99A2, + 0x99A4, + /* radeon secondary ids */ + 0x3171, + 0x3e70, + 0x4164, + 0x4165, + 0x4166, + 0x4168, + 0x4170, + 0x4171, + 0x4172, + 0x4173, + 0x496e, + 0x4a69, + 0x4a6a, + 0x4a6b, + 0x4a70, + 0x4a74, + 0x4b69, + 0x4b6b, + 0x4b6c, + 0x4c6e, + 0x4e64, + 0x4e65, + 0x4e66, + 0x4e67, + 0x4e68, + 0x4e69, + 0x4e6a, + 0x4e71, + 0x4f73, + 0x5569, + 0x556b, + 0x556d, + 0x556f, + 0x5571, + 0x5854, + 0x5874, + 0x5940, + 0x5941, + 0x5b72, + 0x5b73, + 0x5b74, + 0x5b75, + 0x5d44, + 0x5d45, + 0x5d6d, + 0x5d6f, + 0x5d72, + 0x5d77, + 0x5e6b, + 0x5e6d, + 0x7120, + 0x7124, + 0x7129, + 0x712e, + 0x712f, + 0x7162, + 0x7163, + 0x7166, + 0x7167, + 0x7172, + 0x7173, + 0x71a0, + 0x71a1, + 0x71a3, + 0x71a7, + 0x71bb, + 0x71e0, + 0x71e1, + 0x71e2, + 0x71e6, + 
0x71e7, + 0x71f2, + 0x7269, + 0x726b, + 0x726e, + 0x72a0, + 0x72a8, + 0x72b1, + 0x72b3, + 0x793f, +}; + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, @@ -1273,11 +1984,20 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, struct drm_device *ddev; struct amdgpu_device *adev; unsigned long flags = ent->driver_data; - int ret, retry = 0; + int ret, retry = 0, i; bool supports_atomic = false; bool is_fw_fb; resource_size_t base, size; + if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev)) + amdgpu_aspm = 0; + + /* skip devices which are owned by radeon */ + for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) { + if (amdgpu_unsupported_pciidlist[i] == pdev->device) + return -ENODEV; + } + if (amdgpu_virtual_display || amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) supports_atomic = true; @@ -1499,6 +2219,7 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) static int amdgpu_pmops_prepare(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); /* Return a positive number here so * DPM_FLAG_SMART_SUSPEND works properly @@ -1506,6 +2227,13 @@ static int amdgpu_pmops_prepare(struct device *dev) if (amdgpu_device_supports_boco(drm_dev)) return pm_runtime_suspended(dev); + /* if we will not support s3 or s2i for the device + * then skip suspend + */ + if (!amdgpu_acpi_is_s0ix_active(adev) && + !amdgpu_acpi_is_s3_active(adev)) + return 1; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 1916ec84dd71..e7845df6cad2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -266,7 +266,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, * adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; - while (queue_bit-- >= 0) { + while (--queue_bit >= 0) { if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 09a2fe839059..6744427577b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -152,21 +152,10 @@ static void amdgpu_get_audio_func(struct amdgpu_device *adev) int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) { struct drm_device *dev; - struct pci_dev *parent; int r, acpi_status; dev = adev_to_drm(adev); - if (amdgpu_has_atpx() && - (amdgpu_is_atpx_hybrid() || - amdgpu_has_atpx_dgpu_power_cntl()) && - ((flags & AMD_IS_APU) == 0) && - !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) - flags |= AMD_IS_PX; - - parent = pci_upstream_bridge(adev->pdev); - adev->has_pr3 = parent ? 
pci_pr3_present(parent) : false; - /* amdgpu_device_init should report only fatal error * like memory allocation failure or iomapping failure, * or memory manager initialization failure, it must diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 01a78c786536..d62b770cc9dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1343,7 +1343,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) return; - dma_resv_lock(bo->base.resv, NULL); + if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv))) + return; r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence); if (!WARN_ON(r)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 94126dc39688..8132f66177c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1892,7 +1892,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, unsigned i; int r; - if (direct_submit && !ring->sched.ready) { + if (!direct_submit && !ring->sched.ready) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index ac9a8cd21c4b..7d58bf410be0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -142,15 +142,16 @@ static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc, static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + unsigned long flags; if (crtc->state->event) { - spin_lock(&crtc->dev->event_lock); + spin_lock_irqsave(&crtc->dev->event_lock, flags); if (drm_crtc_vblank_get(crtc) != 0) drm_crtc_send_vblank_event(crtc, crtc->state->event); else drm_crtc_arm_vblank_event(crtc, crtc->state->event); - spin_unlock(&crtc->dev->event_lock); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); crtc->state->event = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6b15cad78de9..fd37bb39774c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -768,11 +768,17 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, * Check if all VM PDs/PTs are ready for updates * * Returns: - * True if eviction list is empty. + * True if VM is not evicting. 
*/ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - return list_empty(&vm->evicted); + bool ret; + + amdgpu_vm_eviction_lock(vm); + ret = !vm->evicting; + amdgpu_vm_eviction_unlock(vm); + + return ret && list_empty(&vm->evicted); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index c39e53a41f13..db27fcf87cd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1272,6 +1272,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 }, /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 }, + /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */ + { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 }, { 0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 3c01be661014..93a4da4284ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -788,7 +788,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) { + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev); adev->gmc.aper_size = adev->gmc.real_vram_size; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0a50fdaced7e..63c47f61d0df 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -381,8 +381,9 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU && - adev->gmc.real_vram_size > adev->gmc.aper_size) { + if ((adev->flags & AMD_IS_APU) && + adev->gmc.real_vram_size > adev->gmc.aper_size && + !amdgpu_passthrough(adev)) { adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; adev->gmc.aper_size = adev->gmc.real_vram_size; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 63b890f1e8af..bef9610084f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -581,7 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) { + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; adev->gmc.aper_size = adev->gmc.real_vram_size; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c67e21244342..0e731016921b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1387,7 +1387,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) */ /* check whether both host-gpu and gpu-gpu xgmi links exist */ - if ((adev->flags & AMD_IS_APU) || + if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) || (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)) { adev->gmc.aper_base = @@ -1652,7 +1652,7 @@ static int gmc_v9_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); amdgpu_gart_table_vram_free(adev); - amdgpu_bo_unref(&adev->gmc.pdb0_bo); + amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0); amdgpu_bo_fini(adev); return 0; diff --git 
a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 8931000dcd41..e37948c15769 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2062,6 +2062,10 @@ static int sdma_v4_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* SMU saves SDMA state for us */ + if (adev->in_s0ix) + return 0; + return sdma_v4_0_hw_fini(adev); } @@ -2069,6 +2073,10 @@ static int sdma_v4_0_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* SMU restores SDMA state for us */ + if (adev->in_s0ix) + return 0; + return sdma_v4_0_hw_init(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0fc97c364fd7..6439d5c3d8d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -607,8 +607,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev) static int soc15_asic_reset(struct amdgpu_device *adev) { /* original raven doesn't have full asic reset */ - if ((adev->apu_flags & AMD_APU_IS_RAVEN) && - !(adev->apu_flags & AMD_APU_IS_RAVEN2)) + if ((adev->apu_flags & AMD_APU_IS_RAVEN) || + (adev->apu_flags & AMD_APU_IS_RAVEN2)) return 0; switch (soc15_asic_reset_method(adev)) { @@ -1273,8 +1273,11 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; + /* + * MMHUB PG needs to be disabled for Picasso for + * stability reasons. + */ adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_MMHUB | AMD_PG_SUPPORT_VCN; } else { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 3d18aab88b4e..6e56bef4fdf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -601,8 +601,8 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); /* VCN global tiling registers */ - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( - UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); } static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst) @@ -1508,8 +1508,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) { + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; uint32_t tmp; + vcn_v3_0_pause_dpg_mode(adev, 0, &state); + /* Wait for power status to be 1 */ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 86afd37b098d..6688129df240 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1807,13 +1807,9 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data) if (!args->start_addr || !args->size) return -EINVAL; - mutex_lock(&p->mutex); - r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr, args->attrs); - mutex_unlock(&p->mutex); - return r; } #else diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index cfedfb1e8596..e574aa32a111 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1060,6 +1060,9 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, return -ENODEV; /* same everything but the other direction */ props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL); + if (!props2) + return -ENOMEM; + props2->node_from = id_to; props2->node_to = id_from; props2->kobj = NULL; @@ -1560,7 +1563,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size) /* Fetch the CRAT table from ACPI */ status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table); if (status == AE_NOT_FOUND) { - pr_warn("CRAT table not found\n"); + pr_info("CRAT table not found\n"); return -ENODATA; } else if (ACPI_FAILURE(status)) { const char *err = acpi_format_exception(status); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 88c483f69989..660eb7097cfc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -834,15 +834,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, } /* Verify module parameters regarding mapped process number*/ - if ((hws_max_conc_proc < 0) - || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) { - dev_err(kfd_device, - "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n", - hws_max_conc_proc, kfd->vm_info.vmid_num_kfd, - kfd->vm_info.vmid_num_kfd); + if (hws_max_conc_proc >= 0) + kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); + else kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; - } else - kfd->max_proc_per_quantum = hws_max_conc_proc; /* calculate max size of mqds needed for queues */ size = max_num_of_queues_per_device * diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 3eea4edee355..b8bdd796cd91 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -531,6 +531,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events) event_waiters = kmalloc_array(num_events, sizeof(struct kfd_event_waiter), GFP_KERNEL); + if (!event_waiters) + return NULL; for (i = 0; (event_waiters) && (i < num_events) ; i++) { init_wait(&event_waiters[i].wait); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index ed4bc5f844ce..766b3660c8c8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -270,15 +270,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) return ret; } - ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client, - O_RDWR); - if (ret < 0) { - kfifo_free(&client->fifo); - kfree(client); - return ret; - } - *fd = ret; - init_waitqueue_head(&client->wait_queue); spin_lock_init(&client->lock); client->events = 0; @@ -288,5 +279,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) list_add_rcu(&client->list, &dev->smi_clients); spin_unlock(&dev->smi_lock); + ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client, + O_RDWR); + if (ret < 0) { + spin_lock(&dev->smi_lock); + list_del_rcu(&client->list); + spin_unlock(&dev->smi_lock); + + synchronize_rcu(); + + kfifo_free(&client->fifo); + kfree(client); + return ret; + } + *fd = ret; + return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 16556ae892d4..ec75613618b1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1279,9 +1279,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; - /* Disable vblank IRQs aggressively for power-saving */ - adev_to_drm(adev)->vblank_disable_immediate = true; - if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); @@ -2299,7 +2296,8 @@ static int dm_resume(void *handle) * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->mst_port) + if (aconnector->dc_link && + aconnector->dc_link->type == dc_connection_mst_branch) continue; mutex_lock(&aconnector->hpd_lock); @@ -3230,7 +3228,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) /* Use GRPH_PFLIP interrupt */ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; - i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; + i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; i++) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); if (r) { @@ -3525,7 +3523,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap max - min); } -static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, +static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, int bl_idx, u32 user_brightness) { @@ -3553,7 +3551,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); } - return rc ? 0 : 1; + if (rc) + dm->actual_brightness[bl_idx] = user_brightness; } static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) @@ -3815,6 +3814,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } #endif + /* Disable vblank IRQs aggressively for power-saving. */ + adev_to_drm(adev)->vblank_disable_immediate = true; + /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; @@ -3861,6 +3863,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) update_connector_ext_caps(aconnector); if (amdgpu_dc_feature_mask & DC_PSR_MASK) amdgpu_dm_set_psr_caps(link); + + /* TODO: Fix vblank control helpers to delay PSR entry to allow this when + * PSR is also supported. 
+ */ + if (link->psr_settings.psr_feature_enabled) + adev_to_drm(adev)->vblank_disable_immediate = false; } @@ -7548,6 +7556,9 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, mode = amdgpu_dm_create_common_mode(encoder, common_modes[i].name, common_modes[i].w, common_modes[i].h); + if (!mode) + continue; + drm_mode_probed_add(connector, mode); amdgpu_dm_connector->num_modes++; } @@ -9307,7 +9318,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) /* restore the backlight level */ for (i = 0; i < dm->num_of_edps; i++) { if (dm->backlight_dev[i] && - (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i])) + (dm->actual_brightness[i] != dm->brightness[i])) amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); } #endif @@ -10217,10 +10228,13 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) { struct drm_connector *connector; - struct drm_connector_state *conn_state; + struct drm_connector_state *conn_state, *old_conn_state; struct amdgpu_dm_connector *aconnector = NULL; int i; - for_each_new_connector_in_state(state, connector, conn_state, i) { + for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { + if (!conn_state->crtc) + conn_state = old_conn_state; + if (conn_state->crtc != crtc) continue; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index d1d353a7c77d..46d6e65f6bd4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -446,6 +446,12 @@ struct amdgpu_display_manager { * cached backlight values. */ u32 brightness[AMDGPU_DM_MAX_NUM_EDP]; + /** + * @actual_brightness: + * + * last successfully applied backlight values. 
+ */ + u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP]; }; enum dsc_clock_force_state { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index e94ddd5e7b63..5c9f5214bc4e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -229,8 +229,10 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -388,8 +390,10 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf, break; r = put_user((*(rd_buf + result)), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1316,8 +1320,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1333,8 +1339,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1503,8 +1511,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1520,8 +1530,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1688,8 +1700,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1705,8 +1719,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1869,8 +1885,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1886,8 +1904,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2045,8 +2065,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2062,8 +2084,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2102,8 +2126,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2119,8 +2145,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2174,8 
+2202,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2191,8 +2221,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2246,8 +2278,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2263,8 +2297,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -3254,8 +3290,10 @@ static ssize_t dcc_en_bits_read( dc->hwss.get_dcc_en_bits(dc, dcc_en_bits); rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); - if (!rd_buf) + if (!rd_buf) { + kfree(dcc_en_bits); return -ENOMEM; + } for (i = 0; i < num_pipes; i++) offset += snprintf(rd_buf + offset, rd_buf_size - offset, @@ -3268,8 +3306,10 @@ static ssize_t dcc_en_bits_read( if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; *pos += 1; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 70a554f1e725..7072fb2ec07f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -74,10 +74,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) link = stream->link; - psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version; - - if (psr_config.psr_version > 0) { - psr_config.psr_exit_link_training_required = 0x1; + if (link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) { + psr_config.psr_version = link->psr_settings.psr_version; psr_config.psr_frame_capture_indication_req = 0; psr_config.psr_rfb_setup_time = 0x37; psr_config.psr_sdp_transmit_line_num_deadline = 0x20; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 1861a147a7fa..5c5cbeb59c4d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -437,8 +437,10 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? 
num_levels : 1; /* Refresh bounding box */ + DC_FP_START(); clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); + DC_FP_END(); } static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index 7046da14bb2a..329ce4e84b83 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -582,32 +582,32 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 5.32, - .sr_enter_plus_exit_time_us = 6.38, + .sr_exit_time_us = 13.5, + .sr_enter_plus_exit_time_us = 16.5, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 9.82, - .sr_enter_plus_exit_time_us = 11.196, + .sr_exit_time_us = 13.5, + .sr_enter_plus_exit_time_us = 16.5, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 9.89, - .sr_enter_plus_exit_time_us = 11.24, + .sr_exit_time_us = 13.5, + .sr_enter_plus_exit_time_us = 16.5, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 9.748, - .sr_enter_plus_exit_time_us = 11.102, + .sr_exit_time_us = 13.5, + .sr_enter_plus_exit_time_us = 16.5, .valid = true, }, } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 162ae7186124..21d2cbc3cbb2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -120,7 +120,11 @@ int dcn31_smu_send_msg_with_param( result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000); if (result == VBIOSSMC_Result_Failed) { - ASSERT(0); + if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && + param == TABLE_WATERMARKS) + DC_LOG_WARNING("Watermarks table not configured properly by SMU"); + else + ASSERT(0); REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); return -1; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 1860ccc3f4f2..b37c4d2e7a1e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -891,10 +891,13 @@ static bool dc_construct(struct dc *dc, goto fail; #ifdef CONFIG_DRM_AMD_DC_DCN dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; -#endif - if (dc->res_pool->funcs->update_bw_bounding_box) + if (dc->res_pool->funcs->update_bw_bounding_box) { + DC_FP_START(); dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); + DC_FP_END(); + } +#endif /* Creation of current_state must occur after dc->dml * is initialized in dc_create_resource_pool because @@ -1118,6 +1121,8 @@ struct dc *dc_create(const struct dc_init_data *init_params) dc->caps.max_dp_protocol_version = DP_VERSION_1_4; + dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator; + if (dc->res_pool->dmcu != NULL) dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 93c20844848c..605b96873d8c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ 
-3650,7 +3650,9 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) lttpr_dpcd_data, sizeof(lttpr_dpcd_data)); if (status != DC_OK) { - dm_error("%s: Read LTTPR caps data failed.\n", __func__); +#if defined(CONFIG_DRM_AMD_DC_DCN) + DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__); +#endif return false; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index e94546187cf1..82f1f27baaf3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1599,6 +1599,9 @@ static bool are_stream_backends_same( if (is_timing_changed(stream_a, stream_b)) return false; + if (stream_a->signal != stream_b->signal) + return false; + if (stream_a->dpms_off != stream_b->dpms_off) return false; @@ -1623,8 +1626,8 @@ bool dc_is_stream_unchanged( if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) return false; - // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks - if (old_stream->audio_info.mode_count != stream->audio_info.mode_count) + /*compare audio info*/ + if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0) return false; return true; @@ -1799,9 +1802,6 @@ enum dc_status dc_remove_stream_from_ctx( dc->res_pool, del_pipe->stream_res.stream_enc, false); - /* Release link encoder from stream in new dc_state. */ - if (dc->res_pool->funcs->link_enc_unassign) - dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream); if (del_pipe->stream_res.audio) update_audio_usage( diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3ab52d9a82cf..e0f58fab5e8e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -185,6 +185,7 @@ struct dc_caps { struct dc_color_caps color; bool vbios_lttpr_aware; bool vbios_lttpr_enable; + uint32_t max_otg_num; }; struct dc_bug_wa { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index f4f423d0b8c3..80595d7f060c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -940,6 +940,7 @@ static const struct hubbub_funcs hubbub1_funcs = { .program_watermarks = hubbub1_program_watermarks, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, }; void hubbub1_construct(struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 3af49cdf89eb..93f31e4aeecb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1052,9 +1052,13 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) void dcn10_verify_allow_pstate_change_high(struct dc *dc) { + struct hubbub *hubbub = dc->res_pool->hubbub; static bool should_log_hw_state; /* prevent hw state log by default */ - if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) { + if (!hubbub->funcs->verify_allow_pstate_change_high) + return; + + if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) { int i = 0; if (should_log_hw_state) @@ -1063,8 +1067,8 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc) 
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); BREAK_TO_DEBUGGER(); if (dcn10_hw_wa_force_recovery(dc)) { - /*check again*/ - if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) + /*check again*/ + if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) BREAK_TO_DEBUGGER(); } } @@ -1435,6 +1439,9 @@ void dcn10_init_hw(struct dc *dc) } } + if (hws->funcs.enable_power_gating_plane) + hws->funcs.enable_power_gating_plane(dc->hwseq, true); + /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. @@ -1487,8 +1494,6 @@ void dcn10_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(dc->hwseq, true); if (dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); @@ -2455,14 +2460,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc *mpc = dc->res_pool->mpc; struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); - if (per_pixel_alpha) - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; - else - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; - blnd_cfg.overlap_only = false; blnd_cfg.global_gain = 0xff; + if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else if (per_pixel_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + if (pipe_ctx->plane_state->global_alpha) blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index a47ba1d45be9..9f8d7f92300b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2297,14 +2297,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc *mpc = dc->res_pool->mpc; struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); - if (per_pixel_alpha) - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; - else - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; - blnd_cfg.overlap_only = false; blnd_cfg.global_gain = 0xff; + if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else if (per_pixel_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + if (pipe_ctx->plane_state->global_alpha) blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 92a308ad1213..fbbdf9976183 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -874,7 +874,7 @@ static const struct dc_debug_options debug_defaults_drv = { .clock_trace = true, .disable_pplib_clock_request = true, .min_disp_clk_khz = 100000, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .pipe_split_policy 
= MPC_SPLIT_AVOID_MULT_DISP, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c index f4414de96acc..152c9c5733f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c @@ -448,6 +448,7 @@ static const struct hubbub_funcs hubbub30_funcs = { .program_watermarks = hubbub3_program_watermarks, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, .force_pstate_change_control = hubbub3_force_pstate_change_control, .init_watermarks = hubbub3_init_watermarks, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 0950784bafa4..f83457375811 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -570,6 +570,9 @@ void dcn30_init_hw(struct dc *dc) } } + if (hws->funcs.enable_power_gating_plane) + hws->funcs.enable_power_gating_plane(dc->hwseq, true); + /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. @@ -647,8 +650,6 @@ void dcn30_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(dc->hwseq, true); if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c index 1e3bd2e9cdcc..a046664e2031 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c @@ -60,6 +60,7 @@ static const struct hubbub_funcs hubbub301_funcs = { .program_watermarks = hubbub3_program_watermarks, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, .force_pstate_change_control = hubbub3_force_pstate_change_control, .hubbub_read_state = hubbub2_read_state, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 26ebe00a55f6..dea358b01791 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1622,12 +1622,106 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); } +static void calculate_wm_set_for_vlevel( + int vlevel, + struct wm_range_table_entry *table_entry, + struct dcn_watermarks *wm_set, + struct display_mode_lib *dml, + display_e2e_pipe_params_st *pipes, + int pipe_cnt) +{ + double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; + + ASSERT(vlevel < dml->soc.num_states); + /* only pipe 0 is read for voltage and dcf/soc clocks */ + 
pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; + + dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; + dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; + dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; + + wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; + wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; + dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; + +} + +static void dcn301_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel_req) +{ + int i, pipe_idx; + int vlevel, vlevel_max; + struct wm_range_table_entry *table_entry; + struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; + + ASSERT(bw_params); + + vlevel_max = bw_params->clk_table.num_entries - 1; + + /* WM Set D */ + table_entry = &bw_params->wm_table.entries[WM_D]; + if (table_entry->wm_type == WM_TYPE_RETRAINING) + vlevel = 0; + else + vlevel = vlevel_max; + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set C */ + table_entry = &bw_params->wm_table.entries[WM_C]; + vlevel = min(max(vlevel_req, 2), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set B */ + table_entry = &bw_params->wm_table.entries[WM_B]; + vlevel = min(max(vlevel_req, 1), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, + &context->bw_ctx.dml, pipes, pipe_cnt); + + /* WM Set A */ + table_entry = &bw_params->wm_table.entries[WM_A]; + vlevel = min(vlevel_req, vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, + &context->bw_ctx.dml, pipes, pipe_cnt); + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); + pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + if (dc->config.forced_clocks) { + pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + + pipe_idx++; + } + + dcn20_calculate_dlg_params(dc, 
context, pipes, pipe_cnt, vlevel); +} + static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, .panel_cntl_create = dcn301_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, - .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, + .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 90c73a1cb986..208d2dc8b1d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -24,6 +24,7 @@ */ +#include <linux/delay.h> #include "dcn30/dcn30_hubbub.h" #include "dcn31_hubbub.h" #include "dm_services.h" @@ -138,8 +139,11 @@ static uint32_t convert_and_clamp( ret_val = wm_ns * refclk_mhz; ret_val /= 1000; - if (ret_val > clamp_value) + if (ret_val > clamp_value) { + /* clamping WMs is abnormal, unexpected and may lead to underflow*/ + ASSERT(0); ret_val = clamp_value; + } return ret_val; } @@ -159,7 +163,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) { hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); @@ -193,7 +197,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) { hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns) @@ -203,7 +207,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) { hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); @@ -237,7 +241,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) { hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns) @@ -247,7 +251,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) { hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); 
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); @@ -281,7 +285,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) { hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns) @@ -291,7 +295,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) { hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); @@ -325,7 +329,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) { hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns) @@ -351,7 +355,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" @@ -367,7 +371,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" @@ -383,7 +387,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n" @@ -399,7 +403,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n" @@ -416,7 +420,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( 
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" @@ -432,7 +436,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" @@ -448,7 +452,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n" @@ -464,7 +468,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n" @@ -481,7 +485,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" @@ -497,7 +501,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" @@ -513,7 +517,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n" @@ -529,7 +533,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n" @@ -546,7 +550,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); 
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" @@ -562,7 +566,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" @@ -578,7 +582,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n" @@ -594,7 +598,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n" @@ -625,7 +629,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->a.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" @@ -642,7 +646,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->b.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" @@ -659,7 +663,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->c.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" @@ -676,7 +680,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->d.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" @@ -946,6 +950,65 @@ static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub, } } +static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /* + * Pstate latency is ~20us so if we wait over 40us and pstate allow + * still not asserted, we are probably stuck and 
going to hang + */ + const unsigned int pstate_wait_timeout_us = 100; + const unsigned int pstate_wait_expected_timeout_us = 40; + + static unsigned int max_sampled_pstate_wait_us; /* data collection */ + static bool forced_pstate_allow; /* help with revert wa */ + + unsigned int debug_data = 0; + unsigned int i; + + if (forced_pstate_allow) { + /* we hacked to force pstate allow to prevent hang last time + * we verify_allow_pstate_change_high. so disable force + * here so we can check status + */ + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0); + forced_pstate_allow = false; + } + + REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate); + + for (i = 0; i < pstate_wait_timeout_us; i++) { + debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); + + /* Debug bit is specific to ASIC. */ + if (debug_data & (1 << 26)) { + if (i > pstate_wait_expected_timeout_us) + DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i); + return true; + } + if (max_sampled_pstate_wait_us < i) + max_sampled_pstate_wait_us = i; + + udelay(1); + } + + /* force pstate allow to prevent system hang + * and break to debugger to investigate + */ + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1); + forced_pstate_allow = true; + + DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n", + debug_data); + + return false; +} + static const struct hubbub_funcs hubbub31_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx, @@ -958,6 +1021,7 @@ static const struct hubbub_funcs hubbub31_funcs = { .program_watermarks = hubbub31_program_watermarks, .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high, .program_det_size = dcn31_program_det_size, .program_compbuf_size = dcn31_program_compbuf_size, .init_crb = dcn31_init_crb, @@ -979,5 +1043,7 @@ void hubbub31_construct(struct dcn20_hubbub *hubbub31, hubbub31->detile_buf_size = det_size_kb * 1024; hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024; hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB; + + hubbub31->debug_test_index_pstate = 0x6; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 3afa1159a5f7..b72d080b302a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -204,6 +204,9 @@ void dcn31_init_hw(struct dc *dc) } } + if (hws->funcs.enable_power_gating_plane) + hws->funcs.enable_power_gating_plane(dc->hwseq, true); + /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. 
@@ -287,8 +290,6 @@ void dcn31_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(dc->hwseq, true); if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index d4fe5352421f..a5ef9d5e7685 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -940,7 +940,7 @@ static const struct dc_debug_options debug_defaults_drv = { .max_downscale_src_width = 4096,/*upto true 4K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, - .sanity_checks = false, + .sanity_checks = true, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 56055df2e8d2..9009b92490f3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -70,6 +70,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags) @@ -84,6 +85,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_rcfla CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags) endif CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags) @@ -99,6 +101,7 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o +DML += dsc/rc_calc_fpu.o endif AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML)) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h index e5fac9f4181d..e5fac9f4181d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c new file mode 100644 index 000000000000..3ee858f311d1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c @@ -0,0 +1,291 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "rc_calc_fpu.h" + +#include "qp_tables.h" +#include "amdgpu_dm/dc_fpu.h" + +#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min) + +#define MODE_SELECT(val444, val422, val420) \ + (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? (val422) : (val420)) + + +#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \ + table = qp_table_##mode##_##bpc##bpc_##max; \ + table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \ + break + +static int median3(int a, int b, int c) +{ + if (a > b) + swap(a, b); + if (b > c) + swap(b, c); + if (a > b) + swap(b, c); + + return b; +} + +static double dsc_roundf(double num) +{ + if (num < 0.0) + num = num - 0.5; + else + num = num + 0.5; + + return (int)(num); +} + +static double dsc_ceil(double num) +{ + double retval = (int)num; + + if (retval != num && num > 0) + retval = num + 1; + + return (int)retval; +} + +static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, + enum max_min max_min, float bpp) +{ + int mode = MODE_SELECT(444, 422, 420); + int sel = table_hash(mode, bpc, max_min); + int table_size = 0; + int index; + const struct qp_entry *table = 0L; + + // alias enum + enum { min = DAL_MM_MIN, max = DAL_MM_MAX }; + switch (sel) { + TABLE_CASE(444, 8, max); + TABLE_CASE(444, 8, min); + TABLE_CASE(444, 10, max); + TABLE_CASE(444, 10, min); + TABLE_CASE(444, 12, max); + TABLE_CASE(444, 12, min); + TABLE_CASE(422, 8, max); + TABLE_CASE(422, 8, min); + TABLE_CASE(422, 10, max); + TABLE_CASE(422, 10, min); + TABLE_CASE(422, 12, max); + TABLE_CASE(422, 12, min); + TABLE_CASE(420, 8, max); + TABLE_CASE(420, 8, min); + TABLE_CASE(420, 10, max); + TABLE_CASE(420, 10, min); + TABLE_CASE(420, 12, max); + TABLE_CASE(420, 12, min); + } + + if (table == 0) + return; + + index = (bpp - table[0].bpp) * 2; + + /* requested size is bigger than the table */ + if (index >= table_size) { + dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n"); + return; + } + + memcpy(qps, table[index].qps, sizeof(qp_set)); +} + +static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) +{ + int *p = ofs; + + if (mode == CM_444 || mode == CM_RGB) { + *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? 
(0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); + *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); + *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); + *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); + *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); + *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0)))); + *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0)))); + *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0)))); + *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0)))); + *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0)))); + *p++ = -10; + *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0)))); + *p++ = -12; + *p++ = -12; + *p++ = -12; + } else if (mode == CM_422) { + *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0)))); + *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0)))); + *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0)))); + *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0)))); + *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0)))); + *p++ = -10; + *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1)))); + *p++ = -12; + *p++ = -12; + *p++ = -12; + } else { + *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0)))); + *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0)))); + *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0)))); + *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0)))); + *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? 
(-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0)))); + *p++ = -10; + *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0)))); + *p++ = -12; + *p++ = -12; + *p++ = -12; + } +} + +void _do_calc_rc_params(struct rc_params *rc, + enum colour_mode cm, + enum bits_per_comp bpc, + u16 drm_bpp, + bool is_navite_422_or_420, + int slice_width, + int slice_height, + int minor_version) +{ + float bpp; + float bpp_group; + float initial_xmit_delay_factor; + int padding_pixels; + int i; + + dc_assert_fp_enabled(); + + bpp = ((float)drm_bpp / 16.0); + /* in native_422 or native_420 modes, the bits_per_pixel is double the + * target bpp (the latter is what calc_rc_params expects) + */ + if (is_navite_422_or_420) + bpp /= 2.0; + + rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + + bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0); + + switch (cm) { + case CM_420: + rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? (6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584))))); + rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group))); + rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group))); + break; + case CM_422: + rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584)))); + rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group))); + rc->second_line_bpg_offset = 0; + break; + case CM_444: + case CM_RGB: + rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2))))); + rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group))); + rc->second_line_bpg_offset = 0; + break; + } + + initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0; + rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor); + + if (cm == CM_422 || cm == CM_420) + slice_width /= 2; + + padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0; + if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) { + if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1) + rc->initial_xmit_delay++; + } + + rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + rc->flatness_det_thresh = 2 << (bpc - 8); + + get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp); + get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp); + if (cm == CM_444 && minor_version == 1) { + for (i = 0; i < QP_SET_SIZE; ++i) { + rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0; + rc->qp_max[i] = rc->qp_max[i] > 0 ? 
rc->qp_max[i] - 1 : 0; + } + } + get_ofs_set(rc->ofs, cm, bpp); + + /* fixed parameters */ + rc->rc_model_size = 8192; + rc->rc_edge_factor = 6; + rc->rc_tgt_offset_hi = 3; + rc->rc_tgt_offset_lo = 3; + + rc->rc_buf_thresh[0] = 896; + rc->rc_buf_thresh[1] = 1792; + rc->rc_buf_thresh[2] = 2688; + rc->rc_buf_thresh[3] = 3584; + rc->rc_buf_thresh[4] = 4480; + rc->rc_buf_thresh[5] = 5376; + rc->rc_buf_thresh[6] = 6272; + rc->rc_buf_thresh[7] = 6720; + rc->rc_buf_thresh[8] = 7168; + rc->rc_buf_thresh[9] = 7616; + rc->rc_buf_thresh[10] = 7744; + rc->rc_buf_thresh[11] = 7872; + rc->rc_buf_thresh[12] = 8000; + rc->rc_buf_thresh[13] = 8064; +} + +u32 _do_bytes_per_pixel_calc(int slice_width, + u16 drm_bpp, + bool is_navite_422_or_420) +{ + float bpp; + u32 bytes_per_pixel; + double d_bytes_per_pixel; + + dc_assert_fp_enabled(); + + bpp = ((float)drm_bpp / 16.0); + d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; + // TODO: Make sure the formula for calculating this is precise (ceiling + // vs. floor, and at what point they should be applied) + if (is_navite_422_or_420) + d_bytes_per_pixel /= 2; + + bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); + + return bytes_per_pixel; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h new file mode 100644 index 000000000000..b93b95409fbe --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h @@ -0,0 +1,94 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __RC_CALC_FPU_H__ +#define __RC_CALC_FPU_H__ + +#include "os_types.h" +#include <drm/drm_dsc.h> + +#define QP_SET_SIZE 15 + +typedef int qp_set[QP_SET_SIZE]; + +struct rc_params { + int rc_quant_incr_limit0; + int rc_quant_incr_limit1; + int initial_fullness_offset; + int initial_xmit_delay; + int first_line_bpg_offset; + int second_line_bpg_offset; + int flatness_min_qp; + int flatness_max_qp; + int flatness_det_thresh; + qp_set qp_min; + qp_set qp_max; + qp_set ofs; + int rc_model_size; + int rc_edge_factor; + int rc_tgt_offset_hi; + int rc_tgt_offset_lo; + int rc_buf_thresh[QP_SET_SIZE - 1]; +}; + +enum colour_mode { + CM_RGB, /* 444 RGB */ + CM_444, /* 444 YUV or simple 422 */ + CM_422, /* native 422 */ + CM_420 /* native 420 */ +}; + +enum bits_per_comp { + BPC_8 = 8, + BPC_10 = 10, + BPC_12 = 12 +}; + +enum max_min { + DAL_MM_MIN = 0, + DAL_MM_MAX = 1 +}; + +struct qp_entry { + float bpp; + const qp_set qps; +}; + +typedef struct qp_entry qp_table[]; + +u32 _do_bytes_per_pixel_calc(int slice_width, + u16 drm_bpp, + bool is_navite_422_or_420); + +void _do_calc_rc_params(struct rc_params *rc, + enum colour_mode cm, + enum bits_per_comp bpc, + u16 drm_bpp, + bool is_navite_422_or_420, + int slice_width, + int slice_height, + int minor_version); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index 8d31eb75c6a6..a2537229ee88 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -1,35 +1,6 @@ # SPDX-License-Identifier: MIT # # Makefile for the 'dsc' sub-component of DAL. - -ifdef CONFIG_X86 -dsc_ccflags := -mhard-float -msse -endif - -ifdef CONFIG_PPC64 -dsc_ccflags := -mhard-float -maltivec -endif - -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -endif - -ifdef CONFIG_X86 -ifdef IS_OLD_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). -dsc_ccflags += -mpreferred-stack-boundary=4 -else -dsc_ccflags += -msse2 -endif -endif - -CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_rcflags) - DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC)) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index 7b294f637881..b19d3aeb5962 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -23,266 +23,7 @@ * Authors: AMD * */ -#include <drm/drm_dsc.h> - -#include "os_types.h" #include "rc_calc.h" -#include "qp_tables.h" - -#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min) - -#define MODE_SELECT(val444, val422, val420) \ - (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? 
(val422) : (val420)) - - -#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \ - table = qp_table_##mode##_##bpc##bpc_##max; \ - table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \ - break - - -static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, - enum max_min max_min, float bpp) -{ - int mode = MODE_SELECT(444, 422, 420); - int sel = table_hash(mode, bpc, max_min); - int table_size = 0; - int index; - const struct qp_entry *table = 0L; - - // alias enum - enum { min = DAL_MM_MIN, max = DAL_MM_MAX }; - switch (sel) { - TABLE_CASE(444, 8, max); - TABLE_CASE(444, 8, min); - TABLE_CASE(444, 10, max); - TABLE_CASE(444, 10, min); - TABLE_CASE(444, 12, max); - TABLE_CASE(444, 12, min); - TABLE_CASE(422, 8, max); - TABLE_CASE(422, 8, min); - TABLE_CASE(422, 10, max); - TABLE_CASE(422, 10, min); - TABLE_CASE(422, 12, max); - TABLE_CASE(422, 12, min); - TABLE_CASE(420, 8, max); - TABLE_CASE(420, 8, min); - TABLE_CASE(420, 10, max); - TABLE_CASE(420, 10, min); - TABLE_CASE(420, 12, max); - TABLE_CASE(420, 12, min); - } - - if (table == 0) - return; - - index = (bpp - table[0].bpp) * 2; - - /* requested size is bigger than the table */ - if (index >= table_size) { - dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n"); - return; - } - - memcpy(qps, table[index].qps, sizeof(qp_set)); -} - -static double dsc_roundf(double num) -{ - if (num < 0.0) - num = num - 0.5; - else - num = num + 0.5; - - return (int)(num); -} - -static double dsc_ceil(double num) -{ - double retval = (int)num; - - if (retval != num && num > 0) - retval = num + 1; - - return (int)retval; -} - -static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) -{ - int *p = ofs; - - if (mode == CM_444 || mode == CM_RGB) { - *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); - *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); - *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); - *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); - *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); - *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0)))); - *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0)))); - *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0)))); - *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0)))); - *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0)))); - *p++ = -10; - *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? 
(-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0)))); - *p++ = -12; - *p++ = -12; - *p++ = -12; - } else if (mode == CM_422) { - *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0)))); - *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0)))); - *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0)))); - *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0)))); - *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0)))); - *p++ = -10; - *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1)))); - *p++ = -12; - *p++ = -12; - *p++ = -12; - } else { - *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0)))); - *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0)))); - *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0)))); - *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0)))); - *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0)))); - *p++ = -10; - *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0)))); - *p++ = -12; - *p++ = -12; - *p++ = -12; - } -} - -static int median3(int a, int b, int c) -{ - if (a > b) - swap(a, b); - if (b > c) - swap(b, c); - if (a > b) - swap(b, c); - - return b; -} - -static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm, - enum bits_per_comp bpc, u16 drm_bpp, - bool is_navite_422_or_420, - int slice_width, int slice_height, - int minor_version) -{ - float bpp; - float bpp_group; - float initial_xmit_delay_factor; - int padding_pixels; - int i; - - bpp = ((float)drm_bpp / 16.0); - /* in native_422 or native_420 modes, the bits_per_pixel is double the - * target bpp (the latter is what calc_rc_params expects) - */ - if (is_navite_422_or_420) - bpp /= 2.0; - - rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - - bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0); - - switch (cm) { - case CM_420: - rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? 
(6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584))))); - rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group))); - rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group))); - break; - case CM_422: - rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584)))); - rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group))); - rc->second_line_bpg_offset = 0; - break; - case CM_444: - case CM_RGB: - rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2))))); - rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group))); - rc->second_line_bpg_offset = 0; - break; - } - - initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0; - rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor); - - if (cm == CM_422 || cm == CM_420) - slice_width /= 2; - - padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0; - if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) { - if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1) - rc->initial_xmit_delay++; - } - - rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - rc->flatness_det_thresh = 2 << (bpc - 8); - - get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp); - get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp); - if (cm == CM_444 && minor_version == 1) { - for (i = 0; i < QP_SET_SIZE; ++i) { - rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0; - rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0; - } - } - get_ofs_set(rc->ofs, cm, bpp); - - /* fixed parameters */ - rc->rc_model_size = 8192; - rc->rc_edge_factor = 6; - rc->rc_tgt_offset_hi = 3; - rc->rc_tgt_offset_lo = 3; - - rc->rc_buf_thresh[0] = 896; - rc->rc_buf_thresh[1] = 1792; - rc->rc_buf_thresh[2] = 2688; - rc->rc_buf_thresh[3] = 3584; - rc->rc_buf_thresh[4] = 4480; - rc->rc_buf_thresh[5] = 5376; - rc->rc_buf_thresh[6] = 6272; - rc->rc_buf_thresh[7] = 6720; - rc->rc_buf_thresh[8] = 7168; - rc->rc_buf_thresh[9] = 7616; - rc->rc_buf_thresh[10] = 7744; - rc->rc_buf_thresh[11] = 7872; - rc->rc_buf_thresh[12] = 8000; - rc->rc_buf_thresh[13] = 8064; -} - -static u32 _do_bytes_per_pixel_calc(int slice_width, u16 drm_bpp, - bool is_navite_422_or_420) -{ - float bpp; - u32 bytes_per_pixel; - double d_bytes_per_pixel; - - bpp = ((float)drm_bpp / 16.0); - d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; - // TODO: Make sure the formula for calculating this is precise (ceiling - // vs. 
floor, and at what point they should be applied) - if (is_navite_422_or_420) - d_bytes_per_pixel /= 2; - - bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); - - return bytes_per_pixel; -} /** * calc_rc_params - reads the user's cmdline mode diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index 262f06afcbf9..c2340e001b57 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -27,55 +27,7 @@ #ifndef __RC_CALC_H__ #define __RC_CALC_H__ - -#define QP_SET_SIZE 15 - -typedef int qp_set[QP_SET_SIZE]; - -struct rc_params { - int rc_quant_incr_limit0; - int rc_quant_incr_limit1; - int initial_fullness_offset; - int initial_xmit_delay; - int first_line_bpg_offset; - int second_line_bpg_offset; - int flatness_min_qp; - int flatness_max_qp; - int flatness_det_thresh; - qp_set qp_min; - qp_set qp_max; - qp_set ofs; - int rc_model_size; - int rc_edge_factor; - int rc_tgt_offset_hi; - int rc_tgt_offset_lo; - int rc_buf_thresh[QP_SET_SIZE - 1]; -}; - -enum colour_mode { - CM_RGB, /* 444 RGB */ - CM_444, /* 444 YUV or simple 422 */ - CM_422, /* native 422 */ - CM_420 /* native 420 */ -}; - -enum bits_per_comp { - BPC_8 = 8, - BPC_10 = 10, - BPC_12 = 12 -}; - -enum max_min { - DAL_MM_MIN = 0, - DAL_MM_MAX = 1 -}; - -struct qp_entry { - float bpp; - const qp_set qps; -}; - -typedef struct qp_entry qp_table[]; +#include "dml/dsc/rc_calc_fpu.h" void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps); u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index ef830aded5b1..1e19dd674e5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -22,7 +22,6 @@ * Authors: AMD * */ -#include "os_types.h" #include <drm/drm_dsc.h> #include "dscc_types.h" #include "rc_calc.h" diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 713f5558f5e1..9195dec294c2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -154,6 +154,8 @@ struct hubbub_funcs { bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub); void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow); + bool (*verify_allow_pstate_change_high)(struct hubbub *hubbub); + void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub); void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub); diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index ed54e1c819be..a728087b3f3d 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -266,14 +266,6 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = { .funcs = &pflip_irq_info_funcs\ } -#define vupdate_int_entry(reg_num)\ - [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ - IRQ_REG_ENTRY(OTG, reg_num,\ - OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\ - OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\ - .funcs = &vblank_irq_info_funcs\ - } - /* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic * of DCE's DC_IRQ_SOURCE_VUPDATEx. 
*/ @@ -402,12 +394,6 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = { dc_underflow_int_entry(6), [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), - vupdate_int_entry(0), - vupdate_int_entry(1), - vupdate_int_entry(2), - vupdate_int_entry(3), - vupdate_int_entry(4), - vupdate_int_entry(5), vupdate_no_lock_int_entry(0), vupdate_no_lock_int_entry(1), vupdate_no_lock_int_entry(2), diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 571fcf23cea9..a3a9ea077f50 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -72,6 +72,9 @@ #define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__) #define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__) #define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__) +#if defined(CONFIG_DRM_AMD_DC_DCN) +#define DC_LOG_DP2(...) DRM_DEBUG_KMS(__VA_ARGS__) +#endif struct dal_logger; diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 57f198de5e2c..4e075b01d48b 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -100,7 +100,8 @@ enum vsc_packet_revision { //PB7 = MD0 #define MASK_VTEM_MD0__VRR_EN 0x01 #define MASK_VTEM_MD0__M_CONST 0x02 -#define MASK_VTEM_MD0__RESERVED2 0x0C +#define MASK_VTEM_MD0__QMS_EN 0x04 +#define MASK_VTEM_MD0__RESERVED2 0x08 #define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0 //MD1 @@ -109,7 +110,7 @@ enum vsc_packet_revision { //MD2 #define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03 #define MASK_VTEM_MD2__RB 0x04 -#define MASK_VTEM_MD2__RESERVED3 0xF8 +#define MASK_VTEM_MD2__NEXT_TFR 0xF8 //MD3 #define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 08362d506534..a68496b3f929 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -1045,6 +1045,17 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) if (!pp_funcs || !pp_funcs->get_asic_baco_capability) return false; + /* Don't use baco for reset in S3. + * This is a workaround for some platforms + * where entering BACO during suspend + * seems to cause reboots or hangs. + * This might be related to the fact that BACO controls + * power to the whole GPU including devices like audio and USB. + * Powering down/up everything may adversely affect these other + * devices. Needs more investigation. 
+ */ + if (adev->in_s3) + return false; if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap)) return false; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 640db5020ccc..6aaf1230655f 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2117,8 +2117,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } } - /* setting should not be allowed from VF */ - if (amdgpu_sriov_vf(adev)) { + /* setting should not be allowed from VF if not in one VF mode */ + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { dev_attr->attr.mode &= ~S_IWUGO; dev_attr->store = NULL; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 1f406f21b452..cf74621f94a7 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -773,13 +773,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, hwmgr->display_config->num_display > 3 ? - data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk : + (data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) : min_mclk, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, - data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk, + data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinVcn, @@ -792,11 +792,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk, + data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxSocclkByFreq, - data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk, + data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxVcn, diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 6dc83cfad9d8..8acdb244b99f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -138,7 +138,7 @@ int smu_get_dpm_freq_range(struct smu_context *smu, uint32_t *min, uint32_t *max) { - int ret = 0; + int ret = -ENOTSUPP; if (!min && !max) return -EINVAL; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index f89bf49965fc..574a9d7f7a5e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -418,6 +418,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu) return 0; } +static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t *board_reserved; + uint16_t *freq_table_gfx; + uint32_t i; + + /* Fix some OEM SKU specific stability issues */ + GET_PPTABLE_MEMBER(BoardReserved, &board_reserved); + if ((adev->pdev->device == 0x73DF) && + (adev->pdev->revision == 0XC3) && + (adev->pdev->subsystem_device == 0x16C2) && + (adev->pdev->subsystem_vendor == 0x1043)) + board_reserved[0] = 1387; + + GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx); + if 
((adev->pdev->device == 0x73DF) && + (adev->pdev->revision == 0XC3) && + ((adev->pdev->subsystem_device == 0x16C2) || + (adev->pdev->subsystem_device == 0x133C)) && + (adev->pdev->subsystem_vendor == 0x1043)) { + for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) { + if (freq_table_gfx[i] > 2500) + freq_table_gfx[i] = 2500; + } + } + + return 0; +} + static int sienna_cichlid_setup_pptable(struct smu_context *smu) { int ret = 0; @@ -438,7 +468,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu) if (ret) return ret; - return ret; + return sienna_cichlid_patch_pptable_quirk(smu); } static int sienna_cichlid_tables_init(struct smu_context *smu) @@ -1278,21 +1308,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu) &dpm_context->dpm_tables.soc_table; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + struct amdgpu_device *adev = smu->adev; pstate_table->gfxclk_pstate.min = gfx_table->min; pstate_table->gfxclk_pstate.peak = gfx_table->max; - if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK) - pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; pstate_table->uclk_pstate.min = mem_table->min; pstate_table->uclk_pstate.peak = mem_table->max; - if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK) - pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; pstate_table->socclk_pstate.min = soc_table->min; pstate_table->socclk_pstate.peak = soc_table->max; - if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK) + + switch (adev->asic_type) { + case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: + pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; + pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK; + break; + case CHIP_DIMGREY_CAVEFISH: + pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK; + pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK; + pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK; + break; + case CHIP_BEIGE_GOBY: + pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK; + pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK; + pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK; + break; + default: + break; + } return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h index 38cd0ece24f6..42f705c7a36f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h @@ -33,6 +33,14 @@ typedef enum { #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960 #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000 +#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950 +#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960 +#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676 + +#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200 +#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960 +#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000 + extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index a403657151ba..0e1a843608e4 100644 --- 
a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -291,14 +291,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu) static int yellow_carp_mode_reset(struct smu_context *smu, int type) { - int ret = 0, index = 0; - - index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, - SMU_MSG_GfxDeviceDriverReset); - if (index < 0) - return index == -EACCES ? 0 : index; + int ret = 0; - ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL); if (ret) dev_err(smu->adev->dev, "Failed to mode reset!\n"); diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h index 96501152bafa..4e6a442c3886 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx.h +++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h @@ -12,6 +12,7 @@ struct aspeed_gfx { struct regmap *scu; u32 dac_reg; + u32 int_clr_reg; u32 vga_scratch_reg; u32 throd_val; u32 scan_line_max; diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 65f172807a0d..5b0fa65aeb25 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -60,6 +60,7 @@ struct aspeed_gfx_config { u32 dac_reg; /* DAC register in SCU */ + u32 int_clear_reg; /* Interrupt clear register */ u32 vga_scratch_reg; /* VGA scratch register in SCU */ u32 throd_val; /* Default Threshold Seting */ u32 scan_line_max; /* Max memory size of one scan line */ @@ -67,6 +68,7 @@ struct aspeed_gfx_config { static const struct aspeed_gfx_config ast2400_config = { .dac_reg = 0x2c, + .int_clear_reg = 0x60, .vga_scratch_reg = 0x50, .throd_val = CRT_THROD_LOW(0x1e) | CRT_THROD_HIGH(0x12), .scan_line_max = 64, @@ -74,14 +76,24 @@ static const struct aspeed_gfx_config ast2400_config = { static const struct aspeed_gfx_config ast2500_config = { .dac_reg = 0x2c, + .int_clear_reg = 0x60, .vga_scratch_reg = 0x50, .throd_val = CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3c), .scan_line_max = 128, }; +static const struct aspeed_gfx_config ast2600_config = { + .dac_reg = 0xc0, + .int_clear_reg = 0x68, + .vga_scratch_reg = 0x50, + .throd_val = CRT_THROD_LOW(0x50) | CRT_THROD_HIGH(0x70), + .scan_line_max = 128, +}; + static const struct of_device_id aspeed_gfx_match[] = { { .compatible = "aspeed,ast2400-gfx", .data = &ast2400_config }, { .compatible = "aspeed,ast2500-gfx", .data = &ast2500_config }, + { .compatible = "aspeed,ast2600-gfx", .data = &ast2600_config }, { }, }; MODULE_DEVICE_TABLE(of, aspeed_gfx_match); @@ -119,7 +131,7 @@ static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data) if (reg & CRT_CTRL_VERTICAL_INTR_STS) { drm_crtc_handle_vblank(&priv->pipe.crtc); - writel(reg, priv->base + CRT_CTRL1); + writel(reg, priv->base + priv->int_clr_reg); return IRQ_HANDLED; } @@ -147,6 +159,7 @@ static int aspeed_gfx_load(struct drm_device *drm) config = match->data; priv->dac_reg = config->dac_reg; + priv->int_clr_reg = config->int_clear_reg; priv->vga_scratch_reg = config->vga_scratch_reg; priv->throd_val = config->throd_val; priv->scan_line_max = config->scan_line_max; diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 431b6e12a81f..68ec45abc1fb 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -8,7 +8,6 @@ config DRM_BRIDGE config DRM_PANEL_BRIDGE def_bool y depends on DRM_BRIDGE - depends on DRM_KMS_HELPER select DRM_PANEL help DRM bridge wrapper of DRM 
panels @@ -30,6 +29,7 @@ config DRM_CDNS_DSI config DRM_CHIPONE_ICN6211 tristate "Chipone ICN6211 MIPI-DSI/RGB Converter bridge" depends on OF + select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL_BRIDGE help diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index 05e3abb5a0c9..1b00dfda6e0d 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -169,6 +169,7 @@ #define ADV7511_PACKET_ENABLE_SPARE2 BIT(1) #define ADV7511_PACKET_ENABLE_SPARE1 BIT(0) +#define ADV7535_REG_POWER2_HPD_OVERRIDE BIT(6) #define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0 #define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00 #define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40 diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 76555ae64e9c..c02f3ec60b04 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -351,11 +351,17 @@ static void __adv7511_power_on(struct adv7511 *adv7511) * from standby or are enabled. When the HPD goes low the adv7511 is * reset and the outputs are disabled which might cause the monitor to * go to standby again. To avoid this we ignore the HPD pin for the - * first few seconds after enabling the output. + * first few seconds after enabling the output. On the other hand + * adv7535 require to enable HPD Override bit for proper HPD. */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HPD_SRC_MASK, - ADV7511_REG_POWER2_HPD_SRC_NONE); + if (adv7511->type == ADV7535) + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7535_REG_POWER2_HPD_OVERRIDE, + ADV7535_REG_POWER2_HPD_OVERRIDE); + else + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_NONE); } static void adv7511_power_on(struct adv7511 *adv7511) @@ -375,6 +381,10 @@ static void adv7511_power_on(struct adv7511 *adv7511) static void __adv7511_power_off(struct adv7511 *adv7511) { /* TODO: setup additional power down modes */ + if (adv7511->type == ADV7535) + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7535_REG_POWER2_HPD_OVERRIDE, 0); + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN); @@ -672,9 +682,14 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector) status = connector_status_disconnected; } else { /* Renable HPD sensing */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HPD_SRC_MASK, - ADV7511_REG_POWER2_HPD_SRC_BOTH); + if (adv7511->type == ADV7535) + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7535_REG_POWER2_HPD_OVERRIDE, + ADV7535_REG_POWER2_HPD_OVERRIDE); + else + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_BOTH); } adv7511->status = status; diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index ea414cd349b5..392a9c56e9a0 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -791,7 +791,8 @@ static int segments_edid_read(struct anx7625_data *ctx, static int sp_tx_edid_read(struct anx7625_data *ctx, u8 *pedid_blocks_buf) { - u8 offset, edid_pos; + u8 offset; + int edid_pos; int count, blocks_num; u8 pblock_buf[MAX_DPCD_BUFFER_SIZE]; u8 i, j; diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c 
b/drivers/gpu/drm/bridge/cdns-dsi.c index e6e331071a00..dd57b104aec3 100644 --- a/drivers/gpu/drm/bridge/cdns-dsi.c +++ b/drivers/gpu/drm/bridge/cdns-dsi.c @@ -1286,6 +1286,7 @@ static const struct of_device_id cdns_dsi_of_match[] = { { .compatible = "cdns,dsi" }, { }, }; +MODULE_DEVICE_TABLE(of, cdns_dsi_of_match); static struct platform_driver cdns_dsi_platform_driver = { .probe = cdns_dsi_drm_probe, diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index af07eeb47ca0..691039aba87f 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -861,18 +861,19 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge, memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode)); drm_mode_debug_printmodeline(adjusted_mode); - pm_runtime_get_sync(dev); + if (pm_runtime_resume_and_get(dev) < 0) + return; if (clk_prepare_enable(dsi->lcdif_clk) < 0) - return; + goto runtime_put; if (clk_prepare_enable(dsi->core_clk) < 0) - return; + goto runtime_put; /* Step 1 from DSI reset-out instructions */ ret = reset_control_deassert(dsi->rst_pclk); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret); - return; + goto runtime_put; } /* Step 2 from DSI reset-out instructions */ @@ -882,13 +883,18 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge, ret = reset_control_deassert(dsi->rst_esc); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret); - return; + goto runtime_put; } ret = reset_control_deassert(dsi->rst_byte); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret); - return; + goto runtime_put; } + + return; + +runtime_put: + pm_runtime_put_sync(dev); } static void @@ -1204,6 +1210,7 @@ static int nwl_dsi_probe(struct platform_device *pdev) ret = nwl_dsi_select_input(dsi); if (ret < 0) { + pm_runtime_disable(dev); mipi_dsi_host_unregister(&dsi->dsi_host); return ret; } diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 843265d7f1b1..ec7745c31da0 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -2120,7 +2120,7 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx) if (ret) { dev_err(ctx->dev, "Failed to register RC device\n"); ctx->error = ret; - rc_free_device(ctx->rc_dev); + rc_free_device(rc_dev); return; } ctx->rc_dev = rc_dev; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index e1211a5b334b..25d58dcfc87e 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2551,8 +2551,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, if (!output_fmts) return NULL; - /* If dw-hdmi is the only bridge, avoid negociating with ourselves */ - if (list_is_singular(&bridge->encoder->bridge_chain)) { + /* If dw-hdmi is the first or only bridge, avoid negociating with ourselves */ + if (list_is_singular(&bridge->encoder->bridge_chain) || + list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) { *num_output_fmts = 1; output_fmts[0] = MEDIA_BUS_FMT_FIXED; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index e44e18a0112a..56c3fd08c6a0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -1199,6 +1199,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev, ret = mipi_dsi_host_register(&dsi->dsi_host); if (ret) { dev_err(dev, "Failed to register MIPI host: 
%d\n", ret); + pm_runtime_disable(dev); dw_mipi_dsi_debugfs_remove(dsi); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 4d08246f930c..45a5f1e48f0e 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1473,6 +1473,7 @@ static inline void ti_sn_gpio_unregister(void) {} static void ti_sn65dsi86_runtime_disable(void *data) { + pm_runtime_dont_use_autosuspend(data); pm_runtime_disable(data); } @@ -1532,11 +1533,11 @@ static int ti_sn65dsi86_probe(struct i2c_client *client, "failed to get reference clock\n"); pm_runtime_enable(dev); + pm_runtime_set_autosuspend_delay(pdata->dev, 500); + pm_runtime_use_autosuspend(pdata->dev); ret = devm_add_action_or_reset(dev, ti_sn65dsi86_runtime_disable, dev); if (ret) return ret; - pm_runtime_set_autosuspend_delay(pdata->dev, 500); - pm_runtime_use_autosuspend(pdata->dev); ti_sn65dsi86_debugfs_init(pdata); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 2c0c6ec92820..ff2bc9a11801 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1001,7 +1001,7 @@ crtc_needs_disable(struct drm_crtc_state *old_state, * it's in self refresh mode and needs to be fully disabled. */ return old_state->active || - (old_state->self_refresh_active && !new_state->enable) || + (old_state->self_refresh_active && !new_state->active) || new_state->self_refresh_active; } diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 909f31833181..f195c7013137 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, state->mode_blob = NULL; if (mode) { + struct drm_property_blob *blob; + drm_mode_convert_to_umode(&umode, mode); - state->mode_blob = - drm_property_create_blob(state->crtc->dev, - sizeof(umode), - &umode); - if (IS_ERR(state->mode_blob)) - return PTR_ERR(state->mode_blob); + blob = drm_property_create_blob(crtc->dev, + sizeof(umode), &umode); + if (IS_ERR(blob)) + return PTR_ERR(blob); drm_mode_copy(&state->mode, mode); + + state->mode_blob = blob; state->enable = true; drm_dbg_atomic(crtc->dev, "Set [MODE:%s] for [CRTC:%d:%s] state %p\n", diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 2ba257b1ae20..e9b7926d9b66 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2233,6 +2233,9 @@ EXPORT_SYMBOL(drm_connector_atomic_hdr_metadata_equal); void drm_connector_set_vrr_capable_property( struct drm_connector *connector, bool capable) { + if (!connector->vrr_capable_property) + return; + drm_object_property_set_value(&connector->base, connector->vrr_capable_property, capable); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ea9a79bc9583..ee6f44f9a81c 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -4776,7 +4776,8 @@ bool drm_detect_monitor_audio(struct edid *edid) if (!edid_ext) goto end; - has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0); + has_audio = (edid_ext[0] == CEA_EXT && + (edid_ext[3] & EDID_BASIC_AUDIO) != 0); if (has_audio) { DRM_DEBUG_KMS("Monitor has basic audio support\n"); @@ -5003,21 +5004,21 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector, if (hdmi[6] & DRM_EDID_HDMI_DC_30) { dc_bpc = 10; - info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30; + 
info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_30; DRM_DEBUG("%s: HDMI sink does deep color 30.\n", connector->name); } if (hdmi[6] & DRM_EDID_HDMI_DC_36) { dc_bpc = 12; - info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36; + info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_36; DRM_DEBUG("%s: HDMI sink does deep color 36.\n", connector->name); } if (hdmi[6] & DRM_EDID_HDMI_DC_48) { dc_bpc = 16; - info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48; + info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_48; DRM_DEBUG("%s: HDMI sink does deep color 48.\n", connector->name); } @@ -5032,16 +5033,9 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector, connector->name, dc_bpc); info->bpc = dc_bpc; - /* - * Deep color support mandates RGB444 support for all video - * modes and forbids YCRCB422 support for all video modes per - * HDMI 1.3 spec. - */ - info->color_formats = DRM_COLOR_FORMAT_RGB444; - /* YCRCB444 is optional according to spec. */ if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) { - info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; + info->edid_hdmi_ycbcr444_dc_modes = info->edid_hdmi_rgb444_dc_modes; DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n", connector->name); } @@ -5205,6 +5199,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) return quirks; + info->color_formats |= DRM_COLOR_FORMAT_RGB444; drm_parse_cea_ext(connector, edid); /* @@ -5253,7 +5248,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n", connector->name, info->bpc); - info->color_formats |= DRM_COLOR_FORMAT_RGB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 22bf690910b2..ed589e7182bb 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2346,6 +2346,7 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->fbops = &drm_fbdev_fb_ops; fbi->screen_size = fb->height * fb->pitches[0]; fbi->fix.smem_len = fbi->screen_size; + fbi->flags = FBINFO_DEFAULT; drm_fb_helper_fill_info(fbi, fb_helper, sizes); @@ -2353,19 +2354,21 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->screen_buffer = vzalloc(fbi->screen_size); if (!fbi->screen_buffer) return -ENOMEM; + fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; fbi->fbdefio = &drm_fbdev_defio; - fb_deferred_io_init(fbi); } else { /* buffer is mapped for HW framebuffer */ ret = drm_client_buffer_vmap(fb_helper->buffer, &map); if (ret) return ret; - if (map.is_iomem) + if (map.is_iomem) { fbi->screen_base = map.vaddr_iomem; - else + } else { fbi->screen_buffer = map.vaddr; + fbi->flags |= FBINFO_VIRTFB; + } /* * Shamelessly leak the physical address to user-space. 
As diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 9d05674550a4..6533efa84020 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -515,6 +515,7 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) */ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_DONTEXPAND; cma_obj = to_drm_gem_cma_obj(obj); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 448c2f2d803a..f5ab891731d0 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -166,6 +166,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"), }, .driver_data = (void *)&lcd720x1280_rightside_up, + }, { /* GPD Win Max */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1619-01"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* * GPD Pocket, note that the the DMI data is less generic then * it seems, devices with a board-vendor of "AMI Corporation" diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index c313a5b4549c..7e48dcd1bee4 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -853,12 +853,57 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, &args->handle); } + +/* + * Try to flatten a dma_fence_chain into a dma_fence_array so that it can be + * added as timeline fence to a chain again. + */ +static int drm_syncobj_flatten_chain(struct dma_fence **f) +{ + struct dma_fence_chain *chain = to_dma_fence_chain(*f); + struct dma_fence *tmp, **fences; + struct dma_fence_array *array; + unsigned int count; + + if (!chain) + return 0; + + count = 0; + dma_fence_chain_for_each(tmp, &chain->base) + ++count; + + fences = kmalloc_array(count, sizeof(*fences), GFP_KERNEL); + if (!fences) + return -ENOMEM; + + count = 0; + dma_fence_chain_for_each(tmp, &chain->base) + fences[count++] = dma_fence_get(tmp); + + array = dma_fence_array_create(count, fences, + dma_fence_context_alloc(1), + 1, false); + if (!array) + goto free_fences; + + dma_fence_put(*f); + *f = &array->base; + return 0; + +free_fences: + while (count--) + dma_fence_put(fences[count]); + + kfree(fences); + return -ENOMEM; +} + static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private, struct drm_syncobj_transfer *args) { struct drm_syncobj *timeline_syncobj = NULL; - struct dma_fence *fence; struct dma_fence_chain *chain; + struct dma_fence *fence; int ret; timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle); @@ -869,16 +914,22 @@ static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private, args->src_point, args->flags, &fence); if (ret) - goto err; + goto err_put_timeline; + + ret = drm_syncobj_flatten_chain(&fence); + if (ret) + goto err_free_fence; + chain = dma_fence_chain_alloc(); if (!chain) { ret = -ENOMEM; - goto err1; + goto err_free_fence; } + drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point); -err1: +err_free_fence: dma_fence_put(fence); -err: +err_put_timeline: drm_syncobj_put(timeline_syncobj); return ret; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index f960f5d7664e..fe6b34774483 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -101,6 +101,7 @@ config DRM_I915_USERPTR 
config DRM_I915_GVT bool "Enable Intel GVT-g graphics virtualization host support" depends on DRM_I915 + depends on X86 depends on 64BIT default n help diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 335ba9f43d8f..26cf75422945 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -211,6 +211,8 @@ i915-y += \ display/intel_dpio_phy.o \ display/intel_dpll.o \ display/intel_dpll_mgr.o \ + display/intel_dpt.o \ + display/intel_drrs.o \ display/intel_dsb.o \ display/intel_fb.o \ display/intel_fbc.o \ diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 4b94256d7319..ea48620f76d9 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -681,6 +681,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) unsigned int max_bw_point = 0, max_bw = 0; unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points; unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points; + bool changed = false; u32 mask = 0; /* FIXME earlier gens need some checks too */ @@ -724,6 +725,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) new_bw_state->data_rate[crtc->pipe] = new_data_rate; new_bw_state->num_active_planes[crtc->pipe] = new_active_planes; + changed = true; + drm_dbg_kms(&dev_priv->drm, "pipe %c data rate %u num active planes %u\n", pipe_name(crtc->pipe), @@ -731,7 +734,19 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) new_bw_state->num_active_planes[crtc->pipe]); } - if (!new_bw_state) + old_bw_state = intel_atomic_get_old_bw_state(state); + new_bw_state = intel_atomic_get_new_bw_state(state); + + if (new_bw_state && + intel_can_enable_sagv(dev_priv, old_bw_state) != + intel_can_enable_sagv(dev_priv, new_bw_state)) + changed = true; + + /* + * If none of our inputs (data rates, number of active + * planes, SAGV yes/no) changed then nothing to do here. + */ + if (!changed) return 0; ret = intel_atomic_lock_global_state(&new_bw_state->base); @@ -804,7 +819,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) * cause. */ if (!intel_can_enable_sagv(dev_priv, new_bw_state)) { - allowed_points = BIT(max_bw_point); + allowed_points &= ADLS_PSF_PT_MASK; + allowed_points |= BIT(max_bw_point); drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n", max_bw_point); } @@ -814,7 +830,6 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) */ new_bw_state->qgv_points_mask = ~allowed_points & mask; - old_bw_state = intel_atomic_get_old_bw_state(state); /* * If the actual mask had changed we need to make sure that * the commits are serialized(in case this is a nomodeset, nonblocking) diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index 46c6eecbd917..0ceaed1c9656 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -30,19 +30,19 @@ struct intel_bw_state { */ u8 pipe_sagv_reject; + /* bitmask of active pipes */ + u8 active_pipes; + /* * Current QGV points mask, which restricts * some particular SAGV states, not to confuse * with pipe_sagv_mask. 
*/ - u8 qgv_points_mask; + u16 qgv_points_mask; unsigned int data_rate[I915_MAX_PIPES]; u8 num_active_planes[I915_MAX_PIPES]; - /* bitmask of active pipes */ - u8 active_pipes; - int min_cdclk; }; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 82e5064b4ce7..f61901e26409 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -40,6 +40,7 @@ #include "intel_dp_link_training.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" +#include "intel_drrs.h" #include "intel_dsi.h" #include "intel_fdi.h" #include "intel_fifo_underrun.h" diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 17f44ffea586..c9b051ab18e0 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -84,6 +84,7 @@ #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dp_link_training.h" +#include "intel_dpt.h" #include "intel_fbc.h" #include "intel_fdi.h" #include "intel_fbdev.h" @@ -126,182 +127,6 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); static void intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); -struct i915_dpt { - struct i915_address_space vm; - - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - void __iomem *iomem; -}; - -#define i915_is_dpt(vm) ((vm)->is_dpt) - -static inline struct i915_dpt * -i915_vm_to_dpt(struct i915_address_space *vm) -{ - BUILD_BUG_ON(offsetof(struct i915_dpt, vm)); - GEM_BUG_ON(!i915_is_dpt(vm)); - return container_of(vm, struct i915_dpt, vm); -} - -#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT) - -static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) -{ - writeq(pte, addr); -} - -static void dpt_insert_page(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level level, - u32 flags) -{ - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - gen8_pte_t __iomem *base = dpt->iomem; - - gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE, - vm->pte_encode(addr, level, flags)); -} - -static void dpt_insert_entries(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level level, - u32 flags) -{ - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - gen8_pte_t __iomem *base = dpt->iomem; - const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags); - struct sgt_iter sgt_iter; - dma_addr_t addr; - int i; - - /* - * Note that we ignore PTE_READ_ONLY here. The caller must be careful - * not to allow the user to override access to a read only page. 
- */ - - i = vma->node.start / I915_GTT_PAGE_SIZE; - for_each_sgt_daddr(addr, sgt_iter, vma->pages) - gen8_set_pte(&base[i++], pte_encode | addr); -} - -static void dpt_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ -} - -static void dpt_bind_vma(struct i915_address_space *vm, - struct i915_vm_pt_stash *stash, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) -{ - struct drm_i915_gem_object *obj = vma->obj; - u32 pte_flags; - - /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ - pte_flags = 0; - if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj)) - pte_flags |= PTE_READ_ONLY; - if (i915_gem_object_is_lmem(obj)) - pte_flags |= PTE_LM; - - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - - vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; - - /* - * Without aliasing PPGTT there's no difference between - * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally - * upgrade to both bound if we bind either to avoid double-binding. - */ - atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); -} - -static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) -{ - vm->clear_range(vm, vma->node.start, vma->size); -} - -static void dpt_cleanup(struct i915_address_space *vm) -{ - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - - i915_gem_object_put(dpt->obj); -} - -static struct i915_address_space * -intel_dpt_create(struct intel_framebuffer *fb) -{ - struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base; - struct drm_i915_private *i915 = to_i915(obj->dev); - struct drm_i915_gem_object *dpt_obj; - struct i915_address_space *vm; - struct i915_dpt *dpt; - size_t size; - int ret; - - if (intel_fb_needs_pot_stride_remap(fb)) - size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped); - else - size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE); - - size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE); - - if (HAS_LMEM(i915)) - dpt_obj = i915_gem_object_create_lmem(i915, size, 0); - else - dpt_obj = i915_gem_object_create_stolen(i915, size); - if (IS_ERR(dpt_obj)) - return ERR_CAST(dpt_obj); - - ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE); - if (ret) { - i915_gem_object_put(dpt_obj); - return ERR_PTR(ret); - } - - dpt = kzalloc(sizeof(*dpt), GFP_KERNEL); - if (!dpt) { - i915_gem_object_put(dpt_obj); - return ERR_PTR(-ENOMEM); - } - - vm = &dpt->vm; - - vm->gt = &i915->gt; - vm->i915 = i915; - vm->dma = i915->drm.dev; - vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; - vm->is_dpt = true; - - i915_address_space_init(vm, VM_CLASS_DPT); - - vm->insert_page = dpt_insert_page; - vm->clear_range = dpt_clear_range; - vm->insert_entries = dpt_insert_entries; - vm->cleanup = dpt_cleanup; - - vm->vma_ops.bind_vma = dpt_bind_vma; - vm->vma_ops.unbind_vma = dpt_unbind_vma; - vm->vma_ops.set_pages = ggtt_set_pages; - vm->vma_ops.clear_pages = clear_pages; - - vm->pte_encode = gen8_ggtt_pte_encode; - - dpt->obj = dpt_obj; - - return &dpt->vm; -} - -static void intel_dpt_destroy(struct i915_address_space *vm) -{ - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - - i915_vm_close(&dpt->vm); -} - /* returns HPLL frequency in kHz */ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) { @@ -1833,8 +1658,8 @@ static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state) } } -static void intel_plane_disable_noatomic(struct intel_crtc *crtc, - struct intel_plane *plane) +void intel_plane_disable_noatomic(struct intel_crtc *crtc, + struct 
intel_plane *plane) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = @@ -1879,49 +1704,6 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, intel_wait_for_vblank(dev_priv, crtc->pipe); } -static struct i915_vma *intel_dpt_pin(struct i915_address_space *vm) -{ - struct drm_i915_private *i915 = vm->i915; - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - intel_wakeref_t wakeref; - struct i915_vma *vma; - void __iomem *iomem; - - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - atomic_inc(&i915->gpu_error.pending_fb_pin); - - vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096, - HAS_LMEM(i915) ? 0 : PIN_MAPPABLE); - if (IS_ERR(vma)) - goto err; - - iomem = i915_vma_pin_iomap(vma); - i915_vma_unpin(vma); - if (IS_ERR(iomem)) { - vma = iomem; - goto err; - } - - dpt->vma = vma; - dpt->iomem = iomem; - - i915_vma_get(vma); - -err: - atomic_dec(&i915->gpu_error.pending_fb_pin); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - - return vma; -} - -static void intel_dpt_unpin(struct i915_address_space *vm) -{ - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - - i915_vma_unpin_iomap(dpt->vma); - i915_vma_put(dpt->vma); -} - static bool intel_reuse_initial_plane_obj(struct drm_i915_private *i915, const struct intel_initial_plane_config *plane_config, @@ -13435,6 +13217,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, vlv_wm_sanitize(dev_priv); } else if (DISPLAY_VER(dev_priv) >= 9) { skl_wm_get_hw_state(dev_priv); + skl_wm_sanitize(dev_priv); } else if (HAS_PCH_SPLIT(dev_priv)) { ilk_wm_get_hw_state(dev_priv); } diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 284936f0ddab..6a7a91b38080 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -629,6 +629,8 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state); struct intel_encoder * intel_get_crtc_new_encoder(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state); +void intel_plane_disable_noatomic(struct intel_crtc *crtc, + struct intel_plane *plane); unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, int color_plane); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 8fdacb252bb1..b136a0fc0963 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -13,6 +13,7 @@ #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dp.h" +#include "intel_drrs.h" #include "intel_fbc.h" #include "intel_hdcp.h" #include "intel_hdmi.h" diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index d55363f1fa10..631cf7d4323c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -56,6 +56,7 @@ #include "intel_dp_mst.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" +#include "intel_drrs.h" #include "intel_fifo_underrun.h" #include "intel_hdcp.h" #include "intel_hdmi.h" @@ -1610,46 +1611,6 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); } -static void -intel_dp_drrs_compute_config(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - int output_bpp, bool constant_n) -{ - struct intel_connector *intel_connector = intel_dp->attached_connector; - struct 
drm_i915_private *dev_priv = dp_to_i915(intel_dp); - int pixel_clock; - - if (pipe_config->vrr.enable) - return; - - /* - * DRRS and PSR can't be enable together, so giving preference to PSR - * as it allows more power-savings by complete shutting down display, - * so to guarantee this, intel_dp_drrs_compute_config() must be called - * after intel_psr_compute_config(). - */ - if (pipe_config->has_psr) - return; - - if (!intel_connector->panel.downclock_mode || - dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) - return; - - pipe_config->has_drrs = true; - - pixel_clock = intel_connector->panel.downclock_mode->clock; - if (pipe_config->splitter.enable) - pixel_clock /= pipe_config->splitter.link_count; - - intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock, - pipe_config->port_clock, &pipe_config->dp_m2_n2, - constant_n, pipe_config->fec_enable); - - /* FIXME: abstract this better */ - if (pipe_config->splitter.enable) - pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count; -} - int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -4638,7 +4599,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) struct intel_dp *intel_dp = &dig_port->dp; if (dig_port->base.type == INTEL_OUTPUT_EDP && - (long_hpd || !intel_pps_have_power(intel_dp))) { + (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) { /* * vdd off can generate a long/short pulse on eDP which * would require vdd on to handle it, and thus we @@ -4737,432 +4698,6 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect drm_connector_attach_vrr_capable_property(connector); } -/** - * intel_dp_set_drrs_state - program registers for RR switch to take effect - * @dev_priv: i915 device - * @crtc_state: a pointer to the active intel_crtc_state - * @refresh_rate: RR to be programmed - * - * This function gets called when refresh rate (RR) has to be changed from - * one frequency to another. Switches can be between high and low RR - * supported by the panel or to any other RR based on media playback (in - * this case, RR value needs to be passed from user space). - * - * The caller of this function needs to take a lock on dev_priv->drrs. - */ -static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *crtc_state, - int refresh_rate) -{ - struct intel_dp *intel_dp = dev_priv->drrs.dp; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum drrs_refresh_rate_type index = DRRS_HIGH_RR; - - if (refresh_rate <= 0) { - drm_dbg_kms(&dev_priv->drm, - "Refresh rate should be positive non-zero.\n"); - return; - } - - if (intel_dp == NULL) { - drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); - return; - } - - if (!crtc) { - drm_dbg_kms(&dev_priv->drm, - "DRRS: intel_crtc not initialized\n"); - return; - } - - if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { - drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); - return; - } - - if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) == - refresh_rate) - index = DRRS_LOW_RR; - - if (index == dev_priv->drrs.refresh_rate_type) { - drm_dbg_kms(&dev_priv->drm, - "DRRS requested for previously set RR...ignoring\n"); - return; - } - - if (!crtc_state->hw.active) { - drm_dbg_kms(&dev_priv->drm, - "eDP encoder disabled. 
CRTC not Active\n"); - return; - } - - if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { - switch (index) { - case DRRS_HIGH_RR: - intel_dp_set_m_n(crtc_state, M1_N1); - break; - case DRRS_LOW_RR: - intel_dp_set_m_n(crtc_state, M2_N2); - break; - case DRRS_MAX_RR: - default: - drm_err(&dev_priv->drm, - "Unsupported refreshrate type\n"); - } - } else if (DISPLAY_VER(dev_priv) > 6) { - i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); - u32 val; - - val = intel_de_read(dev_priv, reg); - if (index > DRRS_HIGH_RR) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; - else - val |= PIPECONF_EDP_RR_MODE_SWITCH; - } else { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; - else - val &= ~PIPECONF_EDP_RR_MODE_SWITCH; - } - intel_de_write(dev_priv, reg, val); - } - - dev_priv->drrs.refresh_rate_type = index; - - drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", - refresh_rate); -} - -static void -intel_edp_drrs_enable_locked(struct intel_dp *intel_dp) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - dev_priv->drrs.busy_frontbuffer_bits = 0; - dev_priv->drrs.dp = intel_dp; -} - -/** - * intel_edp_drrs_enable - init drrs struct if supported - * @intel_dp: DP struct - * @crtc_state: A pointer to the active crtc state. - * - * Initializes frontbuffer_bits and drrs.dp - */ -void intel_edp_drrs_enable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (!crtc_state->has_drrs) - return; - - drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n"); - - mutex_lock(&dev_priv->drrs.mutex); - - if (dev_priv->drrs.dp) { - drm_warn(&dev_priv->drm, "DRRS already enabled\n"); - goto unlock; - } - - intel_edp_drrs_enable_locked(intel_dp); - -unlock: - mutex_unlock(&dev_priv->drrs.mutex); -} - -static void -intel_edp_drrs_disable_locked(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { - int refresh; - - refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode); - intel_dp_set_drrs_state(dev_priv, crtc_state, refresh); - } - - dev_priv->drrs.dp = NULL; -} - -/** - * intel_edp_drrs_disable - Disable DRRS - * @intel_dp: DP struct - * @old_crtc_state: Pointer to old crtc_state. - * - */ -void intel_edp_drrs_disable(struct intel_dp *intel_dp, - const struct intel_crtc_state *old_crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (!old_crtc_state->has_drrs) - return; - - mutex_lock(&dev_priv->drrs.mutex); - if (!dev_priv->drrs.dp) { - mutex_unlock(&dev_priv->drrs.mutex); - return; - } - - intel_edp_drrs_disable_locked(intel_dp, old_crtc_state); - mutex_unlock(&dev_priv->drrs.mutex); - - cancel_delayed_work_sync(&dev_priv->drrs.work); -} - -/** - * intel_edp_drrs_update - Update DRRS state - * @intel_dp: Intel DP - * @crtc_state: new CRTC state - * - * This function will update DRRS states, disabling or enabling DRRS when - * executing fastsets. For full modeset, intel_edp_drrs_disable() and - * intel_edp_drrs_enable() should be called instead. 
- */ -void -intel_edp_drrs_update(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) - return; - - mutex_lock(&dev_priv->drrs.mutex); - - /* New state matches current one? */ - if (crtc_state->has_drrs == !!dev_priv->drrs.dp) - goto unlock; - - if (crtc_state->has_drrs) - intel_edp_drrs_enable_locked(intel_dp); - else - intel_edp_drrs_disable_locked(intel_dp, crtc_state); - -unlock: - mutex_unlock(&dev_priv->drrs.mutex); -} - -static void intel_edp_drrs_downclock_work(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), drrs.work.work); - struct intel_dp *intel_dp; - - mutex_lock(&dev_priv->drrs.mutex); - - intel_dp = dev_priv->drrs.dp; - - if (!intel_dp) - goto unlock; - - /* - * The delayed work can race with an invalidate hence we need to - * recheck. - */ - - if (dev_priv->drrs.busy_frontbuffer_bits) - goto unlock; - - if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { - struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; - - intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode)); - } - -unlock: - mutex_unlock(&dev_priv->drrs.mutex); -} - -/** - * intel_edp_drrs_invalidate - Disable Idleness DRRS - * @dev_priv: i915 device - * @frontbuffer_bits: frontbuffer plane tracking bits - * - * This function gets called everytime rendering on the given planes start. - * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR). - * - * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. - */ -void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits) -{ - struct intel_dp *intel_dp; - struct drm_crtc *crtc; - enum pipe pipe; - - if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) - return; - - cancel_delayed_work(&dev_priv->drrs.work); - - mutex_lock(&dev_priv->drrs.mutex); - - intel_dp = dev_priv->drrs.dp; - if (!intel_dp) { - mutex_unlock(&dev_priv->drrs.mutex); - return; - } - - crtc = dp_to_dig_port(intel_dp)->base.base.crtc; - pipe = to_intel_crtc(crtc)->pipe; - - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); - dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; - - /* invalidate means busy screen hence upclock */ - if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); - - mutex_unlock(&dev_priv->drrs.mutex); -} - -/** - * intel_edp_drrs_flush - Restart Idleness DRRS - * @dev_priv: i915 device - * @frontbuffer_bits: frontbuffer plane tracking bits - * - * This function gets called every time rendering on the given planes has - * completed or flip on a crtc is completed. So DRRS should be upclocked - * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, - * if no other planes are dirty. - * - * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 
- */ -void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits) -{ - struct intel_dp *intel_dp; - struct drm_crtc *crtc; - enum pipe pipe; - - if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) - return; - - cancel_delayed_work(&dev_priv->drrs.work); - - mutex_lock(&dev_priv->drrs.mutex); - - intel_dp = dev_priv->drrs.dp; - if (!intel_dp) { - mutex_unlock(&dev_priv->drrs.mutex); - return; - } - - crtc = dp_to_dig_port(intel_dp)->base.base.crtc; - pipe = to_intel_crtc(crtc)->pipe; - - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); - dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; - - /* flush means busy screen hence upclock */ - if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); - - /* - * flush also means no more activity hence schedule downclock, if all - * other fbs are quiescent too - */ - if (!dev_priv->drrs.busy_frontbuffer_bits) - schedule_delayed_work(&dev_priv->drrs.work, - msecs_to_jiffies(1000)); - mutex_unlock(&dev_priv->drrs.mutex); -} - -/** - * DOC: Display Refresh Rate Switching (DRRS) - * - * Display Refresh Rate Switching (DRRS) is a power conservation feature - * which enables swtching between low and high refresh rates, - * dynamically, based on the usage scenario. This feature is applicable - * for internal panels. - * - * Indication that the panel supports DRRS is given by the panel EDID, which - * would list multiple refresh rates for one resolution. - * - * DRRS is of 2 types - static and seamless. - * Static DRRS involves changing refresh rate (RR) by doing a full modeset - * (may appear as a blink on screen) and is used in dock-undock scenario. - * Seamless DRRS involves changing RR without any visual effect to the user - * and can be used during normal system usage. This is done by programming - * certain registers. - * - * Support for static/seamless DRRS may be indicated in the VBT based on - * inputs from the panel spec. - * - * DRRS saves power by switching to low RR based on usage scenarios. - * - * The implementation is based on frontbuffer tracking implementation. When - * there is a disturbance on the screen triggered by user activity or a periodic - * system activity, DRRS is disabled (RR is changed to high RR). When there is - * no movement on screen, after a timeout of 1 second, a switch to low RR is - * made. - * - * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() - * and intel_edp_drrs_flush() are called. - * - * DRRS can be further extended to support other internal panels and also - * the scenario of video playback wherein RR is set based on the rate - * requested by userspace. - */ - -/** - * intel_dp_drrs_init - Init basic DRRS work and mutex. - * @connector: eDP connector - * @fixed_mode: preferred mode of panel - * - * This function is called only once at driver load to initialize basic - * DRRS stuff. - * - * Returns: - * Downclock mode if panel supports it, else return NULL. - * DRRS support is determined by the presence of downclock mode (apart - * from VBT setting). 
- */ -static struct drm_display_mode * -intel_dp_drrs_init(struct intel_connector *connector, - struct drm_display_mode *fixed_mode) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct drm_display_mode *downclock_mode = NULL; - - INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); - mutex_init(&dev_priv->drrs.mutex); - - if (DISPLAY_VER(dev_priv) <= 6) { - drm_dbg_kms(&dev_priv->drm, - "DRRS supported for Gen7 and above\n"); - return NULL; - } - - if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { - drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); - return NULL; - } - - downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); - if (!downclock_mode) { - drm_dbg_kms(&dev_priv->drm, - "Downclock mode is not found. DRRS not supported\n"); - return NULL; - } - - dev_priv->drrs.type = dev_priv->vbt.drrs_type; - - dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; - drm_dbg_kms(&dev_priv->drm, - "seamless DRRS supported for eDP panel.\n"); - return downclock_mode; -} - static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct intel_connector *intel_connector) { diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 2121aaa9b8db..3dd6ebc2f6b1 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -70,17 +70,6 @@ int intel_dp_max_link_rate(struct intel_dp *intel_dp); int intel_dp_max_lane_count(struct intel_dp *intel_dp); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); -void intel_edp_drrs_enable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); -void intel_edp_drrs_disable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); -void intel_edp_drrs_update(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); -void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits); -void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits); - void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select); bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c new file mode 100644 index 000000000000..22acd945a9e4 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_display_types.h" +#include "intel_dpt.h" +#include "intel_fb.h" +#include "gt/gen8_ppgtt.h" + +struct i915_dpt { + struct i915_address_space vm; + + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + void __iomem *iomem; +}; + +#define i915_is_dpt(vm) ((vm)->is_dpt) + +static inline struct i915_dpt * +i915_vm_to_dpt(struct i915_address_space *vm) +{ + BUILD_BUG_ON(offsetof(struct i915_dpt, vm)); + GEM_BUG_ON(!i915_is_dpt(vm)); + return container_of(vm, struct i915_dpt, vm); +} + +#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT) + +static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) +{ + writeq(pte, addr); +} + +static void dpt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level level, + u32 flags) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + gen8_pte_t __iomem *base = dpt->iomem; + + gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE, + vm->pte_encode(addr, 
level, flags)); +} + +static void dpt_insert_entries(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level level, + u32 flags) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + gen8_pte_t __iomem *base = dpt->iomem; + const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags); + struct sgt_iter sgt_iter; + dma_addr_t addr; + int i; + + /* + * Note that we ignore PTE_READ_ONLY here. The caller must be careful + * not to allow the user to override access to a read only page. + */ + + i = vma->node.start / I915_GTT_PAGE_SIZE; + for_each_sgt_daddr(addr, sgt_iter, vma->pages) + gen8_set_pte(&base[i++], pte_encode | addr); +} + +static void dpt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ +} + +static void dpt_bind_vma(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + struct drm_i915_gem_object *obj = vma->obj; + u32 pte_flags; + + /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ + pte_flags = 0; + if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj)) + pte_flags |= PTE_READ_ONLY; + if (i915_gem_object_is_lmem(obj)) + pte_flags |= PTE_LM; + + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + + vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; + + /* + * Without aliasing PPGTT there's no difference between + * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally + * upgrade to both bound if we bind either to avoid double-binding. + */ + atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); +} + +static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) +{ + vm->clear_range(vm, vma->node.start, vma->size); +} + +static void dpt_cleanup(struct i915_address_space *vm) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + + i915_gem_object_put(dpt->obj); +} + +struct i915_vma *intel_dpt_pin(struct i915_address_space *vm) +{ + struct drm_i915_private *i915 = vm->i915; + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + intel_wakeref_t wakeref; + struct i915_vma *vma; + void __iomem *iomem; + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + atomic_inc(&i915->gpu_error.pending_fb_pin); + + vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096, + HAS_LMEM(i915) ? 
0 : PIN_MAPPABLE); + if (IS_ERR(vma)) + goto err; + + iomem = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(iomem)) { + vma = iomem; + goto err; + } + + dpt->vma = vma; + dpt->iomem = iomem; + + i915_vma_get(vma); + +err: + atomic_dec(&i915->gpu_error.pending_fb_pin); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + + return vma; +} + +void intel_dpt_unpin(struct i915_address_space *vm) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + + i915_vma_unpin_iomap(dpt->vma); + i915_vma_put(dpt->vma); +} + +struct i915_address_space * +intel_dpt_create(struct intel_framebuffer *fb) +{ + struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base; + struct drm_i915_private *i915 = to_i915(obj->dev); + struct drm_i915_gem_object *dpt_obj; + struct i915_address_space *vm; + struct i915_dpt *dpt; + size_t size; + int ret; + + if (intel_fb_needs_pot_stride_remap(fb)) + size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped); + else + size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE); + + size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE); + + if (HAS_LMEM(i915)) + dpt_obj = i915_gem_object_create_lmem(i915, size, 0); + else + dpt_obj = i915_gem_object_create_stolen(i915, size); + if (IS_ERR(dpt_obj)) + return ERR_CAST(dpt_obj); + + ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE); + if (ret) { + i915_gem_object_put(dpt_obj); + return ERR_PTR(ret); + } + + dpt = kzalloc(sizeof(*dpt), GFP_KERNEL); + if (!dpt) { + i915_gem_object_put(dpt_obj); + return ERR_PTR(-ENOMEM); + } + + vm = &dpt->vm; + + vm->gt = &i915->gt; + vm->i915 = i915; + vm->dma = i915->drm.dev; + vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; + vm->is_dpt = true; + + i915_address_space_init(vm, VM_CLASS_DPT); + + vm->insert_page = dpt_insert_page; + vm->clear_range = dpt_clear_range; + vm->insert_entries = dpt_insert_entries; + vm->cleanup = dpt_cleanup; + + vm->vma_ops.bind_vma = dpt_bind_vma; + vm->vma_ops.unbind_vma = dpt_unbind_vma; + vm->vma_ops.set_pages = ggtt_set_pages; + vm->vma_ops.clear_pages = clear_pages; + + vm->pte_encode = gen8_ggtt_pte_encode; + + dpt->obj = dpt_obj; + + return &dpt->vm; +} + +void intel_dpt_destroy(struct i915_address_space *vm) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + + i915_vm_close(&dpt->vm); +} diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h new file mode 100644 index 000000000000..45142b8f849f --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dpt.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_DPT_H__ +#define __INTEL_DPT_H__ + +struct i915_address_space; +struct i915_vma; +struct intel_framebuffer; + +void intel_dpt_destroy(struct i915_address_space *vm); +struct i915_vma *intel_dpt_pin(struct i915_address_space *vm); +void intel_dpt_unpin(struct i915_address_space *vm); +struct i915_address_space * +intel_dpt_create(struct intel_framebuffer *fb); + +#endif /* __INTEL_DPT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c new file mode 100644 index 000000000000..3c7d6bf57948 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_atomic.h" +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_drrs.h" +#include "intel_panel.h" + +/** + * DOC: Display Refresh Rate 
Switching (DRRS) + * + * Display Refresh Rate Switching (DRRS) is a power conservation feature + * which enables switching between low and high refresh rates, + * dynamically, based on the usage scenario. This feature is applicable + * for internal panels. + * + * Indication that the panel supports DRRS is given by the panel EDID, which + * would list multiple refresh rates for one resolution. + * + * DRRS is of 2 types - static and seamless. + * Static DRRS involves changing refresh rate (RR) by doing a full modeset + * (may appear as a blink on screen) and is used in dock-undock scenario. + * Seamless DRRS involves changing RR without any visual effect to the user + * and can be used during normal system usage. This is done by programming + * certain registers. + * + * Support for static/seamless DRRS may be indicated in the VBT based on + * inputs from the panel spec. + * + * DRRS saves power by switching to low RR based on usage scenarios. + * + * The implementation is based on frontbuffer tracking implementation. When + * there is a disturbance on the screen triggered by user activity or a periodic + * system activity, DRRS is disabled (RR is changed to high RR). When there is + * no movement on screen, after a timeout of 1 second, a switch to low RR is + * made. + * + * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() + * and intel_edp_drrs_flush() are called. + * + * DRRS can be further extended to support other internal panels and also + * the scenario of video playback wherein RR is set based on the rate + * requested by userspace. + */ + +void +intel_dp_drrs_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + int output_bpp, bool constant_n) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + int pixel_clock; + + if (pipe_config->vrr.enable) + return; + + /* + * DRRS and PSR can't be enabled together, so giving preference to PSR + * as it allows more power savings by completely shutting down the display, + * so to guarantee this, intel_dp_drrs_compute_config() must be called + * after intel_psr_compute_config(). + */ + if (pipe_config->has_psr) + return; + + if (!intel_connector->panel.downclock_mode || + dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) + return; + + pipe_config->has_drrs = true; + + pixel_clock = intel_connector->panel.downclock_mode->clock; + if (pipe_config->splitter.enable) + pixel_clock /= pipe_config->splitter.link_count; + + intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock, + pipe_config->port_clock, &pipe_config->dp_m2_n2, + constant_n, pipe_config->fec_enable); + + /* FIXME: abstract this better */ + if (pipe_config->splitter.enable) + pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count; +} + +/** + * intel_dp_set_drrs_state - program registers for RR switch to take effect + * @dev_priv: i915 device + * @crtc_state: a pointer to the active intel_crtc_state + * @refresh_rate: RR to be programmed + * + * This function gets called when refresh rate (RR) has to be changed from + * one frequency to another. Switches can be between high and low RR + * supported by the panel or to any other RR based on media playback (in + * this case, RR value needs to be passed from user space). + * + * The caller of this function needs to take a lock on dev_priv->drrs.
+ */ +static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *crtc_state, + int refresh_rate) +{ + struct intel_dp *intel_dp = dev_priv->drrs.dp; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum drrs_refresh_rate_type index = DRRS_HIGH_RR; + + if (refresh_rate <= 0) { + drm_dbg_kms(&dev_priv->drm, + "Refresh rate should be positive non-zero.\n"); + return; + } + + if (intel_dp == NULL) { + drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); + return; + } + + if (!crtc) { + drm_dbg_kms(&dev_priv->drm, + "DRRS: intel_crtc not initialized\n"); + return; + } + + if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { + drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); + return; + } + + if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) == + refresh_rate) + index = DRRS_LOW_RR; + + if (index == dev_priv->drrs.refresh_rate_type) { + drm_dbg_kms(&dev_priv->drm, + "DRRS requested for previously set RR...ignoring\n"); + return; + } + + if (!crtc_state->hw.active) { + drm_dbg_kms(&dev_priv->drm, + "eDP encoder disabled. CRTC not Active\n"); + return; + } + + if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { + switch (index) { + case DRRS_HIGH_RR: + intel_dp_set_m_n(crtc_state, M1_N1); + break; + case DRRS_LOW_RR: + intel_dp_set_m_n(crtc_state, M2_N2); + break; + case DRRS_MAX_RR: + default: + drm_err(&dev_priv->drm, + "Unsupported refreshrate type\n"); + } + } else if (DISPLAY_VER(dev_priv) > 6) { + i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); + u32 val; + + val = intel_de_read(dev_priv, reg); + if (index > DRRS_HIGH_RR) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; + else + val |= PIPECONF_EDP_RR_MODE_SWITCH; + } else { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; + else + val &= ~PIPECONF_EDP_RR_MODE_SWITCH; + } + intel_de_write(dev_priv, reg, val); + } + + dev_priv->drrs.refresh_rate_type = index; + + drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", + refresh_rate); +} + +static void +intel_edp_drrs_enable_locked(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + dev_priv->drrs.busy_frontbuffer_bits = 0; + dev_priv->drrs.dp = intel_dp; +} + +/** + * intel_edp_drrs_enable - init drrs struct if supported + * @intel_dp: DP struct + * @crtc_state: A pointer to the active crtc state. 
+ * + * Initializes frontbuffer_bits and drrs.dp + */ +void intel_edp_drrs_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (!crtc_state->has_drrs) + return; + + drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n"); + + mutex_lock(&dev_priv->drrs.mutex); + + if (dev_priv->drrs.dp) { + drm_warn(&dev_priv->drm, "DRRS already enabled\n"); + goto unlock; + } + + intel_edp_drrs_enable_locked(intel_dp); + +unlock: + mutex_unlock(&dev_priv->drrs.mutex); +} + +static void +intel_edp_drrs_disable_locked(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { + int refresh; + + refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode); + intel_dp_set_drrs_state(dev_priv, crtc_state, refresh); + } + + dev_priv->drrs.dp = NULL; +} + +/** + * intel_edp_drrs_disable - Disable DRRS + * @intel_dp: DP struct + * @old_crtc_state: Pointer to old crtc_state. + * + */ +void intel_edp_drrs_disable(struct intel_dp *intel_dp, + const struct intel_crtc_state *old_crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (!old_crtc_state->has_drrs) + return; + + mutex_lock(&dev_priv->drrs.mutex); + if (!dev_priv->drrs.dp) { + mutex_unlock(&dev_priv->drrs.mutex); + return; + } + + intel_edp_drrs_disable_locked(intel_dp, old_crtc_state); + mutex_unlock(&dev_priv->drrs.mutex); + + cancel_delayed_work_sync(&dev_priv->drrs.work); +} + +/** + * intel_edp_drrs_update - Update DRRS state + * @intel_dp: Intel DP + * @crtc_state: new CRTC state + * + * This function will update DRRS states, disabling or enabling DRRS when + * executing fastsets. For full modeset, intel_edp_drrs_disable() and + * intel_edp_drrs_enable() should be called instead. + */ +void +intel_edp_drrs_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) + return; + + mutex_lock(&dev_priv->drrs.mutex); + + /* New state matches current one? */ + if (crtc_state->has_drrs == !!dev_priv->drrs.dp) + goto unlock; + + if (crtc_state->has_drrs) + intel_edp_drrs_enable_locked(intel_dp); + else + intel_edp_drrs_disable_locked(intel_dp, crtc_state); + +unlock: + mutex_unlock(&dev_priv->drrs.mutex); +} + +static void intel_edp_drrs_downclock_work(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), drrs.work.work); + struct intel_dp *intel_dp; + + mutex_lock(&dev_priv->drrs.mutex); + + intel_dp = dev_priv->drrs.dp; + + if (!intel_dp) + goto unlock; + + /* + * The delayed work can race with an invalidate hence we need to + * recheck. + */ + + if (dev_priv->drrs.busy_frontbuffer_bits) + goto unlock; + + if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { + struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; + + intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, + drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode)); + } + +unlock: + mutex_unlock(&dev_priv->drrs.mutex); +} + +/** + * intel_edp_drrs_invalidate - Disable Idleness DRRS + * @dev_priv: i915 device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called every time rendering on the given planes starts. + * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
+ * + * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. + */ +void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits) +{ + struct intel_dp *intel_dp; + struct drm_crtc *crtc; + enum pipe pipe; + + if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) + return; + + cancel_delayed_work(&dev_priv->drrs.work); + + mutex_lock(&dev_priv->drrs.mutex); + + intel_dp = dev_priv->drrs.dp; + if (!intel_dp) { + mutex_unlock(&dev_priv->drrs.mutex); + return; + } + + crtc = dp_to_dig_port(intel_dp)->base.base.crtc; + pipe = to_intel_crtc(crtc)->pipe; + + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); + dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; + + /* invalidate means busy screen hence upclock */ + if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) + intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, + drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); + + mutex_unlock(&dev_priv->drrs.mutex); +} + +/** + * intel_edp_drrs_flush - Restart Idleness DRRS + * @dev_priv: i915 device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called every time rendering on the given planes has + * completed or flip on a crtc is completed. So DRRS should be upclocked + * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, + * if no other planes are dirty. + * + * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. + */ +void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits) +{ + struct intel_dp *intel_dp; + struct drm_crtc *crtc; + enum pipe pipe; + + if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) + return; + + cancel_delayed_work(&dev_priv->drrs.work); + + mutex_lock(&dev_priv->drrs.mutex); + + intel_dp = dev_priv->drrs.dp; + if (!intel_dp) { + mutex_unlock(&dev_priv->drrs.mutex); + return; + } + + crtc = dp_to_dig_port(intel_dp)->base.base.crtc; + pipe = to_intel_crtc(crtc)->pipe; + + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); + dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; + + /* flush means busy screen hence upclock */ + if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) + intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, + drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); + + /* + * flush also means no more activity hence schedule downclock, if all + * other fbs are quiescent too + */ + if (!dev_priv->drrs.busy_frontbuffer_bits) + schedule_delayed_work(&dev_priv->drrs.work, + msecs_to_jiffies(1000)); + mutex_unlock(&dev_priv->drrs.mutex); +} + +/** + * intel_dp_drrs_init - Init basic DRRS work and mutex. + * @connector: eDP connector + * @fixed_mode: preferred mode of panel + * + * This function is called only once at driver load to initialize basic + * DRRS stuff. + * + * Returns: + * Downclock mode if panel supports it, else return NULL. + * DRRS support is determined by the presence of downclock mode (apart + * from VBT setting). 
+ */ +struct drm_display_mode * +intel_dp_drrs_init(struct intel_connector *connector, + struct drm_display_mode *fixed_mode) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_encoder *encoder = connector->encoder; + struct drm_display_mode *downclock_mode = NULL; + + INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); + mutex_init(&dev_priv->drrs.mutex); + + if (DISPLAY_VER(dev_priv) <= 6) { + drm_dbg_kms(&dev_priv->drm, + "DRRS supported for Gen7 and above\n"); + return NULL; + } + + if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) && + encoder->port != PORT_A) { + drm_dbg_kms(&dev_priv->drm, + "DRRS only supported on eDP port A\n"); + return NULL; + } + + if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { + drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); + return NULL; + } + + downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); + if (!downclock_mode) { + drm_dbg_kms(&dev_priv->drm, + "Downclock mode is not found. DRRS not supported\n"); + return NULL; + } + + dev_priv->drrs.type = dev_priv->vbt.drrs_type; + + dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; + drm_dbg_kms(&dev_priv->drm, + "seamless DRRS supported for eDP panel.\n"); + return downclock_mode; +} diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h new file mode 100644 index 000000000000..ffa175b4cf4f --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_drrs.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_DRRS_H__ +#define __INTEL_DRRS_H__ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_crtc_state; +struct intel_connector; +struct intel_dp; + +void intel_edp_drrs_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_edp_drrs_disable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_edp_drrs_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits); +void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits); +void intel_dp_drrs_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + int output_bpp, bool constant_n); +struct drm_display_mode *intel_dp_drrs_init(struct intel_connector *connector, + struct drm_display_mode *fixed_mode); + +#endif /* __INTEL_DRRS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 8e75debcce1a..e4834d84ce5e 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -62,6 +62,7 @@ #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" +#include "intel_drrs.h" #include "intel_psr.h" /** diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index c3787512295d..926ddc6599f5 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1831,6 +1831,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, bool has_hdmi_sink) { struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); + enum phy phy = intel_port_to_phy(dev_priv, hdmi_to_dig_port(hdmi)->base.port); if (clock < 25000) return MODE_CLOCK_LOW; @@ -1851,6 +1852,14 @@ hdmi_port_clock_valid(struct 
intel_hdmi *hdmi, if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000) return MODE_CLOCK_RANGE; + /* ICL+ combo PHY PLL can't generate 500-533.2 MHz */ + if (intel_phy_is_combo(dev_priv, phy) && clock > 500000 && clock < 533200) + return MODE_CLOCK_RANGE; + + /* ICL+ TC PHY PLL can't generate 500-532.8 MHz */ + if (intel_phy_is_tc(dev_priv, phy) && clock > 500000 && clock < 532800) + return MODE_CLOCK_RANGE; + /* * SNPS PHYs' MPLLB table-based programming can only handle a fixed * set of link rates. @@ -1892,7 +1901,7 @@ static bool intel_hdmi_bpc_possible(struct drm_connector *connector, if (ycbcr420_output) return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36; else - return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36; + return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_36; case 10: if (DISPLAY_VER(i915) < 11) return false; @@ -1903,7 +1912,7 @@ static bool intel_hdmi_bpc_possible(struct drm_connector *connector, if (ycbcr420_output) return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_30; else - return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30; + return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30; case 8: return true; default: diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 3855fba70980..08c20869d7e9 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -361,6 +361,36 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, port++; } + /* + * The port numbering and mapping here is bizarre. The now-obsolete + * swsci spec supports ports numbered [0..4]. Port E is handled as a + * special case, but port F and beyond are not. The functionality is + * supposed to be obsolete for new platforms. Just bail out if the port + * number is out of bounds after mapping. + */ + if (port > 4) { + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n", + intel_encoder->base.base.id, intel_encoder->base.name, + port_name(intel_encoder->port), port); + return -EINVAL; + } + + /* + * The port numbering and mapping here is bizarre. The now-obsolete + * swsci spec supports ports numbered [0..4]. Port E is handled as a + * special case, but port F and beyond are not. The functionality is + * supposed to be obsolete for new platforms. Just bail out if the port + * number is out of bounds after mapping. 
+ */ + if (port > 4) { + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n", + intel_encoder->base.base.id, intel_encoder->base.name, + port_name(intel_encoder->port), port); + return -EINVAL; + } + if (!enable) parm |= 4 << 8; diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index a36ec4a818ff..466bf6820641 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -1074,14 +1074,14 @@ static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp) edp_panel_vdd_schedule_off(intel_dp); } -bool intel_pps_have_power(struct intel_dp *intel_dp) +bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp) { intel_wakeref_t wakeref; bool have_power = false; with_intel_pps_lock(intel_dp, wakeref) { - have_power = edp_have_panel_power(intel_dp) && - edp_have_panel_vdd(intel_dp); + have_power = edp_have_panel_power(intel_dp) || + edp_have_panel_vdd(intel_dp); } return have_power; diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h index fbbcca782e7b..9fe7be4fe867 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.h +++ b/drivers/gpu/drm/i915/display/intel_pps.h @@ -36,7 +36,7 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp); void intel_pps_on(struct intel_dp *intel_dp); void intel_pps_off(struct intel_dp *intel_dp); void intel_pps_vdd_off_sync(struct intel_dp *intel_dp); -bool intel_pps_have_power(struct intel_dp *intel_dp); +bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp); void intel_pps_wait_power_cycle(struct intel_dp *intel_dp); void intel_pps_init(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 1b0daf649e82..a3d0c57ec0f0 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -936,6 +936,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } + /* Wa_16011303918:adl-p */ + if (crtc_state->vrr.enable && + IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, not compatible with HW stepping + VRR\n"); + return false; + } + + if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n"); + return false; + } + if (HAS_PSR2_SEL_FETCH(dev_priv)) { if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && !HAS_PSR_HW_TRACKING(dev_priv)) { @@ -949,12 +963,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, if (!crtc_state->enable_psr2_sel_fetch && IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) { drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n"); - return false; + goto unsupported; } if (!psr2_granularity_check(intel_dp, crtc_state)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n"); - return false; + goto unsupported; } if (!crtc_state->enable_psr2_sel_fetch && @@ -963,25 +977,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", crtc_hdisplay, crtc_vdisplay, psr_max_h, psr_max_v); - return false; - } - - if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) { - drm_dbg_kms(&dev_priv->drm, - "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n"); - return false; - 
} - - /* Wa_16011303918:adl-p */ - if (crtc_state->vrr.enable && - IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { - drm_dbg_kms(&dev_priv->drm, - "PSR2 not enabled, not compatible with HW stepping + VRR\n"); - return false; + goto unsupported; } tgl_dc3co_exitline_compute_config(intel_dp, crtc_state); return true; + +unsupported: + crtc_state->enable_psr2_sel_fetch = false; + return false; } void intel_psr_compute_config(struct intel_dp *intel_dp, diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 18b52b64af95..536b319ffe5b 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -32,7 +32,7 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv) if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy), DG2_PHY_DP_TX_ACK_MASK, 25)) DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n", - phy); + phy_name(phy)); } } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 5130e8ed9564..28e07040cf47 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -66,7 +66,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, * mmap ioctl is disallowed for all discrete platforms, * and for all platforms with GRAPHICS_VER > 12. */ - if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12) + if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0)) return -EOPNOTSUPP; if (args->flags & ~(I915_MMAP_WC)) @@ -438,7 +438,7 @@ vm_access(struct vm_area_struct *area, unsigned long addr, return -EACCES; addr -= area->vm_start; - if (addr >= obj->base.size) + if (range_overflows_t(u64, addr, len, obj->base.size)) return -EINVAL; i915_gem_ww_ctx_init(&ww, true); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 8d6c38a62201..9053cea3395a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -162,7 +162,6 @@ retry: /* Immediately discard the backing storage */ void i915_gem_object_truncate(struct drm_i915_gem_object *obj) { - drm_gem_free_mmap_offset(&obj->base); if (obj->ops->truncate) obj->ops->truncate(obj); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 6ea13159bffc..4b823fbfe76a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -759,11 +759,9 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) if (obj->mm.madv != I915_MADV_WILLNEED) { bo->priority = I915_TTM_PRIO_PURGE; } else if (!i915_gem_object_has_pages(obj)) { - if (bo->priority < I915_TTM_PRIO_HAS_PAGES) - bo->priority = I915_TTM_PRIO_HAS_PAGES; + bo->priority = I915_TTM_PRIO_NO_PAGES; } else { - if (bo->priority > I915_TTM_PRIO_NO_PAGES) - bo->priority = I915_TTM_PRIO_NO_PAGES; + bo->priority = I915_TTM_PRIO_HAS_PAGES; } ttm_bo_move_to_lru_tail(bo, bo->resource, NULL); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 65a3e7fdb2b2..95ff630157b9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -133,7 +133,7 @@ static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id) { u32 request[] = { GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST, - SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 2), + SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1), id, }; diff --git 
a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index d1d4b97b86f5..287f5a3d0b35 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -108,6 +108,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) /* Comet Lake V PCH is based on KBP, which is SPT compatible */ return PCH_SPT; case INTEL_PCH_ICP_DEVICE_ID_TYPE: + case INTEL_PCH_ICP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); return PCH_ICP; @@ -123,7 +124,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) !IS_GEN9_BC(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: - case INTEL_PCH_JSP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv)); return PCH_JSP; diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 7c0d83d292dc..994c56fcb199 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -50,11 +50,11 @@ enum intel_pch { #define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680 #define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 +#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 #define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080 #define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380 #define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80 -#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80 #define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index aea4cc2b3486..57c1dda76b94 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3713,8 +3713,7 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv) MISSING_CASE(DISPLAY_VER(dev_priv)); } - /* Default to an unusable block time */ - dev_priv->sagv_block_time_us = -1; + dev_priv->sagv_block_time_us = 0; } /* @@ -4020,6 +4019,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) return ret; } + if (intel_can_enable_sagv(dev_priv, new_bw_state) != + intel_can_enable_sagv(dev_priv, old_bw_state)) { + ret = intel_atomic_serialize_global_state(&new_bw_state->base); + if (ret) + return ret; + } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; @@ -4035,17 +4045,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) intel_can_enable_sagv(dev_priv, new_bw_state); } - if (intel_can_enable_sagv(dev_priv, new_bw_state) != - intel_can_enable_sagv(dev_priv, old_bw_state)) { - ret = intel_atomic_serialize_global_state(&new_bw_state->base); - if (ret) - return ret; - } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - return 0; } @@ -4844,7 +4843,7 @@ static bool check_mbus_joined(u8 active_pipes, { int i; - for (i = 0; i < dbuf_slices[i].active_pipes; i++) { + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { if (dbuf_slices[i].active_pipes == active_pipes) return dbuf_slices[i].join_mbus; } @@ -4861,7 +4860,7 @@ static u8 
compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus, { int i; - for (i = 0; i < dbuf_slices[i].active_pipes; i++) { + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { if (dbuf_slices[i].active_pipes == active_pipes && dbuf_slices[i].join_mbus == join_mbus) return dbuf_slices[i].dbuf_mask[pipe]; @@ -5635,7 +5634,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1; result->enable = true; - if (DISPLAY_VER(dev_priv) < 12) + if (DISPLAY_VER(dev_priv) < 12 && dev_priv->sagv_block_time_us) result->can_sagv = latency >= dev_priv->sagv_block_time_us; } @@ -5666,7 +5665,10 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0; struct skl_wm_level *levels = plane_wm->wm; - unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us; + unsigned int latency = 0; + + if (dev_priv->sagv_block_time_us) + latency = dev_priv->sagv_block_time_us + dev_priv->wm.skl_latency[0]; skl_compute_plane_wm(crtc_state, 0, latency, wm_params, &levels[0], @@ -6681,6 +6683,74 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices; } +static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) +{ + const struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(i915->dbuf.obj.state); + struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; + struct intel_crtc *crtc; + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + entries[crtc->pipe] = crtc_state->wm.skl.ddb; + } + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + u8 slices; + + slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, + dbuf_state->joined_mbus); + if (dbuf_state->slices[crtc->pipe] & ~slices) + return true; + + if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries, + I915_MAX_PIPES, crtc->pipe)) + return true; + } + + return false; +} + +void skl_wm_sanitize(struct drm_i915_private *i915) +{ + struct intel_crtc *crtc; + + /* + * On TGL/RKL (at least) the BIOS likes to assign the planes + * to the wrong DBUF slices. This will cause an infinite loop + * in skl_commit_modeset_enables() as it can't find a way to + * transition between the old bogus DBUF layout to the new + * proper DBUF layout without DBUF allocation overlaps between + * the planes (which cannot be allowed or else the hardware + * may hang). If we detect a bogus DBUF layout just turn off + * all the planes so that skl_commit_modeset_enables() can + * simply ignore them. 
+ */ + if (!skl_dbuf_is_misconfigured(i915)) + return; + + drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + if (plane_state->uapi.visible) + intel_plane_disable_noatomic(crtc, plane); + + drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0); + + memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb)); + } +} + static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 91f23b7f0af2..79d89fe22d8c 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -48,6 +48,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, struct skl_pipe_wm *out); void g4x_wm_sanitize(struct drm_i915_private *dev_priv); void vlv_wm_sanitize(struct drm_i915_private *dev_priv); +void skl_wm_sanitize(struct drm_i915_private *dev_priv); bool intel_can_enable_sagv(struct drm_i915_private *dev_priv, const struct intel_bw_state *bw_state); void intel_sagv_pre_plane_update(struct intel_atomic_state *state); diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 87428fb23d9f..a2277a0d6d06 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -222,6 +222,7 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np); struct imx_hdmi *hdmi; + int ret; hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); if (!hdmi) @@ -243,10 +244,15 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev) hdmi->bridge = of_drm_find_bridge(np); if (!hdmi->bridge) { dev_err(hdmi->dev, "Unable to find bridge\n"); + dw_hdmi_remove(hdmi->hdmi); return -ENODEV; } - return component_add(&pdev->dev, &dw_hdmi_imx_ops); + ret = component_add(&pdev->dev, &dw_hdmi_imx_ops); + if (ret) + dw_hdmi_remove(hdmi->hdmi); + + return ret; } static int dw_hdmi_imx_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index e5078d03020d..fb0e951248f6 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -572,6 +572,8 @@ static int imx_ldb_panel_ddc(struct device *dev, edidp = of_get_property(child, "edid", &edid_len); if (edidp) { channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL); + if (!channel->edid) + return -ENOMEM; } else if (!channel->panel) { /* fallback to display-timings node */ ret = of_get_drm_display_mode(child, diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index a8aba0141ce7..63ba2ad84679 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -75,8 +75,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) ret = of_get_drm_display_mode(np, &imxpd->mode, &imxpd->bus_flags, OF_USE_NATIVE_MODE); - if (ret) + if (ret) { + drm_mode_destroy(connector->dev, mode); return ret; + } drm_mode_copy(mode, &imxpd->mode); mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; @@ -217,14 +219,6 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, if 
(!imx_pd_format_supported(bus_fmt)) return -EINVAL; - if (bus_flags & - ~(DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_DE_HIGH | - DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | - DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)) { - dev_warn(imxpd->dev, "invalid bus_flags (%x)\n", bus_flags); - return -EINVAL; - } - bridge_state->output_bus_cfg.flags = bus_flags; bridge_state->input_bus_cfg.flags = bus_flags; imx_crtc_state->bus_flags = bus_flags; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 93b40c245f00..5d90d2eb0019 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -11,6 +11,7 @@ #include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/reset.h> #include <video/mipi_display.h> #include <video/videomode.h> @@ -980,8 +981,10 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) struct mtk_dsi *dsi = dev_get_drvdata(dev); ret = mtk_dsi_encoder_init(drm, dsi); + if (ret) + return ret; - return ret; + return device_reset_optional(dev); } static void mtk_dsi_unbind(struct device *dev, struct device *master, diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile index 28a519cdf66b..523fce45f16b 100644 --- a/drivers/gpu/drm/meson/Makefile +++ b/drivers/gpu/drm/meson/Makefile @@ -2,6 +2,7 @@ meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o meson-drm-y += meson_rdma.o meson_osd_afbcd.o +meson-drm-y += meson_encoder_hdmi.o obj-$(CONFIG_DRM_MESON) += meson-drm.o obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index bc0d60df04ae..c98525d60df5 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -32,6 +32,7 @@ #include "meson_osd_afbcd.h" #include "meson_registers.h" #include "meson_venc_cvbs.h" +#include "meson_encoder_hdmi.h" #include "meson_viu.h" #include "meson_vpp.h" #include "meson_rdma.h" @@ -206,8 +207,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) priv->compat = match->compat; priv->afbcd.ops = match->afbcd_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu"); - regs = devm_ioremap_resource(dev, res); + regs = devm_platform_ioremap_resource_byname(pdev, "vpu"); if (IS_ERR(regs)) { ret = PTR_ERR(regs); goto free_drm; @@ -302,38 +302,42 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) if (priv->afbcd.ops) { ret = priv->afbcd.ops->init(priv); if (ret) - return ret; + goto free_drm; } /* Encoder Initialization */ ret = meson_venc_cvbs_create(priv); if (ret) - goto free_drm; + goto exit_afbcd; if (has_components) { ret = component_bind_all(drm->dev, drm); if (ret) { dev_err(drm->dev, "Couldn't bind all components\n"); - goto free_drm; + goto exit_afbcd; } } + ret = meson_encoder_hdmi_init(priv); + if (ret) + goto exit_afbcd; + ret = meson_plane_create(priv); if (ret) - goto free_drm; + goto exit_afbcd; ret = meson_overlay_create(priv); if (ret) - goto free_drm; + goto exit_afbcd; ret = meson_crtc_create(priv); if (ret) - goto free_drm; + goto exit_afbcd; ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm); if (ret) - goto free_drm; + goto exit_afbcd; drm_mode_config_reset(drm); @@ -351,6 +355,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) uninstall_irq: free_irq(priv->vsync_irq, drm); 
+exit_afbcd: + if (priv->afbcd.ops) + priv->afbcd.ops->exit(priv); free_drm: drm_dev_put(drm); @@ -381,10 +388,8 @@ static void meson_drv_unbind(struct device *dev) free_irq(priv->vsync_irq, drm); drm_dev_put(drm); - if (priv->afbcd.ops) { - priv->afbcd.ops->reset(priv); - meson_rdma_free(priv); - } + if (priv->afbcd.ops) + priv->afbcd.ops->exit(priv); } static const struct component_master_ops meson_drv_master_ops = { diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 2ed87cfdd735..fb540a503efe 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -22,14 +22,11 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_print.h> -#include <linux/media-bus-format.h> #include <linux/videodev2.h> #include "meson_drv.h" #include "meson_dw_hdmi.h" #include "meson_registers.h" -#include "meson_vclk.h" -#include "meson_venc.h" #define DRIVER_NAME "meson-dw-hdmi" #define DRIVER_DESC "Amlogic Meson HDMI-TX DRM driver" @@ -135,8 +132,6 @@ struct meson_dw_hdmi_data { }; struct meson_dw_hdmi { - struct drm_encoder encoder; - struct drm_bridge bridge; struct dw_hdmi_plat_data dw_plat_data; struct meson_drm *priv; struct device *dev; @@ -148,12 +143,8 @@ struct meson_dw_hdmi { struct regulator *hdmi_supply; u32 irq_stat; struct dw_hdmi *hdmi; - unsigned long output_bus_fmt; + struct drm_bridge *bridge; }; -#define encoder_to_meson_dw_hdmi(x) \ - container_of(x, struct meson_dw_hdmi, encoder) -#define bridge_to_meson_dw_hdmi(x) \ - container_of(x, struct meson_dw_hdmi, bridge) static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi, const char *compat) @@ -295,14 +286,14 @@ static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi, /* Setup PHY bandwidth modes */ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi, - const struct drm_display_mode *mode) + const struct drm_display_mode *mode, + bool mode_is_420) { struct meson_drm *priv = dw_hdmi->priv; unsigned int pixel_clock = mode->clock; /* For 420, pixel clock is half unlike venc clock */ - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - pixel_clock /= 2; + if (mode_is_420) pixel_clock /= 2; if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") || dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) { @@ -374,68 +365,25 @@ static inline void meson_dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi) mdelay(2); } -static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi, - const struct drm_display_mode *mode) -{ - struct meson_drm *priv = dw_hdmi->priv; - int vic = drm_match_cea_mode(mode); - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int hdmi_freq; - - vclk_freq = mode->clock; - - /* For 420, pixel clock is half unlike venc clock */ - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - vclk_freq /= 2; - - /* TMDS clock is pixel_clock * 10 */ - phy_freq = vclk_freq * 10; - - if (!vic) { - meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq, - vclk_freq, vclk_freq, vclk_freq, false); - return; - } - - /* 480i/576i needs global pixel doubling */ - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - vclk_freq *= 2; - - venc_freq = vclk_freq; - hdmi_freq = vclk_freq; - - /* VENC double pixels for 1080i, 720p and YUV420 modes */ - if (meson_venc_hdmi_venc_repeat(vic) || - dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - venc_freq *= 2; - - vclk_freq = max(venc_freq, hdmi_freq); - - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - venc_freq /= 2; - 
- DRM_DEBUG_DRIVER("vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n", - phy_freq, vclk_freq, venc_freq, hdmi_freq, - priv->venc.hdmi_use_enci); - - meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq, - venc_freq, hdmi_freq, priv->venc.hdmi_use_enci); -} - static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, const struct drm_display_info *display, const struct drm_display_mode *mode) { struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data; + bool is_hdmi2_sink = display->hdmi.scdc.supported; struct meson_drm *priv = dw_hdmi->priv; unsigned int wr_clk = readl_relaxed(priv->io_base + _REG(VPU_HDMI_SETTING)); + bool mode_is_420 = false; DRM_DEBUG_DRIVER("\"%s\" div%d\n", mode->name, mode->clock > 340000 ? 40 : 10); + if (drm_mode_is_420_only(display, mode) || + (!is_hdmi2_sink && + drm_mode_is_420_also(display, mode))) + mode_is_420 = true; + /* Enable clocks */ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100); @@ -457,8 +405,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12)); /* TMDS pattern setup */ - if (mode->clock > 340000 && - dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_YUV8_1X24) { + if (mode->clock > 340000 && !mode_is_420) { dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, 0); dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23, @@ -476,7 +423,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2); /* Setup PHY parameters */ - meson_hdmi_phy_setup_mode(dw_hdmi, mode); + meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420); /* Setup PHY */ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, @@ -622,214 +569,15 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id) dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected, hpd_connected); - drm_helper_hpd_irq_event(dw_hdmi->encoder.dev); + drm_helper_hpd_irq_event(dw_hdmi->bridge->dev); + drm_bridge_hpd_notify(dw_hdmi->bridge, + hpd_connected ? 
connector_status_connected + : connector_status_disconnected); } return IRQ_HANDLED; } -static enum drm_mode_status -dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, - const struct drm_display_info *display_info, - const struct drm_display_mode *mode) -{ - struct meson_dw_hdmi *dw_hdmi = data; - struct meson_drm *priv = dw_hdmi->priv; - bool is_hdmi2_sink = display_info->hdmi.scdc.supported; - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int hdmi_freq; - int vic = drm_match_cea_mode(mode); - enum drm_mode_status status; - - DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); - - /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */ - if (display_info->max_tmds_clock && - mode->clock > display_info->max_tmds_clock && - !drm_mode_is_420_only(display_info, mode) && - !drm_mode_is_420_also(display_info, mode)) - return MODE_BAD; - - /* Check against non-VIC supported modes */ - if (!vic) { - status = meson_venc_hdmi_supported_mode(mode); - if (status != MODE_OK) - return status; - - return meson_vclk_dmt_supported_freq(priv, mode->clock); - /* Check against supported VIC modes */ - } else if (!meson_venc_hdmi_supported_vic(vic)) - return MODE_BAD; - - vclk_freq = mode->clock; - - /* For 420, pixel clock is half unlike venc clock */ - if (drm_mode_is_420_only(display_info, mode) || - (!is_hdmi2_sink && - drm_mode_is_420_also(display_info, mode))) - vclk_freq /= 2; - - /* TMDS clock is pixel_clock * 10 */ - phy_freq = vclk_freq * 10; - - /* 480i/576i needs global pixel doubling */ - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - vclk_freq *= 2; - - venc_freq = vclk_freq; - hdmi_freq = vclk_freq; - - /* VENC double pixels for 1080i, 720p and YUV420 modes */ - if (meson_venc_hdmi_venc_repeat(vic) || - drm_mode_is_420_only(display_info, mode) || - (!is_hdmi2_sink && - drm_mode_is_420_also(display_info, mode))) - venc_freq *= 2; - - vclk_freq = max(venc_freq, hdmi_freq); - - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - venc_freq /= 2; - - dev_dbg(dw_hdmi->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n", - __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq); - - return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq); -} - -/* Encoder */ - -static const u32 meson_dw_hdmi_out_bus_fmts[] = { - MEDIA_BUS_FMT_YUV8_1X24, - MEDIA_BUS_FMT_UYYVYY8_0_5X24, -}; - -static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = { - .destroy = meson_venc_hdmi_encoder_destroy, -}; - -static u32 * -meson_venc_hdmi_encoder_get_inp_bus_fmts(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state, - u32 output_fmt, - unsigned int *num_input_fmts) -{ - u32 *input_fmts = NULL; - int i; - - *num_input_fmts = 0; - - for (i = 0 ; i < ARRAY_SIZE(meson_dw_hdmi_out_bus_fmts) ; ++i) { - if (output_fmt == meson_dw_hdmi_out_bus_fmts[i]) { - *num_input_fmts = 1; - input_fmts = kcalloc(*num_input_fmts, - sizeof(*input_fmts), - GFP_KERNEL); - if (!input_fmts) - return NULL; - - input_fmts[0] = output_fmt; - - break; - } - } - - return input_fmts; -} - -static int meson_venc_hdmi_encoder_atomic_check(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - - dw_hdmi->output_bus_fmt = 
bridge_state->output_bus_cfg.format; - - DRM_DEBUG_DRIVER("output_bus_fmt %lx\n", dw_hdmi->output_bus_fmt); - - return 0; -} - -static void meson_venc_hdmi_encoder_disable(struct drm_bridge *bridge) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - struct meson_drm *priv = dw_hdmi->priv; - - DRM_DEBUG_DRIVER("\n"); - - writel_bits_relaxed(0x3, 0, - priv->io_base + _REG(VPU_HDMI_SETTING)); - - writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN)); - writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN)); -} - -static void meson_venc_hdmi_encoder_enable(struct drm_bridge *bridge) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - struct meson_drm *priv = dw_hdmi->priv; - - DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP"); - - if (priv->venc.hdmi_use_enci) - writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN)); - else - writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN)); -} - -static void meson_venc_hdmi_encoder_mode_set(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - const struct drm_display_mode *adjusted_mode) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - struct meson_drm *priv = dw_hdmi->priv; - int vic = drm_match_cea_mode(mode); - unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR; - bool yuv420_mode = false; - - DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic); - - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) { - ycrcb_map = VPU_HDMI_OUTPUT_CRYCB; - yuv420_mode = true; - } - - /* VENC + VENC-DVI Mode setup */ - meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode); - - /* VCLK Set clock */ - dw_hdmi_set_vclk(dw_hdmi, mode); - - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - /* Setup YUV420 to HDMI-TX, no 10bit diphering */ - writel_relaxed(2 | (2 << 2), - priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); - else - /* Setup YUV444 to HDMI-TX, no 10bit diphering */ - writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); -} - -static const struct drm_bridge_funcs meson_venc_hdmi_encoder_bridge_funcs = { - .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, - .atomic_get_input_bus_fmts = meson_venc_hdmi_encoder_get_inp_bus_fmts, - .atomic_reset = drm_atomic_helper_bridge_reset, - .atomic_check = meson_venc_hdmi_encoder_atomic_check, - .enable = meson_venc_hdmi_encoder_enable, - .disable = meson_venc_hdmi_encoder_disable, - .mode_set = meson_venc_hdmi_encoder_mode_set, -}; - /* DW HDMI Regmap */ static int meson_dw_hdmi_reg_read(void *context, unsigned int reg, @@ -876,28 +624,6 @@ static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = { .dwc_write = dw_hdmi_g12a_dwc_write, }; -static bool meson_hdmi_connector_is_available(struct device *dev) -{ - struct device_node *ep, *remote; - - /* HDMI Connector is on the second port, first endpoint */ - ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, 0); - if (!ep) - return false; - - /* If the endpoint node exists, consider it enabled */ - remote = of_graph_get_remote_port(ep); - if (remote) { - of_node_put(ep); - return true; - } - - of_node_put(ep); - of_node_put(remote); - - return false; -} - static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi) { struct meson_drm *priv = meson_dw_hdmi->priv; @@ -976,19 +702,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, struct drm_device *drm = data; struct meson_drm *priv = drm->dev_private; struct 
dw_hdmi_plat_data *dw_plat_data; - struct drm_bridge *next_bridge; - struct drm_encoder *encoder; - struct resource *res; int irq; int ret; DRM_DEBUG_DRIVER("\n"); - if (!meson_hdmi_connector_is_available(dev)) { - dev_info(drm->dev, "HDMI Output connector not available\n"); - return -ENODEV; - } - match = of_device_get_match_data(&pdev->dev); if (!match) { dev_err(&pdev->dev, "failed to get match data\n"); @@ -1004,7 +722,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, meson_dw_hdmi->dev = dev; meson_dw_hdmi->data = match; dw_plat_data = &meson_dw_hdmi->dw_plat_data; - encoder = &meson_dw_hdmi->encoder; meson_dw_hdmi->hdmi_supply = devm_regulator_get_optional(dev, "hdmi"); if (IS_ERR(meson_dw_hdmi->hdmi_supply)) { @@ -1042,8 +759,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, return PTR_ERR(meson_dw_hdmi->hdmitx_phy); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - meson_dw_hdmi->hdmitx = devm_ioremap_resource(dev, res); + meson_dw_hdmi->hdmitx = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(meson_dw_hdmi->hdmitx)) return PTR_ERR(meson_dw_hdmi->hdmitx); @@ -1076,28 +792,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, return ret; } - /* Encoder */ - - ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, "meson_hdmi"); - if (ret) { - dev_err(priv->dev, "Failed to init HDMI encoder\n"); - return ret; - } - - meson_dw_hdmi->bridge.funcs = &meson_venc_hdmi_encoder_bridge_funcs; - drm_bridge_attach(encoder, &meson_dw_hdmi->bridge, NULL, 0); - - encoder->possible_crtcs = BIT(0); - meson_dw_hdmi_init(meson_dw_hdmi); - DRM_DEBUG_DRIVER("encoder initialized\n"); - /* Bridge / Connector */ dw_plat_data->priv_data = meson_dw_hdmi; - dw_plat_data->mode_valid = dw_hdmi_mode_valid; dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops; dw_plat_data->phy_name = "meson_dw_hdmi_phy"; dw_plat_data->phy_data = meson_dw_hdmi; @@ -1112,15 +811,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, platform_set_drvdata(pdev, meson_dw_hdmi); - meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev, - &meson_dw_hdmi->dw_plat_data); + meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev, &meson_dw_hdmi->dw_plat_data); if (IS_ERR(meson_dw_hdmi->hdmi)) return PTR_ERR(meson_dw_hdmi->hdmi); - next_bridge = of_drm_find_bridge(pdev->dev.of_node); - if (next_bridge) - drm_bridge_attach(encoder, next_bridge, - &meson_dw_hdmi->bridge, 0); + meson_dw_hdmi->bridge = of_drm_find_bridge(pdev->dev.of_node); DRM_DEBUG_DRIVER("HDMI controller initialized\n"); diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c new file mode 100644 index 000000000000..db332fa4cd54 --- /dev/null +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c @@ -0,0 +1,370 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2016 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. 
+ */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_device.h> +#include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include <linux/media-bus-format.h> +#include <linux/videodev2.h> + +#include "meson_drv.h" +#include "meson_registers.h" +#include "meson_vclk.h" +#include "meson_venc.h" +#include "meson_encoder_hdmi.h" + +struct meson_encoder_hdmi { + struct drm_encoder encoder; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct meson_drm *priv; + unsigned long output_bus_fmt; +}; + +#define bridge_to_meson_encoder_hdmi(x) \ + container_of(x, struct meson_encoder_hdmi, bridge) + +static int meson_encoder_hdmi_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + + return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge, + &encoder_hdmi->bridge, flags); +} + +static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi, + const struct drm_display_mode *mode) +{ + struct meson_drm *priv = encoder_hdmi->priv; + int vic = drm_match_cea_mode(mode); + unsigned int phy_freq; + unsigned int vclk_freq; + unsigned int venc_freq; + unsigned int hdmi_freq; + + vclk_freq = mode->clock; + + /* For 420, pixel clock is half unlike venc clock */ + if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) + vclk_freq /= 2; + + /* TMDS clock is pixel_clock * 10 */ + phy_freq = vclk_freq * 10; + + if (!vic) { + meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq, + vclk_freq, vclk_freq, vclk_freq, false); + return; + } + + /* 480i/576i needs global pixel doubling */ + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + vclk_freq *= 2; + + venc_freq = vclk_freq; + hdmi_freq = vclk_freq; + + /* VENC double pixels for 1080i, 720p and YUV420 modes */ + if (meson_venc_hdmi_venc_repeat(vic) || + encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) + venc_freq *= 2; + + vclk_freq = max(venc_freq, hdmi_freq); + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + venc_freq /= 2; + + dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n", + phy_freq, vclk_freq, venc_freq, hdmi_freq, + priv->venc.hdmi_use_enci); + + meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq, + venc_freq, hdmi_freq, priv->venc.hdmi_use_enci); +} + +static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *display_info, + const struct drm_display_mode *mode) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct meson_drm *priv = encoder_hdmi->priv; + bool is_hdmi2_sink = display_info->hdmi.scdc.supported; + unsigned int phy_freq; + unsigned int vclk_freq; + unsigned int venc_freq; + unsigned int hdmi_freq; + int vic = drm_match_cea_mode(mode); + enum drm_mode_status status; + + dev_dbg(priv->dev, "Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); + + /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */ + if (display_info->max_tmds_clock && + mode->clock > display_info->max_tmds_clock && + !drm_mode_is_420_only(display_info, mode) && + !drm_mode_is_420_also(display_info, mode)) + return MODE_BAD; + + /* Check against non-VIC supported 
modes */ + if (!vic) { + status = meson_venc_hdmi_supported_mode(mode); + if (status != MODE_OK) + return status; + + return meson_vclk_dmt_supported_freq(priv, mode->clock); + /* Check against supported VIC modes */ + } else if (!meson_venc_hdmi_supported_vic(vic)) + return MODE_BAD; + + vclk_freq = mode->clock; + + /* For 420, pixel clock is half unlike venc clock */ + if (drm_mode_is_420_only(display_info, mode) || + (!is_hdmi2_sink && + drm_mode_is_420_also(display_info, mode))) + vclk_freq /= 2; + + /* TMDS clock is pixel_clock * 10 */ + phy_freq = vclk_freq * 10; + + /* 480i/576i needs global pixel doubling */ + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + vclk_freq *= 2; + + venc_freq = vclk_freq; + hdmi_freq = vclk_freq; + + /* VENC double pixels for 1080i, 720p and YUV420 modes */ + if (meson_venc_hdmi_venc_repeat(vic) || + drm_mode_is_420_only(display_info, mode) || + (!is_hdmi2_sink && + drm_mode_is_420_also(display_info, mode))) + venc_freq *= 2; + + vclk_freq = max(venc_freq, hdmi_freq); + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + venc_freq /= 2; + + dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n", + __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq); + + return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq); +} + +static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct drm_atomic_state *state = bridge_state->base.state; + unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR; + struct meson_drm *priv = encoder_hdmi->priv; + struct drm_connector_state *conn_state; + const struct drm_display_mode *mode; + struct drm_crtc_state *crtc_state; + struct drm_connector *connector; + bool yuv420_mode = false; + int vic; + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + if (WARN_ON(!connector)) + return; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (WARN_ON(!conn_state)) + return; + + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + if (WARN_ON(!crtc_state)) + return; + + mode = &crtc_state->adjusted_mode; + + vic = drm_match_cea_mode(mode); + + dev_dbg(priv->dev, "\"%s\" vic %d\n", mode->name, vic); + + if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) { + ycrcb_map = VPU_HDMI_OUTPUT_CRYCB; + yuv420_mode = true; + } + + /* VENC + VENC-DVI Mode setup */ + meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode); + + /* VCLK Set clock */ + meson_encoder_hdmi_set_vclk(encoder_hdmi, mode); + + if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) + /* Setup YUV420 to HDMI-TX, no 10bit diphering */ + writel_relaxed(2 | (2 << 2), + priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); + else + /* Setup YUV444 to HDMI-TX, no 10bit diphering */ + writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); + + dev_dbg(priv->dev, "%s\n", priv->venc.hdmi_use_enci ? 
"VENCI" : "VENCP"); + + if (priv->venc.hdmi_use_enci) + writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN)); + else + writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN)); +} + +static void meson_encoder_hdmi_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct meson_drm *priv = encoder_hdmi->priv; + + writel_bits_relaxed(0x3, 0, + priv->io_base + _REG(VPU_HDMI_SETTING)); + + writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN)); + writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN)); +} + +static const u32 meson_encoder_hdmi_out_bus_fmts[] = { + MEDIA_BUS_FMT_YUV8_1X24, + MEDIA_BUS_FMT_UYYVYY8_0_5X24, +}; + +static u32 * +meson_encoder_hdmi_get_inp_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + u32 *input_fmts = NULL; + int i; + + *num_input_fmts = 0; + + for (i = 0 ; i < ARRAY_SIZE(meson_encoder_hdmi_out_bus_fmts) ; ++i) { + if (output_fmt == meson_encoder_hdmi_out_bus_fmts[i]) { + *num_input_fmts = 1; + input_fmts = kcalloc(*num_input_fmts, + sizeof(*input_fmts), + GFP_KERNEL); + if (!input_fmts) + return NULL; + + input_fmts[0] = output_fmt; + + break; + } + } + + return input_fmts; +} + +static int meson_encoder_hdmi_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct drm_connector_state *old_conn_state = + drm_atomic_get_old_connector_state(conn_state->state, conn_state->connector); + struct meson_drm *priv = encoder_hdmi->priv; + + encoder_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format; + + dev_dbg(priv->dev, "output_bus_fmt %lx\n", encoder_hdmi->output_bus_fmt); + + if (!drm_connector_atomic_hdr_metadata_equal(old_conn_state, conn_state)) + crtc_state->mode_changed = true; + + return 0; +} + +static const struct drm_bridge_funcs meson_encoder_hdmi_bridge_funcs = { + .attach = meson_encoder_hdmi_attach, + .mode_valid = meson_encoder_hdmi_mode_valid, + .atomic_enable = meson_encoder_hdmi_atomic_enable, + .atomic_disable = meson_encoder_hdmi_atomic_disable, + .atomic_get_input_bus_fmts = meson_encoder_hdmi_get_inp_bus_fmts, + .atomic_check = meson_encoder_hdmi_atomic_check, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +int meson_encoder_hdmi_init(struct meson_drm *priv) +{ + struct meson_encoder_hdmi *meson_encoder_hdmi; + struct device_node *remote; + int ret; + + meson_encoder_hdmi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_hdmi), GFP_KERNEL); + if (!meson_encoder_hdmi) + return -ENOMEM; + + /* HDMI Transceiver Bridge */ + remote = of_graph_get_remote_node(priv->dev->of_node, 1, 0); + if (!remote) { + dev_err(priv->dev, "HDMI transceiver device is disabled"); + return 0; + } + + meson_encoder_hdmi->next_bridge = of_drm_find_bridge(remote); + if (!meson_encoder_hdmi->next_bridge) { + dev_err(priv->dev, "Failed to find HDMI transceiver bridge\n"); + return -EPROBE_DEFER; + } + + /* HDMI Encoder Bridge */ + meson_encoder_hdmi->bridge.funcs = &meson_encoder_hdmi_bridge_funcs; + meson_encoder_hdmi->bridge.of_node = priv->dev->of_node; + 
meson_encoder_hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + + drm_bridge_add(&meson_encoder_hdmi->bridge); + + meson_encoder_hdmi->priv = priv; + + /* Encoder */ + ret = drm_simple_encoder_init(priv->drm, &meson_encoder_hdmi->encoder, + DRM_MODE_ENCODER_TMDS); + if (ret) { + dev_err(priv->dev, "Failed to init HDMI encoder: %d\n", ret); + return ret; + } + + meson_encoder_hdmi->encoder.possible_crtcs = BIT(0); + + /* Attach HDMI Encoder Bridge to Encoder */ + ret = drm_bridge_attach(&meson_encoder_hdmi->encoder, &meson_encoder_hdmi->bridge, NULL, 0); + if (ret) { + dev_err(priv->dev, "Failed to attach bridge: %d\n", ret); + return ret; + } + + /* + * We should have now in place: + * encoder->[hdmi encoder bridge]->[dw-hdmi bridge]->[dw-hdmi connector] + */ + + dev_dbg(priv->dev, "HDMI encoder initialized\n"); + + return 0; +} diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h new file mode 100644 index 000000000000..ed19494f0956 --- /dev/null +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2021 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + */ + +#ifndef __MESON_ENCODER_HDMI_H +#define __MESON_ENCODER_HDMI_H + +int meson_encoder_hdmi_init(struct meson_drm *priv); + +#endif /* __MESON_ENCODER_HDMI_H */ diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c index ffc6b584dbf8..0cdbe899402f 100644 --- a/drivers/gpu/drm/meson/meson_osd_afbcd.c +++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c @@ -79,11 +79,6 @@ static bool meson_gxm_afbcd_supported_fmt(u64 modifier, uint32_t format) return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0; } -static int meson_gxm_afbcd_init(struct meson_drm *priv) -{ - return 0; -} - static int meson_gxm_afbcd_reset(struct meson_drm *priv) { writel_relaxed(VIU_SW_RESET_OSD1_AFBCD, @@ -93,6 +88,16 @@ static int meson_gxm_afbcd_reset(struct meson_drm *priv) return 0; } +static int meson_gxm_afbcd_init(struct meson_drm *priv) +{ + return 0; +} + +static void meson_gxm_afbcd_exit(struct meson_drm *priv) +{ + meson_gxm_afbcd_reset(priv); +} + static int meson_gxm_afbcd_enable(struct meson_drm *priv) { writel_relaxed(FIELD_PREP(OSD1_AFBCD_ID_FIFO_THRD, 0x40) | @@ -172,6 +177,7 @@ static int meson_gxm_afbcd_setup(struct meson_drm *priv) struct meson_afbcd_ops meson_afbcd_gxm_ops = { .init = meson_gxm_afbcd_init, + .exit = meson_gxm_afbcd_exit, .reset = meson_gxm_afbcd_reset, .enable = meson_gxm_afbcd_enable, .disable = meson_gxm_afbcd_disable, @@ -269,6 +275,18 @@ static bool meson_g12a_afbcd_supported_fmt(u64 modifier, uint32_t format) return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0; } +static int meson_g12a_afbcd_reset(struct meson_drm *priv) +{ + meson_rdma_reset(priv); + + meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB | + VIU_SW_RESET_G12A_OSD1_AFBCD, + VIU_SW_RESET); + meson_rdma_writel_sync(priv, 0, VIU_SW_RESET); + + return 0; +} + static int meson_g12a_afbcd_init(struct meson_drm *priv) { int ret; @@ -286,16 +304,10 @@ static int meson_g12a_afbcd_init(struct meson_drm *priv) return 0; } -static int meson_g12a_afbcd_reset(struct meson_drm *priv) +static void meson_g12a_afbcd_exit(struct meson_drm *priv) { - meson_rdma_reset(priv); - - meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB | - VIU_SW_RESET_G12A_OSD1_AFBCD, - VIU_SW_RESET); - meson_rdma_writel_sync(priv, 0, VIU_SW_RESET); - - return 0; + meson_g12a_afbcd_reset(priv); + 
meson_rdma_free(priv); } static int meson_g12a_afbcd_enable(struct meson_drm *priv) @@ -380,6 +392,7 @@ static int meson_g12a_afbcd_setup(struct meson_drm *priv) struct meson_afbcd_ops meson_afbcd_g12a_ops = { .init = meson_g12a_afbcd_init, + .exit = meson_g12a_afbcd_exit, .reset = meson_g12a_afbcd_reset, .enable = meson_g12a_afbcd_enable, .disable = meson_g12a_afbcd_disable, diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.h b/drivers/gpu/drm/meson/meson_osd_afbcd.h index 5e5523304f42..e77ddeb6416f 100644 --- a/drivers/gpu/drm/meson/meson_osd_afbcd.h +++ b/drivers/gpu/drm/meson/meson_osd_afbcd.h @@ -14,6 +14,7 @@ struct meson_afbcd_ops { int (*init)(struct meson_drm *priv); + void (*exit)(struct meson_drm *priv); int (*reset)(struct meson_drm *priv); int (*enable)(struct meson_drm *priv); int (*disable)(struct meson_drm *priv); diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index fd98e8bbc550..2c7271f545dc 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -529,7 +529,10 @@ static void mgag200_set_format_regs(struct mga_device *mdev, WREG_GFX(3, 0x00); WREG_GFX(4, 0x00); WREG_GFX(5, 0x40); - WREG_GFX(6, 0x05); + /* GCTL6 should be 0x05, but we configure memmapsl to 0xb8000 (text mode), + * so that it doesn't hang when running kexec/kdump on G200_SE rev42. + */ + WREG_GFX(6, 0x0d); WREG_GFX(7, 0x0f); WREG_GFX(8, 0x0f); diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c index e9ae22b4f813..52be08b744ad 100644 --- a/drivers/gpu/drm/mgag200/mgag200_pll.c +++ b/drivers/gpu/drm/mgag200/mgag200_pll.c @@ -404,9 +404,9 @@ mgag200_pixpll_update_g200wb(struct mgag200_pll *pixpll, const struct mgag200_pl udelay(50); /* program pixel pll register */ - WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn); - WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm); - WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp); + WREG_DAC(MGA1064_WB_PIX_PLLC_N, xpixpllcn); + WREG_DAC(MGA1064_WB_PIX_PLLC_M, xpixpllcm); + WREG_DAC(MGA1064_WB_PIX_PLLC_P, xpixpllcp); udelay(50); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index b681c45520bb..9b41e2f82fc2 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -658,19 +658,23 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); const u32 *regs = a6xx_protect; - unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32; - - BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32); - BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48); + unsigned i, count, count_max; if (adreno_is_a650(adreno_gpu)) { regs = a650_protect; count = ARRAY_SIZE(a650_protect); count_max = 48; + BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48); } else if (adreno_is_a660_family(adreno_gpu)) { regs = a660_protect; count = ARRAY_SIZE(a660_protect); count_max = 48; + BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48); + } else { + regs = a6xx_protect; + count = ARRAY_SIZE(a6xx_protect); + count_max = 32; + BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32); } /* @@ -1707,7 +1711,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu) return ERR_CAST(mmu); return msm_gem_address_space_create(mmu, - "gpu", 0x100000000ULL, 0x1ffffffffULL); + "gpu", 0x100000000ULL, SZ_4G); } static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 0e9d3fa1544b..6bde3e234ec8 100644 
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -1107,7 +1107,7 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) } - if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort && + if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS && dpu_enc->cur_master->hw_mdptop && dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select) dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select( diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index f9c83d6e427a..24fbaf562d41 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -35,6 +35,14 @@ int dpu_rm_destroy(struct dpu_rm *rm) { int i; + for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) { + struct dpu_hw_dspp *hw; + + if (rm->dspp_blks[i]) { + hw = to_dpu_hw_dspp(rm->dspp_blks[i]); + dpu_hw_dspp_destroy(hw); + } + } for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) { struct dpu_hw_pingpong *hw; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index c6b69afcbac8..50e854207c70 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -90,7 +90,10 @@ static void mdp5_plane_reset(struct drm_plane *plane) __drm_atomic_helper_plane_destroy_state(plane->state); kfree(to_mdp5_plane_state(plane->state)); + plane->state = NULL; mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); + if (!mdp5_state) + return; if (plane->type == DRM_PLANE_TYPE_PRIMARY) mdp5_state->base.zpos = STAGE_BASE; diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c index cabe15190ec1..369e57f73a47 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -169,6 +169,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len, va_list va; new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL); + if (!new_blk) + return; va_start(va, fmt); diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index 62e75dc8afc6..4af281d97493 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1744,6 +1744,9 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) /* end with failure */ break; /* lane == 1 already */ } + + /* stop link training before start re training */ + dp_ctrl_clear_training_pattern(ctrl); } } diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index a0392e4d8134..a133f7e154e7 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -551,6 +551,12 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) mutex_unlock(&dp->event_mutex); + /* + * add fail safe mode outside event_mutex scope + * to avoid potiential circular lock with drm thread + */ + dp_panel_add_fail_safe_mode(dp->dp_display.connector); + /* uevent will complete connection part */ return 0; }; @@ -1442,6 +1448,7 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, struct drm_encoder *encoder) { struct msm_drm_private *priv; + struct dp_display_private *dp_priv; int ret; if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev)) @@ -1450,6 +1457,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, priv = dev->dev_private; dp_display->drm_dev = dev; + dp_priv = container_of(dp_display, 
struct dp_display_private, dp_display); + ret = dp_display_request_irq(dp_display); if (ret) { DRM_ERROR("request_irq failed, ret=%d\n", ret); @@ -1467,6 +1476,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, return ret; } + dp_priv->panel->connector = dp_display->connector; + priv->connectors[priv->num_connectors++] = dp_display->connector; return 0; } diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 2181b60e1d1d..982f5e8c3546 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -151,6 +151,15 @@ static int dp_panel_update_modes(struct drm_connector *connector, return rc; } +void dp_panel_add_fail_safe_mode(struct drm_connector *connector) +{ + /* fail safe edid */ + mutex_lock(&connector->dev->mode_config.mutex); + if (drm_add_modes_noedid(connector, 640, 480)) + drm_set_preferred_mode(connector, 640, 480); + mutex_unlock(&connector->dev->mode_config.mutex); +} + int dp_panel_read_sink_caps(struct dp_panel *dp_panel, struct drm_connector *connector) { @@ -207,11 +216,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel, goto end; } - /* fail safe edid */ - mutex_lock(&connector->dev->mode_config.mutex); - if (drm_add_modes_noedid(connector, 640, 480)) - drm_set_preferred_mode(connector, 640, 480); - mutex_unlock(&connector->dev->mode_config.mutex); + dp_panel_add_fail_safe_mode(connector); } if (panel->aux_cfg_update_done) { diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h index 9023e5bb4b8b..99739ea679a7 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.h +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -59,6 +59,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel); int dp_panel_deinit(struct dp_panel *dp_panel); int dp_panel_timing_cfg(struct dp_panel *dp_panel); void dp_panel_dump_regs(struct dp_panel *dp_panel); +void dp_panel_add_fail_safe_mode(struct drm_connector *connector); int dp_panel_read_sink_caps(struct dp_panel *dp_panel, struct drm_connector *connector); u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp, diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index dc85974c7897..eea679a52e86 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1909,7 +1909,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) /* do not autoenable, will be enabled later */ ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq, - IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN, + IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, "dsi_isr", msm_host); if (ret < 0) { dev_err(&pdev->dev, "failed to request IRQ%u: %d\n", diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index fa4c396df6a9..6e43672f5807 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -643,7 +643,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) return connector; fail: - connector->funcs->destroy(msm_dsi->connector); + connector->funcs->destroy(connector); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c index d8128f50b0dd..0b782cc18b3f 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -562,7 +562,9 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **prov char clk_name[32], parent[32], vco_name[32]; char parent2[32], parent3[32], parent4[32]; 
struct clk_init_data vco_init = { - .parent_names = (const char *[]){ "xo" }, + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", + }, .num_parents = 1, .name = vco_name, .flags = CLK_IGNORE_UNUSED, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c index 5b4e991f220d..1c1e9861b93f 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c @@ -804,7 +804,9 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **prov { char clk_name[32], parent[32], vco_name[32]; struct clk_init_data vco_init = { - .parent_names = (const char *[]){ "xo" }, + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", + }, .num_parents = 1, .name = vco_name, .flags = CLK_IGNORE_UNUSED, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c index 2da673a2add6..48eab80b548e 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -521,7 +521,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov { char clk_name[32], parent1[32], parent2[32], vco_name[32]; struct clk_init_data vco_init = { - .parent_names = (const char *[]){ "xo" }, + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", .name = "xo", + }, .num_parents = 1, .name = vco_name, .flags = CLK_IGNORE_UNUSED, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c index 71ed4aa0dc67..fc56cdcc9ad6 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c @@ -385,7 +385,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov { char *clk_name, *parent_name, *vco_name; struct clk_init_data vco_init = { - .parent_names = (const char *[]){ "pxo" }, + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", + }, .num_parents = 1, .flags = CLK_IGNORE_UNUSED, .ops = &clk_ops_dsi_pll_28nm_vco, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index cb297b08458e..8cc1ef8199ac 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -590,7 +590,9 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide char clk_name[32], parent[32], vco_name[32]; char parent2[32], parent3[32], parent4[32]; struct clk_init_data vco_init = { - .parent_names = (const char *[]){ "bi_tcxo" }, + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", + }, .num_parents = 1, .name = vco_name, .flags = CLK_IGNORE_UNUSED, @@ -864,20 +866,26 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, /* Alter PHY configurations if data rate less than 1.5GHZ*/ less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000); - /* For C-PHY, no low power settings for lower clk rate */ - if (phy->cphy_mode) - less_than_1500_mhz = false; - if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) { vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52; - glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00; - glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c; + if (phy->cphy_mode) { + glbl_rescode_top_ctrl = 0x00; + glbl_rescode_bot_ctrl = 0x3c; + } else { + glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00; + glbl_rescode_bot_ctrl = less_than_1500_mhz ? 
0x39 : 0x3c; + } glbl_str_swi_cal_sel_ctrl = 0x00; glbl_hstx_str_ctrl_0 = 0x88; } else { vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59; - glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00; - glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88; + if (phy->cphy_mode) { + glbl_str_swi_cal_sel_ctrl = 0x03; + glbl_hstx_str_ctrl_0 = 0x66; + } else { + glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00; + glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88; + } glbl_rescode_top_ctrl = 0x03; glbl_rescode_bot_ctrl = 0x3c; } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index cb52ac01e512..d280dd64744d 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -937,6 +937,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, get_pid_task(aspace->pid, PIDTYPE_PID); if (task) { comm = kstrdup(task->comm, GFP_KERNEL); + put_task_struct(task); } else { comm = NULL; } diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index 89dd618d78f3..988bc4fbd78d 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -361,7 +361,17 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc, bridge_state = drm_atomic_get_new_bridge_state(state, mxsfb->bridge); - bus_format = bridge_state->input_bus_cfg.format; + if (!bridge_state) + bus_format = MEDIA_BUS_FMT_FIXED; + else + bus_format = bridge_state->input_bus_cfg.format; + + if (bus_format == MEDIA_BUS_FMT_FIXED) { + dev_warn_once(drm->dev, + "Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n" + "Please fix bridge driver by handling atomic_get_input_bus_fmts.\n"); + bus_format = MEDIA_BUS_FMT_RGB888_1X24; + } } /* If there is no bridge, use bus format from connector */ diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 1cbd71abc80a..12965a832f94 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -101,7 +101,6 @@ nv40_backlight_init(struct nouveau_encoder *encoder, if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) return -ENODEV; - props->type = BACKLIGHT_RAW; props->max_brightness = 31; *ops = &nv40_bl_ops; return 0; @@ -294,7 +293,8 @@ nv50_backlight_init(struct nouveau_backlight *bl, struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); struct nvif_object *device = &drm->client.device.object; - if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1))) + if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)) || + nv_conn->base.status != connector_status_connected) return -ENODEV; if (nv_conn->type == DCB_CONNECTOR_eDP) { @@ -339,7 +339,6 @@ nv50_backlight_init(struct nouveau_backlight *bl, else *ops = &nva3_bl_ops; - props->type = BACKLIGHT_RAW; props->max_brightness = 100; return 0; @@ -407,6 +406,7 @@ nouveau_backlight_init(struct drm_connector *connector) goto fail_alloc; } + props.type = BACKLIGHT_RAW; bl->dev = backlight_device_register(backlight_name, connector->kdev, nv_encoder, ops, &props); if (IS_ERR(bl->dev)) { diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c index 262641a014b0..c91130a6be2a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c @@ -117,8 +117,12 @@ nvkm_falcon_disable(struct nvkm_falcon *falcon) int nvkm_falcon_reset(struct nvkm_falcon *falcon) { - 
nvkm_falcon_disable(falcon); - return nvkm_falcon_enable(falcon); + if (!falcon->func->reset) { + nvkm_falcon_disable(falcon); + return nvkm_falcon_enable(falcon); + } + + return falcon->func->reset(falcon); } int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c index 667fa016496e..a6ea89a5d51a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c @@ -142,11 +142,12 @@ nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver, hsfw->imem_size = desc->code_size; hsfw->imem_tag = desc->start_tag; - hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL); - memcpy(hsfw->imem, data + desc->code_off, desc->code_size); - + hsfw->imem = kmemdup(data + desc->code_off, desc->code_size, GFP_KERNEL); nvkm_firmware_put(fw); - return 0; + if (!hsfw->imem) + return -ENOMEM; + else + return 0; } int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c index 5968c7696596..40439e329aa9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c @@ -23,9 +23,38 @@ */ #include "priv.h" +static int +gm200_pmu_flcn_reset(struct nvkm_falcon *falcon) +{ + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); + + nvkm_falcon_wr32(falcon, 0x014, 0x0000ffff); + pmu->func->reset(pmu); + return nvkm_falcon_enable(falcon); +} + +const struct nvkm_falcon_func +gm200_pmu_flcn = { + .debug = 0xc08, + .fbif = 0xe00, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, + .reset = gm200_pmu_flcn_reset, + .cmdq = { 0x4a0, 0x4b0, 4 }, + .msgq = { 0x4c8, 0x4cc, 0 }, +}; + static const struct nvkm_pmu_func gm200_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .reset = gf100_pmu_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c index 148706977eec..612310d5d481 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c @@ -211,11 +211,12 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gm20b_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, .initmsg = gm20b_pmu_initmsg, + .reset = gf100_pmu_reset, }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index 00da1b873ce8..1a6f9c3af5ec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -23,7 +23,7 @@ */ #include "priv.h" -static void +void gp102_pmu_reset(struct nvkm_pmu *pmu) { struct nvkm_device *device = pmu->subdev.device; @@ -39,7 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gp102_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gp102_pmu_enabled, .reset = gp102_pmu_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c index 461f722656e2..94cfb1791af6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -78,11 +78,12 @@ gp10b_pmu_acr = { static const struct nvkm_pmu_func gp10b_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, .initmsg = gm20b_pmu_initmsg, + .reset = gp102_pmu_reset, }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index e7860d177353..21abf31f4442 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -41,9 +41,12 @@ int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32); bool gf100_pmu_enabled(struct nvkm_pmu *); void gf100_pmu_reset(struct nvkm_pmu *); +void gp102_pmu_reset(struct nvkm_pmu *pmu); void gk110_pmu_pgob(struct nvkm_pmu *, bool); +extern const struct nvkm_falcon_func gm200_pmu_flcn; + void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64); void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); int gm20b_pmu_acr_boot(struct nvkm_falcon *); diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 418638e6e3b0..af1402d83d51 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -83,6 +83,8 @@ config DRM_PANEL_SIMPLE depends on PM select VIDEOMODE_HELPERS select DRM_DP_AUX_BUS + select DRM_DP_HELPER + select DRM_KMS_HELPER help DRM panel driver for dumb panels that need at most a regulator and a GPIO to be powered up. Optionally a backlight can be attached so diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c index 2c3378a259b1..e1542451ef9d 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c @@ -612,8 +612,10 @@ static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc, int ret; vcc = devm_regulator_get_optional(dev, "vcc"); - if (IS_ERR(vcc)) + if (IS_ERR(vcc)) { dev_err(dev, "get optional vcc failed\n"); + vcc = NULL; + } dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver, struct mipi_dbi_dev, drm); diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c index 46029c5610c8..145047e19394 100644 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c @@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts, ret = i2c_smbus_write_byte_data(ts->i2c, reg, val); if (ret) - dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret); + dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret); } static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val) @@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel) return 0; } -static int rpi_touchscreen_enable(struct drm_panel *panel) +static int rpi_touchscreen_prepare(struct drm_panel *panel) { struct rpi_touchscreen *ts = panel_to_ts(panel); int i; @@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel) rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01); msleep(100); + return 0; +} + +static int rpi_touchscreen_enable(struct drm_panel *panel) +{ + struct rpi_touchscreen *ts = panel_to_ts(panel); + /* Turn on the backlight. 
*/ rpi_touchscreen_i2c_write(ts, REG_PWM, 255); @@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel, static const struct drm_panel_funcs rpi_touchscreen_funcs = { .disable = rpi_touchscreen_disable, .unprepare = rpi_touchscreen_noop, - .prepare = rpi_touchscreen_noop, + .prepare = rpi_touchscreen_prepare, .enable = rpi_touchscreen_enable, .get_modes = rpi_touchscreen_get_modes, }; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index b7b654f2dfd9..f9242c19b458 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2510,7 +2510,7 @@ static const struct display_timing innolux_g070y2_l01_timing = { static const struct panel_desc innolux_g070y2_l01 = { .timings = &innolux_g070y2_l01_timing, .num_timings = 1, - .bpc = 6, + .bpc = 8, .size = { .width = 152, .height = 91, diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index bbe628b306ee..f8355de6e335 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -360,8 +360,11 @@ int panfrost_gpu_init(struct panfrost_device *pfdev) panfrost_gpu_init_features(pfdev); - dma_set_mask_and_coherent(pfdev->dev, + err = dma_set_mask_and_coherent(pfdev->dev, DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features))); + if (err) + return err; + dma_set_max_seg_size(pfdev->dev, UINT_MAX); irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 0fce73b9a646..70bd84b7ef2b 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, * so don't register a backlight device */ if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && - (rdev->pdev->device == 0x6741)) + (rdev->pdev->device == 0x6741) && + !dmi_match(DMI_PRODUCT_NAME, "iMac12,1")) return; if (!radeon_encoder->enc_priv) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 607ad5620bd9..1546abcadacf 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -204,7 +204,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) /* Check if bpc is within clock limit. 
Try to degrade gracefully otherwise */ if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) { - if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) && + if ((connector->display_info.edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30) && (mode_clock * 5/4 <= max_tmds_clock)) bpc = 10; else diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 830bdd5e9b7c..8677c8271678 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } - ret = clk_prepare_enable(hdmi->vpll_clk); - if (ret) { - DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n", - ret); - return ret; - } - hdmi->phy = devm_phy_optional_get(dev, "hdmi"); if (IS_ERR(hdmi->phy)) { ret = PTR_ERR(hdmi->phy); @@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } + ret = clk_prepare_enable(hdmi->vpll_clk); + if (ret) { + DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n", + ret); + return ret; + } + drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs); drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c index 6b4759ed6bfd..c491429f1a02 100644 --- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c +++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c @@ -131,8 +131,10 @@ sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in) return false; txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); - if (!txmsg) + if (!txmsg) { + kfree(out); return false; + } drm_dp_encode_sideband_req(in, txmsg); ret = drm_dp_decode_sideband_req(txmsg, out); diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index b64d93da651d..5e2b0175df36 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -658,8 +658,10 @@ int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node) return -EPROBE_DEFER; phy = platform_get_drvdata(pdev); - if (!phy) + if (!phy) { + put_device(&pdev->dev); return -EPROBE_DEFER; + } hdmi->phy = phy; diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index 145833a9d82d..5b3fbee18671 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -111,10 +111,10 @@ /* format 13 is semi-planar YUV411 VUVU */ #define SUN8I_MIXER_FBFMT_YUV411 14 /* format 15 doesn't exist */ -/* format 16 is P010 YVU */ -#define SUN8I_MIXER_FBFMT_P010_YUV 17 -/* format 18 is P210 YVU */ -#define SUN8I_MIXER_FBFMT_P210_YUV 19 +#define SUN8I_MIXER_FBFMT_P010_YUV 16 +/* format 17 is P010 YVU */ +#define SUN8I_MIXER_FBFMT_P210_YUV 18 +/* format 19 is P210 YVU */ /* format 20 is packed YVU444 10-bit */ /* format 21 is packed YUV444 10-bit */ diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index f46d377f0c30..de1333dc0d86 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -1538,8 +1538,10 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi) dsi->slave = platform_get_drvdata(gangster); of_node_put(np); - if (!dsi->slave) + if (!dsi->slave) { + put_device(&gangster->dev); return -EPROBE_DEFER; + } dsi->slave->master = dsi; } diff --git a/drivers/gpu/drm/tiny/simpledrm.c 
b/drivers/gpu/drm/tiny/simpledrm.c index 5a6e89825bc2..3e3f9ba1e885 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -779,6 +779,9 @@ static int simpledrm_device_init_modeset(struct simpledrm_device *sdev) if (ret) return ret; drm_connector_helper_add(connector, &simpledrm_connector_helper_funcs); + drm_connector_set_panel_orientation_with_quirk(connector, + DRM_MODE_PANEL_ORIENTATION_UNKNOWN, + mode->hdisplay, mode->vdisplay); formats = simpledrm_device_formats(sdev, &nformats); diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 9403c3b36aca..6407a006d6ec 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -221,6 +221,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) int ret; u32 mmu_debug; u32 ident1; + u64 mask; v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm); @@ -240,8 +241,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) return ret; mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO); - dma_set_mask_and_coherent(dev, - DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH))); + mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)); + ret = dma_set_mask_and_coherent(dev, mask); + if (ret) + return ret; + v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH); ident1 = V3D_READ(V3D_HUB_IDENT1); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 772b5831bcc6..805d6f6cba0e 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -625,7 +625,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (!render->base.perfmon) { ret = -ENOENT; - goto fail; + goto fail_perfmon; } } @@ -678,6 +678,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, fail_unreserve: mutex_unlock(&v3d->sched_lock); +fail_perfmon: drm_gem_unlock_reservations(last_job->bo, last_job->bo_count, &acquire_ctx); fail: @@ -854,7 +855,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, args->perfmon_id); if (!job->base.perfmon) { ret = -ENOENT; - goto fail; + goto fail_perfmon; } } @@ -886,6 +887,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, fail_unreserve: mutex_unlock(&v3d->sched_lock); +fail_perfmon: drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count, &acquire_ctx); fail: diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index e3ed52d96f42..3e61184e194c 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -538,9 +538,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc) if (ret) return ret; - ret = pm_runtime_put(&vc4_hdmi->pdev->dev); - if (ret) - return ret; + /* + * post_crtc_powerdown will have called pm_runtime_put, so we + * don't need it here otherwise we'll get the reference counting + * wrong. 
+ */ return 0; } diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index d09c1ea60c04..ca8506316660 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) unsigned long phy_clock; int ret; - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret) { DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port); return; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 9170d948b448..07887cbfd9cb 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1522,6 +1522,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev)); return PTR_ERR(codec_pdev); } + vc4_hdmi->audio.codec_pdev = codec_pdev; dai_link->cpus = &vc4_hdmi->audio.cpu; dai_link->codecs = &vc4_hdmi->audio.codec; @@ -1561,6 +1562,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) } +static void vc4_hdmi_audio_exit(struct vc4_hdmi *vc4_hdmi) +{ + platform_device_unregister(vc4_hdmi->audio.codec_pdev); + vc4_hdmi->audio.codec_pdev = NULL; +} + static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv) { struct vc4_hdmi *vc4_hdmi = priv; @@ -2298,6 +2305,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master, kfree(vc4_hdmi->hdmi_regset.regs); kfree(vc4_hdmi->hd_regset.regs); + vc4_hdmi_audio_exit(vc4_hdmi); vc4_hdmi_cec_exit(vc4_hdmi); vc4_hdmi_hotplug_exit(vc4_hdmi); vc4_hdmi_connector_destroy(&vc4_hdmi->connector); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index 33e9f665ab8e..c0492da73683 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -113,6 +113,7 @@ struct vc4_hdmi_audio { struct snd_soc_dai_link_component platform; struct snd_dmaengine_dai_dma_data dma_data; struct hdmi_audio_infoframe infoframe; + struct platform_device *codec_pdev; bool streaming; }; diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index 2de61b63ef91..48d3c9955f0d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -248,6 +248,9 @@ void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs) { u32 i; + if (!objs) + return; + for (i = 0; i < objs->nents; i++) drm_gem_object_put(objs->objs[i]); virtio_gpu_array_free(objs); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 3872e4cd2698..fc9f54282f7d 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -526,6 +526,7 @@ static int host1x_remove(struct platform_device *pdev) host1x_syncpt_deinit(host); reset_control_assert(host->rst); clk_disable_unprepare(host->clk); + host1x_channel_list_free(&host->channel_list); host1x_iommu_exit(host); return 0; diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c index d198a10848c6..a89a408182e6 100644 --- a/drivers/gpu/host1x/syncpt.c +++ b/drivers/gpu/host1x/syncpt.c @@ -225,27 +225,12 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, void *ref; struct host1x_waitlist *waiter; int err = 0, check_count = 0; - u32 val; if (value) - *value = 0; - - /* first check cache */ - if (host1x_syncpt_is_expired(sp, thresh)) { - if (value) - *value = host1x_syncpt_load(sp); + *value = host1x_syncpt_load(sp); + if (host1x_syncpt_is_expired(sp, thresh)) return 0; - } - - /* try to read from register 
*/ - val = host1x_hw_syncpt_load(sp->host, sp); - if (host1x_syncpt_is_expired(sp, thresh)) { - if (value) - *value = val; - - goto done; - } if (!timeout) { err = -EAGAIN; diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c index 666223c6bec4..0a34e0ab4fe6 100644 --- a/drivers/gpu/ipu-v3/ipu-di.c +++ b/drivers/gpu/ipu-v3/ipu-di.c @@ -447,8 +447,9 @@ static void ipu_di_config_clock(struct ipu_di *di, error = rate / (sig->mode.pixelclock / 1000); - dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n", - rate, div, (signed)(error - 1000) / 10, error % 10); + dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n", + rate, div, error < 1000 ? '-' : '+', + abs(error - 1000) / 10, abs(error - 1000) % 10); /* Allow a 1% error */ if (error < 1010 && error >= 990) { diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c index ce7740ef449b..51d0875a3480 100644 --- a/drivers/greybus/svc.c +++ b/drivers/greybus/svc.c @@ -866,8 +866,14 @@ static int gb_svc_hello(struct gb_operation *op) gb_svc_debugfs_init(svc); - return gb_svc_queue_deferred_request(op); + ret = gb_svc_queue_deferred_request(op); + if (ret) + goto err_remove_debugfs; + + return 0; +err_remove_debugfs: + gb_svc_debugfs_exit(svc); err_unregister_device: gb_svc_watchdog_destroy(svc); device_del(&svc->dev); diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c index 05c007b213f2..561bb27f42b1 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c @@ -36,11 +36,11 @@ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_ { union cmd_response cmd_resp; - /* Get response with status within a max of 800 ms timeout */ + /* Get response with status within a max of 1600 ms timeout */ if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp, (cmd_resp.response_v2.response == sensor_sts && cmd_resp.response_v2.status == 0 && (sid == 0xff || - cmd_resp.response_v2.sensor_id == sid)), 500, 800000)) + cmd_resp.response_v2.sensor_id == sid)), 500, 1600000)) return cmd_resp.response_v2.response; return SENSOR_DISABLED; @@ -88,6 +88,44 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata) writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0); } +static void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata) +{ + if (readl(privdata->mmio + AMD_P2C_MSG(4))) { + writel(0, privdata->mmio + AMD_P2C_MSG(4)); + writel(0xf, privdata->mmio + AMD_P2C_MSG(5)); + } +} + +static void amd_sfh_clear_intr(struct amd_mp2_dev *privdata) +{ + if (privdata->mp2_ops->clear_intr) + privdata->mp2_ops->clear_intr(privdata); +} + +static irqreturn_t amd_sfh_irq_handler(int irq, void *data) +{ + amd_sfh_clear_intr(data); + + return IRQ_HANDLED; +} + +static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata) +{ + int rc; + + pci_intx(privdata->pdev, true); + + rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq, + amd_sfh_irq_handler, 0, DRIVER_NAME, privdata); + if (rc) { + dev_err(&privdata->pdev->dev, "failed to request irq %d err=%d\n", + privdata->pdev->irq, rc); + return rc; + } + + return 0; +} + void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info) { union sfh_cmd_param cmd_param; @@ -192,6 +230,8 @@ static void amd_mp2_pci_remove(void *privdata) struct amd_mp2_dev *mp2 = privdata; amd_sfh_hid_client_deinit(privdata); mp2->mp2_ops->stop_all(mp2); + pci_intx(mp2->pdev, false); + amd_sfh_clear_intr(mp2); } static const struct 
amd_mp2_ops amd_sfh_ops_v2 = { @@ -199,6 +239,8 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = { .stop = amd_stop_sensor_v2, .stop_all = amd_stop_all_sensor_v2, .response = amd_sfh_wait_response_v2, + .clear_intr = amd_sfh_clear_intr_v2, + .init_intr = amd_sfh_irq_init_v2, }; static const struct amd_mp2_ops amd_sfh_ops = { @@ -224,6 +266,14 @@ static void mp2_select_ops(struct amd_mp2_dev *privdata) } } +static int amd_sfh_irq_init(struct amd_mp2_dev *privdata) +{ + if (privdata->mp2_ops->init_intr) + return privdata->mp2_ops->init_intr(privdata); + + return 0; +} + static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct amd_mp2_dev *privdata; @@ -257,9 +307,20 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i mp2_select_ops(privdata); + rc = amd_sfh_irq_init(privdata); + if (rc) { + dev_err(&pdev->dev, "amd_sfh_irq_init failed\n"); + return rc; + } + rc = amd_sfh_hid_client_init(privdata); - if (rc) + if (rc) { + amd_sfh_clear_intr(privdata); + dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n"); return rc; + } + + amd_sfh_clear_intr(privdata); return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata); } @@ -287,6 +348,9 @@ static int __maybe_unused amd_mp2_pci_resume(struct device *dev) } } + schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP)); + amd_sfh_clear_intr(mp2); + return 0; } @@ -310,6 +374,9 @@ static int __maybe_unused amd_mp2_pci_suspend(struct device *dev) } } + cancel_delayed_work_sync(&cl_data->work_buffer); + amd_sfh_clear_intr(mp2); + return 0; } diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h index 1ff6f83cb6fd..00fc083dc123 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h @@ -48,7 +48,7 @@ union sfh_cmd_base { } s; struct { u32 cmd_id : 4; - u32 intr_enable : 1; + u32 intr_disable : 1; u32 rsvd1 : 3; u32 length : 7; u32 mem_type : 1; @@ -140,5 +140,7 @@ struct amd_mp2_ops { void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx); void (*stop_all)(struct amd_mp2_dev *privdata); int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts); + void (*clear_intr)(struct amd_mp2_dev *privdata); + int (*init_intr)(struct amd_mp2_dev *privdata); }; #endif diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c index 0c3697219382..07eb3281b88d 100644 --- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c +++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c @@ -26,6 +26,7 @@ #define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02 #define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05 #define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04 +#define ILLUMINANCE_MASK GENMASK(14, 0) int get_report_descriptor(int sensor_idx, u8 *rep_desc) { @@ -245,7 +246,8 @@ u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_ get_common_inputs(&als_input.common_property, report_id); /* For ALS ,V2 Platforms uses C2P_MSG5 register instead of DRAM access method */ if (supported_input == V2_STATUS) - als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5)); + als_input.illuminance_value = + readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK; else als_input.illuminance_value = (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER; diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index fa57d05badf7..f48d3534e020 100644 --- 
a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -825,7 +825,9 @@ static const char *keys[KEY_MAX + 1] = { [KEY_F22] = "F22", [KEY_F23] = "F23", [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD", [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3", - [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend", + [KEY_PROG4] = "Prog4", + [KEY_ALL_APPLICATIONS] = "AllApplications", + [KEY_SUSPEND] = "Suspend", [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play", [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost", [KEY_PRINT] = "Print", [KEY_HP] = "HP", @@ -934,6 +936,7 @@ static const char *keys[KEY_MAX + 1] = { [KEY_ASSISTANT] = "Assistant", [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext", [KEY_EMOJI_PICKER] = "EmojiPicker", + [KEY_DICTATE] = "Dictate", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", [KEY_BRIGHTNESS_MAX] = "BrightnessMax", [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index 8e960d7b233b..2876cb6a7dca 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -228,7 +228,6 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct elo_priv *priv; int ret; - struct usb_device *udev; if (!hid_is_usb(hdev)) return -EINVAL; @@ -238,8 +237,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) return -ENOMEM; INIT_DELAYED_WORK(&priv->work, elo_work); - udev = interface_to_usbdev(to_usb_interface(hdev->dev.parent)); - priv->usbdev = usb_get_dev(udev); + priv->usbdev = interface_to_usbdev(to_usb_interface(hdev->dev.parent)); hid_set_drvdata(hdev, priv); @@ -270,8 +268,6 @@ static void elo_remove(struct hid_device *hdev) { struct elo_priv *priv = hid_get_drvdata(hdev); - usb_put_dev(priv->usbdev); - hid_hw_stop(hdev); cancel_delayed_work_sync(&priv->work); kfree(priv); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index bdedf594e2d1..645a5f566d23 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -1353,6 +1353,7 @@ #define USB_VENDOR_ID_UGTIZER 0x2179 #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053 #define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077 +#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004 #define USB_VENDOR_ID_VIEWSONIC 0x0543 #define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 3d33c0c06cbb..3172987a9ebd 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -991,6 +991,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break; + case 0x0d8: map_key_clear(KEY_DICTATE); break; case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break; case 0x0e0: map_abs_clear(ABS_VOLUME); break; @@ -1082,6 +1083,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break; + case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS); break; + case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break; case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break; case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break; diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 7106b921b53c..c358778e070b 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -1068,6 +1068,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev, workitem.reports_supported |= STD_KEYBOARD; break; case 0x0f: + case 0x11: device_type = "eQUAD Lightspeed 
1.2"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); workitem.reports_supported |= STD_KEYBOARD; diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 65b711476174..544d1197aca4 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c index 0c92b7f9b8b8..a28c3e575650 100644 --- a/drivers/hid/hid-thrustmaster.c +++ b/drivers/hid/hid-thrustmaster.c @@ -64,7 +64,9 @@ struct tm_wheel_info { */ static const struct tm_wheel_info tm_wheels_infos[] = { {0x0306, 0x0006, "Thrustmaster T150RS"}, + {0x0200, 0x0005, "Thrustmaster T300RS (Missing Attachment)"}, {0x0206, 0x0005, "Thrustmaster T300RS"}, + {0x0209, 0x0005, "Thrustmaster T300RS (Open Wheel Attachment)"}, {0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"}, {0x0002, 0x0002, "Thrustmaster T500RS"} //{0x0407, 0x0001, "Thrustmaster TMX"} @@ -158,6 +160,12 @@ static void thrustmaster_interrupts(struct hid_device *hdev) return; } + if (usbif->cur_altsetting->desc.bNumEndpoints < 2) { + kfree(send_buf); + hid_err(hdev, "Wrong number of endpoints?\n"); + return; + } + ep = &usbif->cur_altsetting->endpoint[1]; b_ep = ep->desc.bEndpointAddress; diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c index 576518e704ee..d57ec1767037 100644 --- a/drivers/hid/hid-vivaldi.c +++ b/drivers/hid/hid-vivaldi.c @@ -143,7 +143,7 @@ out: static int vivaldi_input_configured(struct hid_device *hdev, struct hid_input *hidinput) { - return sysfs_create_group(&hdev->dev.kobj, &input_attribute_group); + return devm_device_add_group(&hdev->dev, &input_attribute_group); } static const struct hid_device_id vivaldi_table[] = { diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 4804d71e5293..65c1f20ec420 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -615,6 +615,17 @@ static int i2c_hid_get_raw_report(struct hid_device *hid, if (report_type == HID_OUTPUT_REPORT) return -EINVAL; + /* + * In case of unnumbered reports the response from the device will + * not have the report ID that the upper layers expect, so we need + * to stash it in the buffer ourselves and adjust the data size. 
+ */ + if (!report_number) { + buf[0] = 0; + buf++; + count--; + } + /* +2 bytes to include the size of the reply in the query buffer */ ask_count = min(count + 2, (size_t)ihid->bufsize); @@ -636,6 +647,9 @@ static int i2c_hid_get_raw_report(struct hid_device *hid, count = min(count, ret_count - 2); memcpy(buf, ihid->rawbuf + 2, count); + if (!report_number) + count++; + return count; } @@ -652,17 +666,19 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf, mutex_lock(&ihid->reset_lock); - if (report_id) { - buf++; - count--; - } - + /* + * Note that both numbered and unnumbered reports passed here + * are supposed to have report ID stored in the 1st byte of the + * buffer, so we strip it off unconditionally before passing payload + * to i2c_hid_set_or_send_report which takes care of encoding + * everything properly. + */ ret = i2c_hid_set_or_send_report(client, report_type == HID_FEATURE_REPORT ? 0x03 : 0x02, - report_id, buf, count, use_data); + report_id, buf + 1, count - 1, use_data); - if (report_id && ret >= 0) - ret++; /* add report_id to the number of transfered bytes */ + if (ret >= 0) + ret++; /* add report_id to the number of transferred bytes */ mutex_unlock(&ihid->reset_lock); diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c index b4dad66fa954..ec6c73f75ffe 100644 --- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c +++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c @@ -27,7 +27,6 @@ struct i2c_hid_of_goodix { struct regulator *vdd; struct notifier_block nb; - struct mutex regulator_mutex; struct gpio_desc *reset_gpio; const struct goodix_i2c_hid_timing_data *timings; }; @@ -67,8 +66,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb, container_of(nb, struct i2c_hid_of_goodix, nb); int ret = NOTIFY_OK; - mutex_lock(&ihid_goodix->regulator_mutex); - switch (event) { case REGULATOR_EVENT_PRE_DISABLE: gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1); @@ -87,8 +84,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb, break; } - mutex_unlock(&ihid_goodix->regulator_mutex); - return ret; } @@ -102,8 +97,6 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client, if (!ihid_goodix) return -ENOMEM; - mutex_init(&ihid_goodix->regulator_mutex); - ihid_goodix->ops.power_up = goodix_i2c_hid_power_up; ihid_goodix->ops.power_down = goodix_i2c_hid_power_down; @@ -130,25 +123,28 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client, * long. Holding the controller in reset apparently draws extra * power. */ - mutex_lock(&ihid_goodix->regulator_mutex); ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify; ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb); - if (ret) { - mutex_unlock(&ihid_goodix->regulator_mutex); + if (ret) return dev_err_probe(&client->dev, ret, "regulator notifier request failed\n"); - } /* * If someone else is holding the regulator on (or the regulator is * an always-on one) we might never be told to deassert reset. Do it - * now. Here we'll assume that someone else might have _just - * barely_ turned the regulator on so we'll do the full - * "post_power_delay" just in case. + * now... and temporarily bump the regulator reference count just to + * make sure it is impossible for this to race with our own notifier! + * We also assume that someone else might have _just barely_ turned + * the regulator on so we'll do the full "post_power_delay" just in + * case. 
*/ - if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) + if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) { + ret = regulator_enable(ihid_goodix->vdd); + if (ret) + return ret; goodix_i2c_hid_deassert_reset(ihid_goodix, true); - mutex_unlock(&ihid_goodix->regulator_mutex); + regulator_disable(ihid_goodix->vdd); + } return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0); } diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c index 1b486f262747..6b511fadf7ad 100644 --- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c +++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c @@ -657,21 +657,12 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data, */ payload_max_size &= ~(L1_CACHE_BYTES - 1); - dma_buf = kmalloc(payload_max_size, GFP_KERNEL | GFP_DMA32); + dma_buf = dma_alloc_coherent(devc, payload_max_size, &dma_buf_phy, GFP_KERNEL); if (!dma_buf) { client_data->flag_retry = true; return -ENOMEM; } - dma_buf_phy = dma_map_single(devc, dma_buf, payload_max_size, - DMA_TO_DEVICE); - if (dma_mapping_error(devc, dma_buf_phy)) { - dev_err(cl_data_to_dev(client_data), "DMA map failed\n"); - client_data->flag_retry = true; - rv = -ENOMEM; - goto end_err_dma_buf_release; - } - ldr_xfer_dma_frag.fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT; ldr_xfer_dma_frag.fragment.xfer_mode = LOADER_XFER_MODE_DIRECT_DMA; ldr_xfer_dma_frag.ddr_phys_addr = (u64)dma_buf_phy; @@ -691,14 +682,7 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data, ldr_xfer_dma_frag.fragment.size = fragment_size; memcpy(dma_buf, &fw->data[fragment_offset], fragment_size); - dma_sync_single_for_device(devc, dma_buf_phy, - payload_max_size, - DMA_TO_DEVICE); - - /* - * Flush cache here because the dma_sync_single_for_device() - * does not do for x86. - */ + /* Flush cache to be sure the data is in main memory. */ clflush_cache_range(dma_buf, payload_max_size); dev_dbg(cl_data_to_dev(client_data), @@ -721,15 +705,8 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data, fragment_offset += fragment_size; } - dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE); - kfree(dma_buf); - return 0; - end_err_resp_buf_release: - /* Free ISH buffer if not done already, in error case */ - dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE); -end_err_dma_buf_release: - kfree(dma_buf); + dma_free_coherent(devc, payload_max_size, dma_buf, dma_buf_phy); return rv; } diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig index d1123ceb38f3..9a074cbdef78 100644 --- a/drivers/hv/Kconfig +++ b/drivers/hv/Kconfig @@ -18,6 +18,7 @@ config HYPERV_TIMER config HYPERV_UTILS tristate "Microsoft Hyper-V Utilities driver" depends on HYPERV && CONNECTOR && NLS + depends on PTP_1588_CLOCK_OPTIONAL help Select this option to enable the Hyper-V Utilities. diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 142308526ec6..ce76fc382799 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -380,7 +380,7 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel) * execute: * * (a) In the "normal (i.e., not resuming from hibernation)" path, - * the full barrier in smp_store_mb() guarantees that the store + * the full barrier in virt_store_mb() guarantees that the store * is propagated to all CPUs before the add_channel_work work * is queued. 
In turn, add_channel_work is queued before the * channel's ring buffer is allocated/initialized and the @@ -392,14 +392,14 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel) * recv_int_page before retrieving the channel pointer from the * array of channels. * - * (b) In the "resuming from hibernation" path, the smp_store_mb() + * (b) In the "resuming from hibernation" path, the virt_store_mb() * guarantees that the store is propagated to all CPUs before * the VMBus connection is marked as ready for the resume event * (cf. check_ready_for_resume_event()). The interrupt handler * of the VMBus driver and vmbus_chan_sched() can not run before * vmbus_bus_resume() has completed execution (cf. resume_noirq). */ - smp_store_mb( + virt_store_mb( vmbus_connection.channels[channel->offermsg.child_relid], channel); } diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index f2d05bff4245..3cf334c46c31 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1563,7 +1563,7 @@ static void balloon_onchannelcallback(void *context) break; default: - pr_warn("Unhandled message: type: %d\n", dm_hdr->type); + pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type); } } @@ -1653,6 +1653,38 @@ static void disable_page_reporting(void) } } +static int ballooning_enabled(void) +{ + /* + * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE), + * since currently it's unclear to us whether an unballoon request can + * make sure all page ranges are guest page size aligned. + */ + if (PAGE_SIZE != HV_HYP_PAGE_SIZE) { + pr_info("Ballooning disabled because page size is not 4096 bytes\n"); + return 0; + } + + return 1; +} + +static int hot_add_enabled(void) +{ + /* + * Disable hot add on ARM64, because we currently rely on + * memory_add_physaddr_to_nid() to get a node id of a hot add range, + * however ARM64's memory_add_physaddr_to_nid() always return 0 and + * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for + * add_memory(). + */ + if (IS_ENABLED(CONFIG_ARM64)) { + pr_info("Memory hot add disabled on ARM64\n"); + return 0; + } + + return 1; +} + static int balloon_connect_vsp(struct hv_device *dev) { struct dm_version_request version_req; @@ -1724,8 +1756,8 @@ static int balloon_connect_vsp(struct hv_device *dev) * currently still requires the bits to be set, so we have to add code * to fail the host's hot-add and balloon up/down requests, if any. */ - cap_msg.caps.cap_bits.balloon = 1; - cap_msg.caps.cap_bits.hot_add = 1; + cap_msg.caps.cap_bits.balloon = ballooning_enabled(); + cap_msg.caps.cap_bits.hot_add = hot_add_enabled(); /* * Specify our alignment requirements as it relates diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 314015d9e912..f4091143213b 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -408,7 +408,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel, static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi) { u32 priv_read_loc = rbi->priv_read_index; - u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index); + u32 write_loc; + + /* + * The Hyper-V host writes the packet data, then uses + * store_release() to update the write_index. Use load_acquire() + * here to prevent loads of the packet data from being re-ordered + * before the read of the write_index and potentially getting + * stale data. 
+ */ + write_loc = virt_load_acquire(&rbi->ring_buffer->write_index); if (write_loc >= priv_read_loc) return write_loc - priv_read_loc; diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 392c1ac4f819..aea8125a4db8 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -76,8 +76,8 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val, /* * Hyper-V should be notified only once about a panic. If we will be - * doing hyperv_report_panic_msg() later with kmsg data, don't do - * the notification here. + * doing hv_kmsg_dump() with kmsg data later, don't do the notification + * here. */ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE && hyperv_report_reg()) { @@ -99,8 +99,8 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val, /* * Hyper-V should be notified only once about a panic. If we will be - * doing hyperv_report_panic_msg() later with kmsg data, don't do - * the notification here. + * doing hv_kmsg_dump() with kmsg data later, don't do the notification + * here. */ if (hyperv_report_reg()) hyperv_report_panic(regs, val, true); @@ -1545,14 +1545,20 @@ static int vmbus_bus_init(void) if (ret) goto err_connect; + if (hv_is_isolation_supported()) + sysctl_record_panic_msg = 0; + /* * Only register if the crash MSRs are available */ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { u64 hyperv_crash_ctl; /* - * Sysctl registration is not fatal, since by default - * reporting is enabled. + * Panic message recording (sysctl_record_panic_msg) + * is enabled by default in non-isolated guests and + * disabled by default in isolated guests; the panic + * message recording won't be available in isolated + * guests should the following registration fail. */ hv_ctl_table_hdr = register_sysctl_table(hv_root_table); if (!hv_ctl_table_hdr) @@ -2027,8 +2033,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel) kobj->kset = dev->channels_kset; ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL, "%u", relid); - if (ret) + if (ret) { + kobject_put(kobj); return ret; + } ret = sysfs_create_group(kobj, &vmbus_chan_group); @@ -2037,6 +2045,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel) * The calling functions' error handling paths will cleanup the * empty channel directory. */ + kobject_put(kobj); dev_err(device, "Unable to set up channel sysfs files\n"); return ret; } @@ -2773,10 +2782,15 @@ static void __exit vmbus_exit(void) if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { kmsg_dump_unregister(&hv_kmsg_dumper); unregister_die_notifier(&hyperv_die_block); - atomic_notifier_chain_unregister(&panic_notifier_list, - &hyperv_panic_block); } + /* + * The panic notifier is always registered, hence we should + * also unconditionally unregister it here as well. + */ + atomic_notifier_chain_unregister(&panic_notifier_list, + &hyperv_panic_block); + free_page((unsigned long)hv_panic_page); unregister_sysctl_table(hv_ctl_table_hdr); hv_ctl_table_hdr = NULL; diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 3501a3ead4ba..3ae961986fc3 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -214,12 +214,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index) tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, &hwmon_thermal_ops); - /* - * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, - * so ignore that error but forward any other error. 
- */ - if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV)) - return PTR_ERR(tzd); + if (IS_ERR(tzd)) { + if (PTR_ERR(tzd) != -ENODEV) + return PTR_ERR(tzd); + dev_info(dev, "temp%d_input not attached to any thermal zone\n", + index + 1); + devm_kfree(dev, tdata); + return 0; + } err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node); if (err) diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index 0cb4a0a6cbc1..f00cd59f1d19 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -674,6 +674,9 @@ static ssize_t occ_show_caps_3(struct device *dev, case 7: val = caps->user_source; break; + case 8: + val = get_unaligned_be16(&caps->soft_min) * 1000000ULL; + break; default: return -EINVAL; } @@ -835,12 +838,13 @@ static int occ_setup_sensor_attrs(struct occ *occ) case 1: num_attrs += (sensors->caps.num_sensors * 7); break; - case 3: - show_caps = occ_show_caps_3; - fallthrough; case 2: num_attrs += (sensors->caps.num_sensors * 8); break; + case 3: + show_caps = occ_show_caps_3; + num_attrs += (sensors->caps.num_sensors * 9); + break; default: sensors->caps.num_sensors = 0; } @@ -1047,6 +1051,15 @@ static int occ_setup_sensor_attrs(struct occ *occ) attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 7, 0); attr++; + + if (sensors->caps.version > 2) { + snprintf(attr->name, sizeof(attr->name), + "power%d_cap_min_soft", s); + attr->sensor = OCC_INIT_ATTR(attr->name, 0444, + show_caps, NULL, + 8, 0); + attr++; + } } } diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h index 5020117be740..2dd4a4d240c0 100644 --- a/drivers/hwmon/occ/common.h +++ b/drivers/hwmon/occ/common.h @@ -119,6 +119,8 @@ struct occ { u8 prev_stat; u8 prev_ext_stat; u8 prev_occs_present; + u8 prev_ips_status; + u8 prev_mode; }; int occ_setup(struct occ *occ, const char *name); diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c index e50243580269..d34d6a007e8d 100644 --- a/drivers/hwmon/occ/p9_sbe.c +++ b/drivers/hwmon/occ/p9_sbe.c @@ -13,6 +13,8 @@ #include "common.h" +#define OCC_CHECKSUM_RETRIES 3 + struct p9_sbe_occ { struct occ occ; bool sbe_error; @@ -82,17 +84,22 @@ static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len) struct occ_response *resp = &occ->resp; struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ); size_t resp_len = sizeof(*resp); + int i; int rc; - rc = fsi_occ_submit(ctx->sbe, cmd, len, resp, &resp_len); - if (rc < 0) { + for (i = 0; i < OCC_CHECKSUM_RETRIES; ++i) { + rc = fsi_occ_submit(ctx->sbe, cmd, len, resp, &resp_len); + if (rc >= 0) + break; if (resp_len) { if (p9_sbe_occ_save_ffdc(ctx, resp, resp_len)) sysfs_notify(&occ->bus_dev->kobj, NULL, bin_attr_ffdc.attr.name); - } - return rc; + return rc; + } + if (rc != -EBADE) + return rc; } switch (resp->return_status) { diff --git a/drivers/hwmon/occ/sysfs.c b/drivers/hwmon/occ/sysfs.c index 03b16abef67f..b2f788a77746 100644 --- a/drivers/hwmon/occ/sysfs.c +++ b/drivers/hwmon/occ/sysfs.c @@ -19,6 +19,8 @@ #define OCC_EXT_STAT_DVFS_POWER BIT(6) #define OCC_EXT_STAT_MEM_THROTTLE BIT(5) #define OCC_EXT_STAT_QUICK_DROP BIT(4) +#define OCC_EXT_STAT_DVFS_VDD BIT(3) +#define OCC_EXT_STAT_GPU_THROTTLE GENMASK(2, 0) static ssize_t occ_sysfs_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -63,6 +65,18 @@ static ssize_t occ_sysfs_show(struct device *dev, else val = 1; break; + case 8: + val = header->ips_status; + break; + case 9: + val = header->mode; + break; + case 10: + val = !!(header->ext_status & OCC_EXT_STAT_DVFS_VDD); + break; + case 
11: + val = header->ext_status & OCC_EXT_STAT_GPU_THROTTLE; + break; default: return -EINVAL; } @@ -88,6 +102,10 @@ static SENSOR_DEVICE_ATTR(occ_mem_throttle, 0444, occ_sysfs_show, NULL, 4); static SENSOR_DEVICE_ATTR(occ_quick_pwr_drop, 0444, occ_sysfs_show, NULL, 5); static SENSOR_DEVICE_ATTR(occ_state, 0444, occ_sysfs_show, NULL, 6); static SENSOR_DEVICE_ATTR(occs_present, 0444, occ_sysfs_show, NULL, 7); +static SENSOR_DEVICE_ATTR(occ_ips_status, 0444, occ_sysfs_show, NULL, 8); +static SENSOR_DEVICE_ATTR(occ_mode, 0444, occ_sysfs_show, NULL, 9); +static SENSOR_DEVICE_ATTR(occ_dvfs_vdd, 0444, occ_sysfs_show, NULL, 10); +static SENSOR_DEVICE_ATTR(occ_gpu_throttle, 0444, occ_sysfs_show, NULL, 11); static DEVICE_ATTR_RO(occ_error); static struct attribute *occ_attributes[] = { @@ -99,6 +117,10 @@ static struct attribute *occ_attributes[] = { &sensor_dev_attr_occ_quick_pwr_drop.dev_attr.attr, &sensor_dev_attr_occ_state.dev_attr.attr, &sensor_dev_attr_occs_present.dev_attr.attr, + &sensor_dev_attr_occ_ips_status.dev_attr.attr, + &sensor_dev_attr_occ_mode.dev_attr.attr, + &sensor_dev_attr_occ_dvfs_vdd.dev_attr.attr, + &sensor_dev_attr_occ_gpu_throttle.dev_attr.attr, &dev_attr_occ_error.attr, NULL }; @@ -156,12 +178,34 @@ void occ_sysfs_poll_done(struct occ *occ) sysfs_notify(&occ->bus_dev->kobj, NULL, name); } + if ((header->ext_status & OCC_EXT_STAT_DVFS_VDD) != + (occ->prev_ext_stat & OCC_EXT_STAT_DVFS_VDD)) { + name = sensor_dev_attr_occ_dvfs_vdd.dev_attr.attr.name; + sysfs_notify(&occ->bus_dev->kobj, NULL, name); + } + + if ((header->ext_status & OCC_EXT_STAT_GPU_THROTTLE) != + (occ->prev_ext_stat & OCC_EXT_STAT_GPU_THROTTLE)) { + name = sensor_dev_attr_occ_gpu_throttle.dev_attr.attr.name; + sysfs_notify(&occ->bus_dev->kobj, NULL, name); + } + if ((header->status & OCC_STAT_MASTER) && header->occs_present != occ->prev_occs_present) { name = sensor_dev_attr_occs_present.dev_attr.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); } + if (header->ips_status != occ->prev_ips_status) { + name = sensor_dev_attr_occ_ips_status.dev_attr.attr.name; + sysfs_notify(&occ->bus_dev->kobj, NULL, name); + } + + if (header->mode != occ->prev_mode) { + name = sensor_dev_attr_occ_mode.dev_attr.attr.name; + sysfs_notify(&occ->bus_dev->kobj, NULL, name); + } + if (occ->error && occ->error != occ->prev_error) { name = dev_attr_occ_error.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); @@ -174,6 +218,8 @@ done: occ->prev_stat = header->status; occ->prev_ext_stat = header->ext_status; occ->prev_occs_present = header->occs_present; + occ->prev_ips_status = header->ips_status; + occ->prev_mode = header->mode; } int occ_setup_sysfs(struct occ *occ) diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c index d311e0557401..3b07bfb43e93 100644 --- a/drivers/hwmon/pmbus/adm1275.c +++ b/drivers/hwmon/pmbus/adm1275.c @@ -475,6 +475,7 @@ static int adm1275_probe(struct i2c_client *client) int vindex = -1, voindex = -1, cindex = -1, pindex = -1; int tindex = -1; u32 shunt; + u32 avg; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA @@ -687,7 +688,7 @@ static int adm1275_probe(struct i2c_client *client) if ((config & (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) != (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) { config |= ADM1278_VOUT_EN | ADM1278_TEMP1_EN; - ret = i2c_smbus_write_byte_data(client, + ret = i2c_smbus_write_word_data(client, ADM1275_PMON_CONFIG, config); if (ret < 0) { @@ -756,6 +757,43 @@ static int adm1275_probe(struct i2c_client *client) return -ENODEV; } + if 
(data->have_power_sampling && + of_property_read_u32(client->dev.of_node, + "adi,power-sample-average", &avg) == 0) { + if (!avg || avg > ADM1275_SAMPLES_AVG_MAX || + BIT(__fls(avg)) != avg) { + dev_err(&client->dev, + "Invalid number of power samples"); + return -EINVAL; + } + ret = adm1275_write_pmon_config(data, client, true, + ilog2(avg)); + if (ret < 0) { + dev_err(&client->dev, + "Setting power sample averaging failed with error %d", + ret); + return ret; + } + } + + if (of_property_read_u32(client->dev.of_node, + "adi,volt-curr-sample-average", &avg) == 0) { + if (!avg || avg > ADM1275_SAMPLES_AVG_MAX || + BIT(__fls(avg)) != avg) { + dev_err(&client->dev, + "Invalid number of voltage/current samples"); + return -EINVAL; + } + ret = adm1275_write_pmon_config(data, client, false, + ilog2(avg)); + if (ret < 0) { + dev_err(&client->dev, + "Setting voltage and current sample averaging failed with error %d", + ret); + return ret; + } + } + if (voindex < 0) voindex = vindex; if (vindex >= 0) { diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index e655cbf6299b..75aa97b1ecc0 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -319,6 +319,7 @@ enum pmbus_fan_mode { percent = 0, rpm }; /* * STATUS_VOUT, STATUS_INPUT */ +#define PB_VOLTAGE_VIN_OFF BIT(3) #define PB_VOLTAGE_UV_FAULT BIT(4) #define PB_VOLTAGE_UV_WARNING BIT(5) #define PB_VOLTAGE_OV_WARNING BIT(6) diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index ceb1b4db985e..d462f732f3b4 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -1465,7 +1465,7 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = { .reg = PMBUS_VIN_UV_FAULT_LIMIT, .attr = "lcrit", .alarm = "lcrit_alarm", - .sbit = PB_VOLTAGE_UV_FAULT, + .sbit = PB_VOLTAGE_UV_FAULT | PB_VOLTAGE_VIN_OFF, }, { .reg = PMBUS_VIN_OV_WARN_LIMIT, .attr = "max", @@ -2485,10 +2485,14 @@ static int pmbus_regulator_is_enabled(struct regulator_dev *rdev) { struct device *dev = rdev_get_dev(rdev); struct i2c_client *client = to_i2c_client(dev->parent); + struct pmbus_data *data = i2c_get_clientdata(client); u8 page = rdev_get_id(rdev); int ret; + mutex_lock(&data->update_lock); ret = pmbus_read_byte_data(client, page, PMBUS_OPERATION); + mutex_unlock(&data->update_lock); + if (ret < 0) return ret; @@ -2499,11 +2503,17 @@ static int _pmbus_regulator_on_off(struct regulator_dev *rdev, bool enable) { struct device *dev = rdev_get_dev(rdev); struct i2c_client *client = to_i2c_client(dev->parent); + struct pmbus_data *data = i2c_get_clientdata(client); u8 page = rdev_get_id(rdev); + int ret; - return pmbus_update_byte_data(client, page, PMBUS_OPERATION, - PB_OPERATION_CONTROL_ON, - enable ? PB_OPERATION_CONTROL_ON : 0); + mutex_lock(&data->update_lock); + ret = pmbus_update_byte_data(client, page, PMBUS_OPERATION, + PB_OPERATION_CONTROL_ON, + enable ? 
PB_OPERATION_CONTROL_ON : 0); + mutex_unlock(&data->update_lock); + + return ret; } static int pmbus_regulator_enable(struct regulator_dev *rdev) diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c index 40cdadad35e5..f85eede6d766 100644 --- a/drivers/hwmon/sch56xx-common.c +++ b/drivers/hwmon/sch56xx-common.c @@ -422,7 +422,7 @@ void sch56xx_watchdog_register(struct device *parent, u16 addr, u32 revision, data->wddev.max_timeout = 255 * 60; watchdog_set_nowayout(&data->wddev, nowayout); if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE) - set_bit(WDOG_ACTIVE, &data->wddev.status); + set_bit(WDOG_HW_RUNNING, &data->wddev.status); /* Since the watchdog uses a downcounter there is no register to read the BIOS set timeout from (if any was set at all) -> diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index a0640fa5c55b..57e94424a8d6 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -367,8 +367,12 @@ static ssize_t mode_store(struct device *dev, mode = ETM_MODE_QELEM(config->mode); /* start by clearing QE bits */ config->cfg &= ~(BIT(13) | BIT(14)); - /* if supported, Q elements with instruction counts are enabled */ - if ((mode & BIT(0)) && (drvdata->q_support & BIT(0))) + /* + * if supported, Q elements with instruction counts are enabled. + * Always set the low bit for any requested mode. Valid combos are + * 0b00, 0b01 and 0b11. + */ + if (mode && drvdata->q_support) config->cfg |= BIT(13); /* * if supported, Q elements with and without instruction diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c index 43054568430f..c30989e0675f 100644 --- a/drivers/hwtracing/coresight/coresight-syscfg.c +++ b/drivers/hwtracing/coresight/coresight-syscfg.c @@ -791,7 +791,7 @@ static int cscfg_create_device(void) err = device_register(dev); if (err) - cscfg_dev_release(dev); + put_device(dev); create_dev_exit_unlock: mutex_unlock(&cscfg_mutex); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index e17790fe35a7..fea403431f22 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -488,7 +488,7 @@ config I2C_BRCMSTB config I2C_CADENCE tristate "Cadence I2C Controller" - depends on ARCH_ZYNQ || ARM64 || XTENSA + depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST help Say yes here to select Cadence I2C Host Controller. This controller is e.g. used by Xilinx Zynq. @@ -677,7 +677,7 @@ config I2C_IMG config I2C_IMX tristate "IMX I2C interface" - depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE + depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST select I2C_SLAVE help Say Y here if you want to use the IIC bus controller on @@ -921,7 +921,7 @@ config I2C_QCOM_GENI config I2C_QUP tristate "Qualcomm QUP based I2C controller" - depends on ARCH_QCOM + depends on ARCH_QCOM || COMPILE_TEST help If you say yes to this option, support will be included for the built-in I2C interface on the Qualcomm SoCs. diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index 37443edbf754..f72c6576d8a3 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -23,6 +23,11 @@ #define BCM2835_I2C_FIFO 0x10 #define BCM2835_I2C_DIV 0x14 #define BCM2835_I2C_DEL 0x18 +/* + * 16-bit field for the number of SCL cycles to wait after rising SCL + * before deciding the slave is not responding. 
0 disables the + * timeout detection. + */ #define BCM2835_I2C_CLKT 0x1c #define BCM2835_I2C_C_READ BIT(0) @@ -402,7 +407,7 @@ static const struct i2c_adapter_quirks bcm2835_i2c_quirks = { static int bcm2835_i2c_probe(struct platform_device *pdev) { struct bcm2835_i2c_dev *i2c_dev; - struct resource *mem, *irq; + struct resource *mem; int ret; struct i2c_adapter *adap; struct clk *mclk; @@ -449,21 +454,20 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) ret = clk_prepare_enable(i2c_dev->bus_clk); if (ret) { dev_err(&pdev->dev, "Couldn't prepare clock"); - return ret; + goto err_put_exclusive_rate; } - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!irq) { - dev_err(&pdev->dev, "No IRQ resource\n"); - return -ENODEV; + i2c_dev->irq = platform_get_irq(pdev, 0); + if (i2c_dev->irq < 0) { + ret = i2c_dev->irq; + goto err_disable_unprepare_clk; } - i2c_dev->irq = irq->start; ret = request_irq(i2c_dev->irq, bcm2835_i2c_isr, IRQF_SHARED, dev_name(&pdev->dev), i2c_dev); if (ret) { dev_err(&pdev->dev, "Could not request IRQ\n"); - return -ENODEV; + goto err_disable_unprepare_clk; } adap = &i2c_dev->adapter; @@ -477,11 +481,26 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) adap->dev.of_node = pdev->dev.of_node; adap->quirks = of_device_get_match_data(&pdev->dev); + /* + * Disable the hardware clock stretching timeout. SMBUS + * specifies a limit for how long the device can stretch the + * clock, but core I2C doesn't. + */ + bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0); bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0); ret = i2c_add_adapter(adap); if (ret) - free_irq(i2c_dev->irq, i2c_dev); + goto err_free_irq; + + return 0; + +err_free_irq: + free_irq(i2c_dev->irq, i2c_dev); +err_disable_unprepare_clk: + clk_disable_unprepare(i2c_dev->bus_clk); +err_put_exclusive_rate: + clk_rate_exclusive_put(i2c_dev->bus_clk); return ret; } diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 490ee3962645..b00f35c0b066 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -673,7 +673,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev) /* set the data in/out register size for compatible SoCs */ if (of_device_is_compatible(dev->device->of_node, - "brcmstb,brcmper-i2c")) + "brcm,brcmper-i2c")) dev->data_regsz = sizeof(u8); else dev->data_regsz = sizeof(u32); diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index ef73a42577cc..07eb819072c4 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -465,18 +465,18 @@ static int meson_i2c_probe(struct platform_device *pdev) */ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0); - ret = i2c_add_adapter(&i2c->adap); - if (ret < 0) { - clk_disable_unprepare(i2c->clk); - return ret; - } - /* Disable filtering */ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SDA_FILTER | REG_SLV_SCL_FILTER, 0); meson_i2c_set_clk_div(i2c, timings.bus_freq_hz); + ret = i2c_add_adapter(&i2c->adap); + if (ret < 0) { + clk_disable_unprepare(i2c->clk); + return ret; + } + return 0; } diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c index 20f2772c0e79..2c909522f0f3 100644 --- a/drivers/i2c/busses/i2c-pasemi.c +++ b/drivers/i2c/busses/i2c-pasemi.c @@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter, TXFIFO_WR(smbus, msg->buf[msg->len-1] | (stop ? 
MTXFIFO_STOP : 0)); + + if (stop) { + err = pasemi_smb_waitready(smbus); + if (err) + goto reset_out; + } } return 0; diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index c1de8eb66169..cf54f1cb4c57 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev) cci->master[idx].adap.quirks = &cci->data->quirks; cci->master[idx].adap.algo = &cci_algo; cci->master[idx].adap.dev.parent = dev; - cci->master[idx].adap.dev.of_node = child; + cci->master[idx].adap.dev.of_node = of_node_get(child); cci->master[idx].master = idx; cci->master[idx].cci = cci; @@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev) continue; ret = i2c_add_adapter(&cci->master[i].adap); - if (ret < 0) + if (ret < 0) { + of_node_put(cci->master[i].adap.dev.of_node); goto error_i2c; + } } pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); @@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev) return 0; error_i2c: - for (; i >= 0; i--) { - if (cci->master[i].cci) + for (--i ; i >= 0; i--) { + if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); + of_node_put(cci->master[i].adap.dev.of_node); + } } error: disable_irq(cci->irq); @@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev) int i; for (i = 0; i < cci->data->num_masters; i++) { - if (cci->master[i].cci) + if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); + of_node_put(cci->master[i].adap.dev.of_node); + } cci_halt(cci, i); } diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index bb93db98404e..612343771ce2 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -756,7 +756,6 @@ static const struct i2c_adapter_quirks xiic_quirks = { static const struct i2c_adapter xiic_adapter = { .owner = THIS_MODULE, - .name = DRIVER_NAME, .class = I2C_CLASS_DEPRECATED, .algo = &xiic_algorithm, .quirks = &xiic_quirks, @@ -793,6 +792,8 @@ static int xiic_i2c_probe(struct platform_device *pdev) i2c_set_adapdata(&i2c->adap, i2c); i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = pdev->dev.of_node; + snprintf(i2c->adap.name, sizeof(i2c->adap.name), + DRIVER_NAME " %s", pdev->name); mutex_init(&i2c->lock); init_waitqueue_head(&i2c->wait); diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index cf5d049342ea..6fd2b6718b08 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -668,16 +668,21 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) i2c_dev->dev.class = i2c_dev_class; i2c_dev->dev.parent = &adap->dev; i2c_dev->dev.release = i2cdev_dev_release; - dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); + + res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); + if (res) + goto err_put_i2c_dev; res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev); - if (res) { - put_i2c_dev(i2c_dev, false); - return res; - } + if (res) + goto err_put_i2c_dev; pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr); return 0; + +err_put_i2c_dev: + put_i2c_dev(i2c_dev, false); + return res; } static int i2cdev_detach_adapter(struct device *dev, void *dummy) diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 5365199a31f4..f7a7405d4350 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -261,7 +261,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) err = 
device_create_file(&pdev->dev, &dev_attr_available_masters); if (err) - goto err_rollback; + goto err_rollback_activation; err = device_create_file(&pdev->dev, &dev_attr_current_master); if (err) @@ -271,8 +271,9 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) err_rollback_available: device_remove_file(&pdev->dev, &dev_attr_available_masters); -err_rollback: +err_rollback_activation: i2c_demux_deactivate_master(priv); +err_rollback: for (j = 0; j < i; j++) { of_node_put(priv->chan[j].parent_np); of_changeset_destroy(&priv->chan[j].chgset); diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 2fc9f4d7a708..656f0398d3e5 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -350,7 +350,8 @@ EXPORT_SYMBOL_GPL(i3c_bus_type); static enum i3c_addr_slot_status i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr) { - int status, bitpos = addr * 2; + unsigned long status; + int bitpos = addr * 2; if (addr > I2C_MAX_ADDR) return I3C_ADDR_SLOT_RSVD; diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index bda7c5e5972c..f67ff56febc8 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -1310,6 +1310,10 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m) return -ENOMEM; pos = dw_i3c_master_get_free_pos(master); + if (pos < 0) { + dw_i3c_master_free_xfer(xfer); + return pos; + } cmd = &xfer->cmds[0]; cmd->cmd_hi = 0x1; cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) | diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c index 783e551a2c85..97bb49ff5b53 100644 --- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c +++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c @@ -160,9 +160,7 @@ static int hci_dat_v1_get_index(struct i3c_hci *hci, u8 dev_addr) unsigned int dat_idx; u32 dat_w0; - for (dat_idx = find_first_bit(hci->DAT_data, hci->DAT_entries); - dat_idx < hci->DAT_entries; - dat_idx = find_next_bit(hci->DAT_data, hci->DAT_entries, dat_idx)) { + for_each_set_bit(dat_idx, hci->DAT_data, hci->DAT_entries) { dat_w0 = dat_w0_read(dat_idx); if (FIELD_GET(DAT_0_DYNAMIC_ADDRESS, dat_w0) == dev_addr) return dat_idx; diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index e8693a42ad46..3af763b4a973 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -1782,11 +1782,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "Unable to register iio device\n"); - goto err_trigger_unregister; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); err_trigger_unregister: bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1); err_buffer_cleanup: diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index f41db9e0249a..a2d29cabb389 100644 --- a/drivers/iio/accel/fxls8962af-core.c +++ b/drivers/iio/accel/fxls8962af-core.c @@ -154,12 +154,20 @@ struct fxls8962af_data { u8 watermark; }; -const struct regmap_config fxls8962af_regmap_conf = { +const struct regmap_config fxls8962af_i2c_regmap_conf = { .reg_bits = 8, .val_bits = 8, .max_register = FXLS8962AF_MAX_REG, }; -EXPORT_SYMBOL_GPL(fxls8962af_regmap_conf); +EXPORT_SYMBOL_GPL(fxls8962af_i2c_regmap_conf); + +const struct regmap_config fxls8962af_spi_regmap_conf = { + .reg_bits = 8, + .pad_bits = 8, + .val_bits = 8, 
+ .max_register = FXLS8962AF_MAX_REG, +}; +EXPORT_SYMBOL_GPL(fxls8962af_spi_regmap_conf); enum { fxls8962af_idx_x, diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c index cfb004b20455..6bde9891effb 100644 --- a/drivers/iio/accel/fxls8962af-i2c.c +++ b/drivers/iio/accel/fxls8962af-i2c.c @@ -18,7 +18,7 @@ static int fxls8962af_probe(struct i2c_client *client) { struct regmap *regmap; - regmap = devm_regmap_init_i2c(client, &fxls8962af_regmap_conf); + regmap = devm_regmap_init_i2c(client, &fxls8962af_i2c_regmap_conf); if (IS_ERR(regmap)) { dev_err(&client->dev, "Failed to initialize i2c regmap\n"); return PTR_ERR(regmap); diff --git a/drivers/iio/accel/fxls8962af-spi.c b/drivers/iio/accel/fxls8962af-spi.c index 57108d3d480b..6f4dff3238d3 100644 --- a/drivers/iio/accel/fxls8962af-spi.c +++ b/drivers/iio/accel/fxls8962af-spi.c @@ -18,7 +18,7 @@ static int fxls8962af_probe(struct spi_device *spi) { struct regmap *regmap; - regmap = devm_regmap_init_spi(spi, &fxls8962af_regmap_conf); + regmap = devm_regmap_init_spi(spi, &fxls8962af_spi_regmap_conf); if (IS_ERR(regmap)) { dev_err(&spi->dev, "Failed to initialize spi regmap\n"); return PTR_ERR(regmap); diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h index b67572c3ef06..9cbe98c3ba9a 100644 --- a/drivers/iio/accel/fxls8962af.h +++ b/drivers/iio/accel/fxls8962af.h @@ -17,6 +17,7 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq); int fxls8962af_core_remove(struct device *dev); extern const struct dev_pm_ops fxls8962af_pm_ops; -extern const struct regmap_config fxls8962af_regmap_conf; +extern const struct regmap_config fxls8962af_i2c_regmap_conf; +extern const struct regmap_config fxls8962af_spi_regmap_conf; #endif /* _FXLS8962AF_H_ */ diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 24c9387c2968..ba6c8ca488b1 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1589,11 +1589,14 @@ static int kxcjk1013_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto err_buffer_cleanup; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 09c7f10fefb6..21a99467f364 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -176,6 +176,7 @@ static const struct mma8452_event_regs trans_ev_regs = { * @enabled_events: event flags enabled and handled by this driver */ struct mma_chip_info { + const char *name; u8 chip_id; const struct iio_chan_spec *channels; int num_channels; @@ -1301,6 +1302,7 @@ enum { static const struct mma_chip_info mma_chip_info_table[] = { [mma8451] = { + .name = "mma8451", .chip_id = MMA8451_DEVICE_ID, .channels = mma8451_channels, .num_channels = ARRAY_SIZE(mma8451_channels), @@ -1325,6 +1327,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { MMA8452_INT_FF_MT, }, [mma8452] = { + .name = "mma8452", .chip_id = MMA8452_DEVICE_ID, .channels = mma8452_channels, .num_channels = ARRAY_SIZE(mma8452_channels), @@ -1341,6 +1344,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { MMA8452_INT_FF_MT, }, [mma8453] = { + .name = "mma8453", .chip_id = MMA8453_DEVICE_ID, .channels = mma8453_channels, .num_channels = 
ARRAY_SIZE(mma8453_channels), @@ -1357,6 +1361,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { MMA8452_INT_FF_MT, }, [mma8652] = { + .name = "mma8652", .chip_id = MMA8652_DEVICE_ID, .channels = mma8652_channels, .num_channels = ARRAY_SIZE(mma8652_channels), @@ -1366,6 +1371,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { .enabled_events = MMA8452_INT_FF_MT, }, [mma8653] = { + .name = "mma8653", .chip_id = MMA8653_DEVICE_ID, .channels = mma8653_channels, .num_channels = ARRAY_SIZE(mma8653_channels), @@ -1380,6 +1386,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { .enabled_events = MMA8452_INT_FF_MT, }, [fxls8471] = { + .name = "fxls8471", .chip_id = FXLS8471_DEVICE_ID, .channels = mma8451_channels, .num_channels = ARRAY_SIZE(mma8451_channels), @@ -1522,13 +1529,6 @@ static int mma8452_probe(struct i2c_client *client, struct mma8452_data *data; struct iio_dev *indio_dev; int ret; - const struct of_device_id *match; - - match = of_match_device(mma8452_dt_ids, &client->dev); - if (!match) { - dev_err(&client->dev, "unknown device model\n"); - return -ENODEV; - } indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) @@ -1537,7 +1537,14 @@ static int mma8452_probe(struct i2c_client *client, data = iio_priv(indio_dev); data->client = client; mutex_init(&data->lock); - data->chip_info = match->data; + + data->chip_info = device_get_match_data(&client->dev); + if (!data->chip_info && id) { + data->chip_info = &mma_chip_info_table[id->driver_data]; + } else { + dev_err(&client->dev, "unknown device model\n"); + return -ENODEV; + } data->vdd_reg = devm_regulator_get(&client->dev, "vdd"); if (IS_ERR(data->vdd_reg)) @@ -1581,11 +1588,11 @@ static int mma8452_probe(struct i2c_client *client, } dev_info(&client->dev, "registering %s accelerometer; ID 0x%x\n", - match->compatible, data->chip_info->chip_id); + data->chip_info->name, data->chip_info->chip_id); i2c_set_clientdata(client, indio_dev); indio_dev->info = &mma8452_info; - indio_dev->name = id->name; + indio_dev->name = data->chip_info->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = data->chip_info->channels; indio_dev->num_channels = data->chip_info->num_channels; @@ -1810,7 +1817,7 @@ MODULE_DEVICE_TABLE(i2c, mma8452_id); static struct i2c_driver mma8452_driver = { .driver = { .name = "mma8452", - .of_match_table = of_match_ptr(mma8452_dt_ids), + .of_match_table = mma8452_dt_ids, .pm = &mma8452_pm_ops, }, .probe = mma8452_probe, diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c index 4c359fb05480..c53a3398b14c 100644 --- a/drivers/iio/accel/mma9551.c +++ b/drivers/iio/accel/mma9551.c @@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto out_poweroff; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); out_poweroff: mma9551_set_device_state(client, false); diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c index ba3ecb3b57dc..1599b75724d4 100644 --- a/drivers/iio/accel/mma9553.c +++ b/drivers/iio/accel/mma9553.c @@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto out_poweroff; + goto err_pm_cleanup; } dev_dbg(&indio_dev->dev, "Registered device %s\n", 
name); return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); out_poweroff: mma9551_set_device_state(client, false); return ret; diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index e45c600fccc0..18c154afbd7a 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -76,7 +76,7 @@ #define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x) #define AD7124_CONFIG_PGA_MSK GENMASK(2, 0) #define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x) -#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(7, 6) +#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5) #define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x) /* AD7124_FILTER_X */ diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c index 42ea8bc7e780..adc5ceaef8c9 100644 --- a/drivers/iio/adc/men_z188_adc.c +++ b/drivers/iio/adc/men_z188_adc.c @@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev, struct z188_adc *adc; struct iio_dev *indio_dev; struct resource *mem; + int ret; indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc)); if (!indio_dev) @@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev, adc->mem = mem; mcb_set_drvdata(dev, indio_dev); - return iio_device_register(indio_dev); + ret = iio_device_register(indio_dev); + if (ret) + goto err_unmap; + + return 0; +err_unmap: + iounmap(adc->base); err: mcb_release_mem(mem); return -ENXIO; diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c index d84ae6b008c1..e8fc4d01f30b 100644 --- a/drivers/iio/adc/ti-tsc2046.c +++ b/drivers/iio/adc/ti-tsc2046.c @@ -388,7 +388,7 @@ static int tsc2046_adc_update_scan_mode(struct iio_dev *indio_dev, mutex_lock(&priv->slock); size = 0; - for_each_set_bit(ch_idx, active_scan_mask, indio_dev->num_channels) { + for_each_set_bit(ch_idx, active_scan_mask, ARRAY_SIZE(priv->l)) { size += tsc2046_adc_group_set_layout(priv, group, ch_idx); tsc2046_adc_group_set_cmd(priv, group, ch_idx); group++; @@ -548,7 +548,7 @@ static int tsc2046_adc_setup_spi_msg(struct tsc2046_adc_priv *priv) * enabled. 
*/ size = 0; - for (ch_idx = 0; ch_idx < priv->dcfg->num_channels; ch_idx++) + for (ch_idx = 0; ch_idx < ARRAY_SIZE(priv->l); ch_idx++) size += tsc2046_adc_group_set_layout(priv, ch_idx, ch_idx); priv->tx = devm_kzalloc(&priv->spi->dev, size, GFP_KERNEL); diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c index c6416ad795ca..256177b15c51 100644 --- a/drivers/iio/adc/twl6030-gpadc.c +++ b/drivers/iio/adc/twl6030-gpadc.c @@ -911,6 +911,8 @@ static int twl6030_gpadc_probe(struct platform_device *pdev) ret = devm_request_threaded_irq(dev, irq, NULL, twl6030_gpadc_irq_handler, IRQF_ONESHOT, "twl6030_gpadc", indio_dev); + if (ret) + return ret; ret = twl6030_gpadc_enable_irq(TWL6030_GPADC_RT_SW1_EOC_MASK); if (ret < 0) { diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c index 774eb3044edd..271d73e420c4 100644 --- a/drivers/iio/afe/iio-rescale.c +++ b/drivers/iio/afe/iio-rescale.c @@ -39,7 +39,7 @@ static int rescale_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct rescale *rescale = iio_priv(indio_dev); - unsigned long long tmp; + s64 tmp; int ret; switch (mask) { @@ -77,10 +77,10 @@ static int rescale_read_raw(struct iio_dev *indio_dev, *val2 = rescale->denominator; return IIO_VAL_FRACTIONAL; case IIO_VAL_FRACTIONAL_LOG2: - tmp = *val * 1000000000LL; - do_div(tmp, rescale->denominator); + tmp = (s64)*val * 1000000000LL; + tmp = div_s64(tmp, rescale->denominator); tmp *= rescale->numerator; - do_div(tmp, 1000000000LL); + tmp = div_s64(tmp, 1000000000LL); *val = tmp; return ret; default: diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index 17b939a367ad..81a6d09788bd 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -1188,11 +1188,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "unable to register iio device\n"); - goto err_buffer_cleanup; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index ed129321a14d..f9b4540db1f4 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -1403,6 +1403,7 @@ static int adis16480_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); const struct adis_data *adis16480_data; + irq_handler_t trigger_handler = NULL; struct iio_dev *indio_dev; struct adis16480 *st; int ret; @@ -1474,8 +1475,12 @@ static int adis16480_probe(struct spi_device *spi) st->clk_freq = st->chip_info->int_clk; } + /* Only use our trigger handler if burst mode is supported */ + if (adis16480_data->burst_len) + trigger_handler = adis16480_trigger_handler; + ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, - adis16480_trigger_handler); + trigger_handler); if (ret) return ret; diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c index 1dabfd615dab..f89724481df9 100644 --- a/drivers/iio/imu/kmx61.c +++ b/drivers/iio/imu/kmx61.c @@ -1385,7 +1385,7 @@ static int kmx61_probe(struct i2c_client *client, ret = iio_device_register(data->acc_indio_dev); if (ret < 0) { dev_err(&client->dev, "Failed to register acc iio device\n"); - goto err_buffer_cleanup_mag; + goto err_pm_cleanup; } ret = iio_device_register(data->mag_indio_dev); @@ -1398,6 +1398,9 @@ static int 
kmx61_probe(struct i2c_client *client, err_iio_unregister_acc: iio_device_unregister(data->acc_indio_dev); +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); err_buffer_cleanup_mag: if (client->irq > 0) iio_triggered_buffer_cleanup(data->mag_indio_dev); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 8dbf744c5651..a778aceba3b1 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -1372,8 +1372,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor, if (err < 0) return err; + /* + * we need to wait for sensor settling time before + * reading data in order to avoid corrupted samples + */ delay = 1000000000 / sensor->odr; - usleep_range(delay, 2 * delay); + usleep_range(3 * delay, 4 * delay); err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data)); if (err < 0) diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index 391a3380a1d1..b5966365d769 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -578,28 +578,50 @@ EXPORT_SYMBOL_GPL(iio_read_channel_average_raw); static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan, int raw, int *processed, unsigned int scale) { - int scale_type, scale_val, scale_val2, offset; + int scale_type, scale_val, scale_val2; + int offset_type, offset_val, offset_val2; s64 raw64 = raw; - int ret; - ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET); - if (ret >= 0) - raw64 += offset; + offset_type = iio_channel_read(chan, &offset_val, &offset_val2, + IIO_CHAN_INFO_OFFSET); + if (offset_type >= 0) { + switch (offset_type) { + case IIO_VAL_INT: + break; + case IIO_VAL_INT_PLUS_MICRO: + case IIO_VAL_INT_PLUS_NANO: + /* + * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO + * implicitely truncate the offset to it's integer form. + */ + break; + case IIO_VAL_FRACTIONAL: + offset_val /= offset_val2; + break; + case IIO_VAL_FRACTIONAL_LOG2: + offset_val >>= offset_val2; + break; + default: + return -EINVAL; + } + + raw64 += offset_val; + } scale_type = iio_channel_read(chan, &scale_val, &scale_val2, IIO_CHAN_INFO_SCALE); if (scale_type < 0) { /* - * Just pass raw values as processed if no scaling is - * available. + * If no channel scaling is available apply consumer scale to + * raw value and return. 
*/ - *processed = raw; + *processed = raw * scale; return 0; } switch (scale_type) { case IIO_VAL_INT: - *processed = raw64 * scale_val; + *processed = raw64 * scale_val * scale; break; case IIO_VAL_INT_PLUS_MICRO: if (scale_val2 < 0) diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c index f96f53175349..3d4d21f979fa 100644 --- a/drivers/iio/magnetometer/bmc150_magn.c +++ b/drivers/iio/magnetometer/bmc150_magn.c @@ -962,13 +962,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "unable to register iio device\n"); - goto err_disable_runtime_pm; + goto err_pm_cleanup; } dev_dbg(dev, "Registered device %s\n", name); return 0; -err_disable_runtime_pm: +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 35f0d5e7533d..1c107d6d03b9 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -2824,6 +2824,7 @@ static int cm_dreq_handler(struct cm_work *work) switch (cm_id_priv->id.state) { case IB_CM_REP_SENT: case IB_CM_DREQ_SENT: + case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->msg); break; case IB_CM_ESTABLISHED: @@ -2831,8 +2832,6 @@ static int cm_dreq_handler(struct cm_work *work) cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) ib_cancel_mad(cm_id_priv->msg); break; - case IB_CM_MRA_REP_RCVD: - break; case IB_CM_TIMEWAIT: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_DREQ_COUNTER]); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 13679c7b6577..a814dabcdff4 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2640,7 +2640,7 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout) { struct rdma_id_private *id_priv; - if (id->qp_type != IB_QPT_RC) + if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI) return -EINVAL; id_priv = container_of(id, struct rdma_id_private, id); @@ -3368,22 +3368,30 @@ err: static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, const struct sockaddr *dst_addr) { - if (!src_addr || !src_addr->sa_family) { - src_addr = (struct sockaddr *) &id->route.addr.src_addr; - src_addr->sa_family = dst_addr->sa_family; - if (IS_ENABLED(CONFIG_IPV6) && - dst_addr->sa_family == AF_INET6) { - struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; - struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; - src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; - if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) - id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; - } else if (dst_addr->sa_family == AF_IB) { - ((struct sockaddr_ib *) src_addr)->sib_pkey = - ((struct sockaddr_ib *) dst_addr)->sib_pkey; - } - } - return rdma_bind_addr(id, src_addr); + struct sockaddr_storage zero_sock = {}; + + if (src_addr && src_addr->sa_family) + return rdma_bind_addr(id, src_addr); + + /* + * When the src_addr is not specified, automatically supply an any addr + */ + zero_sock.ss_family = dst_addr->sa_family; + if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { + struct sockaddr_in6 *src_addr6 = + (struct sockaddr_in6 *)&zero_sock; + struct sockaddr_in6 *dst_addr6 = + (struct sockaddr_in6 *)dst_addr; + + src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; + if (ipv6_addr_type(&dst_addr6->sin6_addr) & 
IPV6_ADDR_LINKLOCAL) + id->route.addr.dev_addr.bound_dev_if = + dst_addr6->sin6_scope_id; + } else if (dst_addr->sa_family == AF_IB) { + ((struct sockaddr_ib *)&zero_sock)->sib_pkey = + ((struct sockaddr_ib *)dst_addr)->sib_pkey; + } + return rdma_bind_addr(id, (struct sockaddr *)&zero_sock); } /* diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 20a46d873145..59e20936b800 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2153,6 +2153,7 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return mr; mr->device = pd->device; + mr->type = IB_MR_TYPE_USER; mr->pd = pd; mr->dm = NULL; atomic_inc(&pd->usecnt); diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index 876cc78a22cc..7333646021bb 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c @@ -80,6 +80,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) unsigned long flags; struct list_head del_list; + /* Prevent freeing of mm until we are completely finished. */ + mmgrab(handler->mn.mm); + /* Unregister first so we don't get any more notifications. */ mmu_notifier_unregister(&handler->mn, handler->mn.mm); @@ -102,6 +105,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) do_remove(handler, &del_list); + /* Now the mm may be freed. */ + mmdrop(handler->mn.mm); + kfree(handler); } diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 26bea51869bf..ef8e0bdacb51 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1397,8 +1397,7 @@ static int query_port(struct rvt_dev_info *rdi, u32 port_num, 4096 : hfi1_max_mtu), IB_MTU_4096); props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu : mtu_to_enum(ppd->ibmtu, IB_MTU_4096); - props->phys_mtu = HFI1_CAP_IS_KSET(AIP) ? 
hfi1_max_mtu : - ib_mtu_enum_to_int(props->max_mtu); + props->phys_mtu = hfi1_max_mtu; return 0; } diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c index f1e5515256e0..1ac7067e21be 100644 --- a/drivers/infiniband/hw/irdma/ctrl.c +++ b/drivers/infiniband/hw/irdma/ctrl.c @@ -431,7 +431,7 @@ enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_c cqp = qp->dev->cqp; if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id || - qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1)) + qp->qp_uk.qp_id >= (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)) return IRDMA_ERR_INVALID_QP_ID; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); @@ -2551,10 +2551,10 @@ static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq, enum irdma_status_code ret_code = 0; cqp = cq->dev->cqp; - if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1)) + if (cq->cq_uk.cq_id >= (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)) return IRDMA_ERR_INVALID_CQ_ID; - if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1)) + if (cq->ceq_id >= (cq->dev->hmc_fpm_misc.max_ceqs)) return IRDMA_ERR_INVALID_CEQ_ID; ceq = cq->dev->ceq[cq->ceq_id]; @@ -3656,7 +3656,7 @@ enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq, info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size) return IRDMA_ERR_INVALID_SIZE; - if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1)) + if (info->ceq_id >= (info->dev->hmc_fpm_misc.max_ceqs)) return IRDMA_ERR_INVALID_CEQ_ID; pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; @@ -4205,7 +4205,7 @@ enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *cq, info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size) return IRDMA_ERR_INVALID_SIZE; - if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1)) + if (info->ceq_id >= (info->dev->hmc_fpm_misc.max_ceqs )) return IRDMA_ERR_INVALID_CEQ_ID; pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c index aa119441eb45..4f763e552eae 100644 --- a/drivers/infiniband/hw/irdma/hw.c +++ b/drivers/infiniband/hw/irdma/hw.c @@ -1608,7 +1608,7 @@ static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf) info.fpm_commit_buf = mem.va; info.bar0 = rf->hw.hw_addr; - info.hmc_fn_id = PCI_FUNC(rf->pcidev->devfn); + info.hmc_fn_id = rf->pf_id; info.hw = &rf->hw; status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info); if (status) diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c index d219f64b2c3d..a6f758b61b0c 100644 --- a/drivers/infiniband/hw/irdma/i40iw_if.c +++ b/drivers/infiniband/hw/irdma/i40iw_if.c @@ -77,6 +77,7 @@ static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info rf->rdma_ver = IRDMA_GEN_1; rf->gen_ops.request_reset = i40iw_request_reset; rf->pcidev = cdev_info->pcidev; + rf->pf_id = cdev_info->fid; rf->hw.hw_addr = cdev_info->hw_addr; rf->cdev = cdev_info; rf->msix_count = cdev_info->msix_count; diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c index 51a41359e0b4..c556a36e7670 100644 --- a/drivers/infiniband/hw/irdma/main.c +++ b/drivers/infiniband/hw/irdma/main.c @@ -226,6 +226,7 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf rf->hw.hw_addr = pf->hw.hw_addr; rf->pcidev = pf->pdev; rf->msix_count = pf->num_rdma_msix; + rf->pf_id = 
pf->hw.pf_id; rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector]; rf->default_vsi.vsi_idx = vsi->vsi_num; rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY; diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h index 8b215f3cee89..454b4b370386 100644 --- a/drivers/infiniband/hw/irdma/main.h +++ b/drivers/infiniband/hw/irdma/main.h @@ -257,6 +257,7 @@ struct irdma_pci_f { u8 *mem_rsrc; u8 rdma_ver; u8 rst_to; + u8 pf_id; enum irdma_protocol_used protocol_used; u32 sd_type; u32 msix_count; diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c index feebfe6bf31a..fee179fd8c67 100644 --- a/drivers/infiniband/hw/irdma/utils.c +++ b/drivers/infiniband/hw/irdma/utils.c @@ -150,31 +150,35 @@ int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct in_ifaddr *ifa = ptr; - struct net_device *netdev = ifa->ifa_dev->dev; + struct net_device *real_dev, *netdev = ifa->ifa_dev->dev; struct irdma_device *iwdev; struct ib_device *ibdev; u32 local_ipaddr; - ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA); + real_dev = rdma_vlan_dev_real_dev(netdev); + if (!real_dev) + real_dev = netdev; + + ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); if (!ibdev) return NOTIFY_DONE; iwdev = to_iwdev(ibdev); local_ipaddr = ntohl(ifa->ifa_address); ibdev_dbg(&iwdev->ibdev, - "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", netdev, - event, &local_ipaddr, netdev->dev_addr); + "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", real_dev, + event, &local_ipaddr, real_dev->dev_addr); switch (event) { case NETDEV_DOWN: - irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr, + irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr, &local_ipaddr, true, IRDMA_ARP_DELETE); - irdma_if_notify(iwdev, netdev, &local_ipaddr, true, false); + irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, false); irdma_gid_change_event(&iwdev->ibdev); break; case NETDEV_UP: case NETDEV_CHANGEADDR: - irdma_add_arp(iwdev->rf, &local_ipaddr, true, netdev->dev_addr); - irdma_if_notify(iwdev, netdev, &local_ipaddr, true, true); + irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr); + irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, true); irdma_gid_change_event(&iwdev->ibdev); break; default: @@ -196,32 +200,36 @@ int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct inet6_ifaddr *ifa = ptr; - struct net_device *netdev = ifa->idev->dev; + struct net_device *real_dev, *netdev = ifa->idev->dev; struct irdma_device *iwdev; struct ib_device *ibdev; u32 local_ipaddr6[4]; - ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA); + real_dev = rdma_vlan_dev_real_dev(netdev); + if (!real_dev) + real_dev = netdev; + + ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); if (!ibdev) return NOTIFY_DONE; iwdev = to_iwdev(ibdev); irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32); ibdev_dbg(&iwdev->ibdev, - "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", netdev, - event, local_ipaddr6, netdev->dev_addr); + "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", real_dev, + event, local_ipaddr6, real_dev->dev_addr); switch (event) { case NETDEV_DOWN: - irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr, + irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr, local_ipaddr6, false, IRDMA_ARP_DELETE); - irdma_if_notify(iwdev, netdev, local_ipaddr6, false, false); + irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, false); 
irdma_gid_change_event(&iwdev->ibdev); break; case NETDEV_UP: case NETDEV_CHANGEADDR: irdma_add_arp(iwdev->rf, local_ipaddr6, false, - netdev->dev_addr); - irdma_if_notify(iwdev, netdev, local_ipaddr6, false, true); + real_dev->dev_addr); + irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, true); irdma_gid_change_event(&iwdev->ibdev); break; default: @@ -243,14 +251,18 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct neighbour *neigh = ptr; + struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev; struct irdma_device *iwdev; struct ib_device *ibdev; __be32 *p; u32 local_ipaddr[4] = {}; bool ipv4 = true; - ibdev = ib_device_get_by_netdev((struct net_device *)neigh->dev, - RDMA_DRIVER_IRDMA); + real_dev = rdma_vlan_dev_real_dev(netdev); + if (!real_dev) + real_dev = netdev; + + ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); if (!ibdev) return NOTIFY_DONE; diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 8bbc4620a97a..4a6fdd5c09e3 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -2506,7 +2506,7 @@ static int irdma_dealloc_mw(struct ib_mw *ibmw) cqp_info = &cqp_request->info; info = &cqp_info->in.u.dealloc_stag.info; memset(info, 0, sizeof(*info)); - info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff; + info->pd_id = iwpd->sc_pd.pd_id; info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S; info->mr = false; cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG; @@ -3018,7 +3018,7 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) cqp_info = &cqp_request->info; info = &cqp_info->in.u.dealloc_stag.info; memset(info, 0, sizeof(*info)); - info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff; + info->pd_id = iwpd->sc_pd.pd_id; info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S; info->mr = true; if (iwpbl->pbl_allocated) diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index e95967aefe78..21beded40066 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1891,8 +1891,10 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table, key_level2, obj_event, GFP_KERNEL); - if (err) + if (err) { + kfree(obj_event); return err; + } INIT_LIST_HEAD(&obj_event->obj_sub_list); } diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 22e2f4d79743..cf203f879d34 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -536,8 +536,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) spin_lock_irq(&ent->lock); if (ent->disabled) goto out; - if (need_delay) + if (need_delay) { queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); + goto out; + } remove_cache_mr_locked(ent); queue_adjust_cache_locked(ent); } @@ -580,6 +582,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, ent = &cache->ent[entry]; spin_lock_irq(&ent->lock); if (list_empty(&ent->head)) { + queue_adjust_cache_locked(ent); + ent->miss++; spin_unlock_irq(&ent->lock); mr = create_cache_mr(ent); if (IS_ERR(mr)) @@ -631,6 +635,7 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { struct mlx5_cache_ent *ent = mr->cache_ent; + WRITE_ONCE(dev->cache.last_add, jiffies); spin_lock_irq(&ent->lock); list_add_tail(&mr->list, &ent->head); ent->available_mrs++; diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 0a3b28142c05..41c272980f91 100644 --- 
a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -541,7 +541,7 @@ static struct attribute *port_diagc_attributes[] = { }; static const struct attribute_group port_diagc_group = { - .name = "linkcontrol", + .name = "diag_counters", .attrs = port_diagc_attributes, }; diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index ae50b56e8913..8ef112f883a7 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -3190,7 +3190,11 @@ serr_no_r_lock: spin_lock_irqsave(&sqp->s_lock, flags); rvt_send_complete(sqp, wqe, send_status); if (sqp->ibqp.qp_type == IB_QPT_RC) { - int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + int lastwqe; + + spin_lock(&sqp->r_lock); + lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + spin_unlock(&sqp->r_lock); sqp->s_flags &= ~RVT_S_BUSY; spin_unlock_irqrestore(&sqp->s_lock, flags); diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 5501227ddc65..8ed172ab0beb 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -830,6 +830,10 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) return RESPST_ERR_INVALIDATE_RKEY; } + if (pkt->mask & RXE_END_MASK) + /* We successfully processed this new request. */ + qp->resp.msn++; + /* next expected psn, read handles this separately */ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; qp->resp.ack_psn = qp->resp.psn; @@ -837,11 +841,9 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) qp->resp.opcode = pkt->opcode; qp->resp.status = IB_WC_SUCCESS; - if (pkt->mask & RXE_COMP_MASK) { - /* We successfully processed this new request. */ - qp->resp.msn++; + if (pkt->mask & RXE_COMP_MASK) return RESPST_COMPLETE; - } else if (qp_type(qp) == IB_QPT_RC) + else if (qp_type(qp) == IB_QPT_RC) return RESPST_ACKNOWLEDGE; else return RESPST_CLEANUP; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 55ebe01ec995..a23438bacf12 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -2664,6 +2664,8 @@ static void rtrs_clt_dev_release(struct device *dev) { struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); + mutex_destroy(&clt->paths_ev_mutex); + mutex_destroy(&clt->paths_mutex); kfree(clt); } @@ -2693,6 +2695,8 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num, return ERR_PTR(-ENOMEM); } + clt->dev.class = rtrs_clt_dev_class; + clt->dev.release = rtrs_clt_dev_release; uuid_gen(&clt->paths_uuid); INIT_LIST_HEAD_RCU(&clt->paths_list); clt->paths_num = paths_num; @@ -2709,53 +2713,51 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num, init_waitqueue_head(&clt->permits_wait); mutex_init(&clt->paths_ev_mutex); mutex_init(&clt->paths_mutex); + device_initialize(&clt->dev); - clt->dev.class = rtrs_clt_dev_class; - clt->dev.release = rtrs_clt_dev_release; err = dev_set_name(&clt->dev, "%s", sessname); if (err) - goto err; + goto err_put; + /* * Suppress user space notification until * sysfs files are created */ dev_set_uevent_suppress(&clt->dev, true); - err = device_register(&clt->dev); - if (err) { - put_device(&clt->dev); - goto err; - } + err = device_add(&clt->dev); + if (err) + goto err_put; clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj); if (!clt->kobj_paths) { err = -ENOMEM; - goto err_dev; + goto err_del; } err = 
rtrs_clt_create_sysfs_root_files(clt); if (err) { kobject_del(clt->kobj_paths); kobject_put(clt->kobj_paths); - goto err_dev; + goto err_del; } dev_set_uevent_suppress(&clt->dev, false); kobject_uevent(&clt->dev.kobj, KOBJ_ADD); return clt; -err_dev: - device_unregister(&clt->dev); -err: +err_del: + device_del(&clt->dev); +err_put: free_percpu(clt->pcpu_path); - kfree(clt); + put_device(&clt->dev); return ERR_PTR(err); } static void free_clt(struct rtrs_clt *clt) { - free_permits(clt); free_percpu(clt->pcpu_path); - mutex_destroy(&clt->paths_ev_mutex); - mutex_destroy(&clt->paths_mutex); - /* release callback will free clt in last put */ + + /* + * release callback will free clt and destroy mutexes in last put + */ device_unregister(&clt->dev); } @@ -2866,6 +2868,7 @@ void rtrs_clt_close(struct rtrs_clt *clt) rtrs_clt_destroy_sess_files(sess, NULL); kobject_put(&sess->kobj); } + free_permits(clt); free_clt(clt); } EXPORT_SYMBOL(rtrs_clt_close); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 71eda91e810c..5d416ec22871 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -4038,9 +4038,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data) spin_unlock(&host->target_lock); /* - * Wait for tl_err and target port removal tasks. + * srp_queue_remove_work() queues a call to + * srp_remove_target(). The latter function cancels + * target->tl_err_work so waiting for the remove works to + * finish is sufficient. */ - flush_workqueue(system_long_wq); flush_workqueue(srp_remove_wq); kfree(host); diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index e75650e98c9e..e402915cc0c0 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -556,7 +556,7 @@ config KEYBOARD_PMIC8XXX config KEYBOARD_SAMSUNG tristate "Samsung keypad support" - depends on HAVE_CLK + depends on HAS_IOMEM && HAVE_CLK select INPUT_MATRIXKMAP help Say Y here if you want to use the keypad on your Samsung mobile diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index 43375b38ee59..8a7ce41b8c56 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ b/drivers/input/keyboard/omap4-keypad.c @@ -393,7 +393,7 @@ static int omap4_keypad_probe(struct platform_device *pdev) * revision register. 
*/ error = pm_runtime_get_sync(dev); - if (error) { + if (error < 0) { dev_err(dev, "pm_runtime_get_sync() failed\n"); pm_runtime_put_noidle(dev); return error; diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 47af62c12267..e1758d5ffe42 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -186,55 +186,21 @@ static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count, return 0; } -static int elan_enable_power(struct elan_tp_data *data) +static int elan_set_power(struct elan_tp_data *data, bool on) { int repeat = ETP_RETRY_COUNT; int error; - error = regulator_enable(data->vcc); - if (error) { - dev_err(&data->client->dev, - "failed to enable regulator: %d\n", error); - return error; - } - do { - error = data->ops->power_control(data->client, true); + error = data->ops->power_control(data->client, on); if (error >= 0) return 0; msleep(30); } while (--repeat > 0); - dev_err(&data->client->dev, "failed to enable power: %d\n", error); - return error; -} - -static int elan_disable_power(struct elan_tp_data *data) -{ - int repeat = ETP_RETRY_COUNT; - int error; - - do { - error = data->ops->power_control(data->client, false); - if (!error) { - error = regulator_disable(data->vcc); - if (error) { - dev_err(&data->client->dev, - "failed to disable regulator: %d\n", - error); - /* Attempt to power the chip back up */ - data->ops->power_control(data->client, true); - break; - } - - return 0; - } - - msleep(30); - } while (--repeat > 0); - - dev_err(&data->client->dev, "failed to disable power: %d\n", error); + dev_err(&data->client->dev, "failed to set power %s: %d\n", + on ? "on" : "off", error); return error; } @@ -1399,9 +1365,19 @@ static int __maybe_unused elan_suspend(struct device *dev) /* Enable wake from IRQ */ data->irq_wake = (enable_irq_wake(client->irq) == 0); } else { - ret = elan_disable_power(data); + ret = elan_set_power(data, false); + if (ret) + goto err; + + ret = regulator_disable(data->vcc); + if (ret) { + dev_err(dev, "error %d disabling regulator\n", ret); + /* Attempt to power the chip back up */ + elan_set_power(data, true); + } } +err: mutex_unlock(&data->sysfs_mutex); return ret; } @@ -1412,12 +1388,18 @@ static int __maybe_unused elan_resume(struct device *dev) struct elan_tp_data *data = i2c_get_clientdata(client); int error; - if (device_may_wakeup(dev) && data->irq_wake) { + if (!device_may_wakeup(dev)) { + error = regulator_enable(data->vcc); + if (error) { + dev_err(dev, "error %d enabling regulator\n", error); + goto err; + } + } else if (data->irq_wake) { disable_irq_wake(client->irq); data->irq_wake = false; } - error = elan_enable_power(data); + error = elan_set_power(data, true); if (error) { dev_err(dev, "power up when resuming failed: %d\n", error); goto err; diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index fcb1b646436a..1581f6ef0927 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -1787,15 +1787,13 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0); input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); - /* Verify that a device really has an endpoint */ - if (intf->cur_altsetting->desc.bNumEndpoints < 1) { + err = usb_find_common_endpoints(intf->cur_altsetting, + NULL, NULL, &endpoint, NULL); + if (err) { dev_err(&intf->dev, - "interface has %d endpoints, 
but must have minimum 1\n", - intf->cur_altsetting->desc.bNumEndpoints); - err = -EINVAL; + "interface has no int in endpoints, but must have minimum 1\n"); goto fail3; } - endpoint = &intf->cur_altsetting->endpoint[0].desc; /* Go set up our URB, which is called when the tablet receives * input. diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c index 83e685557a19..cfc943423241 100644 --- a/drivers/input/touchscreen/ti_am335x_tsc.c +++ b/drivers/input/touchscreen/ti_am335x_tsc.c @@ -131,7 +131,8 @@ static void titsc_step_config(struct titsc *ts_dev) u32 stepenable; config = STEPCONFIG_MODE_HWSYNC | - STEPCONFIG_AVG_16 | ts_dev->bit_xp; + STEPCONFIG_AVG_16 | ts_dev->bit_xp | + STEPCONFIG_INM_ADCREFM; switch (ts_dev->wires) { case 4: config |= STEPCONFIG_INP(ts_dev->inp_yp) | ts_dev->bit_xn; @@ -195,7 +196,10 @@ static void titsc_step_config(struct titsc *ts_dev) STEPCONFIG_OPENDLY); end_step++; - config |= STEPCONFIG_INP(ts_dev->inp_yn); + config = STEPCONFIG_MODE_HWSYNC | + STEPCONFIG_AVG_16 | ts_dev->bit_yp | + ts_dev->bit_xn | STEPCONFIG_INM_ADCREFM | + STEPCONFIG_INP(ts_dev->inp_yn); titsc_writel(ts_dev, REG_STEPCONFIG(end_step), config); titsc_writel(ts_dev, REG_STEPDELAY(end_step), STEPCONFIG_OPENDLY); diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c index 1e70b8d2a8d7..400957f4c8c9 100644 --- a/drivers/input/touchscreen/zinitix.c +++ b/drivers/input/touchscreen/zinitix.c @@ -135,7 +135,7 @@ struct point_coord { struct touch_event { __le16 status; - u8 finger_cnt; + u8 finger_mask; u8 time_stamp; struct point_coord point_coord[MAX_SUPPORTED_FINGER_NUM]; }; @@ -311,11 +311,32 @@ static int zinitix_send_power_on_sequence(struct bt541_ts_data *bt541) static void zinitix_report_finger(struct bt541_ts_data *bt541, int slot, const struct point_coord *p) { + u16 x, y; + + if (unlikely(!(p->sub_status & + (SUB_BIT_UP | SUB_BIT_DOWN | SUB_BIT_MOVE)))) { + dev_dbg(&bt541->client->dev, "unknown finger event %#02x\n", + p->sub_status); + return; + } + + x = le16_to_cpu(p->x); + y = le16_to_cpu(p->y); + input_mt_slot(bt541->input_dev, slot); - input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER, true); - touchscreen_report_pos(bt541->input_dev, &bt541->prop, - le16_to_cpu(p->x), le16_to_cpu(p->y), true); - input_report_abs(bt541->input_dev, ABS_MT_TOUCH_MAJOR, p->width); + if (input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER, + !(p->sub_status & SUB_BIT_UP))) { + touchscreen_report_pos(bt541->input_dev, + &bt541->prop, x, y, true); + input_report_abs(bt541->input_dev, + ABS_MT_TOUCH_MAJOR, p->width); + dev_dbg(&bt541->client->dev, "finger %d %s (%u, %u)\n", + slot, p->sub_status & SUB_BIT_DOWN ? 
"down" : "move", + x, y); + } else { + dev_dbg(&bt541->client->dev, "finger %d up (%u, %u)\n", + slot, x, y); + } } static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) @@ -323,6 +344,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) struct bt541_ts_data *bt541 = bt541_handler; struct i2c_client *client = bt541->client; struct touch_event touch_event; + unsigned long finger_mask; int error; int i; @@ -335,10 +357,14 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) goto out; } - for (i = 0; i < MAX_SUPPORTED_FINGER_NUM; i++) - if (touch_event.point_coord[i].sub_status & SUB_BIT_EXIST) - zinitix_report_finger(bt541, i, - &touch_event.point_coord[i]); + finger_mask = touch_event.finger_mask; + for_each_set_bit(i, &finger_mask, MAX_SUPPORTED_FINGER_NUM) { + const struct point_coord *p = &touch_event.point_coord[i]; + + /* Only process contacts that are actually reported */ + if (p->sub_status & SUB_BIT_EXIST) + zinitix_report_finger(bt541, i, p); + } input_mt_sync_frame(bt541->input_dev); input_sync(bt541->input_dev); diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 416815a525d6..bb95edf74415 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -14,6 +14,7 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data); extern irqreturn_t amd_iommu_int_handler(int irq, void *data); extern void amd_iommu_apply_erratum_63(u16 devid); +extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu); extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); extern int amd_iommu_init_devices(void); extern void amd_iommu_uninit_devices(void); diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 8394c2787ff8..b6e0bf186cf5 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -110,6 +110,7 @@ #define PASID_MASK 0x0000ffff /* MMIO status bits */ +#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK (1 << 0) #define MMIO_STATUS_EVT_INT_MASK (1 << 1) #define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2) #define MMIO_STATUS_PPR_INT_MASK (1 << 6) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 72cad85e5e5f..204ed33c56dc 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -656,6 +656,16 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu) } /* + * This function restarts event logging in case the IOMMU experienced + * an event log buffer overflow. + */ +void amd_iommu_restart_event_logging(struct amd_iommu *iommu) +{ + iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); + iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); +} + +/* * This function resets the command buffer if the IOMMU stopped fetching * commands from it. 
*/ diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index 182c93a43efd..1eddf557636d 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -519,12 +519,6 @@ static void v1_free_pgtable(struct io_pgtable *iop) dom = container_of(pgtable, struct protection_domain, iop); - /* Update data structure */ - amd_iommu_domain_clr_pt_root(dom); - - /* Make changes visible to IOMMUs */ - amd_iommu_domain_update(dom); - /* Page-table is not visible to IOMMU anymore, so free it */ BUG_ON(pgtable->mode < PAGE_MODE_NONE || pgtable->mode > PAGE_MODE_6_LEVEL); @@ -532,6 +526,12 @@ static void v1_free_pgtable(struct io_pgtable *iop) root = (unsigned long)pgtable->root; freelist = free_sub_pt(root, pgtable->mode, freelist); + /* Update data structure */ + amd_iommu_domain_clr_pt_root(dom); + + /* Make changes visible to IOMMUs */ + amd_iommu_domain_update(dom); + free_page_list(freelist); } diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 1722bb161841..f46eb7397021 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -742,7 +742,8 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } #endif /* !CONFIG_IRQ_REMAP */ #define AMD_IOMMU_INT_MASK \ - (MMIO_STATUS_EVT_INT_MASK | \ + (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \ + MMIO_STATUS_EVT_INT_MASK | \ MMIO_STATUS_PPR_INT_MASK | \ MMIO_STATUS_GALOG_INT_MASK) @@ -752,7 +753,7 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); while (status & AMD_IOMMU_INT_MASK) { - /* Enable EVT and PPR and GA interrupts again */ + /* Enable interrupt sources again */ writel(AMD_IOMMU_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); @@ -773,6 +774,11 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) } #endif + if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) { + pr_info_ratelimited("IOMMU event log overflow\n"); + amd_iommu_restart_event_logging(iommu); + } + /* * Hardware bug: ERBT1312 * When re-enabling interrupt (by writing 1 diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index a388e318f86e..430315135cff 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -1552,6 +1552,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) dev_info(smmu->dev, "\t0x%016llx\n", (unsigned long long)evt[i]); + cond_resched(); } /* diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 2d6021644000..276b6fcc80ad 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -317,6 +317,11 @@ static bool dev_is_untrusted(struct device *dev) return dev_is_pci(dev) && to_pci_dev(dev)->untrusted; } +static bool dev_use_swiotlb(struct device *dev) +{ + return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev); +} + /* sysfs updates are serialised by the mutex of the group owning @domain */ int iommu_dma_init_fq(struct iommu_domain *domain) { @@ -510,23 +515,6 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather); } -static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - struct iommu_domain *domain = iommu_get_dma_domain(dev); - phys_addr_t phys; - - phys = iommu_iova_to_phys(domain, dma_addr); - if (WARN_ON(!phys)) - return; - - __iommu_dma_unmap(dev, dma_addr, size); - - if 
(unlikely(is_swiotlb_buffer(dev, phys))) - swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); -} - static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, size_t size, int prot, u64 dma_mask) { @@ -553,55 +541,6 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, return iova + iova_off; } -static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys, - size_t org_size, dma_addr_t dma_mask, bool coherent, - enum dma_data_direction dir, unsigned long attrs) -{ - int prot = dma_info_to_prot(dir, coherent, attrs); - struct iommu_domain *domain = iommu_get_dma_domain(dev); - struct iommu_dma_cookie *cookie = domain->iova_cookie; - struct iova_domain *iovad = &cookie->iovad; - size_t aligned_size = org_size; - void *padding_start; - size_t padding_size; - dma_addr_t iova; - - /* - * If both the physical buffer start address and size are - * page aligned, we don't need to use a bounce page. - */ - if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) && - iova_offset(iovad, phys | org_size)) { - aligned_size = iova_align(iovad, org_size); - phys = swiotlb_tbl_map_single(dev, phys, org_size, - aligned_size, dir, attrs); - - if (phys == DMA_MAPPING_ERROR) - return DMA_MAPPING_ERROR; - - /* Cleanup the padding area. */ - padding_start = phys_to_virt(phys); - padding_size = aligned_size; - - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - (dir == DMA_TO_DEVICE || - dir == DMA_BIDIRECTIONAL)) { - padding_start += org_size; - padding_size -= org_size; - } - - memset(padding_start, 0, padding_size); - } - - if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - arch_sync_dma_for_device(phys, org_size, dir); - - iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask); - if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys)) - swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs); - return iova; -} - static void __iommu_dma_free_pages(struct page **pages, int count) { while (count--) @@ -797,7 +736,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev, { phys_addr_t phys; - if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev)) + if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev)) return; phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); @@ -813,7 +752,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev, { phys_addr_t phys; - if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev)) + if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev)) return; phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); @@ -831,7 +770,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg; int i; - if (dev_is_untrusted(dev)) + if (dev_use_swiotlb(dev)) for_each_sg(sgl, sg, nelems, i) iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg), sg->length, dir); @@ -847,7 +786,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg; int i; - if (dev_is_untrusted(dev)) + if (dev_use_swiotlb(dev)) for_each_sg(sgl, sg, nelems, i) iommu_dma_sync_single_for_device(dev, sg_dma_address(sg), @@ -863,17 +802,66 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, { phys_addr_t phys = page_to_phys(page) + offset; bool coherent = dev_is_dma_coherent(dev); + int prot = dma_info_to_prot(dir, coherent, attrs); + struct iommu_domain *domain = iommu_get_dma_domain(dev); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + dma_addr_t iova, dma_mask = dma_get_mask(dev); + + /* 
+ * If both the physical buffer start address and size are + * page aligned, we don't need to use a bounce page. + */ + if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) { + void *padding_start; + size_t padding_size, aligned_size; + + aligned_size = iova_align(iovad, size); + phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size, + iova_mask(iovad), dir, attrs); + + if (phys == DMA_MAPPING_ERROR) + return DMA_MAPPING_ERROR; + + /* Cleanup the padding area. */ + padding_start = phys_to_virt(phys); + padding_size = aligned_size; + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) { + padding_start += size; + padding_size -= size; + } + + memset(padding_start, 0, padding_size); + } - return __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev), - coherent, dir, attrs); + if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + arch_sync_dma_for_device(phys, size, dir); + + iova = __iommu_dma_map(dev, phys, size, prot, dma_mask); + if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys)) + swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); + return iova; } static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir, unsigned long attrs) { - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir); - __iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs); + struct iommu_domain *domain = iommu_get_dma_domain(dev); + phys_addr_t phys; + + phys = iommu_iova_to_phys(domain, dma_handle); + if (WARN_ON(!phys)) + return; + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev)) + arch_sync_dma_for_cpu(phys, size, dir); + + __iommu_dma_unmap(dev, dma_handle, size); + + if (unlikely(is_swiotlb_buffer(dev, phys))) + swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); } /* @@ -958,7 +946,7 @@ static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *s int i; for_each_sg(sg, s, nents, i) - __iommu_dma_unmap_swiotlb(dev, sg_dma_address(s), + iommu_dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); } @@ -969,9 +957,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg, int i; for_each_sg(sg, s, nents, i) { - sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s), - s->length, dma_get_mask(dev), - dev_is_dma_coherent(dev), dir, attrs); + sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s), + s->offset, s->length, dir, attrs); if (sg_dma_address(s) == DMA_MAPPING_ERROR) goto out_unmap; sg_dma_len(s) = s->length; @@ -1011,7 +998,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, goto out; } - if (dev_is_untrusted(dev)) + if (dev_use_swiotlb(dev)) return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs); if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) @@ -1089,14 +1076,14 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, struct scatterlist *tmp; int i; - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir); - - if (dev_is_untrusted(dev)) { + if (dev_use_swiotlb(dev)) { iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs); return; } + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir); + /* * The scatterlist segments are mapped into a single * contiguous IOVA allocation, so this is incredibly easy. 
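The dma-iommu.c changes above fold the swiotlb bounce-buffer path directly into iommu_dma_map_page(): a mapping is bounced only for untrusted devices, and only when the buffer's start address or length is not aligned to the IOVA granule. A minimal user-space sketch of that alignment test follows; the helper and constant names (needs_bounce, GRANULE) are illustrative assumptions, not kernel API.

/*
 * Sketch of the alignment check used above: a buffer needs a bounce
 * copy only when its physical start or its size is not a multiple of
 * the IOVA granule, i.e. iova_offset(iovad, phys | size) != 0.
 * Assumes a 4 KiB granule for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GRANULE 4096u	/* assumed IOVA granule (page size) */

static bool needs_bounce(uint64_t phys, uint64_t size)
{
	return ((phys | size) & (uint64_t)(GRANULE - 1)) != 0;
}

int main(void)
{
	/* page-aligned start and whole-page size: no bounce needed */
	printf("%d\n", needs_bounce(0x1000, 4096));
	/* unaligned start or partial-page size: bounce buffer required */
	printf("%d\n", needs_bounce(0x1004, 100));
	return 0;
}

In the patch itself the same condition is written as dev_use_swiotlb(dev) && iova_offset(iovad, phys | size), which is why only untrusted devices with misaligned buffers pay the cost of the swiotlb copy and the padding memset.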
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 78f8c8e6803e..0b3076144beb 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2651,7 +2651,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, spin_unlock_irqrestore(&device_domain_lock, flags); /* PASID table is mandatory for a PCI device in scalable mode. */ - if (dev && dev_is_pci(dev) && sm_supported(iommu)) { + if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { ret = intel_pasid_alloc_table(dev); if (ret) { dev_err(dev, "PASID table allocation failed\n"); diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 920fcc27c9a1..cae5a73ff518 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -154,10 +154,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) cached_iova = to_iova(iovad->cached32_node); if (free == cached_iova || (free->pfn_hi < iovad->dma_32bit_pfn && - free->pfn_lo >= cached_iova->pfn_lo)) { + free->pfn_lo >= cached_iova->pfn_lo)) iovad->cached32_node = rb_next(&free->node); + + if (free->pfn_lo < iovad->dma_32bit_pfn) iovad->max32_alloc_size = iovad->dma_32bit_pfn; - } cached_iova = to_iova(iovad->cached_node); if (free->pfn_lo >= cached_iova->pfn_lo) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index d38ff29a76e8..96708cd2757f 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -982,7 +982,9 @@ static int ipmmu_probe(struct platform_device *pdev) bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); mmu->features = of_device_get_match_data(&pdev->dev); memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs); - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (ret) + return ret; /* Map I/O memory and request IRQ. */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 25b834104790..5971a1168666 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -562,22 +562,52 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct mtk_iommu_data *data; + struct device_link *link; + struct device *larbdev; + unsigned int larbid, larbidx, i; if (!fwspec || fwspec->ops != &mtk_iommu_ops) return ERR_PTR(-ENODEV); /* Not a iommu client device */ data = dev_iommu_priv_get(dev); + /* + * Link the consumer device with the smi-larb device(supplier). + * The device that connects with each a larb is a independent HW. + * All the ports in each a device should be in the same larbs. + */ + larbid = MTK_M4U_TO_LARB(fwspec->ids[0]); + for (i = 1; i < fwspec->num_ids; i++) { + larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]); + if (larbid != larbidx) { + dev_err(dev, "Can only use one larb. 
Fail@larb%d-%d.\n", + larbid, larbidx); + return ERR_PTR(-EINVAL); + } + } + larbdev = data->larb_imu[larbid].dev; + link = device_link_add(dev, larbdev, + DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS); + if (!link) + dev_err(dev, "Unable to link %s\n", dev_name(larbdev)); return &data->iommu; } static void mtk_iommu_release_device(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct mtk_iommu_data *data; + struct device *larbdev; + unsigned int larbid; if (!fwspec || fwspec->ops != &mtk_iommu_ops) return; + data = dev_iommu_priv_get(dev); + larbid = MTK_M4U_TO_LARB(fwspec->ids[0]); + larbdev = data->larb_imu[larbid].dev; + device_link_remove(dev, larbdev); + iommu_fwspec_free(dev); } @@ -848,7 +878,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) plarbdev = of_find_device_by_node(larbnode); if (!plarbdev) { of_node_put(larbnode); - return -EPROBE_DEFER; + return -ENODEV; } data->larb_imu[id].dev = &plarbdev->dev; diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index be22fcf988ce..bc7ee90b9373 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -423,7 +423,18 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev) struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct of_phandle_args iommu_spec; struct mtk_iommu_data *data; - int err, idx = 0; + int err, idx = 0, larbid, larbidx; + struct device_link *link; + struct device *larbdev; + + /* + * In the deferred case, free the existed fwspec. + * Always initialize the fwspec internally. + */ + if (fwspec) { + iommu_fwspec_free(dev); + fwspec = dev_iommu_fwspec_get(dev); + } while (!of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells", @@ -444,6 +455,23 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev) data = dev_iommu_priv_get(dev); + /* Link the consumer device with the smi-larb device(supplier) */ + larbid = mt2701_m4u_to_larb(fwspec->ids[0]); + for (idx = 1; idx < fwspec->num_ids; idx++) { + larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]); + if (larbid != larbidx) { + dev_err(dev, "Can only use one larb. 
Fail@larb%d-%d.\n", + larbid, larbidx); + return ERR_PTR(-EINVAL); + } + } + + larbdev = data->larb_imu[larbid].dev; + link = device_link_add(dev, larbdev, + DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS); + if (!link) + dev_err(dev, "Unable to link %s\n", dev_name(larbdev)); + return &data->iommu; } @@ -464,10 +492,18 @@ static void mtk_iommu_probe_finalize(struct device *dev) static void mtk_iommu_release_device(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct mtk_iommu_data *data; + struct device *larbdev; + unsigned int larbid; if (!fwspec || fwspec->ops != &mtk_iommu_ops) return; + data = dev_iommu_priv_get(dev); + larbid = mt2701_m4u_to_larb(fwspec->ids[0]); + larbdev = data->larb_imu[larbid].dev; + device_link_remove(dev, larbdev); + iommu_fwspec_free(dev); } @@ -595,7 +631,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) plarbdev = of_find_device_by_node(larbnode); if (!plarbdev) { of_node_put(larbnode); - return -EPROBE_DEFER; + return -ENODEV; } data->larb_imu[i].dev = &plarbdev->dev; diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 91749654fd49..be60f6f3a265 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1661,7 +1661,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev) num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus", sizeof(phandle)); if (num_iommus < 0) - return 0; + return ERR_PTR(-ENODEV); arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL); if (!arch_data) diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 0a281833f611..abbdaeb4bf8f 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -808,8 +808,10 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np) return NULL; mc = platform_get_drvdata(pdev); - if (!mc) + if (!mc) { + put_device(&pdev->dev); return NULL; + } return mc->smmu; } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 0cb584d9815b..fc1bfffc468f 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -3007,18 +3007,12 @@ static int __init allocate_lpi_tables(void) return 0; } -static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +static u64 read_vpend_dirty_clear(void __iomem *vlpi_base) { u32 count = 1000000; /* 1s! 
*/ bool clean; u64 val; - val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); - val &= ~GICR_VPENDBASER_Valid; - val &= ~clr; - val |= set; - gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); - do { val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); clean = !(val & GICR_VPENDBASER_Dirty); @@ -3029,10 +3023,26 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) } } while (!clean && count); - if (unlikely(val & GICR_VPENDBASER_Dirty)) { + if (unlikely(!clean)) pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + + return val; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +{ + u64 val; + + /* Make sure we wait until the RD is done with the initial scan */ + val = read_vpend_dirty_clear(vlpi_base); + val &= ~GICR_VPENDBASER_Valid; + val &= ~clr; + val |= set; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + val = read_vpend_dirty_clear(vlpi_base); + if (unlikely(val & GICR_VPENDBASER_Dirty)) val |= GICR_VPENDBASER_PendingLast; - } return val; } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 7bbccb13b896..1269284461da 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -206,11 +206,11 @@ static inline void __iomem *gic_dist_base(struct irq_data *d) } } -static void gic_do_wait_for_rwp(void __iomem *base) +static void gic_do_wait_for_rwp(void __iomem *base, u32 bit) { u32 count = 1000000; /* 1s! */ - while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { + while (readl_relaxed(base + GICD_CTLR) & bit) { count--; if (!count) { pr_err_ratelimited("RWP timeout, gone fishing\n"); @@ -224,13 +224,13 @@ static void gic_do_wait_for_rwp(void __iomem *base) /* Wait for completion of a distributor change */ static void gic_dist_wait_for_rwp(void) { - gic_do_wait_for_rwp(gic_data.dist_base); + gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP); } /* Wait for completion of a redistributor change */ static void gic_redist_wait_for_rwp(void) { - gic_do_wait_for_rwp(gic_data_rdist_rd_base()); + gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP); } #ifdef CONFIG_ARM64 @@ -1466,6 +1466,12 @@ static int gic_irq_domain_translate(struct irq_domain *d, if(fwspec->param_count != 2) return -EINVAL; + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + *hwirq = fwspec->param[0]; *type = fwspec->param[1]; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 5f22c9d65e57..99077f30f699 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1085,6 +1085,12 @@ static int gic_irq_domain_translate(struct irq_domain *d, if(fwspec->param_count != 2) return -EINVAL; + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + *hwirq = fwspec->param[0]; *type = fwspec->param[1]; diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c index 599bb6fc5f0a..47b3b165479e 100644 --- a/drivers/irqchip/irq-nvic.c +++ b/drivers/irqchip/irq-nvic.c @@ -92,6 +92,7 @@ static int __init nvic_of_init(struct device_node *node, if (!nvic_irq_domain) { pr_warn("Failed to allocate irq domain\n"); + iounmap(nvic_base); return -ENOMEM; } @@ -101,6 +102,7 @@ static int __init nvic_of_init(struct device_node *node, if (ret) { pr_warn("Failed to allocate irq chips\n"); irq_domain_remove(nvic_irq_domain); + iounmap(nvic_base); return ret; } diff --git 
a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 259065d271ef..09cc98266d30 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -398,3 +398,4 @@ out_free_priv: IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init); IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */ +IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */ diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c index 173e6520e06e..c0b457f26ec4 100644 --- a/drivers/irqchip/qcom-pdc.c +++ b/drivers/irqchip/qcom-pdc.c @@ -56,17 +56,18 @@ static u32 pdc_reg_read(int reg, u32 i) static void pdc_enable_intr(struct irq_data *d, bool on) { int pin_out = d->hwirq; + unsigned long flags; u32 index, mask; u32 enable; index = pin_out / 32; mask = pin_out % 32; - raw_spin_lock(&pdc_lock); + raw_spin_lock_irqsave(&pdc_lock, flags); enable = pdc_reg_read(IRQ_ENABLE_BANK, index); enable = on ? ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask); pdc_reg_write(IRQ_ENABLE_BANK, index, enable); - raw_spin_unlock(&pdc_lock); + raw_spin_unlock_irqrestore(&pdc_lock, flags); } static void qcom_pdc_gic_disable(struct irq_data *d) diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index bd087cca1c1d..af17459c1a5c 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -2005,7 +2005,11 @@ setup_hw(struct hfc_pci *hc) } /* Allocate memory for FIFOS */ /* the memory needs to be on a 32k boundary within the first 4G */ - dma_set_mask(&hc->pdev->dev, 0xFFFF8000); + if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) { + printk(KERN_WARNING + "HFC-PCI: No usable DMA configuration!\n"); + return -EIO; + } buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle, GFP_KERNEL); /* We silently assume the address is okay if nonzero */ diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c index e11ca6bbc7f4..c3b2c99b5cd5 100644 --- a/drivers/isdn/mISDN/dsp_pipeline.c +++ b/drivers/isdn/mISDN/dsp_pipeline.c @@ -192,7 +192,7 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline) int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) { int found = 0; - char *dup, *tok, *name, *args; + char *dup, *next, *tok, *name, *args; struct dsp_element_entry *entry, *n; struct dsp_pipeline_entry *pipeline_entry; struct mISDN_dsp_element *elem; @@ -203,10 +203,10 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) if (!list_empty(&pipeline->list)) _dsp_pipeline_destroy(pipeline); - dup = kstrdup(cfg, GFP_ATOMIC); + dup = next = kstrdup(cfg, GFP_ATOMIC); if (!dup) return 0; - while ((tok = strsep(&dup, "|"))) { + while ((tok = strsep(&next, "|"))) { if (!strlen(tok)) continue; name = strsep(&tok, "("); diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index 81aaf21212d7..2570f92b6754 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c @@ -62,6 +62,8 @@ #define PCA955X_GPIO_HIGH LED_OFF #define PCA955X_GPIO_LOW LED_FULL +#define PCA955X_BLINK_DEFAULT_MS 1000 + enum pca955x_type { pca9550, pca9551, @@ -74,6 +76,7 @@ struct pca955x_chipdef { int bits; u8 slv_addr; /* 7-bit slave address mask */ int slv_addr_shift; /* Number of bits to ignore */ + int blink_div; /* PSC divider */ }; static struct pca955x_chipdef pca955x_chipdefs[] = { @@ -81,26 +84,31 @@ static struct pca955x_chipdef pca955x_chipdefs[] = { .bits = 2, .slv_addr = /* 
110000x */ 0x60, .slv_addr_shift = 1, + .blink_div = 44, }, [pca9551] = { .bits = 8, .slv_addr = /* 1100xxx */ 0x60, .slv_addr_shift = 3, + .blink_div = 38, }, [pca9552] = { .bits = 16, .slv_addr = /* 1100xxx */ 0x60, .slv_addr_shift = 3, + .blink_div = 44, }, [ibm_pca9552] = { .bits = 16, .slv_addr = /* 0110xxx */ 0x30, .slv_addr_shift = 3, + .blink_div = 44, }, [pca9553] = { .bits = 4, .slv_addr = /* 110001x */ 0x62, .slv_addr_shift = 1, + .blink_div = 44, }, }; @@ -119,7 +127,9 @@ struct pca955x { struct pca955x_led *leds; struct pca955x_chipdef *chipdef; struct i2c_client *client; + unsigned long active_blink; unsigned long active_pins; + unsigned long blink_period; #ifdef CONFIG_LEDS_PCA955X_GPIO struct gpio_chip gpio; #endif @@ -134,19 +144,21 @@ struct pca955x_led { struct fwnode_handle *fwnode; }; +#define led_to_pca955x(l) container_of(l, struct pca955x_led, led_cdev) + struct pca955x_platform_data { struct pca955x_led *leds; int num_leds; }; /* 8 bits per input register */ -static inline int pca95xx_num_input_regs(int bits) +static inline int pca955x_num_input_regs(int bits) { return (bits + 7) / 8; } /* 4 bits per LED selector register */ -static inline int pca95xx_num_led_regs(int bits) +static inline int pca955x_num_led_regs(int bits) { return (bits + 3) / 4; } @@ -161,20 +173,25 @@ static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state) ((state & 0x3) << (led_num << 1)); } +static inline int pca955x_ledstate(u8 ls, int led_num) +{ + return (ls >> (led_num << 1)) & 0x3; +} + /* * Write to frequency prescaler register, used to program the - * period of the PWM output. period = (PSCx + 1) / 38 + * period of the PWM output. period = (PSCx + 1) / coeff + * Where for pca9551 chips coeff = 38 and for all other chips coeff = 44 */ -static int pca955x_write_psc(struct i2c_client *client, int n, u8 val) +static int pca955x_write_psc(struct pca955x *pca955x, int n, u8 val) { - struct pca955x *pca955x = i2c_get_clientdata(client); - u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + (2 * n); + u8 cmd = pca955x_num_input_regs(pca955x->chipdef->bits) + (2 * n); int ret; - ret = i2c_smbus_write_byte_data(client, cmd, val); + ret = i2c_smbus_write_byte_data(pca955x->client, cmd, val); if (ret < 0) - dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", - __func__, n, val, ret); + dev_err(&pca955x->client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", __func__, n, + val, ret); return ret; } @@ -185,16 +202,15 @@ static int pca955x_write_psc(struct i2c_client *client, int n, u8 val) * * Duty cycle is (256 - PWMx) / 256 */ -static int pca955x_write_pwm(struct i2c_client *client, int n, u8 val) +static int pca955x_write_pwm(struct pca955x *pca955x, int n, u8 val) { - struct pca955x *pca955x = i2c_get_clientdata(client); - u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + (2 * n); + u8 cmd = pca955x_num_input_regs(pca955x->chipdef->bits) + 1 + (2 * n); int ret; - ret = i2c_smbus_write_byte_data(client, cmd, val); + ret = i2c_smbus_write_byte_data(pca955x->client, cmd, val); if (ret < 0) - dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", - __func__, n, val, ret); + dev_err(&pca955x->client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", __func__, n, + val, ret); return ret; } @@ -202,16 +218,15 @@ static int pca955x_write_pwm(struct i2c_client *client, int n, u8 val) * Write to LED selector register, which determines the source that * drives the LED output. 
*/ -static int pca955x_write_ls(struct i2c_client *client, int n, u8 val) +static int pca955x_write_ls(struct pca955x *pca955x, int n, u8 val) { - struct pca955x *pca955x = i2c_get_clientdata(client); - u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n; + u8 cmd = pca955x_num_input_regs(pca955x->chipdef->bits) + 4 + n; int ret; - ret = i2c_smbus_write_byte_data(client, cmd, val); + ret = i2c_smbus_write_byte_data(pca955x->client, cmd, val); if (ret < 0) - dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", - __func__, n, val, ret); + dev_err(&pca955x->client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", __func__, n, + val, ret); return ret; } @@ -219,32 +234,42 @@ static int pca955x_write_ls(struct i2c_client *client, int n, u8 val) * Read the LED selector register, which determines the source that * drives the LED output. */ -static int pca955x_read_ls(struct i2c_client *client, int n, u8 *val) +static int pca955x_read_ls(struct pca955x *pca955x, int n, u8 *val) { - struct pca955x *pca955x = i2c_get_clientdata(client); - u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n; + u8 cmd = pca955x_num_input_regs(pca955x->chipdef->bits) + 4 + n; int ret; - ret = i2c_smbus_read_byte_data(client, cmd); + ret = i2c_smbus_read_byte_data(pca955x->client, cmd); if (ret < 0) { - dev_err(&client->dev, "%s: reg 0x%x, err %d\n", - __func__, n, ret); + dev_err(&pca955x->client->dev, "%s: reg 0x%x, err %d\n", __func__, n, ret); return ret; } *val = (u8)ret; return 0; } -static int pca955x_read_pwm(struct i2c_client *client, int n, u8 *val) +static int pca955x_read_pwm(struct pca955x *pca955x, int n, u8 *val) { - struct pca955x *pca955x = i2c_get_clientdata(client); - u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + (2 * n); + u8 cmd = pca955x_num_input_regs(pca955x->chipdef->bits) + 1 + (2 * n); int ret; - ret = i2c_smbus_read_byte_data(client, cmd); + ret = i2c_smbus_read_byte_data(pca955x->client, cmd); if (ret < 0) { - dev_err(&client->dev, "%s: reg 0x%x, err %d\n", - __func__, n, ret); + dev_err(&pca955x->client->dev, "%s: reg 0x%x, err %d\n", __func__, n, ret); + return ret; + } + *val = (u8)ret; + return 0; +} + +static int pca955x_read_psc(struct pca955x *pca955x, int n, u8 *val) +{ + u8 cmd = pca955x_num_input_regs(pca955x->chipdef->bits) + (2 * n); + int ret; + + ret = i2c_smbus_read_byte_data(pca955x->client, cmd); + if (ret < 0) { + dev_err(&pca955x->client->dev, "%s: reg 0x%x, err %d\n", __func__, n, ret); return ret; } *val = (u8)ret; @@ -253,19 +278,16 @@ static int pca955x_read_pwm(struct i2c_client *client, int n, u8 *val) static enum led_brightness pca955x_led_get(struct led_classdev *led_cdev) { - struct pca955x_led *pca955x_led = container_of(led_cdev, - struct pca955x_led, - led_cdev); + struct pca955x_led *pca955x_led = led_to_pca955x(led_cdev); struct pca955x *pca955x = pca955x_led->pca955x; u8 ls, pwm; int ret; - ret = pca955x_read_ls(pca955x->client, pca955x_led->led_num / 4, &ls); + ret = pca955x_read_ls(pca955x, pca955x_led->led_num / 4, &ls); if (ret) return ret; - ls = (ls >> ((pca955x_led->led_num % 4) << 1)) & 0x3; - switch (ls) { + switch (pca955x_ledstate(ls, pca955x_led->led_num % 4)) { case PCA955X_LS_LED_ON: ret = LED_FULL; break; @@ -273,10 +295,13 @@ static enum led_brightness pca955x_led_get(struct led_classdev *led_cdev) ret = LED_OFF; break; case PCA955X_LS_BLINK0: - ret = LED_HALF; + ret = pca955x_read_pwm(pca955x, 0, &pwm); + if (ret) + return ret; + ret = 256 - pwm; break; case PCA955X_LS_BLINK1: - ret = 
pca955x_read_pwm(pca955x->client, 1, &pwm); + ret = pca955x_read_pwm(pca955x, 1, &pwm); if (ret) return ret; ret = 255 - pwm; @@ -289,51 +314,140 @@ static enum led_brightness pca955x_led_get(struct led_classdev *led_cdev) static int pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value) { - struct pca955x_led *pca955x_led; - struct pca955x *pca955x; + struct pca955x_led *pca955x_led = led_to_pca955x(led_cdev); + struct pca955x *pca955x = pca955x_led->pca955x; + int reg = pca955x_led->led_num / 4; + int bit = pca955x_led->led_num % 4; u8 ls; - int chip_ls; /* which LSx to use (0-3 potentially) */ - int ls_led; /* which set of bits within LSx to use (0-3) */ int ret; - pca955x_led = container_of(led_cdev, struct pca955x_led, led_cdev); - pca955x = pca955x_led->pca955x; - - chip_ls = pca955x_led->led_num / 4; - ls_led = pca955x_led->led_num % 4; - mutex_lock(&pca955x->lock); - ret = pca955x_read_ls(pca955x->client, chip_ls, &ls); + ret = pca955x_read_ls(pca955x, reg, &ls); if (ret) goto out; - switch (value) { - case LED_FULL: - ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON); - break; - case LED_OFF: - ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_OFF); - break; - case LED_HALF: - ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK0); - break; - default: - /* - * Use PWM1 for all other values. This has the unwanted - * side effect of making all LEDs on the chip share the - * same brightness level if set to a value other than - * OFF, HALF, or FULL. But, this is probably better than - * just turning off for all other values. - */ - ret = pca955x_write_pwm(pca955x->client, 1, 255 - value); - if (ret) + if (test_bit(pca955x_led->led_num, &pca955x->active_blink)) { + if (value == LED_OFF) { + clear_bit(pca955x_led->led_num, &pca955x->active_blink); + ls = pca955x_ledsel(ls, bit, PCA955X_LS_LED_OFF); + } else { + ret = pca955x_write_pwm(pca955x, 0, 256 - value); goto out; - ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1); - break; + } + } else { + switch (value) { + case LED_FULL: + ls = pca955x_ledsel(ls, bit, PCA955X_LS_LED_ON); + break; + case LED_OFF: + ls = pca955x_ledsel(ls, bit, PCA955X_LS_LED_OFF); + break; + default: + /* + * Use PWM1 for all other values. This has the unwanted + * side effect of making all LEDs on the chip share the + * same brightness level if set to a value other than + * OFF or FULL. But, this is probably better than just + * turning off for all other values. 
+ */ + ret = pca955x_write_pwm(pca955x, 1, 255 - value); + if (ret) + goto out; + ls = pca955x_ledsel(ls, bit, PCA955X_LS_BLINK1); + break; + } + } + + ret = pca955x_write_ls(pca955x, reg, ls); + +out: + mutex_unlock(&pca955x->lock); + + return ret; +} + +static u8 pca955x_period_to_psc(struct pca955x *pca955x, unsigned long p) +{ + p *= pca955x->chipdef->blink_div; + p /= MSEC_PER_SEC; + p -= 1; + + return p; +} + +static unsigned long pca955x_psc_to_period(struct pca955x *pca955x, u8 psc) +{ + unsigned long p = psc; + + p += 1; + p *= MSEC_PER_SEC; + p /= pca955x->chipdef->blink_div; + + return p; +} + +static int pca955x_led_blink(struct led_classdev *led_cdev, + unsigned long *delay_on, unsigned long *delay_off) +{ + struct pca955x_led *pca955x_led = led_to_pca955x(led_cdev); + struct pca955x *pca955x = pca955x_led->pca955x; + unsigned long p = *delay_on + *delay_off; + int ret = 0; + + mutex_lock(&pca955x->lock); + + if (p) { + if (*delay_on != *delay_off) { + ret = -EINVAL; + goto out; + } + + if (p < pca955x_psc_to_period(pca955x, 0) || + p > pca955x_psc_to_period(pca955x, 0xff)) { + ret = -EINVAL; + goto out; + } + } else { + p = pca955x->active_blink ? pca955x->blink_period : + PCA955X_BLINK_DEFAULT_MS; } - ret = pca955x_write_ls(pca955x->client, chip_ls, ls); + if (!pca955x->active_blink || + pca955x->active_blink == BIT(pca955x_led->led_num) || + pca955x->blink_period == p) { + u8 psc = pca955x_period_to_psc(pca955x, p); + + if (!test_and_set_bit(pca955x_led->led_num, + &pca955x->active_blink)) { + u8 ls; + int reg = pca955x_led->led_num / 4; + int bit = pca955x_led->led_num % 4; + + ret = pca955x_read_ls(pca955x, reg, &ls); + if (ret) + goto out; + + ls = pca955x_ledsel(ls, bit, PCA955X_LS_BLINK0); + ret = pca955x_write_ls(pca955x, reg, ls); + if (ret) + goto out; + } + + if (pca955x->blink_period != p) { + pca955x->blink_period = p; + ret = pca955x_write_psc(pca955x, 0, psc); + if (ret) + goto out; + } + + p = pca955x_psc_to_period(pca955x, psc); + p /= 2; + *delay_on = p; + *delay_off = p; + } else { + ret = -EBUSY; + } out: mutex_unlock(&pca955x->lock); @@ -492,10 +606,13 @@ static int pca955x_probe(struct i2c_client *client) struct led_classdev *led; struct led_init_data init_data; struct i2c_adapter *adapter; - int i, err; + int i, bit, err, nls, reg; + u8 ls1[4]; + u8 ls2[4]; struct pca955x_platform_data *pdata; + u8 psc0; + bool keep_psc0 = false; bool set_default_label = false; - bool keep_pwm = false; char default_label[8]; enum pca955x_type chip_type; const void *md = device_get_match_data(&client->dev); @@ -559,10 +676,20 @@ static int pca955x_probe(struct i2c_client *client) mutex_init(&pca955x->lock); pca955x->client = client; pca955x->chipdef = chip; + pca955x->blink_period = PCA955X_BLINK_DEFAULT_MS; init_data.devname_mandatory = false; init_data.devicename = "pca955x"; + nls = pca955x_num_led_regs(chip->bits); + for (i = 0; i < nls; ++i) { + err = pca955x_read_ls(pca955x, i, &ls1[i]); + if (err) + return err; + + ls2[i] = ls1[i]; + } + for (i = 0; i < chip->bits; i++) { pca955x_led = &pca955x->leds[i]; pca955x_led->led_num = i; @@ -574,20 +701,25 @@ static int pca955x_probe(struct i2c_client *client) case PCA955X_TYPE_GPIO: break; case PCA955X_TYPE_LED: + bit = i % 4; + reg = i / 4; led = &pca955x_led->led_cdev; led->brightness_set_blocking = pca955x_led_set; led->brightness_get = pca955x_led_get; + led->blink_set = pca955x_led_blink; if (pdata->leds[i].default_state == LEDS_GPIO_DEFSTATE_OFF) { - err = pca955x_led_set(led, LED_OFF); - if (err) - return err; + 
ls2[reg] = pca955x_ledsel(ls2[reg], bit, + PCA955X_LS_LED_OFF); } else if (pdata->leds[i].default_state == LEDS_GPIO_DEFSTATE_ON) { - err = pca955x_led_set(led, LED_FULL); - if (err) - return err; + ls2[reg] = pca955x_ledsel(ls2[reg], bit, + PCA955X_LS_LED_ON); + } else if (pca955x_ledstate(ls2[reg], bit) == + PCA955X_LS_BLINK0) { + keep_psc0 = true; + set_bit(i, &pca955x->active_blink); } init_data.fwnode = pdata->leds[i].fwnode; @@ -616,40 +748,31 @@ static int pca955x_probe(struct i2c_client *client) return err; set_bit(i, &pca955x->active_pins); - - /* - * For default-state == "keep", let the core update the - * brightness from the hardware, then check the - * brightness to see if it's using PWM1. If so, PWM1 - * should not be written below. - */ - if (pdata->leds[i].default_state == - LEDS_GPIO_DEFSTATE_KEEP) { - if (led->brightness != LED_FULL && - led->brightness != LED_OFF && - led->brightness != LED_HALF) - keep_pwm = true; - } } } - /* PWM0 is used for half brightness or 50% duty cycle */ - err = pca955x_write_pwm(client, 0, 255 - LED_HALF); - if (err) - return err; + for (i = 0; i < nls; ++i) { + if (ls1[i] != ls2[i]) { + err = pca955x_write_ls(pca955x, i, ls2[i]); + if (err) + return err; + } + } - if (!keep_pwm) { - /* PWM1 is used for variable brightness, default to OFF */ - err = pca955x_write_pwm(client, 1, 0); - if (err) - return err; + if (keep_psc0) { + err = pca955x_read_psc(pca955x, 0, &psc0); + } else { + psc0 = pca955x_period_to_psc(pca955x, pca955x->blink_period); + err = pca955x_write_psc(pca955x, 0, psc0); } - /* Set to fast frequency so we do not see flashing */ - err = pca955x_write_psc(client, 0, 0); if (err) return err; - err = pca955x_write_psc(client, 1, 0); + + pca955x->blink_period = pca955x_psc_to_period(pca955x, psc0); + + /* Set PWM1 to fast frequency so we do not see flashing */ + err = pca955x_write_psc(pca955x, 1, 0); if (err) return err; diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 0ce75c6b36b6..850d4004c50e 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/of_device.h> #include <linux/pm_runtime.h> +#include <linux/suspend.h> #include <linux/slab.h> #define IMX_MU_CHANS 16 @@ -67,6 +68,7 @@ struct imx_mu_priv { const struct imx_mu_dcfg *dcfg; struct clk *clk; int irq; + bool suspend; u32 xcr[4]; @@ -307,6 +309,9 @@ static irqreturn_t imx_mu_isr(int irq, void *p) return IRQ_NONE; } + if (priv->suspend) + pm_system_wakeup(); + return IRQ_HANDLED; } @@ -652,6 +657,8 @@ static int __maybe_unused imx_mu_suspend_noirq(struct device *dev) priv->xcr[i] = imx_mu_read(priv, priv->dcfg->xCR[i]); } + priv->suspend = true; + return 0; } @@ -668,11 +675,13 @@ static int __maybe_unused imx_mu_resume_noirq(struct device *dev) * send failed, may lead to system freeze. This issue * is observed by testing freeze mode suspend. 
*/ - if (!imx_mu_read(priv, priv->dcfg->xCR[0]) && !priv->clk) { + if (!priv->clk && !imx_mu_read(priv, priv->dcfg->xCR[0])) { for (i = 0; i < IMX_MU_xCR_MAX; i++) imx_mu_write(priv, priv->xcr[i], priv->dcfg->xCR[i]); } + priv->suspend = false; + return 0; } diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index acd0675da681..78f7265039c6 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c @@ -412,6 +412,11 @@ static int tegra_hsp_mailbox_flush(struct mbox_chan *chan, value = tegra_hsp_channel_readl(ch, HSP_SM_SHRD_MBOX); if ((value & HSP_SM_SHRD_MBOX_FULL) == 0) { mbox_chan_txdone(chan, 0); + + /* Wait until channel is empty */ + if (chan->active_req != NULL) + continue; + return 0; } diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 183a58c89377..8eecc9df319b 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -2060,9 +2060,11 @@ int bch_btree_check(struct cache_set *c) } } + /* + * Must wait for all threads to stop. + */ wait_event_interruptible(check_state->wait, - atomic_read(&check_state->started) == 0 || - test_bit(CACHE_SET_IO_DISABLE, &c->flags)); + atomic_read(&check_state->started) == 0); for (i = 0; i < check_state->total_threads; i++) { if (check_state->infos[i].result) { diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 8120da278161..1c03a3b72f8a 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -998,9 +998,11 @@ void bch_sectors_dirty_init(struct bcache_device *d) } } + /* + * Must wait for all threads to stop. + */ wait_event_interruptible(state->wait, - atomic_read(&state->started) == 0 || - test_bit(CACHE_SET_IO_DISABLE, &c->flags)); + atomic_read(&state->started) == 0); out: kfree(state); diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 55dccdfbcb22..5a7d270b32c0 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -65,6 +65,8 @@ struct mapped_device { struct gendisk *disk; struct dax_device *dax_dev; + unsigned long __percpu *pending_io; + /* * A list of ios that arrived while we were suspended. */ diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 916b7da16de2..154139bf7d22 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2579,7 +2579,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string static int get_key_size(char **key_string) { - return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1; + return (*key_string[0] == ':') ? 
-EINVAL : (int)(strlen(*key_string) >> 1); } #endif /* CONFIG_KEYS */ diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index dc03b70f6e65..e2a51c184a25 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2459,9 +2459,11 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, dm_integrity_io_error(ic, "invalid sector in journal", -EIO); sec &= ~(sector_t)(ic->sectors_per_block - 1); } + if (unlikely(sec >= ic->provided_data_sectors)) { + journal_entry_set_unused(je); + continue; + } } - if (unlikely(sec >= ic->provided_data_sectors)) - continue; get_area_and_offset(ic, sec, &area, &offset); restore_last_bytes(ic, access_journal_data(ic, i, j), je); for (k = j + 1; k < ic->journal_section_entries; k++) { @@ -4381,6 +4383,7 @@ try_smaller_buffer: } if (ic->internal_hash) { + size_t recalc_tags_size; ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); if (!ic->recalc_wq ) { ti->error = "Cannot allocate workqueue"; @@ -4394,8 +4397,10 @@ try_smaller_buffer: r = -ENOMEM; goto bad; } - ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block, - ic->tag_size, GFP_KERNEL); + recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size; + if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size) + recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size; + ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL); if (!ic->recalc_tags) { ti->error = "Cannot allocate tags for recalculating"; r = -ENOMEM; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 21fe8652b095..901abd6dea41 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -18,6 +18,7 @@ #include <linux/dm-ioctl.h> #include <linux/hdreg.h> #include <linux/compat.h> +#include <linux/nospec.h> #include <linux/uaccess.h> #include <linux/ima.h> @@ -1788,6 +1789,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) if (unlikely(cmd >= ARRAY_SIZE(_ioctls))) return NULL; + cmd = array_index_nospec(cmd, ARRAY_SIZE(_ioctls)); *ioctl_flags = _ioctls[cmd].flags; return _ioctls[cmd].fn; } diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c index 1856a1b125cc..82f2a06153dc 100644 --- a/drivers/md/dm-ps-historical-service-time.c +++ b/drivers/md/dm-ps-historical-service-time.c @@ -432,7 +432,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps, { struct selector *s = ps->context; struct path_info *pi = NULL, *best = NULL; - u64 time_now = sched_clock(); + u64 time_now = ktime_get_ns(); struct dm_path *ret = NULL; unsigned long flags; @@ -473,7 +473,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path, static u64 path_service_time(struct path_info *pi, u64 start_time) { - u64 sched_now = ktime_get_ns(); + u64 now = ktime_get_ns(); /* if a previous disk request has finished after this IO was * sent to the hardware, pretend the submission happened @@ -482,11 +482,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time) if (time_after64(pi->last_finish, start_time)) start_time = pi->last_finish; - pi->last_finish = sched_now; - if (time_before64(sched_now, start_time)) + pi->last_finish = now; + if (time_before64(now, start_time)) return 0; - return sched_now - start_time; + return now - start_time; } static int hst_end_io(struct path_selector *ps, struct dm_path *path, diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index a896dea9750e..53a9b16c7b2e 
100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -500,8 +500,13 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, if (unlikely(!ti)) { int srcu_idx; - struct dm_table *map = dm_get_live_table(md, &srcu_idx); + struct dm_table *map; + map = dm_get_live_table(md, &srcu_idx); + if (unlikely(!map)) { + dm_put_live_table(md, srcu_idx); + return BLK_STS_RESOURCE; + } ti = dm_table_find_target(map, 0); dm_put_live_table(md, srcu_idx); } diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 35d368c418d0..0e039a8c0bf2 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -195,6 +195,7 @@ void dm_stats_init(struct dm_stats *stats) mutex_init(&stats->mutex); INIT_LIST_HEAD(&stats->list); + stats->precise_timestamps = false; stats->last = alloc_percpu(struct dm_stats_last_position); for_each_possible_cpu(cpu) { last = per_cpu_ptr(stats->last, cpu); @@ -231,6 +232,22 @@ void dm_stats_cleanup(struct dm_stats *stats) mutex_destroy(&stats->mutex); } +static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats) +{ + struct list_head *l; + struct dm_stat *tmp_s; + bool precise_timestamps = false; + + list_for_each(l, &stats->list) { + tmp_s = container_of(l, struct dm_stat, list_entry); + if (tmp_s->stat_flags & STAT_PRECISE_TIMESTAMPS) { + precise_timestamps = true; + break; + } + } + stats->precise_timestamps = precise_timestamps; +} + static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, sector_t step, unsigned stat_flags, unsigned n_histogram_entries, @@ -376,6 +393,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, } ret_id = s->id; list_add_tail_rcu(&s->list_entry, l); + + dm_stats_recalc_precise_timestamps(stats); + mutex_unlock(&stats->mutex); resume_callback(md); @@ -418,6 +438,9 @@ static int dm_stats_delete(struct dm_stats *stats, int id) } list_del_rcu(&s->list_entry); + + dm_stats_recalc_precise_timestamps(stats); + mutex_unlock(&stats->mutex); /* @@ -621,13 +644,14 @@ static void __dm_stat_bio(struct dm_stat *s, int bi_rw, void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, sector_t bi_sector, unsigned bi_sectors, bool end, - unsigned long duration_jiffies, + unsigned long start_time, struct dm_stats_aux *stats_aux) { struct dm_stat *s; sector_t end_sector; struct dm_stats_last_position *last; bool got_precise_time; + unsigned long duration_jiffies = 0; if (unlikely(!bi_sectors)) return; @@ -647,16 +671,16 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, )); WRITE_ONCE(last->last_sector, end_sector); WRITE_ONCE(last->last_rw, bi_rw); - } + } else + duration_jiffies = jiffies - start_time; rcu_read_lock(); got_precise_time = false; list_for_each_entry_rcu(s, &stats->list, list_entry) { if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) { - if (!end) - stats_aux->duration_ns = ktime_to_ns(ktime_get()); - else + /* start (!end) duration_ns is set by DM core's alloc_io() */ + if (end) stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns; got_precise_time = true; } diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h index 2ddfae678f32..09c81a1ec057 100644 --- a/drivers/md/dm-stats.h +++ b/drivers/md/dm-stats.h @@ -13,8 +13,7 @@ struct dm_stats { struct mutex mutex; struct list_head list; /* list of struct dm_stat */ struct dm_stats_last_position __percpu *last; - sector_t last_sector; - unsigned last_rw; + bool precise_timestamps; }; struct dm_stats_aux { @@ -32,7 +31,7 @@ int 
dm_stats_message(struct mapped_device *md, unsigned argc, char **argv, void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, sector_t bi_sector, unsigned bi_sectors, bool end, - unsigned long duration_jiffies, + unsigned long start_time, struct dm_stats_aux *aux); static inline bool dm_stats_used(struct dm_stats *st) @@ -40,4 +39,10 @@ static inline bool dm_stats_used(struct dm_stats *st) return !list_empty(&st->list); } +static inline void dm_stats_record_start(struct dm_stats *stats, struct dm_stats_aux *aux) +{ + if (unlikely(stats->precise_timestamps)) + aux->duration_ns = ktime_to_ns(ktime_get()); +} + #endif diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b75ff6b2b952..36449422e7e0 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -484,33 +484,48 @@ u64 dm_start_time_ns_from_clone(struct bio *bio) } EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); -static void start_io_acct(struct dm_io *io) +static bool bio_is_flush_with_data(struct bio *bio) { - struct mapped_device *md = io->md; - struct bio *bio = io->orig_bio; - - bio_start_io_acct_time(bio, io->start_time); - if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio_data_dir(bio), - bio->bi_iter.bi_sector, bio_sectors(bio), - false, 0, &io->stats_aux); + return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size); } -static void end_io_acct(struct mapped_device *md, struct bio *bio, - unsigned long start_time, struct dm_stats_aux *stats_aux) +static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio, + unsigned long start_time, struct dm_stats_aux *stats_aux) { - unsigned long duration = jiffies - start_time; + bool is_flush_with_data; + unsigned int bi_size; - bio_end_io_acct(bio, start_time); + /* If REQ_PREFLUSH set save any payload but do not account it */ + is_flush_with_data = bio_is_flush_with_data(bio); + if (is_flush_with_data) { + bi_size = bio->bi_iter.bi_size; + bio->bi_iter.bi_size = 0; + } + + if (!end) + bio_start_io_acct_time(bio, start_time); + else + bio_end_io_acct(bio, start_time); if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector, bio_sectors(bio), - true, duration, stats_aux); + end, start_time, stats_aux); + + /* Restore bio's payload so it does get accounted upon requeue */ + if (is_flush_with_data) + bio->bi_iter.bi_size = bi_size; +} + +static void start_io_acct(struct dm_io *io) +{ + dm_io_acct(false, io->md, io->orig_bio, io->start_time, &io->stats_aux); +} - /* nudge anyone waiting on suspend queue */ - if (unlikely(wq_has_sleeper(&md->wait))) - wake_up(&md->wait); +static void end_io_acct(struct mapped_device *md, struct bio *bio, + unsigned long start_time, struct dm_stats_aux *stats_aux) +{ + dm_io_acct(true, md, bio, start_time, stats_aux); } static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) @@ -531,12 +546,15 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) io->magic = DM_IO_MAGIC; io->status = 0; atomic_set(&io->io_count, 1); + this_cpu_inc(*md->pending_io); io->orig_bio = bio; io->md = md; spin_lock_init(&io->endio_lock); io->start_time = jiffies; + dm_stats_record_start(&md->stats, &io->stats_aux); + return io; } @@ -826,11 +844,17 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error) stats_aux = io->stats_aux; free_io(md, io); end_io_acct(md, bio, start_time, &stats_aux); + smp_wmb(); + this_cpu_dec(*md->pending_io); + + /* nudge anyone waiting on suspend queue */ + if 
(unlikely(wq_has_sleeper(&md->wait))) + wake_up(&md->wait); if (io_error == BLK_STS_DM_REQUEUE) return; - if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { + if (bio_is_flush_with_data(bio)) { /* * Preflush done for flush with data, reissue * without REQ_PREFLUSH. @@ -1570,15 +1594,10 @@ static blk_qc_t dm_submit_bio(struct bio *bio) struct dm_table *map; map = dm_get_live_table(md, &srcu_idx); - if (unlikely(!map)) { - DMERR_LIMIT("%s: mapping table unavailable, erroring io", - dm_device_name(md)); - bio_io_error(bio); - goto out; - } - /* If suspended, queue this IO for later */ - if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { + /* If suspended, or map not yet available, queue this IO for later */ + if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || + unlikely(!map)) { if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); else if (bio->bi_opf & REQ_RAHEAD) @@ -1682,6 +1701,7 @@ static void cleanup_mapped_device(struct mapped_device *md) md->dax_dev = NULL; } + dm_cleanup_zoned_dev(md); if (md->disk) { spin_lock(&_minor_lock); md->disk->private_data = NULL; @@ -1694,6 +1714,11 @@ static void cleanup_mapped_device(struct mapped_device *md) blk_cleanup_disk(md->disk); } + if (md->pending_io) { + free_percpu(md->pending_io); + md->pending_io = NULL; + } + cleanup_srcu_struct(&md->io_barrier); mutex_destroy(&md->suspend_lock); @@ -1702,7 +1727,6 @@ static void cleanup_mapped_device(struct mapped_device *md) mutex_destroy(&md->swap_bios_lock); dm_mq_cleanup_mapped_device(md); - dm_cleanup_zoned_dev(md); } /* @@ -1792,6 +1816,10 @@ static struct mapped_device *alloc_dev(int minor) if (!md->wq) goto bad; + md->pending_io = alloc_percpu(unsigned long); + if (!md->pending_io) + goto bad; + dm_stats_init(&md->stats); /* Populate the mapping, nobody knows we exist yet */ @@ -2156,7 +2184,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); - blk_set_queue_dying(md->queue); + blk_mark_disk_dead(md->disk); /* * Take suspend_lock so that presuspend and postsuspend methods @@ -2207,16 +2235,13 @@ void dm_put(struct mapped_device *md) } EXPORT_SYMBOL_GPL(dm_put); -static bool md_in_flight_bios(struct mapped_device *md) +static bool dm_in_flight_bios(struct mapped_device *md) { int cpu; - struct block_device *part = dm_disk(md)->part0; - long sum = 0; + unsigned long sum = 0; - for_each_possible_cpu(cpu) { - sum += part_stat_local_read_cpu(part, in_flight[0], cpu); - sum += part_stat_local_read_cpu(part, in_flight[1], cpu); - } + for_each_possible_cpu(cpu) + sum += *per_cpu_ptr(md->pending_io, cpu); return sum != 0; } @@ -2229,7 +2254,7 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta while (true) { prepare_to_wait(&md->wait, &wait, task_state); - if (!md_in_flight_bios(md)) + if (!dm_in_flight_bios(md)) break; if (signal_pending_state(task_state, current)) { @@ -2241,6 +2266,8 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta } finish_wait(&md->wait, &wait); + smp_rmb(); + return r; } diff --git a/drivers/md/md.c b/drivers/md/md.c index 2d31a079be33..5ce2648cbe5b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -51,6 +51,7 @@ #include <linux/hdreg.h> #include <linux/proc_fs.h> #include <linux/random.h> +#include <linux/major.h> #include <linux/module.h> #include <linux/reboot.h> #include <linux/file.h> diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c index 41f4e749a859..2217004264e4 
100644 --- a/drivers/media/i2c/adv7511-v4l2.c +++ b/drivers/media/i2c/adv7511-v4l2.c @@ -544,7 +544,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_ buffer[3] = 0; buffer[3] = hdmi_infoframe_checksum(buffer, len + 4); - if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) { + if (hdmi_infoframe_unpack(&frame, buffer, len + 4) < 0) { v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc); return; } diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index 122e1fdccd96..d688ffff7a07 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -2484,7 +2484,7 @@ static int adv76xx_read_infoframe(struct v4l2_subdev *sd, int index, buffer[i + 3] = infoframe_read(sd, adv76xx_cri[index].payload_addr + i); - if (hdmi_infoframe_unpack(frame, buffer, sizeof(buffer)) < 0) { + if (hdmi_infoframe_unpack(frame, buffer, len + 3) < 0) { v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, adv76xx_cri[index].desc); return -ENOENT; diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 7f8acbdf0db4..8ab4c63839b4 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -2593,7 +2593,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7842_cfg_read_ for (i = 0; i < len; i++) buffer[i + 3] = infoframe_read(sd, cri->payload_addr + i); - if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) { + if (hdmi_infoframe_unpack(&frame, buffer, len + 3) < 0) { v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc); return; } diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index ddbd71394db3..db5a19babe67 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -2293,7 +2293,6 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd, struct ov5640_dev *sensor = to_ov5640_dev(sd); const struct ov5640_mode_info *new_mode; struct v4l2_mbus_framefmt *mbus_fmt = &format->format; - struct v4l2_mbus_framefmt *fmt; int ret; if (format->pad != 0) @@ -2311,12 +2310,10 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd, if (ret) goto out; - if (format->which == V4L2_SUBDEV_FORMAT_TRY) - fmt = v4l2_subdev_get_try_format(sd, sd_state, 0); - else - fmt = &sensor->fmt; - - *fmt = *mbus_fmt; + if (format->which == V4L2_SUBDEV_FORMAT_TRY) { + *v4l2_subdev_get_try_format(sd, sd_state, 0) = *mbus_fmt; + goto out; + } if (new_mode != sensor->current_mode) { sensor->current_mode = new_mode; @@ -2325,6 +2322,9 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd, if (mbus_fmt->code != sensor->fmt.code) sensor->pending_fmt_change = true; + /* update format even if code is unchanged, resolution might change */ + sensor->fmt = *mbus_fmt; + __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, ov5640_calc_pixel_rate(sensor)); out: diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c index 947d437ed0ef..ef8b52dc9401 100644 --- a/drivers/media/i2c/ov5648.c +++ b/drivers/media/i2c/ov5648.c @@ -639,7 +639,7 @@ struct ov5648_ctrls { struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl_handler handler; -} __packed; +}; struct ov5648_sensor { struct device *dev; @@ -1778,8 +1778,14 @@ static int ov5648_state_configure(struct ov5648_sensor *sensor, static int ov5648_state_init(struct ov5648_sensor *sensor) { - return ov5648_state_configure(sensor, &ov5648_modes[0], - ov5648_mbus_codes[0]); + int ret; + + mutex_lock(&sensor->mutex); + ret = ov5648_state_configure(sensor, &ov5648_modes[0], + ov5648_mbus_codes[0]); + 
mutex_unlock(&sensor->mutex); + + return ret; } /* Sensor Base */ diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c index f67412150b16..eb59dc8bb592 100644 --- a/drivers/media/i2c/ov6650.c +++ b/drivers/media/i2c/ov6650.c @@ -472,9 +472,16 @@ static int ov6650_get_selection(struct v4l2_subdev *sd, { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov6650 *priv = to_ov6650(client); + struct v4l2_rect *rect; - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; + if (sel->which == V4L2_SUBDEV_FORMAT_TRY) { + /* pre-select try crop rectangle */ + rect = &sd_state->pads->try_crop; + + } else { + /* pre-select active crop rectangle */ + rect = &priv->rect; + } switch (sel->target) { case V4L2_SEL_TGT_CROP_BOUNDS: @@ -483,14 +490,33 @@ static int ov6650_get_selection(struct v4l2_subdev *sd, sel->r.width = W_CIF; sel->r.height = H_CIF; return 0; + case V4L2_SEL_TGT_CROP: - sel->r = priv->rect; + /* use selected crop rectangle */ + sel->r = *rect; return 0; + default: return -EINVAL; } } +static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect) +{ + return width > rect->width >> 1 || height > rect->height >> 1; +} + +static void ov6650_bind_align_crop_rectangle(struct v4l2_rect *rect) +{ + v4l_bound_align_image(&rect->width, 2, W_CIF, 1, + &rect->height, 2, H_CIF, 1, 0); + v4l_bound_align_image(&rect->left, DEF_HSTRT << 1, + (DEF_HSTRT << 1) + W_CIF - (__s32)rect->width, 1, + &rect->top, DEF_VSTRT << 1, + (DEF_VSTRT << 1) + H_CIF - (__s32)rect->height, + 1, 0); +} + static int ov6650_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_selection *sel) @@ -499,18 +525,30 @@ static int ov6650_set_selection(struct v4l2_subdev *sd, struct ov6650 *priv = to_ov6650(client); int ret; - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE || - sel->target != V4L2_SEL_TGT_CROP) + if (sel->target != V4L2_SEL_TGT_CROP) return -EINVAL; - v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1, - &sel->r.height, 2, H_CIF, 1, 0); - v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1, - (DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1, - &sel->r.top, DEF_VSTRT << 1, - (DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height, - 1, 0); + ov6650_bind_align_crop_rectangle(&sel->r); + + if (sel->which == V4L2_SUBDEV_FORMAT_TRY) { + struct v4l2_rect *crop = &sd_state->pads->try_crop; + struct v4l2_mbus_framefmt *mf = &sd_state->pads->try_fmt; + /* detect current pad config scaling factor */ + bool half_scale = !is_unscaled_ok(mf->width, mf->height, crop); + + /* store new crop rectangle */ + *crop = sel->r; + /* adjust frame size */ + mf->width = crop->width >> half_scale; + mf->height = crop->height >> half_scale; + + return 0; + } + + /* V4L2_SUBDEV_FORMAT_ACTIVE */ + + /* apply new crop rectangle */ ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1); if (!ret) { priv->rect.width += priv->rect.left - sel->r.left; @@ -562,30 +600,13 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd, return 0; } -static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect) -{ - return width > rect->width >> 1 || height > rect->height >> 1; -} - #define to_clkrc(div) ((div) - 1) /* set the format we will capture in */ -static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) +static int ov6650_s_fmt(struct v4l2_subdev *sd, u32 code, bool half_scale) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov6650 *priv = to_ov6650(client); - bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect); - 
struct v4l2_subdev_selection sel = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = V4L2_SEL_TGT_CROP, - .r.left = priv->rect.left + (priv->rect.width >> 1) - - (mf->width >> (1 - half_scale)), - .r.top = priv->rect.top + (priv->rect.height >> 1) - - (mf->height >> (1 - half_scale)), - .r.width = mf->width << half_scale, - .r.height = mf->height << half_scale, - }; - u32 code = mf->code; u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask; int ret; @@ -653,9 +674,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) coma_mask |= COMA_QCIF; } - ret = ov6650_set_selection(sd, NULL, &sel); - if (!ret) - ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask); + ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask); if (!ret) { priv->half_scale = half_scale; @@ -674,14 +693,12 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf = &format->format; struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov6650 *priv = to_ov6650(client); + struct v4l2_rect *crop; + bool half_scale; if (format->pad) return -EINVAL; - if (is_unscaled_ok(mf->width, mf->height, &priv->rect)) - v4l_bound_align_image(&mf->width, 2, W_CIF, 1, - &mf->height, 2, H_CIF, 1, 0); - switch (mf->code) { case MEDIA_BUS_FMT_Y10_1X10: mf->code = MEDIA_BUS_FMT_Y8_1X8; @@ -699,10 +716,17 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd, break; } + if (format->which == V4L2_SUBDEV_FORMAT_TRY) + crop = &sd_state->pads->try_crop; + else + crop = &priv->rect; + + half_scale = !is_unscaled_ok(mf->width, mf->height, crop); + if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - /* store media bus format code and frame size in pad config */ - sd_state->pads->try_fmt.width = mf->width; - sd_state->pads->try_fmt.height = mf->height; + /* store new mbus frame format code and size in pad config */ + sd_state->pads->try_fmt.width = crop->width >> half_scale; + sd_state->pads->try_fmt.height = crop->height >> half_scale; sd_state->pads->try_fmt.code = mf->code; /* return default mbus frame format updated with pad config */ @@ -712,9 +736,11 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd, mf->code = sd_state->pads->try_fmt.code; } else { - /* apply new media bus format code and frame size */ - int ret = ov6650_s_fmt(sd, mf); + int ret = 0; + /* apply new media bus frame format and scaling if changed */ + if (mf->code != priv->code || half_scale != priv->half_scale) + ret = ov6650_s_fmt(sd, mf->code, half_scale); if (ret) return ret; @@ -890,9 +916,8 @@ static int ov6650_video_probe(struct v4l2_subdev *sd) if (!ret) ret = ov6650_prog_dflt(client, xclk->clkrc); if (!ret) { - struct v4l2_mbus_framefmt mf = ov6650_def_fmt; - - ret = ov6650_s_fmt(sd, &mf); + /* driver default frame format, no scaling */ + ret = ov6650_s_fmt(sd, ov6650_def_fmt.code, false); } if (!ret) ret = v4l2_ctrl_handler_setup(&priv->hdl); diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index 0e9df8b35ac6..661ebfa7bf3f 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -3890,7 +3890,7 @@ static int bttv_register_video(struct bttv *btv) /* video */ vdev_init(btv, &btv->video_dev, &bttv_video_template, "video"); - btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | + btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; if (btv->tuner_type != TUNER_ABSENT) btv->video_dev.device_caps |= V4L2_CAP_TUNER; @@ -3911,7 +3911,7 @@ static int 
bttv_register_video(struct bttv *btv) /* vbi */ vdev_init(btv, &btv->vbi_dev, &bttv_video_template, "vbi"); btv->vbi_dev.device_caps = V4L2_CAP_VBI_CAPTURE | V4L2_CAP_READWRITE | - V4L2_CAP_STREAMING | V4L2_CAP_TUNER; + V4L2_CAP_STREAMING; if (btv->tuner_type != TUNER_ABSENT) btv->vbi_dev.device_caps |= V4L2_CAP_TUNER; diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c index 680e1e3fe89b..2c1d5137ac47 100644 --- a/drivers/media/pci/cx88/cx88-mpeg.c +++ b/drivers/media/pci/cx88/cx88-mpeg.c @@ -162,6 +162,9 @@ int cx8802_start_dma(struct cx8802_dev *dev, cx_write(MO_TS_GPCNTRL, GP_COUNT_CONTROL_RESET); q->count = 0; + /* clear interrupt status register */ + cx_write(MO_TS_INTSTAT, 0x1f1111); + /* enable irqs */ dprintk(1, "setting the interrupt mask\n"); cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_TSINT); diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h index 4cf92dee6527..ce3a7ca51736 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.h +++ b/drivers/media/pci/ivtv/ivtv-driver.h @@ -330,7 +330,6 @@ struct ivtv_stream { struct ivtv *itv; /* for ease of use */ const char *name; /* name of the stream */ int type; /* stream type */ - u32 caps; /* V4L2 capabilities */ struct v4l2_fh *fh; /* pointer to the streaming filehandle */ spinlock_t qlock; /* locks access to the queues */ diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c index da19b2e95e6c..6d2d3348e321 100644 --- a/drivers/media/pci/ivtv/ivtv-ioctl.c +++ b/drivers/media/pci/ivtv/ivtv-ioctl.c @@ -438,7 +438,7 @@ static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_f struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; struct v4l2_window *winfmt = &fmt->fmt.win; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -EINVAL; if (!itv->osd_video_pbase) return -EINVAL; @@ -549,7 +549,7 @@ static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2 u32 chromakey = fmt->fmt.win.chromakey; u8 global_alpha = fmt->fmt.win.global_alpha; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -EINVAL; if (!itv->osd_video_pbase) return -EINVAL; @@ -1383,7 +1383,7 @@ static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb) 0, }; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -ENOTTY; if (!itv->osd_video_pbase) return -ENOTTY; @@ -1450,7 +1450,7 @@ static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffe struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; struct yuv_playback_info *yi = &itv->yuv_info; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -ENOTTY; if (!itv->osd_video_pbase) return -ENOTTY; @@ -1470,7 +1470,7 @@ static int ivtv_overlay(struct file *file, void *fh, unsigned int on) struct ivtv *itv = id->itv; struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -ENOTTY; if (!itv->osd_video_pbase) return -ENOTTY; diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c index f04ee84bab5f..f9de5d1605fe 100644 --- a/drivers/media/pci/ivtv/ivtv-streams.c +++ b/drivers/media/pci/ivtv/ivtv-streams.c @@ -176,7 +176,7 @@ 
static void ivtv_stream_init(struct ivtv *itv, int type) s->itv = itv; s->type = type; s->name = ivtv_stream_info[type].name; - s->caps = ivtv_stream_info[type].v4l2_caps; + s->vdev.device_caps = ivtv_stream_info[type].v4l2_caps; if (ivtv_stream_info[type].pio) s->dma = PCI_DMA_NONE; @@ -299,12 +299,9 @@ static int ivtv_reg_dev(struct ivtv *itv, int type) if (s_mpg->vdev.v4l2_dev) num = s_mpg->vdev.num + ivtv_stream_info[type].num_offset; } - s->vdev.device_caps = s->caps; - if (itv->osd_video_pbase) { - itv->streams[IVTV_DEC_STREAM_TYPE_YUV].vdev.device_caps |= - V4L2_CAP_VIDEO_OUTPUT_OVERLAY; - itv->streams[IVTV_DEC_STREAM_TYPE_MPG].vdev.device_caps |= - V4L2_CAP_VIDEO_OUTPUT_OVERLAY; + if (itv->osd_video_pbase && (type == IVTV_DEC_STREAM_TYPE_YUV || + type == IVTV_DEC_STREAM_TYPE_MPG)) { + s->vdev.device_caps |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY; itv->v4l2_cap |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY; } video_set_drvdata(&s->vdev, s); diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c index fb24d2ed3621..d3cde05a6eba 100644 --- a/drivers/media/pci/saa7134/saa7134-alsa.c +++ b/drivers/media/pci/saa7134/saa7134-alsa.c @@ -1214,7 +1214,7 @@ static int alsa_device_exit(struct saa7134_dev *dev) static int saa7134_alsa_init(void) { - struct saa7134_dev *dev = NULL; + struct saa7134_dev *dev; saa7134_dmasound_init = alsa_device_init; saa7134_dmasound_exit = alsa_device_exit; @@ -1229,7 +1229,7 @@ static int saa7134_alsa_init(void) alsa_device_init(dev); } - if (dev == NULL) + if (list_empty(&saa7134_devlist)) pr_info("saa7134 ALSA: no saa7134 cards found\n"); return 0; diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c index debc7509c173..757a58829a51 100644 --- a/drivers/media/platform/aspeed-video.c +++ b/drivers/media/platform/aspeed-video.c @@ -151,7 +151,7 @@ #define VE_SRC_TB_EDGE_DET_BOT GENMASK(28, VE_SRC_TB_EDGE_DET_BOT_SHF) #define VE_MODE_DETECT_STATUS 0x098 -#define VE_MODE_DETECT_H_PIXELS GENMASK(11, 0) +#define VE_MODE_DETECT_H_PERIOD GENMASK(11, 0) #define VE_MODE_DETECT_V_LINES_SHF 16 #define VE_MODE_DETECT_V_LINES GENMASK(27, VE_MODE_DETECT_V_LINES_SHF) #define VE_MODE_DETECT_STATUS_VSYNC BIT(28) @@ -162,6 +162,8 @@ #define VE_SYNC_STATUS_VSYNC_SHF 16 #define VE_SYNC_STATUS_VSYNC GENMASK(27, VE_SYNC_STATUS_VSYNC_SHF) +#define VE_H_TOTAL_PIXELS 0x0A0 + #define VE_INTERRUPT_CTRL 0x304 #define VE_INTERRUPT_STATUS 0x308 #define VE_INTERRUPT_MODE_DETECT_WD BIT(0) @@ -765,6 +767,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) u32 src_lr_edge; u32 src_tb_edge; u32 sync; + u32 htotal; struct v4l2_bt_timings *det = &video->detected_timings; det->width = MIN_WIDTH; @@ -809,6 +812,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) src_tb_edge = aspeed_video_read(video, VE_SRC_TB_EDGE_DET); mds = aspeed_video_read(video, VE_MODE_DETECT_STATUS); sync = aspeed_video_read(video, VE_SYNC_STATUS); + htotal = aspeed_video_read(video, VE_H_TOTAL_PIXELS); video->frame_bottom = (src_tb_edge & VE_SRC_TB_EDGE_DET_BOT) >> VE_SRC_TB_EDGE_DET_BOT_SHF; @@ -825,8 +829,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) VE_SRC_LR_EDGE_DET_RT_SHF; video->frame_left = src_lr_edge & VE_SRC_LR_EDGE_DET_LEFT; det->hfrontporch = video->frame_left; - det->hbackporch = (mds & VE_MODE_DETECT_H_PIXELS) - - video->frame_right; + det->hbackporch = htotal - video->frame_right; det->hsync = sync & VE_SYNC_STATUS_HSYNC; if (video->frame_left > video->frame_right) continue; diff 
--git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c index ebf264b980f9..f768be3c4059 100644 --- a/drivers/media/platform/atmel/atmel-isc-base.c +++ b/drivers/media/platform/atmel/atmel-isc-base.c @@ -1369,14 +1369,12 @@ static int isc_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize) { struct isc_device *isc = video_drvdata(file); - struct v4l2_subdev_frame_size_enum fse = { - .code = isc->config.sd_format->mbus_code, - .index = fsize->index, - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - }; int ret = -EINVAL; int i; + if (fsize->index) + return -EINVAL; + for (i = 0; i < isc->num_user_formats; i++) if (isc->user_formats[i]->fourcc == fsize->pixel_format) ret = 0; @@ -1388,14 +1386,14 @@ static int isc_enum_framesizes(struct file *file, void *fh, if (ret) return ret; - ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size, - NULL, &fse); - if (ret) - return ret; + fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS; - fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; - fsize->discrete.width = fse.max_width; - fsize->discrete.height = fse.max_height; + fsize->stepwise.min_width = 16; + fsize->stepwise.max_width = isc->max_width; + fsize->stepwise.min_height = 16; + fsize->stepwise.max_height = isc->max_height; + fsize->stepwise.step_width = 1; + fsize->stepwise.step_height = 1; return 0; } diff --git a/drivers/media/platform/atmel/atmel-sama7g5-isc.c b/drivers/media/platform/atmel/atmel-sama7g5-isc.c index 9c05acafd072..6a5d3f7ce75e 100644 --- a/drivers/media/platform/atmel/atmel-sama7g5-isc.c +++ b/drivers/media/platform/atmel/atmel-sama7g5-isc.c @@ -555,7 +555,6 @@ static int microchip_xisc_remove(struct platform_device *pdev) v4l2_device_unregister(&isc->v4l2_dev); - clk_disable_unprepare(isc->ispck); clk_disable_unprepare(isc->hclock); isc_clk_cleanup(isc); @@ -567,7 +566,6 @@ static int __maybe_unused xisc_runtime_suspend(struct device *dev) { struct isc_device *isc = dev_get_drvdata(dev); - clk_disable_unprepare(isc->ispck); clk_disable_unprepare(isc->hclock); return 0; @@ -582,10 +580,6 @@ static int __maybe_unused xisc_runtime_resume(struct device *dev) if (ret) return ret; - ret = clk_prepare_enable(isc->ispck); - if (ret) - clk_disable_unprepare(isc->hclock); - return ret; } diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 9a2640a9c75c..4a553f42ff0a 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -408,6 +408,7 @@ static struct vdoa_data *coda_get_vdoa_data(void) if (!vdoa_data) vdoa_data = ERR_PTR(-EPROBE_DEFER); + put_device(&vdoa_pdev->dev); out: of_node_put(vdoa_node); diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c index f1ce10828b8e..5658c7f148d7 100644 --- a/drivers/media/platform/davinci/vpif.c +++ b/drivers/media/platform/davinci/vpif.c @@ -428,6 +428,7 @@ static int vpif_probe(struct platform_device *pdev) static struct resource *res, *res_irq; struct platform_device *pdev_capture, *pdev_display; struct device_node *endpoint = NULL; + int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); vpif_base = devm_ioremap_resource(&pdev->dev, res); @@ -457,8 +458,8 @@ static int vpif_probe(struct platform_device *pdev) res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res_irq) { dev_warn(&pdev->dev, "Missing IRQ resource.\n"); - pm_runtime_put(&pdev->dev); - return -EINVAL; + ret = -EINVAL; + goto err_put_rpm; } pdev_capture = 
devm_kzalloc(&pdev->dev, sizeof(*pdev_capture), @@ -492,10 +493,17 @@ static int vpif_probe(struct platform_device *pdev) } return 0; + +err_put_rpm: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return ret; } static int vpif_remove(struct platform_device *pdev) { + pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/imx-jpeg/mxc-jpeg.c index fc905ea78b17..37905547466b 100644 --- a/drivers/media/platform/imx-jpeg/mxc-jpeg.c +++ b/drivers/media/platform/imx-jpeg/mxc-jpeg.c @@ -925,8 +925,13 @@ static void mxc_jpeg_device_run(void *priv) v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true); jpeg_src_buf = vb2_to_mxc_buf(&src_buf->vb2_buf); + if (q_data_cap->fmt->colplanes != dst_buf->vb2_buf.num_planes) { + dev_err(dev, "Capture format %s has %d planes, but capture buffer has %d planes\n", + q_data_cap->fmt->name, q_data_cap->fmt->colplanes, + dst_buf->vb2_buf.num_planes); + jpeg_src_buf->jpeg_parse_error = true; + } if (jpeg_src_buf->jpeg_parse_error) { - jpeg->slot_data[ctx->slot].used = false; v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/meson/ge2d/ge2d.c index 9b1e973e78da..a373dea9866b 100644 --- a/drivers/media/platform/meson/ge2d/ge2d.c +++ b/drivers/media/platform/meson/ge2d/ge2d.c @@ -215,35 +215,35 @@ static void ge2d_hw_start(struct meson_ge2d *ge2d) regmap_write(ge2d->map, GE2D_SRC1_CLIPY_START_END, FIELD_PREP(GE2D_START, ctx->in.crop.top) | - FIELD_PREP(GE2D_END, ctx->in.crop.top + ctx->in.crop.height)); + FIELD_PREP(GE2D_END, ctx->in.crop.top + ctx->in.crop.height - 1)); regmap_write(ge2d->map, GE2D_SRC1_CLIPX_START_END, FIELD_PREP(GE2D_START, ctx->in.crop.left) | - FIELD_PREP(GE2D_END, ctx->in.crop.left + ctx->in.crop.width)); + FIELD_PREP(GE2D_END, ctx->in.crop.left + ctx->in.crop.width - 1)); regmap_write(ge2d->map, GE2D_SRC2_CLIPY_START_END, FIELD_PREP(GE2D_START, ctx->out.crop.top) | - FIELD_PREP(GE2D_END, ctx->out.crop.top + ctx->out.crop.height)); + FIELD_PREP(GE2D_END, ctx->out.crop.top + ctx->out.crop.height - 1)); regmap_write(ge2d->map, GE2D_SRC2_CLIPX_START_END, FIELD_PREP(GE2D_START, ctx->out.crop.left) | - FIELD_PREP(GE2D_END, ctx->out.crop.left + ctx->out.crop.width)); + FIELD_PREP(GE2D_END, ctx->out.crop.left + ctx->out.crop.width - 1)); regmap_write(ge2d->map, GE2D_DST_CLIPY_START_END, FIELD_PREP(GE2D_START, ctx->out.crop.top) | - FIELD_PREP(GE2D_END, ctx->out.crop.top + ctx->out.crop.height)); + FIELD_PREP(GE2D_END, ctx->out.crop.top + ctx->out.crop.height - 1)); regmap_write(ge2d->map, GE2D_DST_CLIPX_START_END, FIELD_PREP(GE2D_START, ctx->out.crop.left) | - FIELD_PREP(GE2D_END, ctx->out.crop.left + ctx->out.crop.width)); + FIELD_PREP(GE2D_END, ctx->out.crop.left + ctx->out.crop.width - 1)); regmap_write(ge2d->map, GE2D_SRC1_Y_START_END, - FIELD_PREP(GE2D_END, ctx->in.pix_fmt.height)); + FIELD_PREP(GE2D_END, ctx->in.pix_fmt.height - 1)); regmap_write(ge2d->map, GE2D_SRC1_X_START_END, - FIELD_PREP(GE2D_END, ctx->in.pix_fmt.width)); + FIELD_PREP(GE2D_END, ctx->in.pix_fmt.width - 1)); regmap_write(ge2d->map, GE2D_SRC2_Y_START_END, - FIELD_PREP(GE2D_END, ctx->out.pix_fmt.height)); + FIELD_PREP(GE2D_END, ctx->out.pix_fmt.height - 1)); regmap_write(ge2d->map, GE2D_SRC2_X_START_END, - FIELD_PREP(GE2D_END, ctx->out.pix_fmt.width)); + FIELD_PREP(GE2D_END, ctx->out.pix_fmt.width - 1)); 
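Aside on the meson ge2d hunk above: every END field is reduced by one because the GE2D clip/scan registers take an inclusive last-pixel coordinate, so a window of `width` pixels starting at `left` must end at `left + width - 1`. A minimal userspace C sketch of that convention follows; the struct and function names are illustrative, not the driver's.

```c
#include <assert.h>
#include <stdio.h>

/*
 * Illustration of an inclusive START/END register pair: a crop of
 * `width` pixels starting at `left` must program END = left + width - 1,
 * otherwise the hardware processes one pixel too many.
 */
struct clip_window {
	unsigned int start;	/* first pixel index, inclusive */
	unsigned int end;	/* last pixel index, inclusive */
};

static struct clip_window clip_from_crop(unsigned int left, unsigned int width)
{
	struct clip_window w = {
		.start = left,
		.end = left + width - 1,	/* inclusive end, as in the fix above */
	};
	return w;
}

int main(void)
{
	struct clip_window w = clip_from_crop(16, 64);

	/* 64 pixels starting at index 16 occupy indices 16..79 */
	assert(w.end - w.start + 1 == 64);
	printf("clip %u..%u (%u pixels)\n", w.start, w.end, w.end - w.start + 1);
	return 0;
}
```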
regmap_write(ge2d->map, GE2D_DST_Y_START_END, - FIELD_PREP(GE2D_END, ctx->out.pix_fmt.height)); + FIELD_PREP(GE2D_END, ctx->out.pix_fmt.height - 1)); regmap_write(ge2d->map, GE2D_DST_X_START_END, - FIELD_PREP(GE2D_END, ctx->out.pix_fmt.width)); + FIELD_PREP(GE2D_END, ctx->out.pix_fmt.width - 1)); /* Color, no blend, use source color */ reg = GE2D_ALU_DO_COLOR_OPERATION_LOGIC(LOGIC_OPERATION_COPY, diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c index cd27f637dbe7..cfc7ebed8fb7 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c @@ -102,6 +102,8 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev, vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_vpu_reset_handler, dev, rst_id); fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL); + if (!fw) + return ERR_PTR(-ENOMEM); fw->type = VPU; fw->ops = &mtk_vcodec_vpu_msg; fw->pdev = fw_pdev; diff --git a/drivers/media/platform/qcom/camss/camss-csid-170.c b/drivers/media/platform/qcom/camss/camss-csid-170.c index ac22ff29d2a9..82f59933ad7b 100644 --- a/drivers/media/platform/qcom/camss/camss-csid-170.c +++ b/drivers/media/platform/qcom/camss/camss-csid-170.c @@ -105,7 +105,8 @@ #define CSID_RDI_CTRL(rdi) ((IS_LITE ? 0x208 : 0x308)\ + 0x100 * (rdi)) #define RDI_CTRL_HALT_CMD 0 -#define ALT_CMD_RESUME_AT_FRAME_BOUNDARY 1 +#define HALT_CMD_HALT_AT_FRAME_BOUNDARY 0 +#define HALT_CMD_RESUME_AT_FRAME_BOUNDARY 1 #define RDI_CTRL_HALT_MODE 2 #define CSID_RDI_FRM_DROP_PATTERN(rdi) ((IS_LITE ? 0x20C : 0x30C)\ @@ -366,7 +367,7 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable) val |= input_format->width & 0x1fff << TPG_DT_n_CFG_0_FRAME_WIDTH; writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0)); - val = DATA_TYPE_RAW_10BIT << TPG_DT_n_CFG_1_DATA_TYPE; + val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE; writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0)); val = tg->mode << TPG_DT_n_CFG_2_PAYLOAD_MODE; @@ -382,8 +383,9 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable) val = 1 << RDI_CFG0_BYTE_CNTR_EN; val |= 1 << RDI_CFG0_FORMAT_MEASURE_EN; val |= 1 << RDI_CFG0_TIMESTAMP_EN; + /* note: for non-RDI path, this should be format->decode_format */ val |= DECODE_FORMAT_PAYLOAD_ONLY << RDI_CFG0_DECODE_FORMAT; - val |= DATA_TYPE_RAW_10BIT << RDI_CFG0_DATA_TYPE; + val |= format->data_type << RDI_CFG0_DATA_TYPE; val |= vc << RDI_CFG0_VIRTUAL_CHANNEL; val |= dt_id << RDI_CFG0_DT_ID; writel_relaxed(val, csid->base + CSID_RDI_CFG0(0)); @@ -443,13 +445,10 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable) val |= 1 << CSI2_RX_CFG1_MISR_EN; writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1); // csi2_vc_mode_shift_val ? 
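Aside on the camss-csid-170 hunk above: it stops hard-coding DATA_TYPE_RAW_10BIT and instead programs the CSI-2 data type carried by the active format. A small standalone sketch of that idea follows; the table, shift, and function names are made up for illustration, and only the RAW8/RAW10/RAW12 data-type codes (0x2A/0x2B/0x2C) come from the CSI-2 specification.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative format descriptors: the data type is taken from the
 * format that was negotiated, not from a hard-coded RAW10 constant.
 */
struct csid_format {
	const char *name;
	uint8_t data_type;	/* MIPI CSI-2 data type code */
};

static const struct csid_format formats[] = {
	{ "RAW8",  0x2a },
	{ "RAW10", 0x2b },
	{ "RAW12", 0x2c },
};

/*
 * Compose a hypothetical RDI config word; the shift is made up, the
 * point is that data_type comes from the format argument.
 */
static uint32_t rdi_cfg0(const struct csid_format *fmt, unsigned int vc)
{
	return ((uint32_t)fmt->data_type << 8) | (vc & 0x3);
}

int main(void)
{
	const struct csid_format *fmt = &formats[1];	/* active format: RAW10 */

	printf("%s -> RDI_CFG0 = 0x%08x\n", fmt->name,
	       (unsigned int)rdi_cfg0(fmt, 0));
	return 0;
}
```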
- /* error irqs start at BIT(11) */ - writel_relaxed(~0u, csid->base + CSID_CSI2_RX_IRQ_MASK); - - /* RDI irq */ - writel_relaxed(~0u, csid->base + CSID_TOP_IRQ_MASK); - - val = 1 << RDI_CTRL_HALT_CMD; + if (enable) + val = HALT_CMD_RESUME_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD; + else + val = HALT_CMD_HALT_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD; writel_relaxed(val, csid->base + CSID_RDI_CTRL(0)); } diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c index 8594d275b41d..02cb8005504a 100644 --- a/drivers/media/platform/qcom/camss/camss-vfe-170.c +++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c @@ -399,17 +399,7 @@ static irqreturn_t vfe_isr(int irq, void *dev) */ static int vfe_halt(struct vfe_device *vfe) { - unsigned long time; - - reinit_completion(&vfe->halt_complete); - - time = wait_for_completion_timeout(&vfe->halt_complete, - msecs_to_jiffies(VFE_HALT_TIMEOUT_MS)); - if (!time) { - dev_err(vfe->camss->dev, "VFE halt timeout\n"); - return -EIO; - } - + /* rely on vfe_disable_output() to stop the VFE */ return 0; } diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c index 60f4b8e4b8d0..1bf5db7673eb 100644 --- a/drivers/media/platform/qcom/venus/hfi_cmds.c +++ b/drivers/media/platform/qcom/venus/hfi_cmds.c @@ -1054,6 +1054,8 @@ static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt, pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info); break; } + case HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI: + return -ENOTSUPP; /* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS: diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index bc1c42dd53c0..c4e0fe14c058 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -604,8 +604,8 @@ static int venc_set_properties(struct venus_inst *inst) ptype = HFI_PROPERTY_PARAM_VENC_H264_TRANSFORM_8X8; h264_transform.enable_type = 0; - if (ctr->profile.h264 == HFI_H264_PROFILE_HIGH || - ctr->profile.h264 == HFI_H264_PROFILE_CONSTRAINED_HIGH) + if (ctr->profile.h264 == V4L2_MPEG_VIDEO_H264_PROFILE_HIGH || + ctr->profile.h264 == V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH) h264_transform.enable_type = ctr->h264_8x8_transform; ret = hfi_session_set_property(inst, ptype, &h264_transform); diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c index 1ada42df314d..ea5805e71c14 100644 --- a/drivers/media/platform/qcom/venus/venc_ctrls.c +++ b/drivers/media/platform/qcom/venus/venc_ctrls.c @@ -320,8 +320,8 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) ctr->intra_refresh_period = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: - if (ctr->profile.h264 != HFI_H264_PROFILE_HIGH && - ctr->profile.h264 != HFI_H264_PROFILE_CONSTRAINED_HIGH) + if (ctr->profile.h264 != V4L2_MPEG_VIDEO_H264_PROFILE_HIGH && + ctr->profile.h264 != V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH) return -EINVAL; /* @@ -457,7 +457,7 @@ int venc_ctrl_init(struct venus_inst *inst) V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP, 1, 51, 1, 1); v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, - V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM, 0, 1, 1, 0); + V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM, 0, 1, 1, 1); v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP, 1, 51, 1, 1); diff --git 
a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c index 6759091b15e0..d99ea8973b67 100644 --- a/drivers/media/platform/rockchip/rga/rga.c +++ b/drivers/media/platform/rockchip/rga/rga.c @@ -895,7 +895,7 @@ static int rga_probe(struct platform_device *pdev) } rga->dst_mmu_pages = (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3); - if (rga->dst_mmu_pages) { + if (!rga->dst_mmu_pages) { ret = -ENOMEM; goto free_src_pages; } diff --git a/drivers/media/platform/ti-vpe/cal-video.c b/drivers/media/platform/ti-vpe/cal-video.c index 7799da1cc261..3e936a2ca36c 100644 --- a/drivers/media/platform/ti-vpe/cal-video.c +++ b/drivers/media/platform/ti-vpe/cal-video.c @@ -823,6 +823,9 @@ static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx) /* Enumerate sub device formats and enable all matching local formats */ ctx->active_fmt = devm_kcalloc(ctx->cal->dev, cal_num_formats, sizeof(*ctx->active_fmt), GFP_KERNEL); + if (!ctx->active_fmt) + return -ENOMEM; + ctx->num_active_fmt = 0; for (j = 0, i = 0; ; ++j) { diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c index c6cd2e6d8e65..a50701cfbbd7 100644 --- a/drivers/media/rc/gpio-ir-tx.c +++ b/drivers/media/rc/gpio-ir-tx.c @@ -48,11 +48,29 @@ static int gpio_ir_tx_set_carrier(struct rc_dev *dev, u32 carrier) return 0; } +static void delay_until(ktime_t until) +{ + /* + * delta should never exceed 0.5 seconds (IR_MAX_DURATION) and on + * m68k ndelay(s64) does not compile; so use s32 rather than s64. + */ + s32 delta; + + while (true) { + delta = ktime_us_delta(until, ktime_get()); + if (delta <= 0) + return; + + /* udelay more than 1ms may not work */ + delta = min(delta, 1000); + udelay(delta); + } +} + static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf, uint count) { ktime_t edge; - s32 delta; int i; local_irq_disable(); @@ -63,9 +81,7 @@ static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf, gpiod_set_value(gpio_ir->gpio, !(i % 2)); edge = ktime_add_us(edge, txbuf[i]); - delta = ktime_us_delta(edge, ktime_get()); - if (delta > 0) - udelay(delta); + delay_until(edge); } gpiod_set_value(gpio_ir->gpio, 0); @@ -97,9 +113,7 @@ static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf, if (i % 2) { // space edge = ktime_add_us(edge, txbuf[i]); - delta = ktime_us_delta(edge, ktime_get()); - if (delta > 0) - udelay(delta); + delay_until(edge); } else { // pulse ktime_t last = ktime_add_us(edge, txbuf[i]); diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c index 1aa7989e756c..7f394277478b 100644 --- a/drivers/media/rc/ir_toy.c +++ b/drivers/media/rc/ir_toy.c @@ -429,7 +429,7 @@ static int irtoy_probe(struct usb_interface *intf, err = usb_submit_urb(irtoy->urb_in, GFP_KERNEL); if (err != 0) { dev_err(irtoy->dev, "fail to submit in urb: %d\n", err); - return err; + goto free_rcdev; } err = irtoy_setup(irtoy); diff --git a/drivers/media/test-drivers/vidtv/vidtv_s302m.c b/drivers/media/test-drivers/vidtv/vidtv_s302m.c index d79b65854627..4676083cee3b 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_s302m.c +++ b/drivers/media/test-drivers/vidtv/vidtv_s302m.c @@ -455,6 +455,9 @@ struct vidtv_encoder e->name = kstrdup(args.name, GFP_KERNEL); e->encoder_buf = vzalloc(VIDTV_S302M_BUF_SZ); + if (!e->encoder_buf) + goto out_kfree_e; + e->encoder_buf_sz = VIDTV_S302M_BUF_SZ; e->encoder_buf_offset = 0; @@ -467,10 +470,8 @@ struct vidtv_encoder e->is_video_encoder = false; ctx = kzalloc(priv_sz, GFP_KERNEL); - if (!ctx) { - kfree(e); - 
return NULL; - } + if (!ctx) + goto out_kfree_buf; e->ctx = ctx; ctx->last_duration = 0; @@ -498,6 +499,14 @@ struct vidtv_encoder e->next = NULL; return e; + +out_kfree_buf: + kfree(e->encoder_buf); + +out_kfree_e: + kfree(e->name); + kfree(e); + return NULL; } void vidtv_s302m_encoder_destroy(struct vidtv_encoder *e) diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index ebc430b05f21..92d867fc519c 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -3931,6 +3931,8 @@ static int em28xx_usb_probe(struct usb_interface *intf, goto err_free; } + kref_init(&dev->ref); + dev->devno = nr; dev->model = id->driver_info; dev->alt = -1; @@ -4031,6 +4033,8 @@ static int em28xx_usb_probe(struct usb_interface *intf, } if (dev->board.has_dual_ts && em28xx_duplicate_dev(dev) == 0) { + kref_init(&dev->dev_next->ref); + dev->dev_next->ts = SECONDARY_TS; dev->dev_next->alt = -1; dev->dev_next->is_audio_only = has_vendor_audio && @@ -4085,12 +4089,8 @@ static int em28xx_usb_probe(struct usb_interface *intf, em28xx_write_reg(dev, 0x0b, 0x82); mdelay(100); } - - kref_init(&dev->dev_next->ref); } - kref_init(&dev->ref); - request_modules(dev); /* @@ -4145,11 +4145,8 @@ static void em28xx_usb_disconnect(struct usb_interface *intf) em28xx_close_extension(dev); - if (dev->dev_next) { - em28xx_close_extension(dev->dev_next); + if (dev->dev_next) em28xx_release_resources(dev->dev_next); - } - em28xx_release_resources(dev); if (dev->dev_next) { diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c index c742cc88fac5..1fa6f10ee157 100644 --- a/drivers/media/usb/go7007/s2250-board.c +++ b/drivers/media/usb/go7007/s2250-board.c @@ -504,6 +504,7 @@ static int s2250_probe(struct i2c_client *client, u8 *data; struct go7007 *go = i2c_get_adapdata(adapter); struct go7007_usb *usb = go->hpi_context; + int err = -EIO; audio = i2c_new_dummy_device(adapter, TLV320_ADDRESS >> 1); if (IS_ERR(audio)) @@ -532,11 +533,8 @@ static int s2250_probe(struct i2c_client *client, V4L2_CID_HUE, -512, 511, 1, 0); sd->ctrl_handler = &state->hdl; if (state->hdl.error) { - int err = state->hdl.error; - - v4l2_ctrl_handler_free(&state->hdl); - kfree(state); - return err; + err = state->hdl.error; + goto fail; } state->std = V4L2_STD_NTSC; @@ -600,7 +598,7 @@ fail: i2c_unregister_device(audio); v4l2_ctrl_handler_free(&state->hdl); kfree(state); - return -EIO; + return err; } static int s2250_remove(struct i2c_client *client) diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c index 563128d11731..60e57e0f1927 100644 --- a/drivers/media/usb/hdpvr/hdpvr-video.c +++ b/drivers/media/usb/hdpvr/hdpvr-video.c @@ -308,7 +308,6 @@ static int hdpvr_start_streaming(struct hdpvr_device *dev) dev->status = STATUS_STREAMING; - INIT_WORK(&dev->worker, hdpvr_transmit_buffers); schedule_work(&dev->worker); v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, @@ -1165,6 +1164,9 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent, bool ac3 = dev->flags & HDPVR_FLAG_AC3_CAP; int res; + // initialize dev->worker + INIT_WORK(&dev->worker, hdpvr_transmit_buffers); + dev->cur_std = V4L2_STD_525_60; dev->width = 720; dev->height = 480; diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c index 4e1698f78818..ce717502ea4c 100644 --- a/drivers/media/usb/stk1160/stk1160-core.c +++ b/drivers/media/usb/stk1160/stk1160-core.c @@ -403,7 +403,7 
@@ static void stk1160_disconnect(struct usb_interface *interface) /* Here is the only place where isoc get released */ stk1160_uninit_isoc(dev); - stk1160_clear_queue(dev); + stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR); video_unregister_device(&dev->vdev); v4l2_device_disconnect(&dev->v4l2_dev); diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c index 6a4eb616d516..1aa953469402 100644 --- a/drivers/media/usb/stk1160/stk1160-v4l.c +++ b/drivers/media/usb/stk1160/stk1160-v4l.c @@ -258,7 +258,7 @@ out_uninit: stk1160_uninit_isoc(dev); out_stop_hw: usb_set_interface(dev->udev, 0, 0); - stk1160_clear_queue(dev); + stk1160_clear_queue(dev, VB2_BUF_STATE_QUEUED); mutex_unlock(&dev->v4l_lock); @@ -306,7 +306,7 @@ static int stk1160_stop_streaming(struct stk1160 *dev) stk1160_stop_hw(dev); - stk1160_clear_queue(dev); + stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR); stk1160_dbg("streaming stopped\n"); @@ -745,7 +745,7 @@ static const struct video_device v4l_template = { /********************************************************************/ /* Must be called with both v4l_lock and vb_queue_lock hold */ -void stk1160_clear_queue(struct stk1160 *dev) +void stk1160_clear_queue(struct stk1160 *dev, enum vb2_buffer_state vb2_state) { struct stk1160_buffer *buf; unsigned long flags; @@ -756,7 +756,7 @@ void stk1160_clear_queue(struct stk1160 *dev) buf = list_first_entry(&dev->avail_bufs, struct stk1160_buffer, list); list_del(&buf->list); - vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); + vb2_buffer_done(&buf->vb.vb2_buf, vb2_state); stk1160_dbg("buffer [%p/%d] aborted\n", buf, buf->vb.vb2_buf.index); } @@ -766,7 +766,7 @@ void stk1160_clear_queue(struct stk1160 *dev) buf = dev->isoc_ctl.buf; dev->isoc_ctl.buf = NULL; - vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); + vb2_buffer_done(&buf->vb.vb2_buf, vb2_state); stk1160_dbg("buffer [%p/%d] aborted\n", buf, buf->vb.vb2_buf.index); } diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h index a31ea1c80f25..a70963ce8753 100644 --- a/drivers/media/usb/stk1160/stk1160.h +++ b/drivers/media/usb/stk1160/stk1160.h @@ -166,7 +166,7 @@ struct regval { int stk1160_vb2_setup(struct stk1160 *dev); int stk1160_video_register(struct stk1160 *dev); void stk1160_video_unregister(struct stk1160 *dev); -void stk1160_clear_queue(struct stk1160 *dev); +void stk1160_clear_queue(struct stk1160 *dev, enum vb2_buffer_state vb2_state); /* Provided by stk1160-video.c */ int stk1160_alloc_isoc(struct stk1160 *dev); diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c index c4b5082849b6..45a76f40deeb 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls-core.c +++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c @@ -113,6 +113,7 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx, struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quant; struct v4l2_ctrl_vp8_frame *p_vp8_frame; struct v4l2_ctrl_fwht_params *p_fwht_params; + struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix; void *p = ptr.p + idx * ctrl->elem_size; if (ctrl->p_def.p_const) @@ -160,6 +161,15 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx, p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV | (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET); break; + case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: + p_h264_scaling_matrix = p; + /* + * The default (flat) H.264 scaling matrix when none are + * specified in the bitstream, this is according to formulas + * (7-8) and 
(7-9) of the specification. + */ + memset(p_h264_scaling_matrix, 16, sizeof(*p_h264_scaling_matrix)); + break; } } diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index c7308a2a80a0..7c596a85f34f 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -279,8 +279,8 @@ static void v4l_print_format(const void *arg, bool write_only) const struct v4l2_vbi_format *vbi; const struct v4l2_sliced_vbi_format *sliced; const struct v4l2_window *win; - const struct v4l2_sdr_format *sdr; const struct v4l2_meta_format *meta; + u32 pixelformat; u32 planes; unsigned i; @@ -299,8 +299,9 @@ static void v4l_print_format(const void *arg, bool write_only) case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: mp = &p->fmt.pix_mp; + pixelformat = mp->pixelformat; pr_cont(", width=%u, height=%u, format=%p4cc, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n", - mp->width, mp->height, &mp->pixelformat, + mp->width, mp->height, &pixelformat, prt_names(mp->field, v4l2_field_names), mp->colorspace, mp->num_planes, mp->flags, mp->ycbcr_enc, mp->quantization, mp->xfer_func); @@ -343,14 +344,15 @@ static void v4l_print_format(const void *arg, bool write_only) break; case V4L2_BUF_TYPE_SDR_CAPTURE: case V4L2_BUF_TYPE_SDR_OUTPUT: - sdr = &p->fmt.sdr; - pr_cont(", pixelformat=%p4cc\n", &sdr->pixelformat); + pixelformat = p->fmt.sdr.pixelformat; + pr_cont(", pixelformat=%p4cc\n", &pixelformat); break; case V4L2_BUF_TYPE_META_CAPTURE: case V4L2_BUF_TYPE_META_OUTPUT: meta = &p->fmt.meta; + pixelformat = meta->dataformat; pr_cont(", dataformat=%p4cc, buffersize=%u\n", - &meta->dataformat, meta->buffersize); + &pixelformat, meta->buffersize); break; } } diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index e7f4bf5bc8dd..3de683b5e06d 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -585,19 +585,14 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, } EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); -int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, - struct v4l2_buffer *buf) +static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq, + struct v4l2_buffer *buf) { - struct vb2_queue *vq; - int ret = 0; - unsigned int i; - - vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - ret = vb2_querybuf(vq, buf); - /* Adjust MMAP memory offsets for the CAPTURE queue */ if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) { if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { + unsigned int i; + for (i = 0; i < buf->length; ++i) buf->m.planes[i].m.mem_offset += DST_QUEUE_OFF_BASE; @@ -605,8 +600,23 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, buf->m.offset += DST_QUEUE_OFF_BASE; } } +} - return ret; +int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_querybuf(vq, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); @@ -763,6 +773,9 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, if (ret) return ret; + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + /* * If the capture queue is streaming, but 
streaming hasn't started * on the device, but was asked to stop, mark the previously queued @@ -784,9 +797,17 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf) { struct vb2_queue *vq; + int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); + ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); @@ -795,9 +816,17 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, { struct video_device *vdev = video_devdata(file); struct vb2_queue *vq; + int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); + ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c index c267283b01fd..e749dcb3ddea 100644 --- a/drivers/memory/atmel-ebi.c +++ b/drivers/memory/atmel-ebi.c @@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev) smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0); ebi->smc.regmap = syscon_node_to_regmap(smc_np); - if (IS_ERR(ebi->smc.regmap)) - return PTR_ERR(ebi->smc.regmap); + if (IS_ERR(ebi->smc.regmap)) { + ret = PTR_ERR(ebi->smc.regmap); + goto put_node; + } ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np); - if (IS_ERR(ebi->smc.layout)) - return PTR_ERR(ebi->smc.layout); + if (IS_ERR(ebi->smc.layout)) { + ret = PTR_ERR(ebi->smc.layout); + goto put_node; + } ebi->smc.clk = of_clk_get(smc_np, 0); if (IS_ERR(ebi->smc.clk)) { - if (PTR_ERR(ebi->smc.clk) != -ENOENT) - return PTR_ERR(ebi->smc.clk); + if (PTR_ERR(ebi->smc.clk) != -ENOENT) { + ret = PTR_ERR(ebi->smc.clk); + goto put_node; + } ebi->smc.clk = NULL; } + of_node_put(smc_np); ret = clk_prepare_enable(ebi->smc.clk); if (ret) return ret; @@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev) } return of_platform_populate(np, NULL, NULL, dev); + +put_node: + of_node_put(smc_np); + return ret; } static __maybe_unused int atmel_ebi_resume(struct device *dev) diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index 762d0c0f0716..ecc78d6f89ed 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -1025,7 +1025,7 @@ static struct emif_data *__init_or_module get_device_details( temp = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL); - if (!emif || !pd || !dev_info) { + if (!emif || !temp || !dev_info) { dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__); goto error; } @@ -1117,7 +1117,7 @@ static int __init_or_module emif_probe(struct platform_device *pdev) { struct emif_data *emif; struct resource *res; - int irq; + int irq, ret; if (pdev->dev.of_node) emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev); @@ -1147,7 +1147,9 @@ static int __init_or_module emif_probe(struct platform_device *pdev) emif_onetime_settings(emif); emif_debugfs_init(emif); disable_and_clear_all_interrupts(emif); - setup_interrupts(emif, irq); + ret = setup_interrupts(emif, irq); + if (ret) + goto error; /* One-time actions taken on probing the first device */ if (!emif1) { diff --git a/drivers/memory/renesas-rpc-if.c 
b/drivers/memory/renesas-rpc-if.c index 861870223300..2a4c1f94bfa0 100644 --- a/drivers/memory/renesas-rpc-if.c +++ b/drivers/memory/renesas-rpc-if.c @@ -579,6 +579,7 @@ static int rpcif_probe(struct platform_device *pdev) struct platform_device *vdev; struct device_node *flash; const char *name; + int ret; flash = of_get_next_child(pdev->dev.of_node, NULL); if (!flash) { @@ -602,7 +603,14 @@ static int rpcif_probe(struct platform_device *pdev) return -ENOMEM; vdev->dev.parent = &pdev->dev; platform_set_drvdata(pdev, vdev); - return platform_device_add(vdev); + + ret = platform_device_add(vdev); + if (ret) { + platform_device_put(vdev); + return ret; + } + + return 0; } static int rpcif_remove(struct platform_device *pdev) diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 8d58c8df46cf..56338f9dbd0b 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -906,14 +906,14 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, ret = mfd_add_devices(&pdev->dev, pdev->id, &asic3_cell_ds1wm, 1, mem, asic->irq_base, NULL); if (ret < 0) - goto out; + goto out_unmap; } if (mem_sdio && (irq >= 0)) { ret = mfd_add_devices(&pdev->dev, pdev->id, &asic3_cell_mmc, 1, mem_sdio, irq, NULL); if (ret < 0) - goto out; + goto out_unmap; } ret = 0; @@ -927,8 +927,12 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, ret = mfd_add_devices(&pdev->dev, 0, asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0, NULL); } + return ret; - out: +out_unmap: + if (asic->tmio_cnf) + iounmap(asic->tmio_cnf); +out: return ret; } diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c index 1abe7432aad8..e281a9202f11 100644 --- a/drivers/mfd/mc13xxx-core.c +++ b/drivers/mfd/mc13xxx-core.c @@ -323,8 +323,10 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode, adc1 |= MC13783_ADC1_ATOX; dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__); - mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE, + ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE, mc13xxx_handler_adcdone, __func__, &adcdone_data); + if (ret) + goto out; mc13xxx_reg_write(mc13xxx, MC13XXX_ADC0, adc0); mc13xxx_reg_write(mc13xxx, MC13XXX_ADC1, adc1); diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c index de6d44a158bb..3f514d77a843 100644 --- a/drivers/misc/cardreader/alcor_pci.c +++ b/drivers/misc/cardreader/alcor_pci.c @@ -266,7 +266,7 @@ static int alcor_pci_probe(struct pci_dev *pdev, if (!priv) return -ENOMEM; - ret = ida_simple_get(&alcor_pci_idr, 0, 0, GFP_KERNEL); + ret = ida_alloc(&alcor_pci_idr, GFP_KERNEL); if (ret < 0) return ret; priv->id = ret; @@ -280,7 +280,8 @@ static int alcor_pci_probe(struct pci_dev *pdev, ret = pci_request_regions(pdev, DRV_NAME_ALCOR_PCI); if (ret) { dev_err(&pdev->dev, "Cannot request region\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error_free_ida; } if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { @@ -324,6 +325,8 @@ static int alcor_pci_probe(struct pci_dev *pdev, error_release_regions: pci_release_regions(pdev); +error_free_ida: + ida_free(&alcor_pci_idr, priv->id); return ret; } @@ -337,7 +340,7 @@ static void alcor_pci_remove(struct pci_dev *pdev) mfd_remove_devices(&pdev->dev); - ida_simple_remove(&alcor_pci_idr, priv->id); + ida_free(&alcor_pci_idr, priv->id); pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 985f1f3dbd20..0b46fd22c411 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ 
b/drivers/misc/habanalabs/common/debugfs.c @@ -856,6 +856,8 @@ static ssize_t hl_set_power_state(struct file *f, const char __user *buf, pci_set_power_state(hdev->pdev, PCI_D0); pci_restore_state(hdev->pdev); rc = pci_enable_device(hdev->pdev); + if (rc < 0) + return rc; } else if (value == 2) { pci_save_state(hdev->pdev); pci_disable_device(hdev->pdev); diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c index 0f536f79dd9c..e68e9f71c546 100644 --- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c +++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c @@ -467,7 +467,7 @@ static void hl_mmu_v1_fini(struct hl_device *hdev) { /* MMU H/W fini was already done in device hw_fini() */ - if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.hr.mmu_shadow_hop0)) { + if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) { kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0); gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool); diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 67c5b452dd35..88b91ad8e541 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -1070,10 +1070,10 @@ static int kgdbts_option_setup(char *opt) { if (strlen(opt) >= MAX_CONFIG_LEN) { printk(KERN_ERR "kgdbts: config string too long\n"); - return -ENOSPC; + return 1; } strcpy(config, opt); - return 0; + return 1; } __setup("kgdbts=", kgdbts_option_setup); diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 67bb6a25fd0a..64ce3f830262 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -107,6 +107,7 @@ #define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */ #define MEI_DEV_ID_ADP_LP 0x7A60 /* Alder Lake Point LP */ #define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */ +#define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */ /* * MEI HW Section @@ -120,6 +121,7 @@ #define PCI_CFG_HFS_2 0x48 #define PCI_CFG_HFS_3 0x60 # define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070 +# define PCI_CFG_HFS_3_FW_SKU_IGN 0x00000000 # define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060 #define PCI_CFG_HFS_4 0x64 #define PCI_CFG_HFS_5 0x68 diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index d3a6c0728645..fbc4c9581864 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1405,16 +1405,16 @@ static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev) .quirk_probe = mei_me_fw_type_sps_4 /** - * mei_me_fw_type_sps() - check for sps sku + * mei_me_fw_type_sps_ign() - check for sps or ign sku * - * Read ME FW Status register to check for SPS Firmware. - * The SPS FW is only signaled in pci function 0 + * Read ME FW Status register to check for SPS or IGN Firmware. 
+ * The SPS/IGN FW is only signaled in pci function 0 * * @pdev: pci device * - * Return: true in case of SPS firmware + * Return: true in case of SPS/IGN firmware */ -static bool mei_me_fw_type_sps(const struct pci_dev *pdev) +static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev) { u32 reg; u32 fw_type; @@ -1427,14 +1427,15 @@ static bool mei_me_fw_type_sps(const struct pci_dev *pdev) dev_dbg(&pdev->dev, "fw type is %d\n", fw_type); - return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS; + return fw_type == PCI_CFG_HFS_3_FW_SKU_IGN || + fw_type == PCI_CFG_HFS_3_FW_SKU_SPS; } #define MEI_CFG_KIND_ITOUCH \ .kind = "itouch" -#define MEI_CFG_FW_SPS \ - .quirk_probe = mei_me_fw_type_sps +#define MEI_CFG_FW_SPS_IGN \ + .quirk_probe = mei_me_fw_type_sps_ign #define MEI_CFG_FW_VER_SUPP \ .fw_ver_supported = 1 @@ -1535,7 +1536,7 @@ static const struct mei_cfg mei_me_pch12_sps_cfg = { MEI_CFG_PCH8_HFS, MEI_CFG_FW_VER_SUPP, MEI_CFG_DMA_128, - MEI_CFG_FW_SPS, + MEI_CFG_FW_SPS_IGN, }; /* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion @@ -1545,7 +1546,7 @@ static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = { MEI_CFG_KIND_ITOUCH, MEI_CFG_PCH8_HFS, MEI_CFG_FW_VER_SUPP, - MEI_CFG_FW_SPS, + MEI_CFG_FW_SPS_IGN, }; /* Tiger Lake and newer devices */ @@ -1562,7 +1563,7 @@ static const struct mei_cfg mei_me_pch15_sps_cfg = { MEI_CFG_FW_VER_SUPP, MEI_CFG_DMA_128, MEI_CFG_TRC, - MEI_CFG_FW_SPS, + MEI_CFG_FW_SPS_IGN, }; /* diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index a67f4f2d33a9..0706322154cb 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -424,31 +424,26 @@ int mei_irq_read_handler(struct mei_device *dev, list_for_each_entry(cl, &dev->file_list, link) { if (mei_cl_hbm_equal(cl, mei_hdr)) { cl_dbg(dev, cl, "got a message\n"); - break; + ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list); + goto reset_slots; } } /* if no recipient cl was found we assume corrupted header */ - if (&cl->link == &dev->file_list) { - /* A message for not connected fixed address clients - * should be silently discarded - * On power down client may be force cleaned, - * silently discard such messages - */ - if (hdr_is_fixed(mei_hdr) || - dev->dev_state == MEI_DEV_POWER_DOWN) { - mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length); - ret = 0; - goto reset_slots; - } - dev_err(dev->dev, "no destination client found 0x%08X\n", - dev->rd_msg_hdr[0]); - ret = -EBADMSG; - goto end; + /* A message for not connected fixed address clients + * should be silently discarded + * On power down client may be force cleaned, + * silently discard such messages + */ + if (hdr_is_fixed(mei_hdr) || + dev->dev_state == MEI_DEV_POWER_DOWN) { + mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length); + ret = 0; + goto reset_slots; } - - ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list); - + dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]); + ret = -EBADMSG; + goto end; reset_slots: /* reset the number of slots and header */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 3a45aaf002ac..a738253dbd05 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -113,6 +113,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)}, /* required last entry */ {0, } diff --git 
a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 431af5e8be2f..c4fcb1ad6d4d 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1682,31 +1682,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) struct mmc_card *card = mq->card; struct mmc_host *host = card->host; blk_status_t error = BLK_STS_OK; - int retries = 0; do { u32 status; int err; + int retries = 0; - mmc_blk_rw_rq_prep(mqrq, card, 1, mq); + while (retries++ <= MMC_READ_SINGLE_RETRIES) { + mmc_blk_rw_rq_prep(mqrq, card, 1, mq); - mmc_wait_for_req(host, mrq); + mmc_wait_for_req(host, mrq); - err = mmc_send_status(card, &status); - if (err) - goto error_exit; - - if (!mmc_host_is_spi(host) && - !mmc_ready_for_data(status)) { - err = mmc_blk_fix_state(card, req); + err = mmc_send_status(card, &status); if (err) goto error_exit; - } - if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES) - continue; + if (!mmc_host_is_spi(host) && + !mmc_ready_for_data(status)) { + err = mmc_blk_fix_state(card, req); + if (err) + goto error_exit; + } - retries = 0; + if (!mrq->cmd->error) + break; + } if (mrq->cmd->error || mrq->data->error || @@ -1880,6 +1880,31 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; } +static int mmc_spi_err_check(struct mmc_card *card) +{ + u32 status = 0; + int err; + + /* + * SPI does not have a TRAN state we have to wait on, instead the + * card is ready again when it no longer holds the line LOW. + * We still have to ensure two things here before we know the write + * was successful: + * 1. The card has not disconnected during busy and we actually read our + * own pull-up, thinking it was still connected, so ensure it + * still responds. + * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a + * just reconnected card after being disconnected during busy. 
+ */ + err = __mmc_send_status(card, &status, 0); + if (err) + return err; + /* All R1 and R2 bits of SPI are errors in our case */ + if (status) + return -EIO; + return 0; +} + static int mmc_blk_busy_cb(void *cb_data, bool *busy) { struct mmc_blk_busy_data *data = cb_data; @@ -1903,9 +1928,16 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) struct mmc_blk_busy_data cb_data; int err; - if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) + if (rq_data_dir(req) == READ) return 0; + if (mmc_host_is_spi(card->host)) { + err = mmc_spi_err_check(card); + if (err) + mqrq->brq.data.bytes_xfered = 0; + return err; + } + cb_data.card = card; cb_data.status = 0; err = __mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, &mmc_blk_busy_cb, @@ -2344,6 +2376,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, struct mmc_blk_data *md; int devidx, ret; char cap_str[10]; + bool cache_enabled = false; + bool fua_enabled = false; devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL); if (devidx < 0) { @@ -2425,13 +2459,17 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, md->flags |= MMC_BLK_CMD23; } - if (mmc_card_mmc(card) && - md->flags & MMC_BLK_CMD23 && + if (md->flags & MMC_BLK_CMD23 && ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || card->ext_csd.rel_sectors)) { md->flags |= MMC_BLK_REL_WR; - blk_queue_write_cache(md->queue.queue, true, true); + fua_enabled = true; + cache_enabled = true; } + if (mmc_cache_enabled(card->host)) + cache_enabled = true; + + blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled); string_get_size((u64)size, 512, STRING_UNITS_2, cap_str, sizeof(cap_str)); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index cf140f4ec864..d739e2b631fe 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -588,6 +588,16 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) EXPORT_SYMBOL(mmc_alloc_host); +static int mmc_validate_host_caps(struct mmc_host *host) +{ + if (host->caps & MMC_CAP_SDIO_IRQ && !host->ops->enable_sdio_irq) { + dev_warn(host->parent, "missing ->enable_sdio_irq() ops\n"); + return -EINVAL; + } + + return 0; +} + /** * mmc_add_host - initialise host hardware * @host: mmc host @@ -600,8 +610,9 @@ int mmc_add_host(struct mmc_host *host) { int err; - WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && - !host->ops->enable_sdio_irq); + err = mmc_validate_host_caps(host); + if (err) + return err; err = device_add(&host->class_dev); if (err) diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 2a757c88f9d2..80de660027d8 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1375,8 +1375,12 @@ static int davinci_mmcsd_suspend(struct device *dev) static int davinci_mmcsd_resume(struct device *dev) { struct mmc_davinci_host *host = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(host->clk); + if (ret) + return ret; - clk_enable(host->clk); mmc_davinci_reset_ctrl(host, 0); return 0; diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 8f36536cb1b6..58ab9d90bc8b 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -173,6 +173,8 @@ struct meson_host { int irq; bool vqmmc_enabled; + bool needs_pre_post_req; + }; #define CMD_CFG_LENGTH_MASK GENMASK(8, 0) @@ -663,6 +665,8 @@ static void meson_mmc_request_done(struct mmc_host *mmc, struct meson_host *host = mmc_priv(mmc); host->cmd = NULL; + if (host->needs_pre_post_req) 
+ meson_mmc_post_req(mmc, mrq, 0); mmc_request_done(host->mmc, mrq); } @@ -880,7 +884,7 @@ static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct meson_host *host = mmc_priv(mmc); - bool needs_pre_post_req = mrq->data && + host->needs_pre_post_req = mrq->data && !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE); /* @@ -896,22 +900,19 @@ static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) } } - if (needs_pre_post_req) { + if (host->needs_pre_post_req) { meson_mmc_get_transfer_mode(mmc, mrq); if (!meson_mmc_desc_chain_mode(mrq->data)) - needs_pre_post_req = false; + host->needs_pre_post_req = false; } - if (needs_pre_post_req) + if (host->needs_pre_post_req) meson_mmc_pre_req(mmc, mrq); /* Stop execution */ writel(0, host->regs + SD_EMMC_START); meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd); - - if (needs_pre_post_req) - meson_mmc_post_req(mmc, mrq, 0); } static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd) diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c index a75d3dd34d18..4cceb9bab036 100644 --- a/drivers/mmc/host/mmci_stm32_sdmmc.c +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c @@ -62,8 +62,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host, * excepted the last element which has no constraint on idmasize */ for_each_sg(data->sg, sg, data->sg_len - 1, i) { - if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) || - !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) { + if (!IS_ALIGNED(sg->offset, sizeof(u32)) || + !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) { dev_err(mmc_dev(host->mmc), "unaligned scatterlist: ofst:%x length:%d\n", data->sg->offset, data->sg->length); @@ -71,7 +71,7 @@ static int sdmmc_idma_validate_data(struct mmci_host *host, } } - if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) { + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { dev_err(mmc_dev(host->mmc), "unaligned last scatterlist: ofst:%x length:%d\n", data->sg->offset, data->sg->length); diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index f5b2684ad805..ae689bf54686 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -382,10 +382,10 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc) SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) | sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2)); - /* Set the sampling clock selection range of HS400 mode */ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL, SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN | - 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT); + sd_scc_read32(host, priv, + SH_MOBILE_SDHI_SCC_DTCNTL)); /* Avoid bad TAP */ if (bad_taps & BIT(priv->tap_set)) { diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 58cfaffa3c2d..f7c384db89bf 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -1495,12 +1495,12 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) realtek_init_host(host); - if (pcr->rtd3_en) { - pm_runtime_set_autosuspend_delay(&pdev->dev, 5000); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_enable(&pdev->dev); - } - + pm_runtime_no_callbacks(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 200); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_use_autosuspend(&pdev->dev); mmc_add_host(mmc); @@ -1521,11 
+1521,6 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) pcr->slots[RTSX_SD_CARD].card_event = NULL; mmc = host->mmc; - if (pcr->rtd3_en) { - pm_runtime_dont_use_autosuspend(&pdev->dev); - pm_runtime_disable(&pdev->dev); - } - cancel_work_sync(&host->work); mutex_lock(&host->host_mutex); @@ -1548,6 +1543,9 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) flush_work(&host->work); + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); + mmc_free_host(mmc); dev_dbg(&(pdev->dev), diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 666cee4c7f7c..08e838400b52 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -241,16 +241,6 @@ static void xenon_voltage_switch(struct sdhci_host *host) { /* Wait for 5ms after set 1.8V signal enable bit */ usleep_range(5000, 5500); - - /* - * For some reason the controller's Host Control2 register reports - * the bit representing 1.8V signaling as 0 when read after it was - * written as 1. Subsequent read reports 1. - * - * Since this may cause some issues, do an empty read of the Host - * Control2 register here to circumvent this. - */ - sdhci_readw(host, SDHCI_HOST_CONTROL2); } static unsigned int xenon_get_max_clock(struct sdhci_host *host) diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index f654afbe8e83..b4891bb26648 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -514,26 +514,6 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = { .flags = IOMUX_PRESENT, }; -static const struct sdhci_pltfm_data sdhci_am64_8bit_pdata = { - .ops = &sdhci_j721e_8bit_ops, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, -}; - -static const struct sdhci_am654_driver_data sdhci_am64_8bit_drvdata = { - .pdata = &sdhci_am64_8bit_pdata, - .flags = DLL_PRESENT | DLL_CALIB, -}; - -static const struct sdhci_pltfm_data sdhci_am64_4bit_pdata = { - .ops = &sdhci_j721e_4bit_ops, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, -}; - -static const struct sdhci_am654_driver_data sdhci_am64_4bit_drvdata = { - .pdata = &sdhci_am64_4bit_pdata, - .flags = IOMUX_PRESENT, -}; - static const struct soc_device_attribute sdhci_am654_devices[] = { { .family = "AM65X", .revision = "SR1.0", @@ -759,11 +739,11 @@ static const struct of_device_id sdhci_am654_of_match[] = { }, { .compatible = "ti,am64-sdhci-8bit", - .data = &sdhci_am64_8bit_drvdata, + .data = &sdhci_j721e_8bit_drvdata, }, { .compatible = "ti,am64-sdhci-4bit", - .data = &sdhci_am64_4bit_drvdata, + .data = &sdhci_j721e_4bit_drvdata, }, { /* sentinel */ } }; diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c index 77c872fd3d83..7d188cdff6a2 100644 --- a/drivers/mtd/devices/mchp23k256.c +++ b/drivers/mtd/devices/mchp23k256.c @@ -229,6 +229,19 @@ static const struct of_device_id mchp23k256_of_table[] = { }; MODULE_DEVICE_TABLE(of, mchp23k256_of_table); +static const struct spi_device_id mchp23k256_spi_ids[] = { + { + .name = "mchp23k256", + .driver_data = (kernel_ulong_t)&mchp23k256_caps, + }, + { + .name = "mchp23lcv1024", + .driver_data = (kernel_ulong_t)&mchp23lcv1024_caps, + }, + {} +}; +MODULE_DEVICE_TABLE(spi, mchp23k256_spi_ids); + static struct spi_driver mchp23k256_driver = { .driver = { .name = "mchp23k256", @@ -236,6 +249,7 @@ static struct spi_driver mchp23k256_driver = { }, .probe = mchp23k256_probe, .remove = mchp23k256_remove, + .id_table = mchp23k256_spi_ids, }; 
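Aside on the mchp23k256 hunk above (and the mchp48l640 one just below): each adds a spi_device_id table alongside the existing OF match table so the SPI core can also match the device, and autoload the module, by plain device name. A userspace sketch of the sentinel-terminated lookup such a table enables follows; the struct names and capability values are illustrative, not the kernel's.

```c
#include <stdio.h>
#include <string.h>

/* Illustrative per-chip capabilities keyed by device name. */
struct chip_caps {
	unsigned int size_kbit;
};

static const struct chip_caps mchp23k256_caps    = { .size_kbit = 256 };
static const struct chip_caps mchp23lcv1024_caps = { .size_kbit = 1024 };

struct spi_id {
	const char *name;
	const struct chip_caps *caps;
};

static const struct spi_id id_table[] = {
	{ "mchp23k256",    &mchp23k256_caps },
	{ "mchp23lcv1024", &mchp23lcv1024_caps },
	{ }	/* sentinel: empty entry terminates the table */
};

/* Walk the table until the sentinel, matching on device name. */
static const struct spi_id *match_id(const char *devname)
{
	const struct spi_id *id;

	for (id = id_table; id->name; id++)
		if (!strcmp(id->name, devname))
			return id;
	return NULL;
}

int main(void)
{
	const struct spi_id *id = match_id("mchp23lcv1024");

	if (id)
		printf("%s: %u kbit\n", id->name, id->caps->size_kbit);
	return 0;
}
```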
module_spi_driver(mchp23k256_driver); diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c index 99400d0fb8c1..fbd6b6bf908e 100644 --- a/drivers/mtd/devices/mchp48l640.c +++ b/drivers/mtd/devices/mchp48l640.c @@ -357,6 +357,15 @@ static const struct of_device_id mchp48l640_of_table[] = { }; MODULE_DEVICE_TABLE(of, mchp48l640_of_table); +static const struct spi_device_id mchp48l640_spi_ids[] = { + { + .name = "48l640", + .driver_data = (kernel_ulong_t)&mchp48l640_caps, + }, + {} +}; +MODULE_DEVICE_TABLE(spi, mchp48l640_spi_ids); + static struct spi_driver mchp48l640_driver = { .driver = { .name = "mchp48l640", @@ -364,6 +373,7 @@ static struct spi_driver mchp48l640_driver = { }, .probe = mchp48l640_probe, .remove = mchp48l640_remove, + .id_table = mchp48l640_spi_ids, }; module_spi_driver(mchp48l640_driver); diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 6ed6c51fac69..d503821a3e60 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -264,16 +264,20 @@ static int phram_setup(const char *val) } } - if (erasesize) - div_u64_rem(len, (uint32_t)erasesize, &rem); - if (len == 0 || erasesize == 0 || erasesize > len - || erasesize > UINT_MAX || rem) { + || erasesize > UINT_MAX) { parse_err("illegal erasesize or len\n"); ret = -EINVAL; goto error; } + div_u64_rem(len, (uint32_t)erasesize, &rem); + if (rem) { + parse_err("len is not multiple of erasesize\n"); + ret = -EINVAL; + goto error; + } + ret = register_device(name, start, len, (uint32_t)erasesize); if (ret) goto error; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 54df9cfd588e..61f236e0378a 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -546,6 +546,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) config.stride = 1; config.read_only = true; config.root_only = true; + config.ignore_wp = true; config.no_of_node = !of_device_is_compatible(node, "nvmem-cells"); config.priv = mtd; @@ -830,6 +831,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd, config.owner = THIS_MODULE; config.type = NVMEM_TYPE_OTP; config.root_only = true; + config.ignore_wp = true; config.reg_read = reg_read; config.size = size; config.of_node = np; diff --git a/drivers/mtd/nand/onenand/generic.c b/drivers/mtd/nand/onenand/generic.c index 8b6f4da5d720..a4b8b65fe15f 100644 --- a/drivers/mtd/nand/onenand/generic.c +++ b/drivers/mtd/nand/onenand/generic.c @@ -53,7 +53,12 @@ static int generic_onenand_probe(struct platform_device *pdev) } info->onenand.mmcontrol = pdata ? 
pdata->mmcontrol : NULL; - info->onenand.irq = platform_get_irq(pdev, 0); + + err = platform_get_irq(pdev, 0); + if (err < 0) + goto out_iounmap; + + info->onenand.irq = err; info->mtd.dev.parent = &pdev->dev; info->mtd.priv = &info->onenand; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index f3276ee9e4fe..ddd93bc38ea6 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -2060,13 +2060,15 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc, nc->mck = of_clk_get(dev->parent->of_node, 0); if (IS_ERR(nc->mck)) { dev_err(dev, "Failed to retrieve MCK clk\n"); - return PTR_ERR(nc->mck); + ret = PTR_ERR(nc->mck); + goto out_release_dma; } np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0); if (!np) { dev_err(dev, "Missing or invalid atmel,smc property\n"); - return -EINVAL; + ret = -EINVAL; + goto out_release_dma; } nc->smc = syscon_node_to_regmap(np); @@ -2074,10 +2076,16 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc, if (IS_ERR(nc->smc)) { ret = PTR_ERR(nc->smc); dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret); - return ret; + goto out_release_dma; } return 0; + +out_release_dma: + if (nc->dmac) + dma_release_channel(nc->dmac); + + return ret; } static int diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index f75929783b94..aee78f5f4f15 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, mtd->oobsize / trans, host->hwcfg.sector_size_1k); - if (!ret) { + if (ret != -EBADMSG) { *err_addr = brcmnand_get_uncorrecc_addr(ctrl); if (*err_addr) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 6e9f7d80ef8b..b72b387c08ef 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -648,6 +648,7 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, const struct nand_sdr_timings *sdr) { struct gpmi_nfc_hardware_timing *hw = &this->hw; + struct resources *r = &this->resources; unsigned int dll_threshold_ps = this->devdata->max_chain_delay; unsigned int period_ps, reference_period_ps; unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles; @@ -671,6 +672,8 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; } + hw->clk_rate = clk_round_rate(r->clock[0], hw->clk_rate); + /* SDR core timings are given in picoseconds */ period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate); @@ -2293,7 +2296,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, this->hw.must_apply_timings = false; ret = gpmi_nfc_apply_timings(this); if (ret) - return ret; + goto out_pm; } dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs); @@ -2422,6 +2425,7 @@ unmap: this->bch = false; +out_pm: pm_runtime_mark_last_busy(this->dev); pm_runtime_put_autosuspend(this->dev); diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c index efe0ffe4f1ab..9054559e52dd 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c @@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np) struct ingenic_ecc *ecc; pdev = 
of_find_device_by_node(np); - if (!pdev || !platform_get_drvdata(pdev)) + if (!pdev) return ERR_PTR(-EPROBE_DEFER); + if (!platform_get_drvdata(pdev)) { + put_device(&pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } + ecc = platform_get_drvdata(pdev); clk_prepare_enable(ecc->clk); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index d5a2110eb38e..881e768f636f 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -335,16 +335,19 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs) * * Return: -EBUSY if the chip has been suspended, 0 otherwise */ -static int nand_get_device(struct nand_chip *chip) +static void nand_get_device(struct nand_chip *chip) { - mutex_lock(&chip->lock); - if (chip->suspended) { + /* Wait until the device is resumed. */ + while (1) { + mutex_lock(&chip->lock); + if (!chip->suspended) { + mutex_lock(&chip->controller->lock); + return; + } mutex_unlock(&chip->lock); - return -EBUSY; - } - mutex_lock(&chip->controller->lock); - return 0; + wait_event(chip->resume_wq, !chip->suspended); + } } /** @@ -573,9 +576,7 @@ static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs) nand_erase_nand(chip, &einfo, 0); /* Write bad block marker to OOB */ - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); ret = nand_markbad_bbm(chip, ofs); nand_release_device(chip); @@ -3823,9 +3824,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from, ops->mode != MTD_OPS_RAW) return -ENOTSUPP; - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); if (!ops->datbuf) ret = nand_do_read_oob(chip, from, ops); @@ -4412,13 +4411,11 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { struct nand_chip *chip = mtd_to_nand(mtd); - int ret; + int ret = 0; ops->retlen = 0; - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); switch (ops->mode) { case MTD_OPS_PLACE_OOB: @@ -4478,9 +4475,7 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, return -EIO; /* Grab the lock and see if the device is available */ - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); /* Shift to get first page */ page = (int)(instr->addr >> chip->page_shift); @@ -4567,7 +4562,7 @@ static void nand_sync(struct mtd_info *mtd) pr_debug("%s: called\n", __func__); /* Grab the lock and see if the device is available */ - WARN_ON(nand_get_device(chip)); + nand_get_device(chip); /* Release it and go back */ nand_release_device(chip); } @@ -4584,9 +4579,7 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) int ret; /* Select the NAND device */ - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); nand_select_target(chip, chipnr); @@ -4657,6 +4650,8 @@ static void nand_resume(struct mtd_info *mtd) __func__); } mutex_unlock(&chip->lock); + + wake_up_all(&chip->resume_wq); } /** @@ -5434,6 +5429,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, chip->cur_cs = -1; mutex_init(&chip->lock); + init_waitqueue_head(&chip->resume_wq); /* Enforce the right timings for reset/detection */ chip->current_interface_config = nand_get_reset_interface_config(); diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c index 8a91e069ee2e..3c6f6aff649f 100644 --- a/drivers/mtd/nand/raw/pl35x-nand-controller.c +++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c @@ -1062,7 +1062,7 @@ 
static int pl35x_nand_chip_init(struct pl35x_nandc *nfc, chip->controller = &nfc->controller; mtd = nand_to_mtd(chip); mtd->dev.parent = nfc->dev; - nand_set_flash_node(chip, nfc->dev->of_node); + nand_set_flash_node(chip, np); if (!mtd->name) { mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL, "%s", PL35X_NANDC_DRIVER_NAME); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 04e6f7b26706..0f41a9a42157 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -2,7 +2,6 @@ /* * Copyright (c) 2016, The Linux Foundation. All rights reserved. */ - #include <linux/clk.h> #include <linux/slab.h> #include <linux/bitops.h> @@ -3063,10 +3062,6 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (dma_mapping_error(dev, nandc->base_dma)) return -ENXIO; - ret = qcom_nandc_alloc(nandc); - if (ret) - goto err_nandc_alloc; - ret = clk_prepare_enable(nandc->core_clk); if (ret) goto err_core_clk; @@ -3075,6 +3070,10 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (ret) goto err_aon_clk; + ret = qcom_nandc_alloc(nandc); + if (ret) + goto err_nandc_alloc; + ret = qcom_nandc_setup(nandc); if (ret) goto err_setup; @@ -3086,15 +3085,14 @@ static int qcom_nandc_probe(struct platform_device *pdev) return 0; err_setup: + qcom_nandc_unalloc(nandc); +err_nandc_alloc: clk_disable_unprepare(nandc->aon_clk); err_aon_clk: clk_disable_unprepare(nandc->core_clk); err_core_clk: - qcom_nandc_unalloc(nandc); -err_nandc_alloc: dma_unmap_resource(dev, res->start, resource_size(res), DMA_BIDIRECTIONAL, 0); - return ret; } diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c index 06a818cd2433..32ddfea70142 100644 --- a/drivers/mtd/parsers/qcomsmempart.c +++ b/drivers/mtd/parsers/qcomsmempart.c @@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, const struct mtd_partition **pparts, struct mtd_part_parser_data *data) { + size_t len = SMEM_FLASH_PTABLE_HDR_LEN; + int ret, i, j, tmpparts, numparts = 0; struct smem_flash_pentry *pentry; struct smem_flash_ptable *ptable; - size_t len = SMEM_FLASH_PTABLE_HDR_LEN; struct mtd_partition *parts; - int ret, i, numparts; char *name, *c; if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) @@ -87,8 +87,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, } /* Ensure that # of partitions is less than the max we have allocated */ - numparts = le32_to_cpu(ptable->numparts); - if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) { + tmpparts = le32_to_cpu(ptable->numparts); + if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) { pr_err("Partition numbers exceed the max limit\n"); return -EINVAL; } @@ -116,11 +116,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, return PTR_ERR(ptable); } + for (i = 0; i < tmpparts; i++) { + pentry = &ptable->pentry[i]; + if (pentry->name[0] != '\0') + numparts++; + } + parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL); if (!parts) return -ENOMEM; - for (i = 0; i < numparts; i++) { + for (i = 0, j = 0; i < tmpparts; i++) { pentry = &ptable->pentry[i]; if (pentry->name[0] == '\0') continue; @@ -135,24 +141,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, for (c = name; *c != '\0'; c++) *c = tolower(*c); - parts[i].name = name; - parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize; - parts[i].mask_flags = pentry->attr; - parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize; + parts[j].name = name; + parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize; + parts[j].mask_flags = 
pentry->attr; + parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize; pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n", i, pentry->name, le32_to_cpu(pentry->offset), le32_to_cpu(pentry->length), pentry->attr); + j++; } pr_debug("SMEM partition table found: ver: %d len: %d\n", - le32_to_cpu(ptable->version), numparts); + le32_to_cpu(ptable->version), tmpparts); *pparts = parts; return numparts; out_free_parts: - while (--i >= 0) - kfree(parts[i].name); + while (--j >= 0) + kfree(parts[j].name); kfree(parts); *pparts = NULL; @@ -166,6 +173,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts, for (i = 0; i < nr_parts; i++) kfree(pparts[i].name); + + kfree(pparts); } static const struct of_device_id qcomsmem_of_match_table[] = { diff --git a/drivers/mtd/spi-nor/controllers/aspeed-smc.c b/drivers/mtd/spi-nor/controllers/aspeed-smc.c index 36ea017954a1..367d97abb3f6 100644 --- a/drivers/mtd/spi-nor/controllers/aspeed-smc.c +++ b/drivers/mtd/spi-nor/controllers/aspeed-smc.c @@ -790,6 +790,18 @@ static u32 aspeed_smc_chip_set_segment(struct aspeed_smc_chip *chip) chip->cs, size >> 20); } + /* + * The decoding size of AST2600 SPI controller should set at + * least 2MB. + */ + if ((controller->info == &spi_2600_info || + controller->info == &fmc_2600_info) && size < SZ_2M) { + size = SZ_2M; + dev_info(chip->nor.dev, + "CE%d window resized to %dMB (AST2600 Decoding)", + chip->cs, size >> 20); + } + ahb_base_phy = controller->ahb_base_phy; /* diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c index c783ab4ba1e4..0ff7a20d030c 100644 --- a/drivers/mtd/spi-nor/winbond.c +++ b/drivers/mtd/spi-nor/winbond.c @@ -104,6 +104,8 @@ static const struct flash_info winbond_parts[] = { SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) }, { "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "w25q01jviq", INFO(0xef4021, 0, 64 * 1024, 2048, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, }; /** diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c index 1138bdbf4199..75dd13a39040 100644 --- a/drivers/mtd/spi-nor/xilinx.c +++ b/drivers/mtd/spi-nor/xilinx.c @@ -66,7 +66,8 @@ static int xilinx_nor_setup(struct spi_nor *nor, /* Flash in Power of 2 mode */ nor->page_size = (nor->page_size == 264) ? 256 : 512; nor->mtd.writebufsize = nor->page_size; - nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors; + nor->params->size = 8 * nor->page_size * nor->info->n_sectors; + nor->mtd.size = nor->params->size; nor->mtd.erasesize = 8 * nor->page_size; } else { /* Flash in Default addressing mode */ diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index a7e3eb9befb6..a32050fecabf 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -351,9 +351,6 @@ static ssize_t dev_attribute_show(struct device *dev, * we still can use 'ubi->ubi_num'. 
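
The qcomsmempart.c change above fixes the parser by splitting it into two passes: first count the non-empty entries in the SMEM partition table, then allocate exactly that many mtd_partition slots and fill them with a separate output index, so empty entries no longer leave holes or inflate the reported partition count. A hedged, standalone sketch of the same count-then-allocate pattern (struct entry and struct part are invented stand-ins, not the SMEM structures):

#include <stdlib.h>

struct entry { char name[16]; };
struct part { const char *name; };

/* Pass 1: count valid entries.  Pass 2: allocate and fill with its own index. */
static struct part *collect_parts(const struct entry *tbl, int total, int *nparts)
{
	struct part *parts;
	int i, j, valid = 0;

	for (i = 0; i < total; i++)
		if (tbl[i].name[0] != '\0')
			valid++;

	*nparts = 0;
	if (!valid)
		return NULL;

	parts = calloc(valid, sizeof(*parts));
	if (!parts)
		return NULL;

	for (i = 0, j = 0; i < total; i++) {
		if (tbl[i].name[0] == '\0')
			continue;
		parts[j++].name = tbl[i].name;
	}

	*nparts = valid;
	return parts;
}
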
*/ ubi = container_of(dev, struct ubi_device, dev); - ubi = ubi_get_device(ubi->ubi_num); - if (!ubi) - return -ENODEV; if (attr == &dev_eraseblock_size) ret = sprintf(buf, "%d\n", ubi->leb_size); @@ -382,7 +379,6 @@ static ssize_t dev_attribute_show(struct device *dev, else ret = -EINVAL; - ubi_put_device(ubi); return ret; } @@ -979,9 +975,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, goto out_detach; } - /* Make device "available" before it becomes accessible via sysfs */ - ubi_devices[ubi_num] = ubi; - err = uif_init(ubi); if (err) goto out_detach; @@ -1026,6 +1019,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, wake_up_process(ubi->bgt_thread); spin_unlock(&ubi->wl_lock); + ubi_devices[ubi_num] = ubi; ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); return ubi_num; @@ -1034,7 +1028,6 @@ out_debugfs: out_uif: uif_close(ubi); out_detach: - ubi_devices[ubi_num] = NULL; ubi_wl_close(ubi); ubi_free_all_volumes(ubi); vfree(ubi->vtbl); diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 022af59906aa..6b5f1ffd961b 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -468,7 +468,9 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, if (err == UBI_IO_FF_BITFLIPS) scrub = 1; - add_aeb(ai, free, pnum, ec, scrub); + ret = add_aeb(ai, free, pnum, ec, scrub); + if (ret) + goto out; continue; } else if (err == 0 || err == UBI_IO_BITFLIPS) { dbg_bld("Found non empty PEB:%i in pool", pnum); @@ -638,8 +640,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, if (fm_pos >= fm_size) goto fail_bad; - add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum), - be32_to_cpu(fmec->ec), 0); + ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum), + be32_to_cpu(fmec->ec), 0); + if (ret) + goto fail; } /* read EC values from used list */ @@ -649,8 +653,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, if (fm_pos >= fm_size) goto fail_bad; - add_aeb(ai, &used, be32_to_cpu(fmec->pnum), - be32_to_cpu(fmec->ec), 0); + ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum), + be32_to_cpu(fmec->ec), 0); + if (ret) + goto fail; } /* read EC values from scrub list */ @@ -660,8 +666,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, if (fm_pos >= fm_size) goto fail_bad; - add_aeb(ai, &used, be32_to_cpu(fmec->pnum), - be32_to_cpu(fmec->ec), 1); + ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum), + be32_to_cpu(fmec->ec), 1); + if (ret) + goto fail; } /* read EC values from erase list */ @@ -671,8 +679,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, if (fm_pos >= fm_size) goto fail_bad; - add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum), - be32_to_cpu(fmec->ec), 1); + ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum), + be32_to_cpu(fmec->ec), 1); + if (ret) + goto fail; } ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count); diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 139ee132bfbc..1bc7b3a05604 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -56,16 +56,11 @@ static ssize_t vol_attribute_show(struct device *dev, { int ret; struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); - struct ubi_device *ubi; - - ubi = ubi_get_device(vol->ubi->ubi_num); - if (!ubi) - return -ENODEV; + struct ubi_device *ubi = vol->ubi; spin_lock(&ubi->volumes_lock); if (!ubi->volumes[vol->vol_id]) { spin_unlock(&ubi->volumes_lock); - ubi_put_device(ubi); return -ENODEV; } /* Take a reference to prevent volume removal */ @@ -103,7 +98,6 @@ static ssize_t vol_attribute_show(struct device 
*dev, vol->ref_count -= 1; ubi_assert(vol->ref_count >= 0); spin_unlock(&ubi->volumes_lock); - ubi_put_device(ubi); return ret; } diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 3c8f665c1558..28dccbc0e8d8 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -138,6 +138,9 @@ static int com20020pci_probe(struct pci_dev *pdev, return -ENOMEM; ci = (struct com20020_pci_card_info *)id->driver_data; + if (!ci) + return -EINVAL; + priv->ci = ci; mm = &ci->misc_map; diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 54e321a695ce..98c915943f32 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -141,14 +141,14 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) skb_reset_network_header(skb); skb_reset_mac_header(skb); - if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET) + if (!ipv6_mod_enabled() || family == AF_INET) err = IP_ECN_decapsulate(oiph, skb); else err = IP6_ECN_decapsulate(oiph, skb); if (unlikely(err)) { if (log_ecn_error) { - if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET) + if (!ipv6_mod_enabled() || family == AF_INET) net_info_ratelimited("non-ECT from %pI4 " "with TOS=%#x\n", &((struct iphdr *)oiph)->saddr, @@ -214,11 +214,12 @@ static struct socket *bareudp_create_sock(struct net *net, __be16 port) int err; memset(&udp_conf, 0, sizeof(udp_conf)); -#if IS_ENABLED(CONFIG_IPV6) - udp_conf.family = AF_INET6; -#else - udp_conf.family = AF_INET; -#endif + + if (ipv6_mod_enabled()) + udp_conf.family = AF_INET6; + else + udp_conf.family = AF_INET; + udp_conf.local_udp_port = port; /* Open UDP socket */ err = udp_sock_create(net, &udp_conf, &sock); @@ -441,7 +442,7 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev) } rcu_read_lock(); - if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6) + if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6) err = bareudp6_xmit_skb(skb, dev, bareudp, info); else err = bareudp_xmit_skb(skb, dev, bareudp, info); @@ -471,7 +472,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, use_cache = ip_tunnel_dst_cache_usable(skb, info); - if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) { + if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; __be32 saddr; diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 9fd1d6cba3cd..a86b1f71762e 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -225,7 +225,7 @@ static inline int __check_agg_selection_timer(struct port *port) if (bond == NULL) return 0; - return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0; + return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0; } /** @@ -1995,7 +1995,7 @@ static void ad_marker_response_received(struct bond_marker *marker, */ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) { - BOND_AD_INFO(bond).agg_select_timer = timeout; + atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout); } /** @@ -2279,6 +2279,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond) } /** + * bond_agg_timer_advance - advance agg_select_timer + * @bond: bonding structure + * + * Return true when agg_select_timer reaches 0. 
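
The bond_3ad.c hunks turn agg_select_timer into an atomic_t and add the bond_agg_timer_advance() helper (its body continues just below) so the periodic state machine can decrement the countdown without racing against bond_3ad_initiate_agg_selection() resetting it. A minimal sketch of the same decrement-if-nonzero idiom expressed with plain C11 atomics (countdown_advance is an invented name used only for illustration):

#include <stdatomic.h>
#include <stdbool.h>

/* Decrement *timer unless it is already zero.
 * Returns true exactly once, on the transition to zero. */
static bool countdown_advance(atomic_int *timer)
{
	int val = atomic_load(timer);

	while (val) {
		if (atomic_compare_exchange_weak(timer, &val, val - 1))
			return val - 1 == 0;
		/* CAS failed: val now holds the current value, retry */
	}
	return false;
}
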
+ */ +static bool bond_agg_timer_advance(struct bonding *bond) +{ + int val, nval; + + while (1) { + val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer); + if (!val) + return false; + nval = val - 1; + if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer, + val, nval) == val) + break; + } + return nval == 0; +} + +/** * bond_3ad_state_machine_handler - handle state machines timeout * @work: work context to fetch bonding struct to work on from * @@ -2313,9 +2335,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) if (!bond_has_slaves(bond)) goto re_arm; - /* check if agg_select_timer timer after initialize is timed out */ - if (BOND_AD_INFO(bond).agg_select_timer && - !(--BOND_AD_INFO(bond).agg_select_timer)) { + if (bond_agg_timer_advance(bond)) { slave = bond_first_slave_rcu(bond); port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 83cdaabd7b69..46c3301a5e07 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2377,10 +2377,9 @@ static int __bond_release_one(struct net_device *bond_dev, bond_select_active_slave(bond); } - if (!bond_has_slaves(bond)) { - bond_set_carrier(bond); + bond_set_carrier(bond); + if (!bond_has_slaves(bond)) eth_hw_addr_random(bond_dev); - } unblock_netpoll_tx(); synchronize_rcu(); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 25713d623215..bff33a619acd 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1640,8 +1640,6 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) if (err) goto out_fail; - can_put_echo_skb(skb, dev, 0, 0); - if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { cccr = m_can_read(cdev, M_CAN_CCCR); cccr &= ~CCCR_CMR_MASK; @@ -1658,6 +1656,9 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) m_can_write(cdev, M_CAN_CCCR, cccr); } m_can_write(cdev, M_CAN_TXBTIE, 0x1); + + can_put_echo_skb(skb, dev, 0, 0); + m_can_write(cdev, M_CAN_TXBAR, 0x1); /* End of xmit function for version 3.0.x */ } else { diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 388521e70837..2f44c567ebd7 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1720,15 +1720,15 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll, RCANFD_NAPI_WEIGHT); + spin_lock_init(&priv->tx_lock); + devm_can_led_init(ndev); + gpriv->ch[priv->channel] = priv; err = register_candev(ndev); if (err) { dev_err(&pdev->dev, "register_candev() failed, error %d\n", err); goto fail_candev; } - spin_lock_init(&priv->tx_lock); - devm_can_led_init(ndev); - gpriv->ch[priv->channel] = priv; dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel); return 0; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 9a4791d88683..3a0f022b1562 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -2706,7 +2706,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, out_kfree_buf_rx: kfree(buf_rx); - return 0; + return err; } #define MCP251XFD_QUIRK_ACTIVE(quirk) \ diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 2b5302e72435..c9552846fe25 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -823,7 +823,6 @@ static netdev_tx_t 
ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne usb_unanchor_urb(urb); usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); - dev_kfree_skb(skb); atomic_dec(&dev->active_tx_urbs); diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c index 24627ab14626..cd4e7f356e48 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -1794,7 +1794,7 @@ static int es58x_open(struct net_device *netdev) struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; int ret; - if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) { + if (!es58x_dev->opened_channel_cnt) { ret = es58x_alloc_rx_urbs(es58x_dev); if (ret) return ret; @@ -1812,12 +1812,13 @@ static int es58x_open(struct net_device *netdev) if (ret) goto free_urbs; + es58x_dev->opened_channel_cnt++; netif_start_queue(netdev); return ret; free_urbs: - if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt)) + if (!es58x_dev->opened_channel_cnt) es58x_free_urbs(es58x_dev); netdev_err(netdev, "%s: Could not open the network device: %pe\n", __func__, ERR_PTR(ret)); @@ -1852,7 +1853,8 @@ static int es58x_stop(struct net_device *netdev) es58x_flush_pending_tx_msg(netdev); - if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt)) + es58x_dev->opened_channel_cnt--; + if (!es58x_dev->opened_channel_cnt) es58x_free_urbs(es58x_dev); return 0; @@ -2221,7 +2223,6 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf, init_usb_anchor(&es58x_dev->tx_urbs_idle); init_usb_anchor(&es58x_dev->tx_urbs_busy); atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0); - atomic_set(&es58x_dev->opened_channel_cnt, 0); usb_set_intfdata(intf, es58x_dev); es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev, diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h index 826a15871573..e5033cb5e695 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.h +++ b/drivers/net/can/usb/etas_es58x/es58x_core.h @@ -373,8 +373,6 @@ struct es58x_operators { * queue wake/stop logic should prevent this URB from getting * empty. Please refer to es58x_get_tx_urb() for more details. * @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle. - * @opened_channel_cnt: number of channels opened (c.f. es58x_open() - * and es58x_stop()). * @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns() * was called. * @realtime_diff_ns: difference in nanoseconds between the clocks of @@ -384,6 +382,10 @@ struct es58x_operators { * in RX branches. * @rx_max_packet_size: Maximum length of bulk-in URB. * @num_can_ch: Number of CAN channel (i.e. number of elements of @netdev). + * @opened_channel_cnt: number of channels opened. Free of race + * conditions because its two users (net_device_ops:ndo_open() + * and net_device_ops:ndo_close()) guarantee that the network + * stack big kernel lock (a.k.a. rtnl_mutex) is being hold. * @rx_cmd_buf_len: Length of @rx_cmd_buf. * @rx_cmd_buf: The device might split the URB commands in an * arbitrary amount of pieces. 
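
The etas_es58x hunks around this point (the struct member change follows just below) drop the atomic opened_channel_cnt in favour of a plain counter, precisely because its only users, the ndo_open() and ndo_close() callbacks, are already serialized by the networking core holding the RTNL lock; the increment is also moved after the last point of failure so an aborted open never leaves the count skewed. A toy userspace sketch of the same idea, where an explicit mutex plays the role of rtnl_mutex and the shared-resource setup/teardown tracks a plain counter (fake_open/fake_close are invented names):

#include <pthread.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays rtnl_mutex */
static unsigned int opened;					/* plain counter is enough */

static int fake_open(void)
{
	pthread_mutex_lock(&cfg_lock);
	if (!opened) {
		/* first opener: set up shared resources (RX URBs in es58x) */
	}
	opened++;	/* bump only once setup has succeeded */
	pthread_mutex_unlock(&cfg_lock);
	return 0;
}

static void fake_close(void)
{
	pthread_mutex_lock(&cfg_lock);
	if (!--opened) {
		/* last closer: free the shared resources */
	}
	pthread_mutex_unlock(&cfg_lock);
}
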
This buffer is used to concatenate @@ -406,7 +408,6 @@ struct es58x_device { struct usb_anchor tx_urbs_busy; struct usb_anchor tx_urbs_idle; atomic_t tx_urbs_idle_cnt; - atomic_t opened_channel_cnt; u64 ktime_req_ns; s64 realtime_diff_ns; @@ -415,6 +416,7 @@ struct es58x_device { u16 rx_max_packet_size; u8 num_can_ch; + u8 opened_channel_cnt; u16 rx_cmd_buf_len; union es58x_urb_cmd rx_cmd_buf; diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c index af042aa55f59..26bf4775e884 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_fd.c +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c @@ -171,12 +171,11 @@ static int es58x_fd_rx_event_msg(struct net_device *netdev, const struct es58x_fd_rx_event_msg *rx_event_msg; int ret; + rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg; ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len); if (ret) return ret; - rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg; - return es58x_rx_err_msg(netdev, rx_event_msg->error_code, rx_event_msg->event_code, get_unaligned_le64(&rx_event_msg->timestamp)); diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 8dcdd5162ecf..d68d698f2606 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -191,8 +191,8 @@ struct gs_can { struct gs_usb { struct gs_can *canch[GS_MAX_INTF]; struct usb_anchor rx_submitted; - atomic_t active_channels; struct usb_device *udev; + u8 active_channels; }; /* 'allocate' a tx context. @@ -590,7 +590,7 @@ static int gs_can_open(struct net_device *netdev) if (rc) return rc; - if (atomic_add_return(1, &parent->active_channels) == 1) { + if (!parent->active_channels) { for (i = 0; i < GS_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; @@ -691,6 +691,7 @@ static int gs_can_open(struct net_device *netdev) dev->can.state = CAN_STATE_ERROR_ACTIVE; + parent->active_channels++; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); @@ -706,7 +707,8 @@ static int gs_can_close(struct net_device *netdev) netif_stop_queue(netdev); /* Stop polling */ - if (atomic_dec_and_test(&parent->active_channels)) + parent->active_channels--; + if (!parent->active_channels) usb_kill_anchored_urbs(&parent->rx_submitted); /* Stop sending URBs */ @@ -985,8 +987,6 @@ static int gs_usb_probe(struct usb_interface *intf, init_usb_anchor(&dev->rx_submitted); - atomic_set(&dev->active_channels, 0); - usb_set_intfdata(intf, dev); dev->udev = interface_to_usbdev(intf); diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index a1a154c08b7f..023bd34d48e3 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -33,10 +33,6 @@ #define MCBA_USB_RX_BUFF_SIZE 64 #define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg)) -/* MCBA endpoint numbers */ -#define MCBA_USB_EP_IN 1 -#define MCBA_USB_EP_OUT 1 - /* Microchip command id */ #define MBCA_CMD_RECEIVE_MESSAGE 0xE3 #define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5 @@ -84,6 +80,8 @@ struct mcba_priv { atomic_t free_ctx_cnt; void *rxbuf[MCBA_MAX_RX_URBS]; dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS]; + int rx_pipe; + int tx_pipe; }; /* CAN frame */ @@ -272,10 +270,8 @@ static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv, memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE); - usb_fill_bulk_urb(urb, priv->udev, - usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf, - MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback, - ctx); + usb_fill_bulk_urb(urb, priv->udev, priv->tx_pipe, buf, MCBA_USB_TX_BUFF_SIZE, + mcba_usb_write_bulk_callback, 
ctx); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &priv->tx_submitted); @@ -368,7 +364,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, xmit_failed: can_free_echo_skb(priv->netdev, ctx->ndx, NULL); mcba_usb_free_ctx(ctx); - dev_kfree_skb(skb); stats->tx_dropped++; return NETDEV_TX_OK; @@ -611,7 +606,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb) resubmit_urb: usb_fill_bulk_urb(urb, priv->udev, - usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_OUT), + priv->rx_pipe, urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE, mcba_usb_read_bulk_callback, priv); @@ -656,7 +651,7 @@ static int mcba_usb_start(struct mcba_priv *priv) urb->transfer_dma = buf_dma; usb_fill_bulk_urb(urb, priv->udev, - usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN), + priv->rx_pipe, buf, MCBA_USB_RX_BUFF_SIZE, mcba_usb_read_bulk_callback, priv); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; @@ -810,6 +805,13 @@ static int mcba_usb_probe(struct usb_interface *intf, struct mcba_priv *priv; int err; struct usb_device *usbdev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *in, *out; + + err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL); + if (err) { + dev_err(&intf->dev, "Can't find endpoints\n"); + return err; + } netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS); if (!netdev) { @@ -855,6 +857,9 @@ static int mcba_usb_probe(struct usb_interface *intf, goto cleanup_free_candev; } + priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress); + priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress); + devm_can_led_init(netdev); /* Start USB dev only if we have successfully registered CAN device */ diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index d1b83bd1b3cb..d4c8f934a1ce 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -670,9 +670,20 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, atomic_inc(&priv->active_tx_urbs); err = usb_submit_urb(urb, GFP_ATOMIC); - if (unlikely(err)) - goto failed; - else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) + if (unlikely(err)) { + can_free_echo_skb(netdev, context->echo_index, NULL); + + usb_unanchor_urb(urb); + usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); + + atomic_dec(&priv->active_tx_urbs); + + if (err == -ENODEV) + netif_device_detach(netdev); + else + netdev_warn(netdev, "failed tx_urb %d\n", err); + stats->tx_dropped++; + } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) /* Slow down tx path */ netif_stop_queue(netdev); @@ -691,19 +702,6 @@ nofreecontext: return NETDEV_TX_BUSY; -failed: - can_free_echo_skb(netdev, context->echo_index, NULL); - - usb_unanchor_urb(urb); - usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); - - atomic_dec(&priv->active_tx_urbs); - - if (err == -ENODEV) - netif_device_detach(netdev); - else - netdev_warn(netdev, "failed tx_urb %d\n", err); - nomembuf: usb_free_urb(urb); diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 8861a7d875e7..be5566168d0f 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -148,7 +148,7 @@ static void vxcan_setup(struct net_device *dev) dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 0; - dev->flags = (IFF_NOARP|IFF_ECHO); + dev->flags = IFF_NOARP; dev->netdev_ops = &vxcan_netdev_ops; dev->needs_free_netdev = true; diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 9891b072b462..9428aac4a686 100644 --- 
a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -81,6 +81,7 @@ config NET_DSA_REALTEK_SMI config NET_DSA_SMSC_LAN9303 tristate + depends on VLAN_8021Q || VLAN_8021Q=n select NET_DSA_TAG_LAN9303 select REGMAP help diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index a7e2fcf2df2c..edbe5e7f1cb6 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -567,14 +567,14 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv, static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv, int port, u32 location) { - struct cfp_rule *rule = NULL; + struct cfp_rule *rule; list_for_each_entry(rule, &priv->cfp.rules_list, next) { if (rule->port == port && rule->fs.location == location) - break; + return rule; } - return rule; + return NULL; } static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port, diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 89f920289ae2..0b6f29ee87b5 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -10,6 +10,7 @@ #include <linux/mii.h> #include <linux/phy.h> #include <linux/if_bridge.h> +#include <linux/if_vlan.h> #include <linux/etherdevice.h> #include "lan9303.h" @@ -1083,21 +1084,27 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port, static int lan9303_port_enable(struct dsa_switch *ds, int port, struct phy_device *phy) { + struct dsa_port *dp = dsa_to_port(ds, port); struct lan9303 *chip = ds->priv; - if (!dsa_is_user_port(ds, port)) + if (!dsa_port_is_user(dp)) return 0; + vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port); + return lan9303_enable_processing_port(chip, port); } static void lan9303_port_disable(struct dsa_switch *ds, int port) { + struct dsa_port *dp = dsa_to_port(ds, port); struct lan9303 *chip = ds->priv; - if (!dsa_is_user_port(ds, port)) + if (!dsa_port_is_user(dp)) return; + vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port); + lan9303_disable_processing_port(chip, port); lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN); } @@ -1309,7 +1316,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip, struct device_node *np) { chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset", - GPIOD_OUT_LOW); + GPIOD_OUT_HIGH); if (IS_ERR(chip->reset_gpio)) return PTR_ERR(chip->reset_gpio); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 503adf03d2fc..9e006a25b636 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -2201,8 +2201,8 @@ static int gswip_remove(struct platform_device *pdev) if (priv->ds->slave_mii_bus) { mdiobus_unregister(priv->ds->slave_mii_bus); - mdiobus_free(priv->ds->slave_mii_bus); of_node_put(priv->ds->slave_mii_bus->dev.of_node); + mdiobus_free(priv->ds->slave_mii_bus); } for (i = 0; i < priv->num_gphy_fw; i++) diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c index 866767b70d65..b0a7dee27ffc 100644 --- a/drivers/net/dsa/microchip/ksz8795_spi.c +++ b/drivers/net/dsa/microchip/ksz8795_spi.c @@ -124,12 +124,23 @@ static const struct of_device_id ksz8795_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, ksz8795_dt_ids); +static const struct spi_device_id ksz8795_spi_ids[] = { + { "ksz8765" }, + { "ksz8794" }, + { "ksz8795" }, + { "ksz8863" }, + { "ksz8873" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, ksz8795_spi_ids); + static struct spi_driver ksz8795_spi_driver = { .driver = { .name = "ksz8795-switch", .owner = THIS_MODULE, 
.of_match_table = of_match_ptr(ksz8795_dt_ids), }, + .id_table = ksz8795_spi_ids, .probe = ksz8795_spi_probe, .remove = ksz8795_spi_remove, .shutdown = ksz8795_spi_shutdown, diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index e3cb0e6c9f6f..43addeabfc25 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -98,12 +98,24 @@ static const struct of_device_id ksz9477_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); +static const struct spi_device_id ksz9477_spi_ids[] = { + { "ksz9477" }, + { "ksz9897" }, + { "ksz9893" }, + { "ksz9563" }, + { "ksz8563" }, + { "ksz9567" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, ksz9477_spi_ids); + static struct spi_driver ksz9477_spi_driver = { .driver = { .name = "ksz9477-switch", .owner = THIS_MODULE, .of_match_table = of_match_ptr(ksz9477_dt_ids), }, + .id_table = ksz9477_spi_ids, .probe = ksz9477_spi_probe, .remove = ksz9477_spi_remove, .shutdown = ksz9477_spi_shutdown, diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index fb59efc7f926..14bf1828cbba 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2928,7 +2928,7 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port, phylink_set_port_modes(mask); - if (state->interface != PHY_INTERFACE_MODE_TRGMII || + if (state->interface != PHY_INTERFACE_MODE_TRGMII && !phy_interface_mode_is_8023z(state->interface)) { phylink_set(mask, 10baseT_Half); phylink_set(mask, 10baseT_Full); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 056e3b65cd27..0830d7bb7a00 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3649,6 +3649,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_sync_link = mv88e6185_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index e53ad283e259..a9c7ada890d8 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1455,7 +1455,7 @@ static int felix_pci_probe(struct pci_dev *pdev, err = dsa_register_switch(ds); if (err) { - dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); + dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n"); goto err_register_ds; } diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index de1d34a1f1e4..05e4e75c0107 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -10,6 +10,7 @@ #include <linux/pcs-lynx.h> #include <linux/dsa/ocelot.h> #include <linux/iopoll.h> +#include <linux/of_mdio.h> #include "felix.h" #define MSCC_MIIM_CMD_OPR_WRITE BIT(1) @@ -1110,7 +1111,7 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev)); /* Needed in order to initialize the bus mutex lock */ - rc = mdiobus_register(bus); + rc = devm_of_mdiobus_register(dev, bus, NULL); if (rc < 0) { dev_err(dev, "failed to register MDIO bus\n"); return rc; @@ -1162,7 +1163,8 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot) mdio_device_free(pcs->mdio); lynx_pcs_destroy(pcs); } - 
mdiobus_unregister(felix->imdio); + + /* mdiobus_unregister and mdiobus_free handled by devres */ } static const struct felix_info seville_info_vsc9953 = { diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c index 4ad8031ab669..065fdbe66c42 100644 --- a/drivers/net/ethernet/8390/mcf8390.c +++ b/drivers/net/ethernet/8390/mcf8390.c @@ -406,12 +406,12 @@ static int mcf8390_init(struct net_device *dev) static int mcf8390_probe(struct platform_device *pdev) { struct net_device *dev; - struct resource *mem, *irq; + struct resource *mem; resource_size_t msize; - int ret; + int ret, irq; - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (irq == NULL) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { dev_err(&pdev->dev, "no IRQ specified?\n"); return -ENXIO; } @@ -434,7 +434,7 @@ static int mcf8390_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); platform_set_drvdata(pdev, dev); - dev->irq = irq->start; + dev->irq = irq; dev->base_addr = mem->start; ret = mcf8390_init(dev); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 5a92070a7178..f86e3b172e6d 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -34,15 +34,6 @@ source "drivers/net/ethernet/apple/Kconfig" source "drivers/net/ethernet/aquantia/Kconfig" source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" -source "drivers/net/ethernet/broadcom/Kconfig" -source "drivers/net/ethernet/brocade/Kconfig" -source "drivers/net/ethernet/cadence/Kconfig" -source "drivers/net/ethernet/calxeda/Kconfig" -source "drivers/net/ethernet/cavium/Kconfig" -source "drivers/net/ethernet/chelsio/Kconfig" -source "drivers/net/ethernet/cirrus/Kconfig" -source "drivers/net/ethernet/cisco/Kconfig" -source "drivers/net/ethernet/cortina/Kconfig" config CX_ECAT tristate "Beckhoff CX5020 EtherCAT master support" @@ -56,6 +47,14 @@ config CX_ECAT To compile this driver as a module, choose M here. The module will be called ec_bhf. +source "drivers/net/ethernet/broadcom/Kconfig" +source "drivers/net/ethernet/cadence/Kconfig" +source "drivers/net/ethernet/calxeda/Kconfig" +source "drivers/net/ethernet/cavium/Kconfig" +source "drivers/net/ethernet/chelsio/Kconfig" +source "drivers/net/ethernet/cirrus/Kconfig" +source "drivers/net/ethernet/cisco/Kconfig" +source "drivers/net/ethernet/cortina/Kconfig" source "drivers/net/ethernet/davicom/Kconfig" config DNET @@ -82,7 +81,6 @@ source "drivers/net/ethernet/huawei/Kconfig" source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" -source "drivers/net/ethernet/microsoft/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" config JME @@ -125,8 +123,9 @@ source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" -source "drivers/net/ethernet/moxa/Kconfig" source "drivers/net/ethernet/mscc/Kconfig" +source "drivers/net/ethernet/microsoft/Kconfig" +source "drivers/net/ethernet/moxa/Kconfig" source "drivers/net/ethernet/myricom/Kconfig" config FEALNX @@ -138,10 +137,10 @@ config FEALNX Say Y here to support the Myson MTD-800 family of PCI-based Ethernet cards. 
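
The seville_vsc9953.c change above switches the internal MDIO bus to devm_of_mdiobus_register(), so unregistering and freeing the bus is handled by devres when the device goes away and the explicit mdiobus_unregister() in the free path can be dropped. A hedged toy model of that device-managed-resource idea (the toy_* names are invented; the real devres core in drivers/base is considerably more involved):

#include <stdlib.h>

typedef void (*release_fn)(void *data);

struct toy_devres {
	release_fn release;
	void *data;
	struct toy_devres *next;
};

struct toy_device {
	struct toy_devres *resources;	/* LIFO, like kernel devres */
};

/* Register a cleanup action that runs automatically at teardown. */
static int toy_devres_add(struct toy_device *dev, release_fn fn, void *data)
{
	struct toy_devres *dr = malloc(sizeof(*dr));

	if (!dr)
		return -1;
	dr->release = fn;
	dr->data = data;
	dr->next = dev->resources;
	dev->resources = dr;
	return 0;
}

/* Teardown releases everything in reverse registration order. */
static void toy_device_release(struct toy_device *dev)
{
	while (dev->resources) {
		struct toy_devres *dr = dev->resources;

		dev->resources = dr->next;
		dr->release(dr->data);
		free(dr);
	}
}
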
<http://www.myson.com.tw/> +source "drivers/net/ethernet/ni/Kconfig" source "drivers/net/ethernet/natsemi/Kconfig" source "drivers/net/ethernet/neterion/Kconfig" source "drivers/net/ethernet/netronome/Kconfig" -source "drivers/net/ethernet/ni/Kconfig" source "drivers/net/ethernet/8390/Kconfig" source "drivers/net/ethernet/nuvoton/Kconfig" source "drivers/net/ethernet/nvidia/Kconfig" @@ -162,6 +161,7 @@ source "drivers/net/ethernet/packetengines/Kconfig" source "drivers/net/ethernet/pasemi/Kconfig" source "drivers/net/ethernet/pensando/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" +source "drivers/net/ethernet/brocade/Kconfig" source "drivers/net/ethernet/qualcomm/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" source "drivers/net/ethernet/realtek/Kconfig" @@ -169,10 +169,10 @@ source "drivers/net/ethernet/renesas/Kconfig" source "drivers/net/ethernet/rocker/Kconfig" source "drivers/net/ethernet/samsung/Kconfig" source "drivers/net/ethernet/seeq/Kconfig" -source "drivers/net/ethernet/sfc/Kconfig" source "drivers/net/ethernet/sgi/Kconfig" source "drivers/net/ethernet/silan/Kconfig" source "drivers/net/ethernet/sis/Kconfig" +source "drivers/net/ethernet/sfc/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" source "drivers/net/ethernet/socionext/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 4786f0504691..899c8a2a34b6 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -168,7 +168,7 @@ config SUNLANCE config AMD_XGBE tristate "AMD 10GbE Ethernet driver" - depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM + depends on (OF_ADDRESS || ACPI || PCI) && HAS_IOMEM depends on X86 || ARM64 || COMPILE_TEST depends on PTP_1588_CLOCK_OPTIONAL select BITREVERSE diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 5f1fc6582d74..78c7cbc372b0 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -696,6 +696,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, buf_pool->rx_skb[skb_index] = NULL; datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1)); + + /* strip off CRC as HW isn't doing this */ + nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0)); + if (!nv) + datalen -= 4; + skb_put(skb, datalen); prefetch(skb->data - NET_IP_ALIGN); skb->protocol = eth_type_trans(skb, ndev); @@ -717,12 +723,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, } } - nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0)); - if (!nv) { - /* strip off CRC as HW isn't doing this */ - datalen -= 4; + if (!nv) goto skip_jumbo; - } slots = page_pool->slots - 1; head = page_pool->head; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 9de0065f89b9..fbb1e05d5878 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -480,8 +480,8 @@ int aq_nic_start(struct aq_nic_s *self) if (err < 0) goto err_exit; - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + for (i = 0U; self->aq_vecs > i; ++i) { + aq_vec = self->aq_vec[i]; err = aq_vec_start(aq_vec); if (err < 0) goto err_exit; @@ -511,8 +511,8 @@ int aq_nic_start(struct aq_nic_s *self) mod_timer(&self->polling_timer, jiffies + AQ_CFG_POLLING_TIMER_INTERVAL); } else { - for (i = 0U, aq_vec = 
self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + for (i = 0U; self->aq_vecs > i; ++i) { + aq_vec = self->aq_vec[i]; err = aq_pci_func_alloc_irq(self, i, self->ndev->name, aq_vec_isr, aq_vec, aq_vec_get_affinity_mask(aq_vec)); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 797a95142d1f..3a529ee8c834 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -444,22 +444,22 @@ err_exit: static int aq_pm_freeze(struct device *dev) { - return aq_suspend_common(dev, false); + return aq_suspend_common(dev, true); } static int aq_pm_suspend_poweroff(struct device *dev) { - return aq_suspend_common(dev, true); + return aq_suspend_common(dev, false); } static int aq_pm_thaw(struct device *dev) { - return atl_resume_common(dev, false); + return atl_resume_common(dev, true); } static int aq_pm_resume_restore(struct device *dev) { - return atl_resume_common(dev, true); + return atl_resume_common(dev, false); } static const struct dev_pm_ops aq_pm_ops = { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index f4774cf051c9..6ab1f3212d24 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) if (!self) { err = -EINVAL; } else { - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp); ring[AQ_VEC_RX_ID].stats.rx.polls++; u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp); @@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops, self->aq_hw_ops = aq_hw_ops; self->aq_hw = aq_hw; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX); if (err < 0) goto err_exit; @@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self) unsigned int i = 0U; int err = 0; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw, &ring[AQ_VEC_TX_ID]); if (err < 0) @@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self) struct aq_ring_s *ring = NULL; unsigned int i = 0U; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw, &ring[AQ_VEC_TX_ID]); @@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self) if (!self) goto err_exit; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]); } @@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self) if (!self) goto err_exit; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; aq_ring_free(&ring[AQ_VEC_TX_ID]); if (i < self->rx_rings) aq_ring_free(&ring[AQ_VEC_RX_ID]); diff --git 
a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 92a79c4ffa2c..0a67612af228 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -26,7 +26,7 @@ config ARC_EMAC_CORE config ARC_EMAC tristate "ARC EMAC support" select ARC_EMAC_CORE - depends on OF_IRQ && OF_NET + depends on OF_IRQ depends on ARC || COMPILE_TEST help On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x @@ -36,7 +36,7 @@ config ARC_EMAC config EMAC_ROCKCHIP tristate "Rockchip EMAC support" select ARC_EMAC_CORE - depends on OF_IRQ && OF_NET && REGULATOR + depends on OF_IRQ && REGULATOR depends on ARCH_ROCKCHIP || COMPILE_TEST help Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers. diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 4ea157efca86..98a8698a3201 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1181,8 +1181,11 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) alx->hw.mtu = mtu; alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); netdev_update_features(netdev); - if (netif_running(netdev)) + if (netif_running(netdev)) { + mutex_lock(&alx->mtx); alx_reinit(alx); + mutex_unlock(&alx->mtx); + } return 0; } diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 3b51b172b317..5cbd815c737e 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -900,7 +900,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, atl1c_clean_buffer(pdev, buffer_info); } - netdev_reset_queue(adapter->netdev); + netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue)); /* Zero out Tx-buffers */ memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index df8ff839cc62..94eb3a42158e 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct bgmac *bgmac; + struct resource *regs; int ret; bgmac = bgmac_alloc(&pdev->dev); @@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev) if (IS_ERR(bgmac->plat.base)) return PTR_ERR(bgmac->plat.base); - bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base"); - if (IS_ERR(bgmac->plat.idm_base)) - return PTR_ERR(bgmac->plat.idm_base); - else + /* The idm_base resource is optional for some platforms */ + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base"); + if (regs) { + bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(bgmac->plat.idm_base)) + return PTR_ERR(bgmac->plat.idm_base); bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK; + } - bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base"); - if (IS_ERR(bgmac->plat.nicpm_base)) - return PTR_ERR(bgmac->plat.nicpm_base); + /* The nicpm_base resource is optional for some platforms */ + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base"); + if (regs) { + bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev, + regs); + if (IS_ERR(bgmac->plat.nicpm_base)) + return PTR_ERR(bgmac->plat.nicpm_base); + } bgmac->read = platform_bgmac_read; bgmac->write = platform_bgmac_write; diff --git 
a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 8c83973adca5..9d70d908c064 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8212,7 +8212,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask); if (rc) { dev_err(&pdev->dev, - "pci_set_consistent_dma_mask failed, aborting\n"); + "dma_set_coherent_mask failed, aborting\n"); goto err_out_unmap; } } else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 72bdbebf25ce..9e79bcfb365f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -2533,6 +2533,4 @@ void bnx2x_register_phc(struct bnx2x *bp); * Meant for implicit re-load flows. */ int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp); -int bnx2x_init_firmware(struct bnx2x *bp); -void bnx2x_release_firmware(struct bnx2x *bp); #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 41ebbb2c7d3a..198e041d8410 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2363,24 +2363,30 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) /* is another pf loaded on this engine? */ if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { - /* build my FW version dword */ - u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) + - (bp->fw_rev << 16) + (bp->fw_eng << 24); + u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng; + u32 loaded_fw; /* read loaded FW from chip */ - u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); + loaded_fw = REG_RD(bp, XSEM_REG_PRAM); - DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n", - loaded_fw, my_fw); + loaded_fw_major = loaded_fw & 0xff; + loaded_fw_minor = (loaded_fw >> 8) & 0xff; + loaded_fw_rev = (loaded_fw >> 16) & 0xff; + loaded_fw_eng = (loaded_fw >> 24) & 0xff; + + DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n", + loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng); /* abort nic load if version mismatch */ - if (my_fw != loaded_fw) { + if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION || + loaded_fw_minor != BCM_5710_FW_MINOR_VERSION || + loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION || + loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) { if (print_err) - BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n", - loaded_fw, my_fw); + BNX2X_ERR("loaded FW incompatible. 
Aborting\n"); else - BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n", - loaded_fw, my_fw); + BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n"); + return -EBUSY; } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 10a5b43976d2..bdd4e420f869 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -100,6 +100,9 @@ MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); +MODULE_FIRMWARE(FW_FILE_NAME_E1_V15); +MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15); +MODULE_FIRMWARE(FW_FILE_NAME_E2_V15); int bnx2x_num_queues; module_param_named(num_queues, bnx2x_num_queues, int, 0444); @@ -12310,15 +12313,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) bnx2x_read_fwinfo(bp); - if (IS_PF(bp)) { - rc = bnx2x_init_firmware(bp); - - if (rc) { - bnx2x_free_mem_bp(bp); - return rc; - } - } - func = BP_FUNC(bp); /* need to reset chip if undi was active */ @@ -12331,7 +12325,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) rc = bnx2x_prev_unload(bp); if (rc) { - bnx2x_release_firmware(bp); bnx2x_free_mem_bp(bp); return rc; } @@ -13411,7 +13404,7 @@ do { \ (u8 *)bp->arr, len); \ } while (0) -int bnx2x_init_firmware(struct bnx2x *bp) +static int bnx2x_init_firmware(struct bnx2x *bp) { const char *fw_file_name, *fw_file_name_v15; struct bnx2x_fw_file_hdr *fw_hdr; @@ -13511,7 +13504,7 @@ request_firmware_exit: return rc; } -void bnx2x_release_firmware(struct bnx2x *bp) +static void bnx2x_release_firmware(struct bnx2x *bp) { kfree(bp->init_ops_offsets); kfree(bp->init_ops); @@ -14028,7 +14021,6 @@ static int bnx2x_init_one(struct pci_dev *pdev, return 0; init_one_freemem: - bnx2x_release_firmware(bp); bnx2x_free_mem_bp(bp); init_one_exit: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a8855a200a3c..8b078c319872 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3234,6 +3234,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) } qidx = bp->tc_to_qidx[j]; ring->queue_id = bp->q_info[qidx].queue_id; + spin_lock_init(&txr->xdp_tx_lock); if (i < bp->tx_nr_rings_xdp) continue; if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) @@ -4757,8 +4758,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) return rc; req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); - req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); - req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { + req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); + req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + } req->mask = cpu_to_le32(vnic->rx_mask); return hwrm_req_send_silent(bp, req); } @@ -8615,6 +8618,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) vnic->uc_filter_count = 1; vnic->rx_mask = 0; + if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) + goto skip_rx_mask; + if (bp->dev->flags & IFF_BROADCAST) vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; @@ -8624,7 +8630,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) if (bp->dev->flags & IFF_ALLMULTI) { vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; vnic->mc_list_count = 0; - } else { + } else if (bp->dev->flags & IFF_MULTICAST) { u32 mask = 0; bnxt_mc_list_updated(bp, &mask); @@ 
-8635,6 +8641,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) if (rc) goto err_out; +skip_rx_mask: rc = bnxt_hwrm_set_coal(bp); if (rc) netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", @@ -10240,6 +10247,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (irq_re_init) udp_tunnel_nic_reset_ntf(bp->dev); + if (bp->tx_nr_rings_xdp < num_possible_cpus()) { + if (!static_key_enabled(&bnxt_xdp_locking_key)) + static_branch_enable(&bnxt_xdp_locking_key); + } else if (static_key_enabled(&bnxt_xdp_locking_key)) { + static_branch_disable(&bnxt_xdp_locking_key); + } set_bit(BNXT_STATE_OPEN, &bp->state); bnxt_enable_int(bp); /* Enable TX queues */ @@ -10295,13 +10308,15 @@ int bnxt_half_open_nic(struct bnxt *bp) goto half_open_err; } - rc = bnxt_alloc_mem(bp, false); + rc = bnxt_alloc_mem(bp, true); if (rc) { netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); goto half_open_err; } - rc = bnxt_init_nic(bp, false); + set_bit(BNXT_STATE_HALF_OPEN, &bp->state); + rc = bnxt_init_nic(bp, true); if (rc) { + clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); goto half_open_err; } @@ -10309,7 +10324,7 @@ int bnxt_half_open_nic(struct bnxt *bp) half_open_err: bnxt_free_skbs(bp); - bnxt_free_mem(bp, false); + bnxt_free_mem(bp, true); dev_close(bp->dev); return rc; } @@ -10319,9 +10334,10 @@ half_open_err: */ void bnxt_half_close_nic(struct bnxt *bp) { - bnxt_hwrm_resource_free(bp, false, false); + bnxt_hwrm_resource_free(bp, false, true); bnxt_free_skbs(bp); - bnxt_free_mem(bp, false); + bnxt_free_mem(bp, true); + clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); } static void bnxt_reenable_sriov(struct bnxt *bp) @@ -10737,7 +10753,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) if (dev->flags & IFF_ALLMULTI) { mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; vnic->mc_list_count = 0; - } else { + } else if (dev->flags & IFF_MULTICAST) { mc_update = bnxt_mc_list_updated(bp, &mask); } @@ -10805,9 +10821,10 @@ skip_uc: !bnxt_promisc_ok(bp)) vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); - if (rc && vnic->mc_list_count) { + if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", rc); + vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; vnic->mc_list_count = 0; rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 0a5137c1f6d4..e5874c829226 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -584,7 +584,8 @@ struct nqe_cn { #define BNXT_MAX_MTU 9500 #define BNXT_MAX_PAGE_MODE_MTU \ ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \ - XDP_PACKET_HEADROOM) + XDP_PACKET_HEADROOM - \ + SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))) #define BNXT_MIN_PKT_SIZE 52 @@ -791,6 +792,8 @@ struct bnxt_tx_ring_info { u32 dev_state; struct bnxt_ring_struct tx_ring_struct; + /* Synchronize simultaneous xdp_xmit on same ring */ + spinlock_t xdp_tx_lock; }; #define BNXT_LEGACY_COAL_CMPL_PARAMS \ @@ -1840,6 +1843,7 @@ struct bnxt { #define BNXT_STATE_DRV_REGISTERED 7 #define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8 #define BNXT_STATE_NAPI_DISABLED 9 +#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */ #define BNXT_NO_FW_ACCESS(bp) \ 
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 249792510521..0f276ce2d1eb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -25,6 +25,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_hwrm.h" +#include "bnxt_ulp.h" #include "bnxt_xdp.h" #include "bnxt_ptp.h" #include "bnxt_ethtool.h" @@ -1942,6 +1943,9 @@ static int bnxt_get_fecparam(struct net_device *dev, case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: fec->active_fec |= ETHTOOL_FEC_LLRS; break; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: + fec->active_fec |= ETHTOOL_FEC_OFF; + break; } return 0; } @@ -2070,9 +2074,7 @@ static int bnxt_set_pauseparam(struct net_device *dev, } link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; - if (bp->hwrm_spec_code >= 0x10201) - link_info->req_flow_ctrl = - PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; + link_info->req_flow_ctrl = 0; } else { /* when transition from auto pause to force pause, * force a link change @@ -3405,7 +3407,7 @@ static int bnxt_run_loopback(struct bnxt *bp) if (!skb) return -ENOMEM; data = skb_put(skb, pkt_size); - eth_broadcast_addr(data); + ether_addr_copy(&data[i], bp->dev->dev_addr); i += ETH_ALEN; ether_addr_copy(&data[i], bp->dev->dev_addr); i += ETH_ALEN; @@ -3499,9 +3501,12 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (!offline) { bnxt_run_fw_tests(bp, test_mask, &test_results); } else { - rc = bnxt_close_nic(bp, false, false); - if (rc) + bnxt_ulp_stop(bp); + rc = bnxt_close_nic(bp, true, false); + if (rc) { + bnxt_ulp_start(bp, rc); return; + } bnxt_run_fw_tests(bp, test_mask, &test_results); buf[BNXT_MACLPBK_TEST_IDX] = 1; @@ -3511,6 +3516,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (rc) { bnxt_hwrm_mac_loopback(bp, false); etest->flags |= ETH_TEST_FL_FAILED; + bnxt_ulp_start(bp, rc); return; } if (bnxt_run_loopback(bp)) @@ -3536,7 +3542,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, } bnxt_hwrm_phy_loopback(bp, false, false); bnxt_half_close_nic(bp); - rc = bnxt_open_nic(bp, false, true); + rc = bnxt_open_nic(bp, true, true); + bnxt_ulp_start(bp, rc); } if (rc || bnxt_test_irq(bp)) { buf[BNXT_IRQ_TEST_IDX] = 1; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c index 8171f4912fa0..3a0eeb373776 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -595,18 +595,24 @@ timeout_abort: /* Last byte of resp contains valid bit */ valid = ((u8 *)ctx->resp) + len - 1; - for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { + for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) { /* make sure we read from updated DMA memory */ dma_rmb(); if (*valid) break; - usleep_range(1, 5); + if (j < 10) { + udelay(1); + j++; + } else { + usleep_range(20, 30); + j += 20; + } } if (j >= HWRM_VALID_BIT_DELAY_USEC) { if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n", - hwrm_total_timeout(i), + hwrm_total_timeout(i) + j, le16_to_cpu(ctx->req->req_type), le16_to_cpu(ctx->req->seq_id), len, *valid); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h index 9a9fc4e8041b..380ef69afb51 100644 --- 
a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h @@ -94,7 +94,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n) } -#define HWRM_VALID_BIT_DELAY_USEC 150 +#define HWRM_VALID_BIT_DELAY_USEC 50000 static inline bool bnxt_cfa_hwrm_message(u16 req_type) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index f0aa480799ca..62a931de5b1a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -331,7 +331,7 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info, struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); struct bnxt *bp = ptp->bp; - u8 pin_id; + int pin_id; int rc; switch (rq->type) { @@ -339,6 +339,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info, /* Configure an External PPS IN */ pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, rq->extts.index); + if (!TSIO_PIN_VALID(pin_id)) + return -EOPNOTSUPP; if (!on) break; rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN); @@ -352,6 +354,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info, /* Configure a Periodic PPS OUT */ pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, rq->perout.index); + if (!TSIO_PIN_VALID(pin_id)) + return -EOPNOTSUPP; if (!on) break; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index fa5f05708e6d..c3cd51e672e7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -28,7 +28,7 @@ struct pps_pin { u8 state; }; -#define TSIO_PIN_VALID(pin) ((pin) < (BNXT_MAX_TSIO_PINS)) +#define TSIO_PIN_VALID(pin) ((pin) >= 0 && (pin) < (BNXT_MAX_TSIO_PINS)) #define EVENT_DATA2_PPS_EVENT_TYPE(data2) \ ((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index c8083df5e0ab..148b58f3468b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -20,6 +20,8 @@ #include "bnxt.h" #include "bnxt_xdp.h" +DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key); + struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, dma_addr_t mapping, u32 len) @@ -227,11 +229,16 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames, ring = smp_processor_id() % bp->tx_nr_rings_xdp; txr = &bp->tx_ring[ring]; + if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING) + return -EINVAL; + + if (static_branch_unlikely(&bnxt_xdp_locking_key)) + spin_lock(&txr->xdp_tx_lock); + for (i = 0; i < num_frames; i++) { struct xdp_frame *xdp = frames[i]; - if (!txr || !bnxt_tx_avail(bp, txr) || - !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) + if (!bnxt_tx_avail(bp, txr)) break; mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len, @@ -250,6 +257,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames, bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); } + if (static_branch_unlikely(&bnxt_xdp_locking_key)) + spin_unlock(&txr->xdp_tx_lock); + return nxmit; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 0df40c3beb05..067bb5e821f5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -10,6 +10,8 @@ #ifndef BNXT_XDP_H #define BNXT_XDP_H 
+DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key); + struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, dma_addr_t mapping, u32 len); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 9a67e24f46b2..b4f99dd284e5 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2243,8 +2243,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, dma_length_status = status->length_status; if (dev->features & NETIF_F_RXCSUM) { rx_csum = (__force __be16)(status->rx_csum & 0xffff); - skb->csum = (__force __wsum)ntohs(rx_csum); - skb->ip_summed = CHECKSUM_COMPLETE; + if (rx_csum) { + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } } /* DMA flags and length are still valid no matter how diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index e31a5a397f11..f55d9d9c01a8 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -40,6 +40,13 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) { + wol->supported = 0; + wol->wolopts = 0; + return; + } wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; wol->wolopts = priv->wolopts; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index d13fb1d31821..217c1a0f8940 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1606,7 +1606,14 @@ static int macb_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete_done(napi, work_done); - /* Packets received while interrupts were disabled */ + /* RSR bits only seem to propagate to raise interrupts when + * interrupts are enabled at the time, so if bits are already + * set due to packets received while interrupts were disabled, + * they will not cause another interrupt to be generated when + * interrupts are re-enabled. + * Check for this case here. This has been seen to happen + * around 30% of the time under heavy network load. + */ status = macb_readl(bp, RSR); if (status) { if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) @@ -1614,6 +1621,22 @@ static int macb_poll(struct napi_struct *napi, int budget) napi_reschedule(napi); } else { queue_writel(queue, IER, bp->rx_intr_mask); + + /* In rare cases, packets could have been received in + * the window between the check above and re-enabling + * interrupts. Therefore, a double-check is required + * to avoid losing a wakeup. This can potentially race + * with the interrupt handler doing the same actions + * if an interrupt is raised just after enabling them, + * but this should be harmless. 
+ */ + status = macb_readl(bp, RSR); + if (unlikely(status)) { + queue_writel(queue, IDR, bp->rx_intr_mask); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(RCOMP)); + napi_schedule(napi); + } } } @@ -1666,6 +1689,7 @@ static void macb_tx_restart(struct macb_queue *queue) unsigned int head = queue->tx_head; unsigned int tail = queue->tx_tail; struct macb *bp = queue->bp; + unsigned int head_idx, tbqp; if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(TXUBR)); @@ -1673,6 +1697,13 @@ static void macb_tx_restart(struct macb_queue *queue) if (head == tail) return; + tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); + tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); + head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head)); + + if (tbqp == head_idx) + return; + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); } @@ -4739,7 +4770,7 @@ static int macb_probe(struct platform_device *pdev) #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { - dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); bp->hw_dma_cap |= HW_DMA_CAP_64B; } #endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index 7ff31d1026fb..e0d34e64fc6c 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -3678,6 +3678,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10); adapter->params.pci.vpd_cap_addr = pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD); + if (!adapter->params.pci.vpd_cap_addr) + return -ENODEV; ret = get_vpd_params(adapter, &adapter->params.vpd); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig index 38aa824efb25..9241b9b1c7a3 100644 --- a/drivers/net/ethernet/ezchip/Kconfig +++ b/drivers/net/ethernet/ezchip/Kconfig @@ -18,7 +18,7 @@ if NET_VENDOR_EZCHIP config EZCHIP_NPS_MANAGEMENT_ENET tristate "EZchip NPS management enet support" - depends on OF_IRQ && OF_NET + depends on OF_IRQ depends on HAS_IOMEM help Simple LAN device for debug or management purposes. 
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index fbaf173ca4a4..33e119833b20 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1847,11 +1847,6 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->rxdes0_edorr_mask = BIT(30); priv->txdes0_edotr_mask = BIT(30); priv->is_aspeed = true; - /* Disable ast2600 problematic HW arbitration */ - if (of_device_is_compatible(np, "aspeed,ast2600-mac")) { - iowrite32(FTGMAC100_TM_DEFAULT, - priv->base + FTGMAC100_OFFSET_TM); - } } else { priv->rxdes0_edorr_mask = BIT(15); priv->txdes0_edotr_mask = BIT(15); @@ -1923,6 +1918,11 @@ static int ftgmac100_probe(struct platform_device *pdev) err = ftgmac100_setup_clk(priv); if (err) goto err_phy_connect; + + /* Disable ast2600 problematic HW arbitration */ + if (of_device_is_compatible(np, "aspeed,ast2600-mac")) + iowrite32(FTGMAC100_TM_DEFAULT, + priv->base + FTGMAC100_OFFSET_TM); } /* Default ring sizes */ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 763d2c7b5fb1..5750f9a56393 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev, info->phc_index = -1; fman_node = of_get_parent(mac_node); - if (fman_node) + if (fman_node) { ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0); + of_node_put(fman_node); + } - if (ptp_node) + if (ptp_node) { ptp_dev = of_find_device_by_node(ptp_node); + of_node_put(ptp_node); + } if (ptp_dev) ptp = platform_get_drvdata(ptp_dev); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 110075336a75..8b7a29e1e221 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4329,7 +4329,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) } INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp); - + mutex_init(&priv->onestep_tstamp_lock); skb_queue_head_init(&priv->tx_skbs); priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index 32b5faa87bb8..208a3459f2e2 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -168,7 +168,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) base = of_iomap(node, 0); if (!base) { err = -ENOMEM; - goto err_close; + goto err_put; } err = fsl_mc_allocate_irqs(mc_dev); @@ -212,6 +212,8 @@ err_free_mc_irq: fsl_mc_free_irqs(mc_dev); err_unmap: iounmap(base); +err_put: + of_node_put(node); err_close: dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); err_free_mcp: diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c index d6eefbbf163f..cacd454ac696 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -532,6 +532,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls, struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct flow_dissector *dissector = rule->match.dissector; struct netlink_ext_ack *extack = cls->common.extack; + int ret = -EOPNOTSUPP; if (dissector->used_keys & 
~(BIT(FLOW_DISSECTOR_KEY_BASIC) | @@ -561,9 +562,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls, } *vlan = (u16)match.key->vlan_id; + ret = 0; } - return 0; + return ret; } static int diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 910b9f722504..d62c188c8748 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -672,7 +672,10 @@ static int enetc_get_ts_info(struct net_device *ndev, #ifdef CONFIG_FSL_ENETC_PTP_CLOCK info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; + SOF_TIMESTAMPING_RAW_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON) | diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 0536d2c76fbc..d779dde522c8 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -45,6 +45,7 @@ void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed) | pspeed); } +#define ENETC_QOS_ALIGN 64 static int enetc_setup_taprio(struct net_device *ndev, struct tc_taprio_qopt_offload *admin_conf) { @@ -52,10 +53,11 @@ static int enetc_setup_taprio(struct net_device *ndev, struct enetc_cbd cbd = {.cmd = 0}; struct tgs_gcl_conf *gcl_config; struct tgs_gcl_data *gcl_data; + dma_addr_t dma, dma_align; struct gce *gce; - dma_addr_t dma; u16 data_size; u16 gcl_len; + void *tmp; u32 tge; int err; int i; @@ -82,9 +84,16 @@ static int enetc_setup_taprio(struct net_device *ndev, gcl_config = &cbd.gcl_conf; data_size = struct_size(gcl_data, entry, gcl_len); - gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); - if (!gcl_data) + tmp = dma_alloc_coherent(&priv->si->pdev->dev, + data_size + ENETC_QOS_ALIGN, + &dma, GFP_KERNEL); + if (!tmp) { + dev_err(&priv->si->pdev->dev, + "DMA mapping of taprio gate list failed!\n"); return -ENOMEM; + } + dma_align = ALIGN(dma, ENETC_QOS_ALIGN); + gcl_data = (struct tgs_gcl_data *)PTR_ALIGN(tmp, ENETC_QOS_ALIGN); gce = (struct gce *)(gcl_data + 1); @@ -110,16 +119,8 @@ static int enetc_setup_taprio(struct net_device *ndev, cbd.length = cpu_to_le16(data_size); cbd.status_flags = 0; - dma = dma_map_single(&priv->si->pdev->dev, gcl_data, - data_size, DMA_TO_DEVICE); - if (dma_mapping_error(&priv->si->pdev->dev, dma)) { - netdev_err(priv->si->ndev, "DMA mapping failed!\n"); - kfree(gcl_data); - return -ENOMEM; - } - - cbd.addr[0] = cpu_to_le32(lower_32_bits(dma)); - cbd.addr[1] = cpu_to_le32(upper_32_bits(dma)); + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align)); cbd.cls = BDCR_CMD_PORT_GCL; cbd.status_flags = 0; @@ -132,8 +133,8 @@ static int enetc_setup_taprio(struct net_device *ndev, ENETC_QBV_PTGCR_OFFSET, tge & (~ENETC_QBV_TGE)); - dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE); - kfree(gcl_data); + dma_free_coherent(&priv->si->pdev->dev, data_size + ENETC_QOS_ALIGN, + tmp, dma); return err; } @@ -463,8 +464,9 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, struct enetc_cbd cbd = {.cmd = 0}; struct streamid_data *si_data; struct streamid_conf *si_conf; + dma_addr_t dma, dma_align; u16 data_size; - dma_addr_t dma; + void *tmp; int port; int err; @@ -485,21 +487,20 @@ static 
int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, cbd.status_flags = 0; data_size = sizeof(struct streamid_data); - si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); - if (!si_data) + tmp = dma_alloc_coherent(&priv->si->pdev->dev, + data_size + ENETC_QOS_ALIGN, + &dma, GFP_KERNEL); + if (!tmp) { + dev_err(&priv->si->pdev->dev, + "DMA mapping of stream identify failed!\n"); return -ENOMEM; - cbd.length = cpu_to_le16(data_size); - - dma = dma_map_single(&priv->si->pdev->dev, si_data, - data_size, DMA_FROM_DEVICE); - if (dma_mapping_error(&priv->si->pdev->dev, dma)) { - netdev_err(priv->si->ndev, "DMA mapping failed!\n"); - err = -ENOMEM; - goto out; } + dma_align = ALIGN(dma, ENETC_QOS_ALIGN); + si_data = (struct streamid_data *)PTR_ALIGN(tmp, ENETC_QOS_ALIGN); - cbd.addr[0] = cpu_to_le32(lower_32_bits(dma)); - cbd.addr[1] = cpu_to_le32(upper_32_bits(dma)); + cbd.length = cpu_to_le16(data_size); + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align)); eth_broadcast_addr(si_data->dmac); si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK + ((0x3 << 14) | ENETC_CBDR_SID_VIDM)); @@ -539,8 +540,8 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, cbd.length = cpu_to_le16(data_size); - cbd.addr[0] = cpu_to_le32(lower_32_bits(dma)); - cbd.addr[1] = cpu_to_le32(upper_32_bits(dma)); + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align)); /* VIDM default to be 1. * VID Match. If set (b1) then the VID must match, otherwise @@ -561,10 +562,8 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, err = enetc_send_cmd(priv->si, &cbd); out: - if (!dma_mapping_error(&priv->si->pdev->dev, dma)) - dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE); - - kfree(si_data); + dma_free_coherent(&priv->si->pdev->dev, data_size + ENETC_QOS_ALIGN, + tmp, dma); return err; } @@ -633,8 +632,9 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv, { struct enetc_cbd cbd = { .cmd = 2 }; struct sfi_counter_data *data_buf; - dma_addr_t dma; + dma_addr_t dma, dma_align; u16 data_size; + void *tmp; int err; cbd.index = cpu_to_le16((u16)index); @@ -643,19 +643,19 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv, cbd.status_flags = 0; data_size = sizeof(struct sfi_counter_data); - data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); - if (!data_buf) + tmp = dma_alloc_coherent(&priv->si->pdev->dev, + data_size + ENETC_QOS_ALIGN, + &dma, GFP_KERNEL); + if (!tmp) { + dev_err(&priv->si->pdev->dev, + "DMA mapping of stream counter failed!\n"); return -ENOMEM; - - dma = dma_map_single(&priv->si->pdev->dev, data_buf, - data_size, DMA_FROM_DEVICE); - if (dma_mapping_error(&priv->si->pdev->dev, dma)) { - netdev_err(priv->si->ndev, "DMA mapping failed!\n"); - err = -ENOMEM; - goto exit; } - cbd.addr[0] = cpu_to_le32(lower_32_bits(dma)); - cbd.addr[1] = cpu_to_le32(upper_32_bits(dma)); + dma_align = ALIGN(dma, ENETC_QOS_ALIGN); + data_buf = (struct sfi_counter_data *)PTR_ALIGN(tmp, ENETC_QOS_ALIGN); + + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align)); cbd.length = cpu_to_le16(data_size); @@ -684,7 +684,9 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv, data_buf->flow_meter_dropl; exit: - kfree(data_buf); + dma_free_coherent(&priv->si->pdev->dev, data_size + ENETC_QOS_ALIGN, + tmp, dma); + return err; } @@ -723,9 +725,10 @@ static int enetc_streamgate_hw_set(struct 
enetc_ndev_priv *priv, struct sgcl_conf *sgcl_config; struct sgcl_data *sgcl_data; struct sgce *sgce; - dma_addr_t dma; + dma_addr_t dma, dma_align; u16 data_size; int err, i; + void *tmp; u64 now; cbd.index = cpu_to_le16(sgi->index); @@ -772,24 +775,20 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv, sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3; data_size = struct_size(sgcl_data, sgcl, sgi->num_entries); - - sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); - if (!sgcl_data) - return -ENOMEM; - - cbd.length = cpu_to_le16(data_size); - - dma = dma_map_single(&priv->si->pdev->dev, - sgcl_data, data_size, - DMA_FROM_DEVICE); - if (dma_mapping_error(&priv->si->pdev->dev, dma)) { - netdev_err(priv->si->ndev, "DMA mapping failed!\n"); - kfree(sgcl_data); + tmp = dma_alloc_coherent(&priv->si->pdev->dev, + data_size + ENETC_QOS_ALIGN, + &dma, GFP_KERNEL); + if (!tmp) { + dev_err(&priv->si->pdev->dev, + "DMA mapping of stream counter failed!\n"); return -ENOMEM; } + dma_align = ALIGN(dma, ENETC_QOS_ALIGN); + sgcl_data = (struct sgcl_data *)PTR_ALIGN(tmp, ENETC_QOS_ALIGN); - cbd.addr[0] = cpu_to_le32(lower_32_bits(dma)); - cbd.addr[1] = cpu_to_le32(upper_32_bits(dma)); + cbd.length = cpu_to_le16(data_size); + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align)); sgce = &sgcl_data->sgcl[0]; @@ -844,7 +843,8 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv, err = enetc_send_cmd(priv->si, &cbd); exit: - kfree(sgcl_data); + dma_free_coherent(&priv->si->pdev->dev, data_size + ENETC_QOS_ALIGN, + tmp, dma); return err; } diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 7b32ed29bf4c..8c17fe5d66ed 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1460,6 +1460,7 @@ static int gfar_get_ts_info(struct net_device *dev, ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp"); if (ptp_node) { ptp_dev = of_find_device_by_node(ptp_node); + of_node_put(ptp_node); if (ptp_dev) ptp = platform_get_drvdata(ptp_dev); } diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 629d8ed08fc6..97431969a488 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -450,6 +450,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, skb_set_hash(skb, be32_to_cpu(rx_desc->rss_hash), gve_rss_type(rx_desc->flags_seq)); + skb_record_rx_queue(skb, rx->q_num); if (skb_is_nonlinear(skb)) napi_gro_frags(napi); else diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 47bba4c62f04..9204f5ecd415 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -527,6 +527,8 @@ struct hnae3_ae_dev { * Get 1588 rx hwstamp * get_ts_info * Get phc info + * clean_vf_config + * Clean residual vf info after disable sriov */ struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); @@ -720,6 +722,7 @@ struct hnae3_ae_ops { struct ethtool_ts_info *info); int (*get_link_diagnosis_info)(struct hnae3_handle *handle, u32 *status_code); + void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs); }; struct hnae3_dcb_ops { @@ -832,6 +835,7 @@ struct hnae3_handle { struct dentry *hnae3_dbgfs; /* protects concurrent contention between debugfs commands */ struct mutex 
dbgfs_lock; + char **dbgfs_buf; /* Network interface message level enabled bits */ u32 msg_enable; @@ -852,6 +856,20 @@ struct hnae3_handle { #define hnae3_get_bit(origin, shift) \ hnae3_get_field(origin, 0x1 << (shift), shift) +#define HNAE3_FORMAT_MAC_ADDR_LEN 18 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_0 0 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_4 4 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_5 5 + +static inline void hnae3_format_mac_addr(char *format_mac_addr, + const u8 *mac_addr) +{ + snprintf(format_mac_addr, HNAE3_FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:%02x:%02x", + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_0], + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_4], + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_5]); +} + int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 3205849bdb95..15ce1a33649e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -1022,7 +1022,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, return ret; mutex_lock(&handle->dbgfs_lock); - save_buf = &hns3_dbg_cmd[index].buf; + save_buf = &handle->dbgfs_buf[index]; if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) { @@ -1127,6 +1127,13 @@ int hns3_dbg_init(struct hnae3_handle *handle) int ret; u32 i; + handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev, + ARRAY_SIZE(hns3_dbg_cmd), + sizeof(*handle->dbgfs_buf), + GFP_KERNEL); + if (!handle->dbgfs_buf) + return -ENOMEM; + hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry = debugfs_create_dir(name, hns3_dbgfs_root); handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry; @@ -1175,9 +1182,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle) u32 i; for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) - if (hns3_dbg_cmd[i].buf) { - kvfree(hns3_dbg_cmd[i].buf); - hns3_dbg_cmd[i].buf = NULL; + if (handle->dbgfs_buf[i]) { + kvfree(handle->dbgfs_buf[i]); + handle->dbgfs_buf[i] = NULL; } mutex_destroy(&handle->dbgfs_lock); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h index bd8801065e02..814f7491ca08 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h @@ -47,7 +47,6 @@ struct hns3_dbg_cmd_info { enum hnae3_dbg_cmd cmd; enum hns3_dbg_dentry_type dentry; u32 buf_len; - char *buf; int (*init)(struct hnae3_handle *handle, unsigned int cmd); }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 4b886a13e079..16cbd146ad06 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2255,6 +2255,8 @@ out_err_tx_ok: static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) { + char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN]; + char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hnae3_handle *h = hns3_get_handle(netdev); struct sockaddr *mac_addr = p; int ret; @@ -2263,8 +2265,9 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return -EADDRNOTAVAIL; if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { - netdev_info(netdev, "already using mac address %pM\n", - mac_addr->sa_data); + hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); + 
netdev_info(netdev, "already using mac address %s\n", + format_mac_addr_sa); return 0; } @@ -2273,8 +2276,10 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) */ if (!hns3_is_phys_func(h->pdev) && !is_zero_ether_addr(netdev->perm_addr)) { - netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n", - netdev->perm_addr, mac_addr->sa_data); + hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr); + hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); + netdev_err(netdev, "has permanent MAC %s, user MAC %s not allow\n", + format_mac_addr_perm, format_mac_addr_sa); return -EPERM; } @@ -2836,14 +2841,16 @@ static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) { struct hnae3_handle *h = hns3_get_handle(netdev); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; if (!h->ae_algo->ops->set_vf_mac) return -EOPNOTSUPP; if (is_multicast_ether_addr(mac)) { + hnae3_format_mac_addr(format_mac_addr, mac); netdev_err(netdev, - "Invalid MAC:%pM specified. Could not set MAC\n", - mac); + "Invalid MAC:%s specified. Could not set MAC\n", + format_mac_addr); return -EINVAL; } @@ -2947,6 +2954,21 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; } +/** + * hns3_clean_vf_config + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs allocated + * + * Clean residual vf config after disable sriov + **/ +static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + if (ae_dev->ops->clean_vf_config) + ae_dev->ops->clean_vf_config(ae_dev, num_vfs); +} + /* hns3_remove - Device removal routine * @pdev: PCI device information struct */ @@ -2985,7 +3007,10 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) else return num_vfs; } else if (!pci_vfs_assigned(pdev)) { + int num_vfs_pre = pci_num_vf(pdev); + pci_disable_sriov(pdev); + hns3_clean_vf_config(pdev, num_vfs_pre); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); @@ -4927,6 +4952,7 @@ static void hns3_uninit_all_ring(struct hns3_nic_priv *priv) static int hns3_init_mac_addr(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hnae3_handle *h = priv->ae_handle; u8 mac_addr_temp[ETH_ALEN]; int ret = 0; @@ -4937,8 +4963,9 @@ static int hns3_init_mac_addr(struct net_device *netdev) /* Check if the MAC address is valid, if not get a random one */ if (!is_valid_ether_addr(mac_addr_temp)) { eth_hw_addr_random(netdev); - dev_warn(priv->dev, "using random MAC address %pM\n", - netdev->dev_addr); + hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr); + dev_warn(priv->dev, "using random MAC address %s\n", + format_mac_addr); } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { ether_addr_copy(netdev->dev_addr, mac_addr_temp); ether_addr_copy(netdev->perm_addr, mac_addr_temp); @@ -4990,8 +5017,10 @@ static void hns3_client_stop(struct hnae3_handle *handle) static void hns3_info_show(struct hns3_nic_priv *priv) { struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; - dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); + hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr); + dev_info(priv->dev, "MAC address: %s\n", format_mac_addr); dev_info(priv->dev, "Task queue 
pairs numbers: %u\n", kinfo->num_tqps); dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 66c407d0d507..892f2f12c54c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1863,6 +1863,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; vport->mps = HCLGE_MAC_DEFAULT_FRAME; vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; + vport->port_base_vlan_cfg.tbl_sta = true; vport->rxvlan_cfg.rx_vlan_offload_en = true; vport->req_vlan_fltr_en = true; INIT_LIST_HEAD(&vport->vlan_list); @@ -8569,6 +8570,7 @@ int hclge_update_mac_list(struct hclge_vport *vport, enum HCLGE_MAC_ADDR_TYPE mac_type, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_node *mac_node; struct list_head *list; @@ -8593,9 +8595,10 @@ int hclge_update_mac_list(struct hclge_vport *vport, /* if this address is never added, unnecessary to delete */ if (state == HCLGE_MAC_TO_DEL) { spin_unlock_bh(&vport->mac_list_lock); + hnae3_format_mac_addr(format_mac_addr, addr); dev_err(&hdev->pdev->dev, - "failed to delete address %pM from mac list\n", - addr); + "failed to delete address %s from mac list\n", + format_mac_addr); return -ENOENT; } @@ -8628,6 +8631,7 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle, int hclge_add_uc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc; @@ -8638,9 +8642,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, if (is_zero_ether_addr(addr) || is_broadcast_ether_addr(addr) || is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); dev_err(&hdev->pdev->dev, - "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", - addr, is_zero_ether_addr(addr), + "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n", + format_mac_addr, is_zero_ether_addr(addr), is_broadcast_ether_addr(addr), is_multicast_ether_addr(addr)); return -EINVAL; @@ -8697,6 +8702,7 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle, int hclge_rm_uc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; int ret; @@ -8705,8 +8711,9 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, if (is_zero_ether_addr(addr) || is_broadcast_ether_addr(addr) || is_multicast_ether_addr(addr)) { - dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n", - addr); + hnae3_format_mac_addr(format_mac_addr, addr); + dev_dbg(&hdev->pdev->dev, "Remove mac err! 
invalid mac:%s.\n", + format_mac_addr); return -EINVAL; } @@ -8714,12 +8721,11 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr, false); ret = hclge_remove_mac_vlan_tbl(vport, &req); - if (!ret) { + if (!ret || ret == -ENOENT) { mutex_lock(&hdev->vport_lock); hclge_update_umv_space(vport, true); mutex_unlock(&hdev->vport_lock); - } else if (ret == -ENOENT) { - ret = 0; + return 0; } return ret; @@ -8737,6 +8743,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle, int hclge_add_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; @@ -8744,9 +8751,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, /* mac addr check */ if (!is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); dev_err(&hdev->pdev->dev, - "Add mc mac err! invalid mac:%pM.\n", - addr); + "Add mc mac err! invalid mac:%s.\n", + format_mac_addr); return -EINVAL; } memset(&req, 0, sizeof(req)); @@ -8782,6 +8790,7 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle, int hclge_rm_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; enum hclge_cmd_status status; @@ -8789,9 +8798,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, /* mac addr check */ if (!is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); dev_dbg(&hdev->pdev->dev, - "Remove mc mac err! invalid mac:%pM.\n", - addr); + "Remove mc mac err! invalid mac:%s.\n", + format_mac_addr); return -EINVAL; } @@ -9257,16 +9267,18 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, u8 *mac_addr) { struct hclge_vport *vport = hclge_get_vport(handle); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; vport = hclge_get_vf_vport(hdev, vf); if (!vport) return -EINVAL; + hnae3_format_mac_addr(format_mac_addr, mac_addr); if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { dev_info(&hdev->pdev->dev, - "Specified MAC(=%pM) is same as before, no change committed!\n", - mac_addr); + "Specified MAC(=%s) is same as before, no change committed!\n", + format_mac_addr); return 0; } @@ -9278,15 +9290,20 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, ether_addr_copy(vport->vf_info.mac, mac_addr); + /* there is a timewindow for PF to know VF unalive, it may + * cause send mailbox fail, but it doesn't matter, VF will + * query it when reinit. 
+ */ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { dev_info(&hdev->pdev->dev, - "MAC of VF %d has been set to %pM, and it will be reinitialized!\n", - vf, mac_addr); - return hclge_inform_reset_assert_to_vf(vport); + "MAC of VF %d has been set to %s, and it will be reinitialized!\n", + vf, format_mac_addr); + (void)hclge_inform_reset_assert_to_vf(vport); + return 0; } - dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n", - vf, mac_addr); + dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n", + vf, format_mac_addr); return 0; } @@ -9390,6 +9407,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, { const unsigned char *new_addr = (const unsigned char *)p; struct hclge_vport *vport = hclge_get_vport(handle); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; unsigned char *old_addr = NULL; int ret; @@ -9398,9 +9416,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, if (is_zero_ether_addr(new_addr) || is_broadcast_ether_addr(new_addr) || is_multicast_ether_addr(new_addr)) { + hnae3_format_mac_addr(format_mac_addr, new_addr); dev_err(&hdev->pdev->dev, - "change uc mac err! invalid mac: %pM.\n", - new_addr); + "change uc mac err! invalid mac: %s.\n", + format_mac_addr); return -EINVAL; } @@ -9418,9 +9437,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, spin_lock_bh(&vport->mac_list_lock); ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr); if (ret) { + hnae3_format_mac_addr(format_mac_addr, new_addr); dev_err(&hdev->pdev->dev, - "failed to change the mac addr:%pM, ret = %d\n", - new_addr, ret); + "failed to change the mac addr:%s, ret = %d\n", + format_mac_addr, ret); spin_unlock_bh(&vport->mac_list_lock); if (!is_first) @@ -10078,19 +10098,28 @@ static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, bool writen_to_tbl) { struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; - list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) - if (vlan->vlan_id == vlan_id) + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + mutex_unlock(&hdev->vport_lock); return; + } + } vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); - if (!vlan) + if (!vlan) { + mutex_unlock(&hdev->vport_lock); return; + } vlan->hd_tbl_status = writen_to_tbl; vlan->vlan_id = vlan_id; list_add_tail(&vlan->node, &vport->vlan_list); + mutex_unlock(&hdev->vport_lock); } static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) @@ -10099,6 +10128,8 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) struct hclge_dev *hdev = vport->back; int ret; + mutex_lock(&hdev->vport_lock); + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { if (!vlan->hd_tbl_status) { ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), @@ -10108,12 +10139,16 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) dev_err(&hdev->pdev->dev, "restore vport vlan list failed, ret=%d\n", ret); + + mutex_unlock(&hdev->vport_lock); return ret; } } vlan->hd_tbl_status = true; } + mutex_unlock(&hdev->vport_lock); + return 0; } @@ -10123,6 +10158,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; + mutex_lock(&hdev->vport_lock); + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { if (vlan->vlan_id == 
vlan_id) { if (is_write_tbl && vlan->hd_tbl_status) @@ -10137,6 +10174,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, break; } } + + mutex_unlock(&hdev->vport_lock); } void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) @@ -10144,6 +10183,8 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; + mutex_lock(&hdev->vport_lock); + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { if (vlan->hd_tbl_status) hclge_set_vlan_filter_hw(hdev, @@ -10159,6 +10200,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) } } clear_bit(vport->vport_id, hdev->vf_vlan_full); + mutex_unlock(&hdev->vport_lock); } void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) @@ -10167,6 +10209,8 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) struct hclge_vport *vport; int i; + mutex_lock(&hdev->vport_lock); + for (i = 0; i < hdev->num_alloc_vport; i++) { vport = &hdev->vport[i]; list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { @@ -10174,37 +10218,61 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) kfree(vlan); } } + + mutex_unlock(&hdev->vport_lock); } -void hclge_restore_vport_vlan_table(struct hclge_vport *vport) +void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) { - struct hclge_vport_vlan_cfg *vlan, *tmp; - struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info *vlan_info; + struct hclge_vport *vport; u16 vlan_proto; u16 vlan_id; u16 state; + int vf_id; int ret; - vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto; - vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag; - state = vport->port_base_vlan_cfg.state; + /* PF should restore all vfs port base vlan */ + for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { + vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; + vlan_info = vport->port_base_vlan_cfg.tbl_sta ? 
+ &vport->port_base_vlan_cfg.vlan_info : + &vport->port_base_vlan_cfg.old_vlan_info; - if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { - clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); - hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), - vport->vport_id, vlan_id, - false); - return; + vlan_id = vlan_info->vlan_tag; + vlan_proto = vlan_info->vlan_proto; + state = vport->port_base_vlan_cfg.state; + + if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { + clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); + ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), + vport->vport_id, + vlan_id, false); + vport->port_base_vlan_cfg.tbl_sta = ret == 0; + } } +} - list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { - ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), - vport->vport_id, - vlan->vlan_id, false); - if (ret) - break; - vlan->hd_tbl_status = true; +void hclge_restore_vport_vlan_table(struct hclge_vport *vport) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + int ret; + + mutex_lock(&hdev->vport_lock); + + if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, false); + if (ret) + break; + vlan->hd_tbl_status = true; + } } + + mutex_unlock(&hdev->vport_lock); } /* For global reset and imp reset, hardware will clear the mac table, @@ -10244,6 +10312,7 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev) struct hnae3_handle *handle = &vport->nic; hclge_restore_mac_table_common(vport); + hclge_restore_vport_port_base_vlan_config(hdev); hclge_restore_vport_vlan_table(vport); set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); hclge_restore_fd_entries(handle); @@ -10300,6 +10369,8 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, false); } + vport->port_base_vlan_cfg.tbl_sta = false; + /* force add VLAN 0 */ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); if (ret) @@ -10386,7 +10457,9 @@ out: else nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; vport->port_base_vlan_cfg.vlan_info = *vlan_info; + vport->port_base_vlan_cfg.tbl_sta = true; hclge_set_vport_vlan_fltr_change(vport); return 0; @@ -10454,14 +10527,17 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, return ret; } - /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based + /* there is a timewindow for PF to know VF unalive, it may + * cause send mailbox fail, but it doesn't matter, VF will + * query it when reinit. + * for DEVICE_VERSION_V3, vf doesn't need to know about the port based * VLAN state. 
*/ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 && test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) - hclge_push_vf_port_base_vlan_info(&hdev->vport[0], - vport->vport_id, state, - &vlan_info); + (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + vport->vport_id, + state, &vlan_info); return 0; } @@ -10519,11 +10595,11 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, } if (!ret) { - if (is_kill) - hclge_rm_vport_vlan_table(vport, vlan_id, false); - else + if (!is_kill) hclge_add_vport_vlan_table(vport, vlan_id, writen_to_tbl); + else if (is_kill && vlan_id != 0) + hclge_rm_vport_vlan_table(vport, vlan_id, false); } else if (is_kill) { /* when remove hw vlan filter failed, record the vlan id, * and try to remove it from hw later, to be consistence @@ -12097,8 +12173,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_misc_irq_uninit(hdev); hclge_devlink_uninit(hdev); hclge_pci_uninit(hdev); - mutex_destroy(&hdev->vport_lock); hclge_uninit_vport_vlan_table(hdev); + mutex_destroy(&hdev->vport_lock); ae_dev->priv = NULL; } @@ -12911,6 +12987,55 @@ static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, return 0; } +/* After disable sriov, VF still has some config and info need clean, + * which configed by PF. + */ +static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info vlan_info; + int ret; + + /* after disable sriov, clean VF rate configured by PF */ + ret = hclge_tm_qs_shaper_cfg(vport, 0); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d rate config, ret = %d\n", + vfid, ret); + + vlan_info.vlan_tag = 0; + vlan_info.qos = 0; + vlan_info.vlan_proto = ETH_P_8021Q; + ret = hclge_update_port_base_vlan_cfg(vport, + HNAE3_PORT_BASE_VLAN_DISABLE, + &vlan_info); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d port base vlan, ret = %d\n", + vfid, ret); + + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d spoof config, ret = %d\n", + vfid, ret); + + memset(&vport->vf_info, 0, sizeof(vport->vf_info)); +} + +static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport; + int i; + + for (i = 0; i < num_vfs; i++) { + vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; + + hclge_clear_vport_vf_info(vport, i); + } +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -13012,6 +13137,7 @@ static const struct hnae3_ae_ops hclge_ops = { .get_rx_hwts = hclge_ptp_get_rx_hwts, .get_ts_info = hclge_ptp_get_ts_info, .get_link_diagnosis_info = hclge_get_link_diagnosis_info, + .clean_vf_config = hclge_clean_vport_config, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 2fa6e14c96e5..4d6dbfe0be7a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -1000,7 +1000,9 @@ struct hclge_vlan_info { struct hclge_port_base_vlan_config { u16 state; + bool tbl_sta; struct hclge_vlan_info vlan_info; + struct hclge_vlan_info old_vlan_info; }; struct hclge_vf_info { @@ -1055,6 +1057,7 @@ struct hclge_vport { spinlock_t mac_list_lock; /* protect mac address need to add/detele */ struct list_head uc_mac_list; 
/* Store VF unicast table */ struct list_head mc_mac_list; /* Store VF multicast table */ + struct list_head vlan_list; /* Store VF vlan table */ }; @@ -1124,6 +1127,7 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); void hclge_restore_mac_table_common(struct hclge_vport *vport); +void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev); void hclge_restore_vport_vlan_table(struct hclge_vport *vport); int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, struct hclge_vlan_info *vlan_info); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 417a08d600b8..21678c12afa2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1514,15 +1514,18 @@ static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, struct list_head *list, enum HCLGEVF_MAC_ADDR_TYPE mac_type) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclgevf_mac_addr_node *mac_node, *tmp; int ret; list_for_each_entry_safe(mac_node, tmp, list, node) { ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); if (ret) { + hnae3_format_mac_addr(format_mac_addr, + mac_node->mac_addr); dev_err(&hdev->pdev->dev, - "failed to configure mac %pM, state = %d, ret = %d\n", - mac_node->mac_addr, mac_node->state, ret); + "failed to configure mac %s, state = %d, ret = %d\n", + format_mac_addr, mac_node->state, ret); return; } if (mac_node->state == HCLGEVF_MAC_TO_ADD) { @@ -3341,6 +3344,11 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } + /* get current port based vlan state from PF */ + ret = hclgevf_get_port_base_vlan_filter_state(hdev); + if (ret) + return ret; + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); hclgevf_init_rxd_adv_layout(hdev); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5c7371dc8384..c809e8fe648f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -108,6 +108,7 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter); static int send_query_phys_parms(struct ibmvnic_adapter *adapter); static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, struct ibmvnic_sub_crq_queue *tx_scrq); +static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter); struct ibmvnic_stat { char name[ETH_GSTRING_LEN]; @@ -1245,10 +1246,19 @@ static int __ibmvnic_open(struct net_device *netdev) rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); if (rc) { ibmvnic_napi_disable(adapter); - release_resources(adapter); + ibmvnic_disable_irqs(adapter); return rc; } + adapter->tx_queues_active = true; + + /* Since queues were stopped until now, there shouldn't be any + * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we + * don't need the synchronize_rcu()? Leaving it for consistency + * with setting ->tx_queues_active = false. 
+ */ + synchronize_rcu(); + netif_tx_start_all_queues(netdev); if (prev_state == VNIC_CLOSED) { @@ -1295,7 +1305,6 @@ static int ibmvnic_open(struct net_device *netdev) rc = init_resources(adapter); if (rc) { netdev_err(netdev, "failed to initialize resources\n"); - release_resources(adapter); goto out; } } @@ -1312,6 +1321,11 @@ out: adapter->state = VNIC_OPEN; rc = 0; } + + if (rc) { + release_resources(adapter); + } + return rc; } @@ -1417,6 +1431,14 @@ static void ibmvnic_cleanup(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); /* ensure that transmissions are stopped if called by do_reset */ + + adapter->tx_queues_active = false; + + /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active + * update so they don't restart a queue after we stop it below. + */ + synchronize_rcu(); + if (test_bit(0, &adapter->resetting)) netif_tx_disable(netdev); else @@ -1657,14 +1679,21 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, tx_buff->skb = NULL; adapter->netdev->stats.tx_dropped++; } + ind_bufp->index = 0; + if (atomic_sub_return(entries, &tx_scrq->used) <= (adapter->req_tx_entries_per_subcrq / 2) && - __netif_subqueue_stopped(adapter->netdev, queue_num) && - !test_bit(0, &adapter->resetting)) { - netif_wake_subqueue(adapter->netdev, queue_num); - netdev_dbg(adapter->netdev, "Started queue %d\n", - queue_num); + __netif_subqueue_stopped(adapter->netdev, queue_num)) { + rcu_read_lock(); + + if (adapter->tx_queues_active) { + netif_wake_subqueue(adapter->netdev, queue_num); + netdev_dbg(adapter->netdev, "Started queue %d\n", + queue_num); + } + + rcu_read_unlock(); } } @@ -1719,11 +1748,12 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) int index = 0; u8 proto = 0; - tx_scrq = adapter->tx_scrq[queue_num]; - txq = netdev_get_tx_queue(netdev, queue_num); - ind_bufp = &tx_scrq->ind_buf; - - if (test_bit(0, &adapter->resetting)) { + /* If a reset is in progress, drop the packet since + * the scrqs may get torn down. Otherwise use the + * rcu to ensure reset waits for us to complete. 
+ */ + rcu_read_lock(); + if (!adapter->tx_queues_active) { dev_kfree_skb_any(skb); tx_send_failed++; @@ -1732,6 +1762,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) goto out; } + tx_scrq = adapter->tx_scrq[queue_num]; + txq = netdev_get_tx_queue(netdev, queue_num); + ind_bufp = &tx_scrq->ind_buf; + if (ibmvnic_xmit_workarounds(skb, netdev)) { tx_dropped++; tx_send_failed++; @@ -1739,6 +1773,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) ibmvnic_tx_scrq_flush(adapter, tx_scrq); goto out; } + if (skb_is_gso(skb)) tx_pool = &adapter->tso_pool[queue_num]; else @@ -1893,6 +1928,7 @@ tx_err: netif_carrier_off(netdev); } out: + rcu_read_unlock(); netdev->stats.tx_dropped += tx_dropped; netdev->stats.tx_bytes += tx_bytes; netdev->stats.tx_packets += tx_packets; @@ -2552,12 +2588,23 @@ static void __ibmvnic_delayed_reset(struct work_struct *work) __ibmvnic_reset(&adapter->ibmvnic_reset); } +static void flush_reset_queue(struct ibmvnic_adapter *adapter) +{ + struct list_head *entry, *tmp_entry; + + if (!list_empty(&adapter->rwi_list)) { + list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { + list_del(entry); + kfree(list_entry(entry, struct ibmvnic_rwi, list)); + } + } +} + static int ibmvnic_reset(struct ibmvnic_adapter *adapter, enum ibmvnic_reset_reason reason) { - struct list_head *entry, *tmp_entry; - struct ibmvnic_rwi *rwi, *tmp; struct net_device *netdev = adapter->netdev; + struct ibmvnic_rwi *rwi, *tmp; unsigned long flags; int ret; @@ -2600,10 +2647,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, /* if we just received a transport event, * flush reset queue and process this reset */ - if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { - list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) - list_del(entry); - } + if (adapter->force_reset_recovery) + flush_reset_queue(adapter); + rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", @@ -3467,9 +3513,15 @@ restart_loop: (adapter->req_tx_entries_per_subcrq / 2) && __netif_subqueue_stopped(adapter->netdev, scrq->pool_index)) { - netif_wake_subqueue(adapter->netdev, scrq->pool_index); - netdev_dbg(adapter->netdev, "Started queue %d\n", - scrq->pool_index); + rcu_read_lock(); + if (adapter->tx_queues_active) { + netif_wake_subqueue(adapter->netdev, + scrq->pool_index); + netdev_dbg(adapter->netdev, + "Started queue %d\n", + scrq->pool_index); + } + rcu_read_unlock(); } } @@ -5138,9 +5190,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, } if (!completion_done(&adapter->init_done)) { - complete(&adapter->init_done); if (!adapter->init_done_rc) adapter->init_done_rc = -EAGAIN; + complete(&adapter->init_done); } break; @@ -5163,6 +5215,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, adapter->fw_done_rc = -EIO; complete(&adapter->fw_done); } + + /* if we got here during crq-init, retry crq-init */ + if (!completion_done(&adapter->init_done)) { + adapter->init_done_rc = -EAGAIN; + complete(&adapter->init_done); + } + if (!completion_done(&adapter->stats_done)) complete(&adapter->stats_done); if (test_bit(0, &adapter->resetting)) @@ -5627,12 +5686,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) goto ibmvnic_dev_file_err; netif_carrier_off(netdev); - rc = register_netdev(netdev); - if (rc) { - dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); - goto ibmvnic_register_fail; - } - 
dev_info(&dev->dev, "ibmvnic registered\n"); if (init_success) { adapter->state = VNIC_PROBED; @@ -5645,6 +5698,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->wait_for_reset = false; adapter->last_reset_time = jiffies; + + rc = register_netdev(netdev); + if (rc) { + dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); + goto ibmvnic_register_fail; + } + dev_info(&dev->dev, "ibmvnic registered\n"); + return 0; ibmvnic_register_fail: @@ -5733,10 +5794,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr, be64_to_cpu(session_token)); rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, H_SESSION_ERR_DETECTED, session_token, 0, 0); - if (rc) + if (rc) { netdev_err(netdev, "H_VIOCTL initiated failover failed, rc %ld\n", rc); + goto last_resort; + } + + return count; last_resort: netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n"); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 22df602323bc..ef395fd3b1e6 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -1002,11 +1002,14 @@ struct ibmvnic_adapter { struct work_struct ibmvnic_reset; struct delayed_work ibmvnic_delayed_reset; unsigned long resetting; - bool napi_enabled, from_passive_init; - bool login_pending; /* last device reset time */ unsigned long last_reset_time; + bool napi_enabled; + bool from_passive_init; + bool login_pending; + /* protected by rcu */ + bool tx_queues_active; bool failover_pending; bool force_reset_recovery; diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index bcf680e83811..13382df2f2ef 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -630,6 +630,7 @@ struct e1000_phy_info { bool disable_polarity_correction; bool is_mdix; bool polarity_correction; + bool reset_disable; bool speed_downgraded; bool autoneg_wait_to_complete; }; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index c908c84b86d2..e6c8e6d5234f 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) { u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; - u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ - u16 lat_enc_d = 0; /* latency decoded */ + u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ + u32 lat_enc_d = 0; /* latency decoded */ u16 lat_enc = 0; /* latency encoded */ if (link) { @@ -2050,6 +2050,10 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) bool blocked = false; int i = 0; + /* Check the PHY (LCD) reset flag */ + if (hw->phy.reset_disable) + return true; + while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && (i++ < 30)) usleep_range(10000, 11000); @@ -4136,9 +4140,9 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) return ret_val; if (!(data & valid_csum_mask)) { - e_dbg("NVM Checksum Invalid\n"); + e_dbg("NVM Checksum valid bit not set\n"); - if (hw->mac.type < e1000_pch_cnp) { + if (hw->mac.type < e1000_pch_tgp) { data |= valid_csum_mask; ret_val = e1000_write_nvm(hw, word, 1, &data); if (ret_val) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 
2504b11c3169..638a3ddd7ada 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -271,6 +271,7 @@ #define I217_CGFREG_ENABLE_MTA_RESET 0x0002 #define I217_MEMPWR PHY_REG(772, 26) #define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 +#define I217_MEMPWR_MOEM 0x1000 /* Receive Address Initial CRC Calculation */ #define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index af2029bb43e3..ce48e630fe55 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6992,8 +6992,21 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev) struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = to_pci_dev(dev); + struct e1000_hw *hw = &adapter->hw; + u16 phy_data; int rc; + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && + hw->mac.type >= e1000_pch_adp) { + /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */ + e1e_rphy(hw, I217_MEMPWR, &phy_data); + phy_data |= I217_MEMPWR_MOEM; + e1e_wphy(hw, I217_MEMPWR, phy_data); + + /* Disable LCD reset */ + hw->phy.reset_disable = true; + } + e1000e_flush_lpic(pdev); e1000e_pm_freeze(dev); @@ -7015,6 +7028,8 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev) struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = to_pci_dev(dev); + struct e1000_hw *hw = &adapter->hw; + u16 phy_data; int rc; /* Introduce S0ix implementation */ @@ -7025,6 +7040,17 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev) if (rc) return rc; + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && + hw->mac.type >= e1000_pch_adp) { + /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */ + e1e_rphy(hw, I217_MEMPWR, &phy_data); + phy_data &= ~I217_MEMPWR_MOEM; + e1e_wphy(hw, I217_MEMPWR, phy_data); + + /* Enable LCD reset */ + hw->phy.reset_disable = false; + } + return e1000e_pm_thaw(dev); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 1e57cc8c47d7..9db5001297c7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -742,10 +742,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id) vsi = pf->vsi[vf->lan_vsi_idx]; dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n", vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs); - dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n", - vf->num_mdd_events, - vf->num_invalid_msgs, - vf->num_valid_msgs); + dev_info(&pf->pdev->dev, " num MDD=%lld\n", + vf->num_mdd_events); } else { dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 063ded36b902..ad73dd2540e7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5372,15 +5372,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, /* There is no need to reset BW when mqprio mode is on. 
*/ if (pf->flags & I40E_FLAG_TC_MQPRIO) return 0; - - if (!vsi->mqprio_qopt.qopt.hw) { - if (pf->flags & I40E_FLAG_DCB_ENABLED) - goto skip_reset; - - if (IS_ENABLED(CONFIG_I40E_DCB) && - i40e_dcb_hw_get_num_tc(&pf->hw) == 1) - goto skip_reset; - + if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { ret = i40e_set_bw_limit(vsi, vsi->seid, 0); if (ret) dev_info(&pf->pdev->dev, @@ -5388,8 +5380,6 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, vsi->seid); return ret; } - -skip_reset: memset(&bw_data, 0, sizeof(bw_data)); bw_data.tc_valid_bits = enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index c6f643e54c4f..babf8b7fa767 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1917,19 +1917,17 @@ sriov_configure_out: /***********************virtual channel routines******************/ /** - * i40e_vc_send_msg_to_vf_ex + * i40e_vc_send_msg_to_vf * @vf: pointer to the VF info * @v_opcode: virtual channel opcode * @v_retval: virtual channel return value * @msg: pointer to the msg buffer * @msglen: msg length - * @is_quiet: true for not printing unsuccessful return values, false otherwise * * send msg to VF **/ -static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen, - bool is_quiet) +static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) { struct i40e_pf *pf; struct i40e_hw *hw; @@ -1944,25 +1942,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, hw = &pf->hw; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - /* single place to detect unsuccessful return values */ - if (v_retval && !is_quiet) { - vf->num_invalid_msgs++; - dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", - vf->vf_id, v_opcode, v_retval); - if (vf->num_invalid_msgs > - I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { - dev_err(&pf->pdev->dev, - "Number of invalid messages exceeded for VF %d\n", - vf->vf_id); - dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); - set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); - } - } else { - vf->num_valid_msgs++; - /* reset the invalid counter, if a valid message is received. */ - vf->num_invalid_msgs = 0; - } - aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret) { @@ -1976,23 +1955,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, } /** - * i40e_vc_send_msg_to_vf - * @vf: pointer to the VF info - * @v_opcode: virtual channel opcode - * @v_retval: virtual channel return value - * @msg: pointer to the msg buffer - * @msglen: msg length - * - * send msg to VF - **/ -static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen) -{ - return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval, - msg, msglen, false); -} - -/** * i40e_vc_send_resp_to_vf * @vf: pointer to the VF info * @opcode: operation code @@ -2813,7 +2775,6 @@ error_param: * i40e_check_vf_permission * @vf: pointer to the VF info * @al: MAC address list from virtchnl - * @is_quiet: set true for printing msg without opcode info, false otherwise * * Check that the given list of MAC addresses is allowed. Will return -EPERM * if any address in the list is not valid. 
Checks the following conditions: @@ -2828,15 +2789,13 @@ error_param: * addresses might not be accurate. **/ static inline int i40e_check_vf_permission(struct i40e_vf *vf, - struct virtchnl_ether_addr_list *al, - bool *is_quiet) + struct virtchnl_ether_addr_list *al) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; int mac2add_cnt = 0; int i; - *is_quiet = false; for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; u8 *addr = al->list[i].addr; @@ -2860,7 +2819,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); - *is_quiet = true; return -EPERM; } @@ -2897,7 +2855,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - bool is_quiet = false; i40e_status ret = 0; int i; @@ -2914,7 +2871,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) */ spin_lock_bh(&vsi->mac_filter_hash_lock); - ret = i40e_check_vf_permission(vf, al, &is_quiet); + ret = i40e_check_vf_permission(vf, al); if (ret) { spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; @@ -2952,8 +2909,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) error_param: /* send the response to the VF */ - return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR, - ret, NULL, 0, is_quiet); + return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, + ret, NULL, 0); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 03c42fd0fea1..a554d0a0b09b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -10,8 +10,6 @@ #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 -#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10 - #define I40E_VLAN_PRIORITY_SHIFT 13 #define I40E_VLAN_MASK 0xFFF #define I40E_PRIORITY_MASK 0xE000 @@ -92,9 +90,6 @@ struct i40e_vf { u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u8 num_req_queues; /* num of requested qps */ u64 num_mdd_events; /* num of mdd events detected */ - /* num of continuous malformed or invalid msgs detected */ - u64 num_invalid_msgs; - u64 num_valid_msgs; /* num of valid msgs detected */ unsigned long vf_caps; /* vf's adv. 
capabilities */ unsigned long vf_states; /* vf's runtime states */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index e7e778ca074c..3f27a8ebe2ec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -243,21 +243,25 @@ no_buffers: static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) { + unsigned int totalsize = xdp->data_end - xdp->data_meta; unsigned int metasize = xdp->data - xdp->data_meta; - unsigned int datasize = xdp->data_end - xdp->data; struct sk_buff *skb; + net_prefetch(xdp->data_meta); + /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - xdp->data_end - xdp->data_hard_start, + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto out; - skb_reserve(skb, xdp->data - xdp->data_hard_start); - memcpy(__skb_put(skb, datasize), xdp->data, datasize); - if (metasize) + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } out: xsk_buff_free(xdp); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 0ae6da2992d0..9a122aea6979 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -178,6 +178,7 @@ enum iavf_state_t { __IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ __IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ __IAVF_INIT_SW, /* got resources, setting up structs */ + __IAVF_INIT_FAILED, /* init failed, restarting procedure */ __IAVF_RESETTING, /* in reset */ __IAVF_COMM_FAILED, /* communication with PF failed */ /* Below here, watchdog is running */ @@ -187,6 +188,10 @@ enum iavf_state_t { __IAVF_RUNNING, /* opened, working */ }; +enum iavf_critical_section_t { + __IAVF_IN_REMOVE_TASK, /* device being removed */ +}; + #define IAVF_CLOUD_FIELD_OMAC 0x01 #define IAVF_CLOUD_FIELD_IMAC 0x02 #define IAVF_CLOUD_FIELD_IVLAN 0x04 @@ -226,14 +231,12 @@ struct iavf_adapter { struct work_struct reset_task; struct work_struct adminq_task; struct delayed_work client_task; - struct delayed_work init_task; wait_queue_head_t down_waitqueue; struct iavf_q_vector *q_vectors; struct list_head vlan_filter_list; struct list_head mac_filter_list; struct mutex crit_lock; struct mutex client_lock; - struct mutex remove_lock; /* Lock to protect accesses to MAC and VLAN lists */ spinlock_t mac_vlan_list_lock; char misc_vector_name[IFNAMSIZ + 9]; @@ -271,6 +274,7 @@ struct iavf_adapter { #define IAVF_FLAG_LEGACY_RX BIT(15) #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16) #define IAVF_FLAG_QUEUES_DISABLED BIT(17) +#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18) /* duplicates for common code */ #define IAVF_FLAG_DCB_ENABLED 0 /* flags for admin queue service task */ @@ -314,6 +318,7 @@ struct iavf_adapter { struct iavf_hw hw; /* defined in iavf_type.h */ enum iavf_state_t state; + enum iavf_state_t last_state; unsigned long crit_section; struct delayed_work watchdog_task; @@ -395,6 +400,51 @@ struct iavf_device { extern char iavf_driver_name[]; extern struct workqueue_struct *iavf_wq; +static inline const char *iavf_state_str(enum iavf_state_t state) +{ + switch (state) { + case __IAVF_STARTUP: + return "__IAVF_STARTUP"; + case __IAVF_REMOVE: + return "__IAVF_REMOVE"; + case __IAVF_INIT_VERSION_CHECK: + return "__IAVF_INIT_VERSION_CHECK"; + 
case __IAVF_INIT_GET_RESOURCES: + return "__IAVF_INIT_GET_RESOURCES"; + case __IAVF_INIT_SW: + return "__IAVF_INIT_SW"; + case __IAVF_INIT_FAILED: + return "__IAVF_INIT_FAILED"; + case __IAVF_RESETTING: + return "__IAVF_RESETTING"; + case __IAVF_COMM_FAILED: + return "__IAVF_COMM_FAILED"; + case __IAVF_DOWN: + return "__IAVF_DOWN"; + case __IAVF_DOWN_PENDING: + return "__IAVF_DOWN_PENDING"; + case __IAVF_TESTING: + return "__IAVF_TESTING"; + case __IAVF_RUNNING: + return "__IAVF_RUNNING"; + default: + return "__IAVF_UNKNOWN_STATE"; + } +} + +static inline void iavf_change_state(struct iavf_adapter *adapter, + enum iavf_state_t state) +{ + if (adapter->state != state) { + adapter->last_state = adapter->state; + adapter->state = state; + } + dev_dbg(&adapter->pdev->dev, + "state transition from:%s to:%s\n", + iavf_state_str(adapter->last_state), + iavf_state_str(adapter->state)); +} + int iavf_up(struct iavf_adapter *adapter); void iavf_down(struct iavf_adapter *adapter); int iavf_process_config(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 6502c8056a8e..ca74824d40b8 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -14,7 +14,7 @@ static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); static int iavf_close(struct net_device *netdev); -static int iavf_init_get_resources(struct iavf_adapter *adapter); +static void iavf_init_get_resources(struct iavf_adapter *adapter); static int iavf_check_reset_complete(struct iavf_hw *hw); char iavf_driver_name[] = "iavf"; @@ -52,6 +52,15 @@ static const struct net_device_ops iavf_netdev_ops; struct workqueue_struct *iavf_wq; /** + * iavf_pdev_to_adapter - go from pci_dev to adapter + * @pdev: pci_dev pointer + */ +static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev) +{ + return netdev_priv(pci_get_drvdata(pdev)); +} + +/** * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out @@ -293,8 +302,9 @@ static irqreturn_t iavf_msix_aq(int irq, void *data) rd32(hw, IAVF_VFINT_ICR01); rd32(hw, IAVF_VFINT_ICR0_ENA1); - /* schedule work on the private workqueue */ - queue_work(iavf_wq, &adapter->adminq_task); + if (adapter->state != __IAVF_REMOVE) + /* schedule work on the private workqueue */ + queue_work(iavf_wq, &adapter->adminq_task); return IRQ_HANDLED; } @@ -990,7 +1000,7 @@ static void iavf_configure(struct iavf_adapter *adapter) **/ static void iavf_up_complete(struct iavf_adapter *adapter) { - adapter->state = __IAVF_RUNNING; + iavf_change_state(adapter, __IAVF_RUNNING); clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_napi_enable_all(adapter); @@ -1063,8 +1073,7 @@ void iavf_down(struct iavf_adapter *adapter) rss->state = IAVF_ADV_RSS_DEL_REQUEST; spin_unlock_bh(&adapter->adv_rss_lock); - if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && - adapter->state != __IAVF_RESETTING) { + if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) { /* cancel any current operation */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; /* Schedule operations to close down the HW. Don't wait @@ -1722,9 +1731,9 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) * * Function process __IAVF_STARTUP driver state. 
* When success the state is changed to __IAVF_INIT_VERSION_CHECK - * when fails it returns -EAGAIN + * when fails the state is changed to __IAVF_INIT_FAILED **/ -static int iavf_startup(struct iavf_adapter *adapter) +static void iavf_startup(struct iavf_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; @@ -1763,9 +1772,10 @@ static int iavf_startup(struct iavf_adapter *adapter) iavf_shutdown_adminq(hw); goto err; } - adapter->state = __IAVF_INIT_VERSION_CHECK; + iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); + return; err: - return err; + iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** @@ -1774,9 +1784,9 @@ err: * * Function process __IAVF_INIT_VERSION_CHECK driver state. * When success the state is changed to __IAVF_INIT_GET_RESOURCES - * when fails it returns -EAGAIN + * when fails the state is changed to __IAVF_INIT_FAILED **/ -static int iavf_init_version_check(struct iavf_adapter *adapter) +static void iavf_init_version_check(struct iavf_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; @@ -1787,7 +1797,7 @@ static int iavf_init_version_check(struct iavf_adapter *adapter) if (!iavf_asq_done(hw)) { dev_err(&pdev->dev, "Admin queue command never completed\n"); iavf_shutdown_adminq(hw); - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); goto err; } @@ -1810,10 +1820,10 @@ static int iavf_init_version_check(struct iavf_adapter *adapter) err); goto err; } - adapter->state = __IAVF_INIT_GET_RESOURCES; - + iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); + return; err: - return err; + iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** @@ -1823,9 +1833,9 @@ err: * Function process __IAVF_INIT_GET_RESOURCES driver state and * finishes driver initialization procedure. 
* When success the state is changed to __IAVF_DOWN - * when fails it returns -EAGAIN + * when fails the state is changed to __IAVF_INIT_FAILED **/ -static int iavf_init_get_resources(struct iavf_adapter *adapter) +static void iavf_init_get_resources(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; @@ -1853,7 +1863,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) */ iavf_shutdown_adminq(hw); dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); - return 0; + return; } if (err) { dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); @@ -1927,7 +1937,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); rtnl_unlock(); @@ -1945,7 +1955,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) else iavf_init_rss(adapter); - return err; + return; err_mem: iavf_free_rss(adapter); err_register: @@ -1956,7 +1966,7 @@ err_alloc: kfree(adapter->vf_res); adapter->vf_res = NULL; err: - return err; + iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** @@ -1971,14 +1981,80 @@ static void iavf_watchdog_task(struct work_struct *work) struct iavf_hw *hw = &adapter->hw; u32 reg_val; - if (!mutex_trylock(&adapter->crit_lock)) + if (!mutex_trylock(&adapter->crit_lock)) { + if (adapter->state == __IAVF_REMOVE) + return; + goto restart_watchdog; + } if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) - adapter->state = __IAVF_COMM_FAILED; + iavf_change_state(adapter, __IAVF_COMM_FAILED); + + if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + mutex_unlock(&adapter->crit_lock); + queue_work(iavf_wq, &adapter->reset_task); + return; + } switch (adapter->state) { + case __IAVF_STARTUP: + iavf_startup(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(30)); + return; + case __IAVF_INIT_VERSION_CHECK: + iavf_init_version_check(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(30)); + return; + case __IAVF_INIT_GET_RESOURCES: + iavf_init_get_resources(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(1)); + return; + case __IAVF_INIT_FAILED: + if (test_bit(__IAVF_IN_REMOVE_TASK, + &adapter->crit_section)) { + /* Do not update the state and do not reschedule + * watchdog task, iavf_remove should handle this state + * as it can loop forever + */ + mutex_unlock(&adapter->crit_lock); + return; + } + if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { + dev_err(&adapter->pdev->dev, + "Failed to communicate with PF; waiting before retry\n"); + adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; + iavf_shutdown_adminq(hw); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, + &adapter->watchdog_task, (5 * HZ)); + return; + } + /* Try again from failed step*/ + iavf_change_state(adapter, adapter->last_state); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); + return; case __IAVF_COMM_FAILED: + if (test_bit(__IAVF_IN_REMOVE_TASK, + &adapter->crit_section)) { + /* Set state to __IAVF_INIT_FAILED and perform remove + * steps. 
Remove IAVF_FLAG_PF_COMMS_FAILED so the task + * doesn't bring the state back to __IAVF_COMM_FAILED. + */ + iavf_change_state(adapter, __IAVF_INIT_FAILED); + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; + mutex_unlock(&adapter->crit_lock); + return; + } reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if (reg_val == VIRTCHNL_VFR_VFACTIVE || @@ -1986,23 +2062,20 @@ static void iavf_watchdog_task(struct work_struct *work) /* A chance for redemption! */ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); - adapter->state = __IAVF_STARTUP; - adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; - queue_delayed_work(iavf_wq, &adapter->init_task, 10); - mutex_unlock(&adapter->crit_lock); - /* Don't reschedule the watchdog, since we've restarted - * the init task. When init_task contacts the PF and + /* When init task contacts the PF and * gets everything set up again, it'll restart the * watchdog for us. Down, boy. Sit. Stay. Woof. */ - return; + iavf_change_state(adapter, __IAVF_STARTUP); + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; } adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; + mutex_unlock(&adapter->crit_lock); queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(10)); - goto watchdog_done; + return; case __IAVF_RESETTING: mutex_unlock(&adapter->crit_lock); queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); @@ -2025,15 +2098,16 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->state == __IAVF_RUNNING) iavf_request_stats(adapter); } + if (adapter->state == __IAVF_RUNNING) + iavf_detect_recover_hung(&adapter->vsi); break; case __IAVF_REMOVE: + default: mutex_unlock(&adapter->crit_lock); return; - default: - goto restart_watchdog; } - /* check for hw reset */ + /* check for hw reset */ reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) { adapter->flags |= IAVF_FLAG_RESET_PENDING; @@ -2041,24 +2115,31 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->current_op = VIRTCHNL_OP_UNKNOWN; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); queue_work(iavf_wq, &adapter->reset_task); - goto watchdog_done; + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, + &adapter->watchdog_task, HZ * 2); + return; } schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); -watchdog_done: - if (adapter->state == __IAVF_RUNNING || - adapter->state == __IAVF_COMM_FAILED) - iavf_detect_recover_hung(&adapter->vsi); mutex_unlock(&adapter->crit_lock); restart_watchdog: + if (adapter->state >= __IAVF_DOWN) + queue_work(iavf_wq, &adapter->adminq_task); if (adapter->aq_required) queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(20)); else queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); - queue_work(iavf_wq, &adapter->adminq_task); } +/** + * iavf_disable_vf - disable VF + * @adapter: board private structure + * + * Set communication failed flag and free all resources. + * NOTE: This function is expected to be called with crit_lock being held. 
+ **/ static void iavf_disable_vf(struct iavf_adapter *adapter) { struct iavf_mac_filter *f, *ftmp; @@ -2113,9 +2194,8 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; - mutex_unlock(&adapter->crit_lock); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); } @@ -2145,13 +2225,13 @@ static void iavf_reset_task(struct work_struct *work) /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ - if (mutex_is_locked(&adapter->remove_lock)) - return; + if (!mutex_trylock(&adapter->crit_lock)) { + if (adapter->state != __IAVF_REMOVE) + queue_work(iavf_wq, &adapter->reset_task); - if (iavf_lock_timeout(&adapter->crit_lock, 200)) { - schedule_work(&adapter->reset_task); return; } + while (!mutex_trylock(&adapter->client_lock)) usleep_range(500, 1000); if (CLIENT_ENABLED(adapter)) { @@ -2206,6 +2286,7 @@ static void iavf_reset_task(struct work_struct *work) reg_val); iavf_disable_vf(adapter); mutex_unlock(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); return; /* Do not attempt to reinit. It's dead, Jim. */ } @@ -2214,8 +2295,7 @@ continue_reset: * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. */ - running = ((adapter->state == __IAVF_RUNNING) || - (adapter->state == __IAVF_RESETTING)); + running = adapter->state == __IAVF_RUNNING; if (running) { netif_carrier_off(netdev); @@ -2225,7 +2305,7 @@ continue_reset: } iavf_irq_disable(adapter); - adapter->state = __IAVF_RESETTING; + iavf_change_state(adapter, __IAVF_RESETTING); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; /* free the Tx/Rx rings and descriptors, might be better to just @@ -2319,11 +2399,14 @@ continue_reset: iavf_configure(adapter); + /* iavf_up_complete() will switch device back + * to __IAVF_RUNNING + */ iavf_up_complete(adapter); iavf_irq_enable(adapter, true); } else { - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } mutex_unlock(&adapter->client_lock); @@ -2333,6 +2416,8 @@ continue_reset: reset_err: mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); + if (running) + iavf_change_state(adapter, __IAVF_RUNNING); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); iavf_close(netdev); } @@ -2355,13 +2440,19 @@ static void iavf_adminq_task(struct work_struct *work) if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) goto out; + if (!mutex_trylock(&adapter->crit_lock)) { + if (adapter->state == __IAVF_REMOVE) + return; + + queue_work(iavf_wq, &adapter->adminq_task); + goto out; + } + event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) goto out; - if (iavf_lock_timeout(&adapter->crit_lock, 200)) - goto freedom; do { ret = iavf_clean_arq_element(hw, &event, &pending); v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); @@ -2377,6 +2468,18 @@ static void iavf_adminq_task(struct work_struct *work) } while (pending); mutex_unlock(&adapter->crit_lock); + if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) { + if (adapter->netdev_registered || + !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { + 
struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + netdev_update_features(netdev); + rtnl_unlock(); + } + + adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; + } if ((adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || adapter->state == __IAVF_RESETTING) @@ -3259,6 +3362,13 @@ static int iavf_open(struct net_device *netdev) goto err_unlock; } + if (adapter->state == __IAVF_RUNNING && + !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) { + dev_dbg(&adapter->pdev->dev, "VF is already open.\n"); + err = 0; + goto err_unlock; + } + /* allocate transmit descriptors */ err = iavf_setup_all_tx_resources(adapter); if (err) @@ -3322,18 +3432,19 @@ static int iavf_close(struct net_device *netdev) struct iavf_adapter *adapter = netdev_priv(netdev); int status; - if (adapter->state <= __IAVF_DOWN_PENDING) - return 0; + mutex_lock(&adapter->crit_lock); - while (!mutex_trylock(&adapter->crit_lock)) - usleep_range(500, 1000); + if (adapter->state <= __IAVF_DOWN_PENDING) { + mutex_unlock(&adapter->crit_lock); + return 0; + } set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; iavf_down(adapter); - adapter->state = __IAVF_DOWN_PENDING; + iavf_change_state(adapter, __IAVF_DOWN_PENDING); iavf_free_traffic_irqs(adapter); mutex_unlock(&adapter->crit_lock); @@ -3373,8 +3484,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) iavf_notify_client_l2_params(&adapter->vsi); adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } - adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + + if (netif_running(netdev)) { + adapter->flags |= IAVF_FLAG_RESET_NEEDED; + queue_work(iavf_wq, &adapter->reset_task); + } return 0; } @@ -3673,71 +3787,13 @@ int iavf_process_config(struct iavf_adapter *adapter) } /** - * iavf_init_task - worker thread to perform delayed initialization - * @work: pointer to work_struct containing our data - * - * This task completes the work that was begun in probe. Due to the nature - * of VF-PF communications, we may need to wait tens of milliseconds to get - * responses back from the PF. Rather than busy-wait in probe and bog down the - * whole system, we'll do it in a task so we can sleep. - * This task only runs during driver init. Once we've established - * communications with the PF driver and set up our netdev, the watchdog - * takes over. 
- **/ -static void iavf_init_task(struct work_struct *work) -{ - struct iavf_adapter *adapter = container_of(work, - struct iavf_adapter, - init_task.work); - struct iavf_hw *hw = &adapter->hw; - - if (iavf_lock_timeout(&adapter->crit_lock, 5000)) { - dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); - return; - } - switch (adapter->state) { - case __IAVF_STARTUP: - if (iavf_startup(adapter) < 0) - goto init_failed; - break; - case __IAVF_INIT_VERSION_CHECK: - if (iavf_init_version_check(adapter) < 0) - goto init_failed; - break; - case __IAVF_INIT_GET_RESOURCES: - if (iavf_init_get_resources(adapter) < 0) - goto init_failed; - goto out; - default: - goto init_failed; - } - - queue_delayed_work(iavf_wq, &adapter->init_task, - msecs_to_jiffies(30)); - goto out; -init_failed: - if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { - dev_err(&adapter->pdev->dev, - "Failed to communicate with PF; waiting before retry\n"); - adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; - iavf_shutdown_adminq(hw); - adapter->state = __IAVF_STARTUP; - queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); - goto out; - } - queue_delayed_work(iavf_wq, &adapter->init_task, HZ); -out: - mutex_unlock(&adapter->crit_lock); -} - -/** * iavf_shutdown - Shutdown the device in preparation for a reboot * @pdev: pci device structure **/ static void iavf_shutdown(struct pci_dev *pdev) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); + struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); @@ -3747,7 +3803,7 @@ static void iavf_shutdown(struct pci_dev *pdev) if (iavf_lock_timeout(&adapter->crit_lock, 5000)) dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); /* Prevent the watchdog from running. */ - adapter->state = __IAVF_REMOVE; + iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; mutex_unlock(&adapter->crit_lock); @@ -3820,7 +3876,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->back = adapter; adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); /* Call save state here because it relies on the adapter struct. 
*/ pci_save_state(pdev); @@ -3845,7 +3901,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ mutex_init(&adapter->crit_lock); mutex_init(&adapter->client_lock); - mutex_init(&adapter->remove_lock); mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); @@ -3864,8 +3919,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->adminq_task, iavf_adminq_task); INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); - INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task); - queue_delayed_work(iavf_wq, &adapter->init_task, + queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07))); /* Setup the wait queue for indicating transition to down status */ @@ -3922,10 +3976,11 @@ static int __maybe_unused iavf_suspend(struct device *dev_d) static int __maybe_unused iavf_resume(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); - struct net_device *netdev = pci_get_drvdata(pdev); - struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter; u32 err; + adapter = iavf_pdev_to_adapter(pdev); + pci_set_master(pdev); rtnl_lock(); @@ -3944,7 +3999,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d) queue_work(iavf_wq, &adapter->reset_task); - netif_device_attach(netdev); + netif_device_attach(adapter->netdev); return err; } @@ -3960,8 +4015,8 @@ static int __maybe_unused iavf_resume(struct device *dev_d) **/ static void iavf_remove(struct pci_dev *pdev) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); + struct net_device *netdev = adapter->netdev; struct iavf_fdir_fltr *fdir, *fdirtmp; struct iavf_vlan_filter *vlf, *vlftmp; struct iavf_adv_rss *rss, *rsstmp; @@ -3969,14 +4024,37 @@ static void iavf_remove(struct pci_dev *pdev) struct iavf_cloud_filter *cf, *cftmp; struct iavf_hw *hw = &adapter->hw; int err; - /* Indicate we are in remove and not to run reset_task */ - mutex_lock(&adapter->remove_lock); - cancel_delayed_work_sync(&adapter->init_task); - cancel_work_sync(&adapter->reset_task); - cancel_delayed_work_sync(&adapter->client_task); + + /* When reboot/shutdown is in progress no need to do anything + * as the adapter is already REMOVE state that was set during + * iavf_shutdown() callback. + */ + if (adapter->state == __IAVF_REMOVE) + return; + + set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section); + /* Wait until port initialization is complete. + * There are flows where register/unregister netdev may race. 
+ */ + while (1) { + mutex_lock(&adapter->crit_lock); + if (adapter->state == __IAVF_RUNNING || + adapter->state == __IAVF_DOWN || + adapter->state == __IAVF_INIT_FAILED) { + mutex_unlock(&adapter->crit_lock); + break; + } + + mutex_unlock(&adapter->crit_lock); + usleep_range(500, 1000); + } + cancel_delayed_work_sync(&adapter->watchdog_task); + if (adapter->netdev_registered) { - unregister_netdev(netdev); + rtnl_lock(); + unregister_netdevice(netdev); adapter->netdev_registered = false; + rtnl_unlock(); } if (CLIENT_ALLOWED(adapter)) { err = iavf_lan_del_device(adapter); @@ -3985,6 +4063,10 @@ static void iavf_remove(struct pci_dev *pdev) err); } + mutex_lock(&adapter->crit_lock); + dev_info(&adapter->pdev->dev, "Remove device\n"); + iavf_change_state(adapter, __IAVF_REMOVE); + iavf_request_reset(adapter); msleep(50); /* If the FW isn't responding, kick it once, but only once. */ @@ -3992,24 +4074,24 @@ static void iavf_remove(struct pci_dev *pdev) iavf_request_reset(adapter); msleep(50); } - if (iavf_lock_timeout(&adapter->crit_lock, 5000)) - dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); + iavf_misc_irq_disable(adapter); /* Shut down all the garbage mashers on the detention level */ - adapter->state = __IAVF_REMOVE; + cancel_work_sync(&adapter->reset_task); + cancel_delayed_work_sync(&adapter->watchdog_task); + cancel_work_sync(&adapter->adminq_task); + cancel_delayed_work_sync(&adapter->client_task); + adapter->aq_required = 0; adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); - iavf_misc_irq_disable(adapter); iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter); iavf_free_q_vectors(adapter); - cancel_delayed_work_sync(&adapter->watchdog_task); - - cancel_work_sync(&adapter->adminq_task); - iavf_free_rss(adapter); if (hw->aq.asq.count) @@ -4021,8 +4103,6 @@ static void iavf_remove(struct pci_dev *pdev) mutex_destroy(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); mutex_destroy(&adapter->crit_lock); - mutex_unlock(&adapter->remove_lock); - mutex_destroy(&adapter->remove_lock); iounmap(hw->hw_addr); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 08302ab35d68..7013769fc038 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1461,6 +1461,22 @@ void iavf_request_reset(struct iavf_adapter *adapter) } /** + * iavf_netdev_features_vlan_strip_set - update vlan strip status + * @netdev: ptr to netdev being adjusted + * @enable: enable or disable vlan strip + * + * Helper function to change vlan strip status in netdev->features. + */ +static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, + const bool enable) +{ + if (enable) + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; +} + +/** * iavf_virtchnl_completion * @adapter: adapter structure * @v_opcode: opcode sent by PF @@ -1683,8 +1699,18 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); + /* Vlan stripping could not be enabled by ethtool. + * Disable it in netdev->features. 
+ */ + iavf_netdev_features_vlan_strip_set(netdev, false); + break; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); + /* Vlan stripping could not be disabled by ethtool. + * Enable it in netdev->features. + */ + iavf_netdev_features_vlan_strip_set(netdev, true); break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", @@ -1752,19 +1778,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_process_config(adapter); - - /* unlock crit_lock before acquiring rtnl_lock as other - * processes holding rtnl_lock could be waiting for the same - * crit_lock - */ - mutex_unlock(&adapter->crit_lock); - rtnl_lock(); - netdev_update_features(adapter->netdev); - rtnl_unlock(); - if (iavf_lock_timeout(&adapter->crit_lock, 10000)) - dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", - __FUNCTION__); - + adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; } break; case VIRTCHNL_OP_ENABLE_QUEUES: @@ -1776,7 +1790,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); if (adapter->state == __IAVF_DOWN_PENDING) { - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } break; @@ -1930,6 +1944,20 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, spin_unlock_bh(&adapter->adv_rss_lock); } break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + /* PF enabled vlan strip on this VF. + * Update netdev->features if needed to be in sync with ethtool. + */ + if (!v_retval) + iavf_netdev_features_vlan_strip_set(netdev, true); + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + /* PF disabled vlan strip on this VF. + * Update netdev->features if needed to be in sync with ethtool. 
+ */ + if (!v_retval) + iavf_netdev_features_vlan_strip_set(netdev, false); + break; default: if (adapter->current_op && (v_opcode != adapter->current_op)) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index d119812755b7..df65bb494695 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -231,7 +231,6 @@ enum ice_pf_state { ICE_VFLR_EVENT_PENDING, ICE_FLTR_OVERFLOW_PROMISC, ICE_VF_DIS, - ICE_VF_DEINIT_IN_PROGRESS, ICE_CFG_BUSY, ICE_SERVICE_SCHED, ICE_SERVICE_DIS, @@ -242,6 +241,7 @@ enum ice_pf_state { ICE_LINK_DEFAULT_OVERRIDE_PENDING, ICE_PHY_INIT_COMPLETE, ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */ + ICE_AUX_ERR_PENDING, ICE_STATE_NBITS /* must be last */ }; @@ -399,6 +399,7 @@ enum ice_pf_flags { ICE_FLAG_MDD_AUTO_RESET_VF, ICE_FLAG_LINK_LENIENT_MODE_ENA, ICE_FLAG_PLUG_AUX_DEV, + ICE_FLAG_MTU_CHANGED, ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -464,6 +465,7 @@ struct ice_pf { wait_queue_head_t reset_wait_queue; u32 hw_csum_rx_error; + u32 oicr_err_reg; u16 oicr_idx; /* Other interrupt cause MSIX vector index */ u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ u16 max_pf_txqs; /* Total Tx queues PF wide */ @@ -550,7 +552,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi) { - return !!vsi->xdp_prog; + return !!READ_ONCE(vsi->xdp_prog); } static inline void ice_set_ring_xdp(struct ice_ring *ring) @@ -703,7 +705,16 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf) */ static inline void ice_clear_rdma_cap(struct ice_pf *pf) { - ice_unplug_aux_dev(pf); + /* We can directly unplug aux device here only if the flag bit + * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev() + * could race with ice_plug_aux_dev() called from + * ice_service_task(). In this case we only clear that bit now and + * aux device will be unplugged later once ice_plug_aux_device() + * called from ice_service_task() finishes (see ice_service_task()). + */ + if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) + ice_unplug_aux_dev(pf); + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); clear_bit(ICE_FLAG_AUX_ENA, pf->flags); } diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index f4463e962d52..3de6f16f985a 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -3270,7 +3270,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && !ice_fw_supports_report_dflt_cfg(hw)) { - struct ice_link_default_override_tlv tlv; + struct ice_link_default_override_tlv tlv = { 0 }; status = ice_get_link_default_override(&tlv, pi); if (status) diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index c451cf401e63..38c2d9a5574a 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -2275,7 +2275,7 @@ ice_set_link_ksettings(struct net_device *netdev, goto done; } - curr_link_speed = pi->phy.link_info.link_speed; + curr_link_speed = pi->phy.curr_user_speed_req; adv_link_speed = ice_ksettings_find_adv_link_speed(ks); /* If speed didn't get set, set it to what it currently is. 
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index adcc9a251595..a2714988dd96 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -34,6 +34,9 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event) { struct iidc_auxiliary_drv *iadrv; + if (WARN_ON_ONCE(!in_task())) + return; + if (!pf->adev) return; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 62bf879dc623..653996e8fd30 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1306,6 +1306,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->tx_tstamps = &pf->ptp.port.tx; ring->dev = dev; ring->count = vsi->num_tx_desc; + ring->txq_teid = ICE_INVAL_TEID; WRITE_ONCE(vsi->tx_rings[i], ring); } @@ -1521,6 +1522,12 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) if (status) dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n", vsi_num, ice_stat_str(status)); + + status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI, + ICE_FLOW_SEG_HDR_ESP); + if (status) + dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n", + vsi_num, status); } /** @@ -2917,6 +2924,8 @@ int ice_vsi_release(struct ice_vsi *vsi) } } + if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) + ice_clear_dflt_vsi(pf->first_sw); ice_fltr_remove_all(vsi); ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index ab2dea0d2c1a..f330bd0acf9f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1679,7 +1679,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf) * reset, so print the event prior to reset. */ ice_print_vf_rx_mdd_event(vf); + mutex_lock(&pf->vf[i].cfg_lock); ice_reset_vf(&pf->vf[i], false); + mutex_unlock(&pf->vf[i].cfg_lock); } } } @@ -2141,9 +2143,43 @@ static void ice_service_task(struct work_struct *work) return; } - if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) + if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { + struct iidc_event *event; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (event) { + set_bit(IIDC_EVENT_CRIT_ERR, event->type); + /* report the entire OICR value to AUX driver */ + swap(event->reg, pf->oicr_err_reg); + ice_send_event_to_aux(pf, event); + kfree(event); + } + } + + if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) { + /* Plug aux device per request */ ice_plug_aux_dev(pf); + /* Mark plugging as done but check whether unplug was + * requested during ice_plug_aux_dev() call + * (e.g. from ice_clear_rdma_cap()) and if so then + * plug aux device. 
+ */ + if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) + ice_unplug_aux_dev(pf); + } + + if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { + struct iidc_event *event; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (event) { + set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); + ice_send_event_to_aux(pf, event); + kfree(event); + } + } + ice_clean_adminq_subtask(pf); ice_check_media_subtask(pf); ice_check_for_hang_subtask(pf); @@ -2576,8 +2612,10 @@ free_qmap: for (i = 0; i < vsi->num_xdp_txq; i++) if (vsi->xdp_rings[i]) { - if (vsi->xdp_rings[i]->desc) + if (vsi->xdp_rings[i]->desc) { + synchronize_rcu(); ice_free_tx_ring(vsi->xdp_rings[i]); + } kfree_rcu(vsi->xdp_rings[i], rcu); vsi->xdp_rings[i] = NULL; } @@ -2858,17 +2896,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) if (oicr & ICE_AUX_CRIT_ERR) { - struct iidc_event *event; - + pf->oicr_err_reg |= oicr; + set_bit(ICE_AUX_ERR_PENDING, pf->state); ena_mask &= ~ICE_AUX_CRIT_ERR; - event = kzalloc(sizeof(*event), GFP_KERNEL); - if (event) { - set_bit(IIDC_EVENT_CRIT_ERR, event->type); - /* report the entire OICR value to AUX driver */ - event->reg = oicr; - ice_send_event_to_aux(pf, event); - kfree(event); - } } /* Report any remaining unexpected interrupts */ @@ -6530,7 +6560,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; - struct iidc_event *event; u8 count = 0; int err = 0; @@ -6565,14 +6594,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) return -EBUSY; } - event = kzalloc(sizeof(*event), GFP_KERNEL); - if (!event) - return -ENOMEM; - - set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); - ice_send_event_to_aux(pf, event); - clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); - netdev->mtu = (unsigned int)new_mtu; /* if VSI is up, bring it down and then back up */ @@ -6580,21 +6601,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) err = ice_down(vsi); if (err) { netdev_err(netdev, "change MTU if_down err %d\n", err); - goto event_after; + return err; } err = ice_up(vsi); if (err) { netdev_err(netdev, "change MTU if_up err %d\n", err); - goto event_after; + return err; } } netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); -event_after: - set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); - ice_send_event_to_aux(pf, event); - kfree(event); + set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); return err; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index ac27a4fe8b94..eb9193682579 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -846,9 +846,12 @@ exit: static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) { struct timespec64 now, then; + int ret; then = ns_to_timespec64(delta); - ice_ptp_gettimex64(info, &now, NULL); + ret = ice_ptp_gettimex64(info, &now, NULL); + if (ret) + return ret; now = timespec64_add(now, then); return ice_ptp_settime64(info, (const struct timespec64 *)&now); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index a78e8f00cf71..9d4d58757e04 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -615,8 +615,6 @@ void ice_free_vfs(struct 
ice_pf *pf) struct ice_hw *hw = &pf->hw; unsigned int tmp, i; - set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state); - if (!pf->vf) return; @@ -632,20 +630,26 @@ void ice_free_vfs(struct ice_pf *pf) else dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - /* Avoid wait time by stopping all VFs at the same time */ - ice_for_each_vf(pf, i) - ice_dis_vf_qs(&pf->vf[i]); - tmp = pf->num_alloc_vfs; pf->num_qps_per_vf = 0; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { - if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { + struct ice_vf *vf = &pf->vf[i]; + + mutex_lock(&vf->cfg_lock); + + ice_dis_vf_qs(vf); + + if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { /* disable VF qp mappings and set VF disable state */ - ice_dis_vf_mappings(&pf->vf[i]); - set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states); - ice_free_vf_res(&pf->vf[i]); + ice_dis_vf_mappings(vf); + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + ice_free_vf_res(vf); } + + mutex_unlock(&vf->cfg_lock); + + mutex_destroy(&vf->cfg_lock); } if (ice_sriov_free_msix_res(pf)) @@ -681,7 +685,6 @@ void ice_free_vfs(struct ice_pf *pf) i); clear_bit(ICE_VF_DIS, pf->state); - clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state); clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); } @@ -1565,6 +1568,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_for_each_vf(pf, v) { vf = &pf->vf[v]; + mutex_lock(&vf->cfg_lock); + vf->driver_caps = 0; ice_vc_set_default_allowlist(vf); @@ -1579,6 +1584,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_vf_pre_vsi_rebuild(vf); ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); + + mutex_unlock(&vf->cfg_lock); } ice_flush(hw); @@ -1625,6 +1632,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) u32 reg; int i; + lockdep_assert_held(&vf->cfg_lock); + dev = ice_pf_to_dev(pf); if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { @@ -1894,6 +1903,8 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf) */ ice_vf_ctrl_invalidate_vsi(vf); ice_vf_fdir_init(vf); + + mutex_init(&vf->cfg_lock); } } @@ -2109,9 +2120,12 @@ void ice_process_vflr_event(struct ice_pf *pf) bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; /* read GLGEN_VFLRSTAT register to find out the flr VFs */ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); - if (reg & BIT(bit_idx)) + if (reg & BIT(bit_idx)) { /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ + mutex_lock(&vf->cfg_lock); ice_reset_vf(vf, true); + mutex_unlock(&vf->cfg_lock); + } } } @@ -2188,7 +2202,9 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) if (!vf) return; + mutex_lock(&vf->cfg_lock); ice_vc_reset_vf(vf); + mutex_unlock(&vf->cfg_lock); } /** @@ -2218,24 +2234,6 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, dev = ice_pf_to_dev(pf); - /* single place to detect unsuccessful return values */ - if (v_retval) { - vf->num_inval_msgs++; - dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id, - v_opcode, v_retval); - if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) { - dev_err(dev, "Number of invalid messages exceeded for VF %d\n", - vf->vf_id); - dev_err(dev, "Use PF Control I/F to enable the VF\n"); - set_bit(ICE_VF_STATE_DIS, vf->vf_states); - return -EIO; - } - } else { - vf->num_valid_msgs++; - /* reset the invalid counter, if a valid message is received. 
*/ - vf->num_inval_msgs = 0; - } - aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { @@ -3337,9 +3335,9 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - /* Skip queue if not enabled */ if (!test_bit(vf_q_id, vf->txq_ena)) - continue; + dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n", + vf_q_id, vsi->vsi_num); ice_fill_txq_meta(vsi, ring, &txq_meta); @@ -4082,6 +4080,8 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, return 0; } + mutex_lock(&vf->cfg_lock); + vf->port_vlan_info = vlanprio; if (vf->port_vlan_info) @@ -4091,6 +4091,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id); ice_vc_reset_vf(vf); + mutex_unlock(&vf->cfg_lock); return 0; } @@ -4422,10 +4423,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) struct device *dev; int err = 0; - /* if de-init is underway, don't process messages from VF */ - if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state)) - return; - dev = ice_pf_to_dev(pf); if (ice_validate_vf_id(pf, vf_id)) { err = -EINVAL; @@ -4465,6 +4462,15 @@ error_handler: return; } + /* VF is being configured in another context that triggers a VFR, so no + * need to process this message + */ + if (!mutex_trylock(&vf->cfg_lock)) { + dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n", + vf->vf_id); + return; + } + switch (v_opcode) { case VIRTCHNL_OP_VERSION: err = ice_vc_get_ver_msg(vf, msg); @@ -4553,6 +4559,8 @@ error_handler: dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n", vf_id, v_opcode, err); } + + mutex_unlock(&vf->cfg_lock); } /** @@ -4668,6 +4676,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) return -EINVAL; } + mutex_lock(&vf->cfg_lock); + /* VF is notified of its new MAC via the PF's response to the * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset */ @@ -4686,6 +4696,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) } ice_vc_reset_vf(vf); + mutex_unlock(&vf->cfg_lock); return 0; } @@ -4715,11 +4726,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) if (trusted == vf->trusted) return 0; + mutex_lock(&vf->cfg_lock); + vf->trusted = trusted; ice_vc_reset_vf(vf); dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", vf_id, trusted ? 
"" : "un"); + mutex_unlock(&vf->cfg_lock); + return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 38b4dc82c5c1..532f57f01467 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -14,7 +14,6 @@ #define ICE_MAX_MACADDR_PER_VF 18 /* Malicious Driver Detection */ -#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 #define ICE_MDD_EVENTS_THRESHOLD 30 /* Static VF transaction/status register def */ @@ -74,6 +73,11 @@ struct ice_mdd_vf_events { struct ice_vf { struct ice_pf *pf; + /* Used during virtchnl message handling and NDO ops against the VF + * that will trigger a VFR + */ + struct mutex cfg_lock; + u16 vf_id; /* VF ID in the PF space */ u16 lan_vsi_idx; /* index into PF struct */ u16 ctrl_vsi_idx; @@ -102,8 +106,6 @@ struct ice_vf { unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ - u64 num_inval_msgs; /* number of continuous invalid msgs */ - u64 num_valid_msgs; /* number of valid msgs detected */ unsigned long vf_caps; /* VF's adv. capabilities */ u8 num_req_qs; /* num of queue pairs requested by VF */ u16 num_mac; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 37c7dc6b44a9..2b1873061912 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -36,8 +36,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) { ice_clean_tx_ring(vsi->tx_rings[q_idx]); - if (ice_is_xdp_ena_vsi(vsi)) + if (ice_is_xdp_ena_vsi(vsi)) { + synchronize_rcu(); ice_clean_tx_ring(vsi->xdp_rings[q_idx]); + } ice_clean_rx_ring(vsi->rx_rings[q_idx]); } @@ -759,7 +761,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, struct ice_vsi *vsi = np->vsi; struct ice_ring *ring; - if (test_bit(ICE_DOWN, vsi->state)) + if (test_bit(ICE_VSI_DOWN, vsi->state)) return -ENETDOWN; if (!ice_is_xdp_ena_vsi(vsi)) diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index fb1029352c3e..3cbb5a89b336 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -961,10 +961,6 @@ static int igb_set_ringparam(struct net_device *netdev, memcpy(&temp_ring[i], adapter->rx_ring[i], sizeof(struct igb_ring)); - /* Clear copied XDP RX-queue info */ - memset(&temp_ring[i].xdp_rxq, 0, - sizeof(temp_ring[i].xdp_rxq)); - temp_ring[i].count = new_rx_count; err = igb_setup_rx_resources(&temp_ring[i]); if (err) { diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 82a712f77cb3..bf8ef81f6c0e 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4345,7 +4345,18 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) { struct igb_adapter *adapter = netdev_priv(rx_ring->netdev); struct device *dev = rx_ring->dev; - int size; + int size, res; + + /* XDP RX-queue info */ + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->queue_index, 0); + if (res < 0) { + dev_err(dev, "Failed to register xdp_rxq index %u\n", + rx_ring->queue_index); + return res; + } size = sizeof(struct igb_rx_buffer) * rx_ring->count; @@ -4368,14 +4379,10 @@ int 
igb_setup_rx_resources(struct igb_ring *rx_ring) rx_ring->xdp_prog = adapter->xdp_prog; - /* XDP RX-queue info */ - if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, - rx_ring->queue_index, 0) < 0) - goto err; - return 0; err: + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index b6807e16eea9..a0e2a404d535 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) { u32 swfw_sync; - while (igc_get_hw_semaphore_i225(hw)) - ; /* Empty */ + /* Releasing the resource requires first getting the HW semaphore. + * If we fail to get the semaphore, there is nothing we can do, + * except log an error and quit. We are not allowed to hang here + * indefinitely, as it may cause denial of service or system crash. + */ + if (igc_get_hw_semaphore_i225(hw)) { + hw_dbg("Failed to release SW_FW_SYNC.\n"); + return; + } swfw_sync = rd32(IGC_SW_FW_SYNC); swfw_sync &= ~mask; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index c7fa978cdf02..f99819fc559d 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -504,6 +504,9 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring) u8 index = rx_ring->queue_index; int size, desc_len, res; + /* XDP RX-queue info */ + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, rx_ring->q_vector->napi.napi_id); if (res < 0) { @@ -2434,19 +2437,20 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, struct xdp_buff *xdp) { + unsigned int totalsize = xdp->data_end - xdp->data_meta; unsigned int metasize = xdp->data - xdp->data_meta; - unsigned int datasize = xdp->data_end - xdp->data; - unsigned int totalsize = metasize + datasize; struct sk_buff *skb; - skb = __napi_alloc_skb(&ring->q_vector->napi, - xdp->data_end - xdp->data_hard_start, + net_prefetch(xdp->data_meta); + + skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) return NULL; - skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); - memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize); + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + if (metasize) { skb_metadata_set(skb, metasize); __skb_pull(skb, metasize); diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 5cad31c3c7b0..6961f65d36b9 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -581,7 +581,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) * the lower time out */ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { - usleep_range(500, 1000); + udelay(50); mdic = rd32(IGC_MDIC); if (mdic & IGC_MDIC_READY) break; @@ -638,7 +638,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) * the lower time out */ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { - usleep_range(500, 1000); + udelay(50); mdic = rd32(IGC_MDIC); if (mdic & IGC_MDIC_READY) break; @@ -746,8 +746,6 @@ s32 
igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) if (ret_val) return ret_val; ret_val = igc_write_phy_reg_mdic(hw, offset, data); - if (ret_val) - return ret_val; hw->phy.ops.release(hw); } else { ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, @@ -779,8 +777,6 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) if (ret_val) return ret_val; ret_val = igc_read_phy_reg_mdic(hw, offset, data); - if (ret_val) - return ret_val; hw->phy.ops.release(hw); } else { ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 4f9245aa79a1..8e521f99b80a 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -996,6 +996,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter) igc_ptp_write_i225(adapter, &ts); } +static void igc_ptm_stop(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_EN; + + wr32(IGC_PTM_CTRL, ctrl); +} + /** * igc_ptp_suspend - Disable PTP work items and prepare for suspend * @adapter: Board private structure @@ -1013,8 +1024,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); - if (pci_device_is_present(adapter->pdev)) + if (pci_device_is_present(adapter->pdev)) { igc_ptp_time_save(adapter); + igc_ptm_stop(adapter); + } } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index b1d22e4d5ec9..b399b9c14717 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -201,26 +201,28 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count) } static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring, - struct ixgbe_rx_buffer *bi) + const struct xdp_buff *xdp) { - unsigned int metasize = bi->xdp->data - bi->xdp->data_meta; - unsigned int datasize = bi->xdp->data_end - bi->xdp->data; + unsigned int totalsize = xdp->data_end - xdp->data_meta; + unsigned int metasize = xdp->data - xdp->data_meta; struct sk_buff *skb; + net_prefetch(xdp->data_meta); + /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - bi->xdp->data_end - bi->xdp->data_hard_start, + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) return NULL; - skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start); - memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize); - if (metasize) + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } - xsk_buff_free(bi->xdp); - bi->xdp = NULL; return skb; } @@ -311,12 +313,15 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, } /* XDP_PASS path */ - skb = ixgbe_construct_skb_zc(rx_ring, bi); + skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp); if (!skb) { rx_ring->rx_stats.alloc_rx_buff_failed++; break; } + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + cleaned_count++; ixgbe_inc_ntc(rx_ring); @@ -388,12 +393,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) u32 cmd_type; while (budget-- > 0) { - if (unlikely(!ixgbe_desc_unused(xdp_ring)) || - !netif_carrier_ok(xdp_ring->netdev)) { + if (unlikely(!ixgbe_desc_unused(xdp_ring))) { 
work_done = false; break; } + if (!netif_carrier_ok(xdp_ring->netdev)) + break; + if (!xsk_tx_peek_desc(pool, &desc)) break; diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig index 63bf01d28f0c..04345b929d8e 100644 --- a/drivers/net/ethernet/litex/Kconfig +++ b/drivers/net/ethernet/litex/Kconfig @@ -17,7 +17,7 @@ if NET_VENDOR_LITEX config LITEX_LITEETH tristate "LiteX Ethernet support" - depends on OF_NET + depends on OF && HAS_IOMEM help If you wish to compile a kernel for hardware with a LiteX LiteEth device then you should answer Y to this. diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 28d5ad296646..90fd5588e20d 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2700,6 +2700,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); static struct platform_device *port_platdev[3]; +static void mv643xx_eth_shared_of_remove(void) +{ + int n; + + for (n = 0; n < 3; n++) { + platform_device_del(port_platdev[n]); + port_platdev[n] = NULL; + } +} + static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, struct device_node *pnp) { @@ -2736,7 +2746,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, return -EINVAL; } - of_get_mac_address(pnp, ppd.mac_addr); + ret = of_get_mac_address(pnp, ppd.mac_addr); + if (ret == -EPROBE_DEFER) + return ret; mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); @@ -2800,21 +2812,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) ret = mv643xx_eth_shared_of_add_port(pdev, pnp); if (ret) { of_node_put(pnp); + mv643xx_eth_shared_of_remove(); return ret; } } return 0; } -static void mv643xx_eth_shared_of_remove(void) -{ - int n; - - for (n = 0; n < 3; n++) { - platform_device_del(port_platdev[n]); - port_platdev[n] = NULL; - } -} #else static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 34a089b71e55..6b335139abe7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -838,9 +838,6 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable) if (!cgx) return; - if (is_dev_rpm(cgx)) - return; - if (enable) { /* Enable inbound PTP timestamping */ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); @@ -1545,9 +1542,11 @@ static int cgx_lmac_exit(struct cgx *cgx) static void cgx_populate_features(struct cgx *cgx) { if (is_dev_rpm(cgx)) - cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC); + cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM | + RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP); else - cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP); + cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 | + RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF); } static struct mac_ops cgx_mac_ops = { @@ -1571,6 +1570,9 @@ static struct mac_ops cgx_mac_ops = { .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status, .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm, .mac_pause_frm_config = cgx_lmac_pause_frm_config, + .mac_enadis_ptp_config = cgx_lmac_ptp_config, + .mac_rx_tx_enable = cgx_lmac_rx_tx_enable, + .mac_tx_enable = cgx_lmac_tx_enable, }; static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git 
a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index c38306b3384a..b33e7d1d0851 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -102,6 +102,14 @@ struct mac_ops { void (*mac_pause_frm_config)(void *cgxd, int lmac_id, bool enable); + + /* Enable/Disable Inbound PTP */ + void (*mac_enadis_ptp_config)(void *cgxd, + int lmac_id, + bool enable); + + int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable); + int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable); }; struct cgx { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 26ad71842b3b..c6643c7db1fc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -84,7 +84,7 @@ struct mbox_msghdr { #define OTX2_MBOX_REQ_SIG (0xdead) #define OTX2_MBOX_RSP_SIG (0xbeef) u16 sig; /* Signature, for validating corrupted msgs */ -#define OTX2_MBOX_VERSION (0x0009) +#define OTX2_MBOX_VERSION (0x000a) u16 ver; /* Version of msg's structure for this ID */ u16 next_msgoff; /* Offset of next msg within mailbox region */ int rc; /* Msg process'ed response code */ @@ -154,23 +154,23 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \ M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \ M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \ cgx_pause_frm_cfg) \ -M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \ -M(CGX_FEC_STATS, 0x211, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \ -M(CGX_GET_PHY_FEC_STATS, 0x212, cgx_get_phy_fec_stats, msg_req, msg_rsp) \ -M(CGX_FW_DATA_GET, 0x213, cgx_get_aux_link_info, msg_req, cgx_fw_data) \ -M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\ - cgx_set_link_mode_rsp) \ -M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \ - cgx_features_info_msg) \ -M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \ -M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \ - cgx_mac_addr_add_rsp) \ -M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \ +M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \ +M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \ +M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \ + cgx_mac_addr_add_rsp) \ +M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \ msg_rsp) \ -M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \ +M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \ cgx_max_dmac_entries_get_rsp) \ -M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \ -M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \ +M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \ +M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\ + cgx_set_link_mode_rsp) \ +M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \ +M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \ + cgx_features_info_msg) \ +M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \ +M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \ +M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \ msg_rsp) \ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ @@ -229,6 +229,8 @@ M(NPC_DELETE_FLOW, 
0x600e, npc_delete_flow, \ M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \ npc_mcam_read_entry_req, \ npc_mcam_read_entry_rsp) \ +M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \ + npc_set_pkind, msg_rsp) \ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \ msg_req, npc_mcam_read_base_rule_rsp) \ M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \ @@ -575,10 +577,13 @@ struct cgx_mac_addr_update_req { }; #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ -#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */ -#define RVU_MAC_VERSION BIT_ULL(2) -#define RVU_MAC_CGX BIT_ULL(3) -#define RVU_MAC_RPM BIT_ULL(4) +#define RVU_LMAC_FEAT_HIGIG2 BIT_ULL(1) + /* flow control from physical link higig2 messages */ +#define RVU_LMAC_FEAT_PTP BIT_ULL(2) /* precison time protocol */ +#define RVU_LMAC_FEAT_DMACF BIT_ULL(3) /* DMAC FILTER */ +#define RVU_MAC_VERSION BIT_ULL(4) +#define RVU_MAC_CGX BIT_ULL(5) +#define RVU_MAC_RPM BIT_ULL(6) struct cgx_features_info_msg { struct mbox_msghdr hdr; @@ -593,6 +598,22 @@ struct rpm_stats_rsp { u64 tx_stats[RPM_TX_STATS_COUNT]; }; +struct npc_set_pkind { + struct mbox_msghdr hdr; +#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0) +#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63) + u64 mode; +#define PKIND_TX BIT_ULL(0) +#define PKIND_RX BIT_ULL(1) + u8 dir; + u8 pkind; /* valid only in case custom flag */ + u8 var_len_off; /* Offset of custom header length field. + * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND + */ + u8 var_len_off_mask; /* Mask for length with in offset */ + u8 shift_dir; /* shift direction to get length of the header at var_len_off */ +}; + /* NPA mbox message formats */ /* NPA mailbox error codes diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 3a819b24accc..6e1192f52608 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -31,9 +31,9 @@ enum npc_kpu_la_ltype { NPC_LT_LA_HIGIG2_ETHER, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_LT_LA_CH_LEN_90B_ETHER, NPC_LT_LA_CPT_HDR, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_LT_LA_CUSTOM_PRE_L2_ETHER, NPC_LT_LA_CUSTOM0 = 0xE, NPC_LT_LA_CUSTOM1 = 0xF, }; @@ -148,10 +148,11 @@ enum npc_kpu_lh_ltype { * Software assigns pkind for each incoming port such as CGX * Ethernet interfaces, LBK interfaces, etc. */ -#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND +#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND enum npc_pkind_type { NPC_RX_LBK_PKIND = 0ULL, + NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL, NPC_RX_VLAN_EXDSA_PKIND = 56ULL, NPC_RX_CHLEN24B_PKIND = 57ULL, NPC_RX_CPT_HDR_PKIND, @@ -162,6 +163,10 @@ enum npc_pkind_type { NPC_TX_DEF_PKIND, /* NIX-TX PKIND */ }; +enum npc_interface_type { + NPC_INTF_MODE_DEF, +}; + /* list of known and supported fields in packet header and * fields present in key structure. 
*/ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index 588822a0cf21..695123e32ba8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -176,9 +176,8 @@ enum npc_kpu_parser_state { NPC_S_KPU1_EXDSA, NPC_S_KPU1_HIGIG2, NPC_S_KPU1_IH_NIX_HIGIG2, - NPC_S_KPU1_CUSTOM_L2_90B, + NPC_S_KPU1_CUSTOM_PRE_L2, NPC_S_KPU1_CPT_HDR, - NPC_S_KPU1_CUSTOM_L2_24B, NPC_S_KPU1_VLAN_EXDSA, NPC_S_KPU2_CTAG, NPC_S_KPU2_CTAG2, @@ -187,7 +186,8 @@ enum npc_kpu_parser_state { NPC_S_KPU2_ETAG, NPC_S_KPU2_PREHEADER, NPC_S_KPU2_EXDSA, - NPC_S_KPU2_NGIO, + NPC_S_KPU2_CPT_CTAG, + NPC_S_KPU2_CPT_QINQ, NPC_S_KPU3_CTAG, NPC_S_KPU3_STAG, NPC_S_KPU3_QINQ, @@ -212,6 +212,7 @@ enum npc_kpu_parser_state { NPC_S_KPU5_NSH, NPC_S_KPU5_CPT_IP, NPC_S_KPU5_CPT_IP6, + NPC_S_KPU5_NGIO, NPC_S_KPU6_IP6_EXT, NPC_S_KPU6_IP6_HOP_DEST, NPC_S_KPU6_IP6_ROUT, @@ -979,8 +980,8 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, 0, 0, - NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, + NPC_S_KPU1_CUSTOM_PRE_L2, 0, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_PRE_L2_ETHER, 0, 0, 0, 0, 0, @@ -996,27 +997,27 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 36, 40, 44, 0, 0, - NPC_S_KPU1_CUSTOM_L2_24B, 0, 0, - NPC_LID_LA, NPC_LT_NA, + 12, 16, 20, 0, 0, + NPC_S_KPU1_CUSTOM_PRE_L2, 24, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 40, 54, 58, 0, 0, - NPC_S_KPU1_CPT_HDR, 0, 0, + 12, 16, 20, 0, 0, + NPC_S_KPU1_CPT_HDR, 40, 0, NPC_LID_LA, NPC_LT_NA, 0, - 0, 0, 0, 0, + 7, 7, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 102, 106, 110, 0, 0, - NPC_S_KPU1_CUSTOM_L2_90B, 0, 0, - NPC_LID_LA, NPC_LT_NA, + 12, 16, 20, 0, 0, + NPC_S_KPU1_CUSTOM_PRE_L2, 90, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, 0, 0, 0, 0, 0, @@ -1120,15 +1121,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_NGIO, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, - NPC_ETYPE_CTAG, - 0xffff, NPC_ETYPE_CTAG, 0xffff, 0x0000, @@ -1711,7 +1703,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_IP, 0xffff, 0x0000, @@ -1720,7 +1712,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_IP6, 0xffff, 0x0000, @@ -1729,7 +1721,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_ARP, 0xffff, 0x0000, @@ -1738,7 +1730,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_RARP, 0xffff, 0x0000, @@ -1747,7 +1739,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_PTP, 0xffff, 0x0000, @@ -1756,7 +1748,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_FCOE, 0xffff, 0x0000, @@ -1765,7 +1757,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 
0xff, NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_CTAG, @@ -1774,7 +1766,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_CTAG, 0xffff, 0x0000, @@ -1783,7 +1775,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_SBTAG, 0xffff, 0x0000, @@ -1792,7 +1784,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_QINQ, 0xffff, 0x0000, @@ -1801,7 +1793,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_ETAG, 0xffff, 0x0000, @@ -1810,7 +1802,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_MPLSU, 0xffff, 0x0000, @@ -1819,7 +1811,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_MPLSM, 0xffff, 0x0000, @@ -1828,7 +1820,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_NSH, 0xffff, 0x0000, @@ -1837,7 +1829,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, 0x0000, 0x0000, 0x0000, @@ -1847,150 +1839,24 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - NPC_ETYPE_QINQ, - 0xffff, 0x0000, 0x0000, }, { NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - 0x0000, - 0x0000, - NPC_ETYPE_IP, - 0xffff, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - 0x0000, - 0x0000, NPC_ETYPE_IP6, 0xffff, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - 0x0000, 0x0000, - NPC_ETYPE_CTAG, - 0xffff, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, 0x0000, - 0xffff, 0x0000, 0x0000, - NPC_ETYPE_QINQ, - 0xffff, }, { NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_IP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_ARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_RARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_PTP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_FCOE, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, NPC_ETYPE_CTAG, 0xffff, 0x0000, @@ -1999,16 +1865,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_SBTAG, - 
0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_S_KPU1_CPT_HDR, 0xff, NPC_ETYPE_QINQ, 0xffff, 0x0000, @@ -2017,51 +1874,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_ETAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_MPLSU, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_MPLSM, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_NSH, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { NPC_S_KPU1_VLAN_EXDSA, 0xff, NPC_ETYPE_CTAG, 0xffff, @@ -2167,6 +1979,15 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = { }, { NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_NGIO, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_PPPOE, 0xffff, 0x0000, @@ -3057,11 +2878,38 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = { 0x0000, }, { - NPC_S_KPU2_NGIO, 0xff, + NPC_S_KPU2_CPT_CTAG, 0xff, + NPC_ETYPE_IP, + 0xffff, 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_CPT_CTAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CPT_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CPT_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, 0x0000, 0x0000, }, @@ -5349,6 +5197,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = { 0x0000, }, { + NPC_S_KPU5_NGIO, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -8645,14 +8502,6 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 12, 0, 0, 0, - NPC_S_KPU2_NGIO, 12, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 12, 0, 0, 0, NPC_S_KPU2_CTAG2, 12, 1, NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, @@ -9192,159 +9041,127 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 3, 0, - NPC_S_KPU5_IP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_IP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, 3, 0, - NPC_S_KPU5_IP6, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_IP6, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_ARP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_ARP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_RARP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_RARP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_PTP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_PTP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_FCOE, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_FCOE, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 12, 0, 0, 0, - NPC_S_KPU2_CTAG2, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | 
NPC_F_LA_L_WITH_VLAN, + NPC_S_KPU2_CTAG2, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + NPC_S_KPU2_CTAG, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 22, 0, 0, - NPC_S_KPU2_SBTAG, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + NPC_S_KPU2_SBTAG, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + NPC_S_KPU2_QINQ, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 12, 26, 0, 0, - NPC_S_KPU2_ETAG, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, + NPC_S_KPU2_ETAG, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_WITH_MPLS, + NPC_S_KPU4_MPLS, 14, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_WITH_MPLS, + NPC_S_KPU4_MPLS, 14, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, 2, 0, - NPC_S_KPU4_NSH, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_WITH_NSH, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_UNK_ETYPE, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 3, 0, - NPC_S_KPU5_CPT_IP, 56, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, + NPC_S_KPU4_NSH, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, - NPC_S_KPU5_CPT_IP6, 56, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 54, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 54, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 3, 0, - NPC_S_KPU5_CPT_IP, 60, 1, + NPC_S_KPU5_CPT_IP, 14, 1, NPC_LID_LA, NPC_LT_LA_CPT_HDR, 0, 0, 0, 0, 0, @@ -9352,7 +9169,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, 3, 0, - NPC_S_KPU5_CPT_IP6, 60, 1, + NPC_S_KPU5_CPT_IP6, 14, 1, NPC_LID_LA, NPC_LT_LA_CPT_HDR, 0, 0, 0, 0, 0, @@ -9360,7 +9177,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 58, 1, + NPC_S_KPU2_CPT_CTAG, 12, 1, NPC_LID_LA, NPC_LT_LA_CPT_HDR, NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, 0, 0, 0, 0, @@ -9368,141 +9185,13 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 58, 1, + NPC_S_KPU2_CPT_QINQ, 12, 1, NPC_LID_LA, NPC_LT_LA_CPT_HDR, NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - 
NPC_F_LA_L_UNK_ETYPE, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 3, 0, - NPC_S_KPU5_IP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, - NPC_S_KPU5_IP6, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_ARP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_RARP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_PTP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_FCOE, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 12, 0, 0, 0, - NPC_S_KPU2_CTAG2, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 22, 0, 0, - NPC_S_KPU2_SBTAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 12, 26, 0, 0, - NPC_S_KPU2_ETAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_WITH_MPLS, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_WITH_MPLS, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 0, 0, 2, 0, - NPC_S_KPU4_NSH, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_WITH_NSH, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_UNK_ETYPE, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 0, 0, 1, 0, NPC_S_KPU3_VLAN_EXDSA, 12, 1, NPC_LID_LA, NPC_LT_LA_ETHER, @@ -9596,6 +9285,14 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_NGIO, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 2, 0, NPC_S_KPU5_IP, 14, 1, NPC_LID_LB, NPC_LT_LB_PPPOE, @@ -10388,13 +10085,37 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LC, NPC_LT_LC_NGIO, + 8, 0, 6, 2, 0, + NPC_S_KPU5_CPT_IP, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_CPT_IP6, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, 0, 0, }, { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_CPT_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, 
NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_CPT_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -12426,6 +12147,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_NGIO, + 0, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index b3803577324e..9ea2f6ac38ec 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -29,6 +29,9 @@ static struct mac_ops rpm_mac_ops = { .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status, .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm, .mac_pause_frm_config = rpm_lmac_pause_frm_config, + .mac_enadis_ptp_config = rpm_lmac_ptp_config, + .mac_rx_tx_enable = rpm_lmac_rx_tx_enable, + .mac_tx_enable = rpm_lmac_tx_enable, }; struct mac_ops *rpm_get_mac_ops(void) @@ -53,6 +56,43 @@ int rpm_get_nr_lmacs(void *rpmd) return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL); } +int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable) +{ + rpm_t *rpm = rpmd; + u64 cfg, last; + + if (!is_lmac_valid(rpm, lmac_id)) + return -ENODEV; + + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + last = cfg; + if (enable) + cfg |= RPM_TX_EN; + else + cfg &= ~(RPM_TX_EN); + + if (cfg != last) + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + return !!(last & RPM_TX_EN); +} + +int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!is_lmac_valid(rpm, lmac_id)) + return -ENODEV; + + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + if (enable) + cfg |= RPM_RX_EN | RPM_TX_EN; + else + cfg &= ~(RPM_RX_EN | RPM_TX_EN); + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + return 0; +} + void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable) { rpm_t *rpm = rpmd; @@ -267,3 +307,19 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable) return 0; } + +void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!is_lmac_valid(rpm, lmac_id)) + return; + + cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG); + if (enable) + cfg |= RPMX_RX_TS_PREPEND; + else + cfg &= ~RPMX_RX_TS_PREPEND; + rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h index f0b069442dcc..ff580311edd0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h @@ -14,6 +14,8 @@ #define PCI_DEVID_CN10K_RPM 0xA060 /* Registers */ +#define RPMX_CMRX_CFG 0x00 +#define RPMX_RX_TS_PREPEND BIT_ULL(22) #define RPMX_CMRX_SW_INT 0x180 #define RPMX_CMRX_SW_INT_W1S 0x188 #define RPMX_CMRX_SW_INT_ENA_W1S 0x198 @@ -41,6 +43,8 @@ #define RPMX_MTI_STAT_DATA_HI_CDC 0x10038 #define RPM_LMAC_FWI 0xa +#define RPM_TX_EN BIT_ULL(0) +#define RPM_RX_EN BIT_ULL(1) /* Function Declarations */ int rpm_get_nr_lmacs(void *rpmd); @@ -54,4 +58,7 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause); int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat); int rpm_get_rx_stats(void *rpmd, int lmac_id, int 
idx, u64 *rx_stat); +void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable); +int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable); +int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable); #endif /* RPM_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 1d9411232f1d..a7213db38804 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -220,6 +220,7 @@ struct rvu_pfvf { u16 maxlen; u16 minlen; + bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */ u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */ @@ -237,6 +238,7 @@ struct rvu_pfvf { bool cgx_in_use; /* this PF/VF using CGX? */ int cgx_users; /* number of cgx users - used only by PFs */ + int intf_mode; u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ @@ -794,10 +796,12 @@ void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 src, struct mcam_entry *entry, u8 *intf, u8 *ena); +bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc); bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); void *rvu_first_cgx_pdata(struct rvu *rvu); int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id); +int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable); int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type); @@ -827,4 +831,7 @@ void rvu_switch_enable(struct rvu *rvu); void rvu_switch_disable(struct rvu *rvu); void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); +int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, + u64 pkind, u8 var_len_off, u8 var_len_off_mask, + u8 shift_dir); #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 81e8ea9ee30e..28ff67819566 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -411,7 +411,7 @@ int rvu_cgx_exit(struct rvu *rvu) * VF's of mapped PF and other PFs are not allowed. This fn() checks * whether a PFFUNC is permitted to do the config or not. 
*/ -static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) +inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) { if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) @@ -442,16 +442,26 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable) int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) { int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; u8 cgx_id, lmac_id; + void *cgxd; if (!is_cgx_config_permitted(rvu, pcifunc)) return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + mac_ops = get_mac_ops(cgxd); - cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start); + return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start); +} - return 0; +int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable) +{ + struct mac_ops *mac_ops; + + mac_ops = get_mac_ops(cgxd); + return mac_ops->mac_tx_enable(cgxd, lmac_id, enable); } void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc) @@ -694,7 +704,9 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -711,13 +723,16 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); - cgx_lmac_ptp_config(cgxd, lmac_id, enable); + mac_ops = get_mac_ops(cgxd); + mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, true); /* If PTP is enabled then inform NPC that packets to be * parsed by this PF will have their data shifted by 8 bytes * and if PTP is disabled then no shift is required */ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable)) return -EINVAL; + /* This flag is required to clean up CGX conf if app gets killed */ + pfvf->hw_rx_tstamp_en = enable; return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 959266894cf1..603361c94786 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -2068,8 +2068,8 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, /* enable cgx tx if disabled */ if (is_pf_cgxmapped(rvu, pf)) { rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); - restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), - lmac_id, true); + restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), + lmac_id, true); } cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); @@ -2092,7 +2092,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, rvu_cgx_enadis_rx_bp(rvu, pf, true); /* restore cgx tx state */ if (restore_tx_en) - cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); + rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); return err; } @@ -3878,7 +3878,7 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link, /* Enable cgx tx if disabled for credits to be back */ if (is_pf_cgxmapped(rvu, pf)) { rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); - restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), + restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, true); } @@ -3918,7 +3918,7 @@ exit: /* Restore state of cgx tx */ if (restore_tx_en) - 
cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); + rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); mutex_unlock(&rvu->rsrc_lock); return rc; @@ -4519,6 +4519,10 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct hwctx_disable_req ctx_req; + int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; + u8 cgx_id, lmac_id; + void *cgxd; int err; ctx_req.hdr.pcifunc = pcifunc; @@ -4555,6 +4559,22 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) dev_err(rvu->dev, "CQ ctx disable failed\n"); } + /* reset HW config done for Switch headers */ + rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, + (PKIND_TX | PKIND_RX), 0, 0, 0, 0); + + /* Disabling CGX and NPC config done for PTP */ + if (pfvf->hw_rx_tstamp_en) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + mac_ops = get_mac_ops(cgxd); + mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); + /* Undo NPC config done for PTP */ + if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) + dev_err(rvu->dev, "NPC config for PTP failed\n"); + pfvf->hw_rx_tstamp_en = false; + } + nix_ctx_free(rvu, pfvf); nix_free_all_bandprof(rvu, pcifunc); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 87f18e32b463..c4a46b295d40 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -605,7 +605,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, struct npc_install_flow_req req = { 0 }; struct npc_install_flow_rsp rsp = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; - struct nix_rx_action action; + struct nix_rx_action action = { 0 }; int blkaddr, index; /* AF's and SDP VFs work in promiscuous mode */ @@ -626,7 +626,6 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, *(u64 *)&action = npc_get_mcam_action(rvu, mcam, blkaddr, index); } else { - *(u64 *)&action = 0x00; action.op = NIX_RX_ACTIONOP_UCAST; action.pf_func = pcifunc; } @@ -657,7 +656,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; int blkaddr, ucast_idx, index; - struct nix_rx_action action; + struct nix_rx_action action = { 0 }; u64 relaxed_mask; if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc)) @@ -685,14 +684,14 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, blkaddr, ucast_idx); if (action.op != NIX_RX_ACTIONOP_RSS) { - *(u64 *)&action = 0x00; + *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_UCAST; } /* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { - *(u64 *)&action = 0x00; + *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_MCAST; pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); action.index = pfvf->promisc_mce_idx; @@ -832,7 +831,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, struct rvu_hwinfo *hw = rvu->hw; int blkaddr, ucast_idx, index; u8 mac_addr[ETH_ALEN] = { 0 }; - struct nix_rx_action action; + struct nix_rx_action action = { 0 }; struct rvu_pfvf *pfvf; u16 vf_func; @@ -861,14 +860,14 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, blkaddr, ucast_idx); if (action.op != NIX_RX_ACTIONOP_RSS) { - *(u64 *)&action = 0x00; 
+ *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_UCAST; action.pf_func = pcifunc; } /* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) { - *(u64 *)&action = 0x00; + *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_MCAST; action.index = pfvf->mcast_mce_idx; } @@ -3183,6 +3182,102 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, return 0; } +static int +npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind, + u8 var_len_off, u8 var_len_off_mask, u8 shift_dir) +{ + struct npc_kpu_action0 *act0; + u8 shift_count = 0; + int blkaddr; + u64 val; + + if (!var_len_off_mask) + return -EINVAL; + + if (var_len_off_mask != 0xff) { + if (shift_dir) + shift_count = __ffs(var_len_off_mask); + else + shift_count = (8 - __fls(var_len_off_mask)); + } + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc); + if (blkaddr < 0) { + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); + return -EINVAL; + } + val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind)); + act0 = (struct npc_kpu_action0 *)&val; + act0->var_len_shift = shift_count; + act0->var_len_right = shift_dir; + act0->var_len_mask = var_len_off_mask; + act0->var_len_offset = var_len_off; + rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val); + return 0; +} + +int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, + u64 pkind, u8 var_len_off, u8 var_len_off_mask, + u8 shift_dir) + +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int blkaddr, nixlf, rc, intf_mode; + int pf = rvu_get_pf(pcifunc); + u64 rxpkind, txpkind; + u8 cgx_id, lmac_id; + + /* use default pkind to disable edsa/higig */ + rxpkind = rvu_npc_get_pkind(rvu, pf); + txpkind = NPC_TX_DEF_PKIND; + intf_mode = NPC_INTF_MODE_DEF; + + if (mode & OTX2_PRIV_FLAGS_CUSTOM) { + if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) { + rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind, + var_len_off, + var_len_off_mask, + shift_dir); + if (rc) + return rc; + } + rxpkind = pkind; + txpkind = pkind; + } + + if (dir & PKIND_RX) { + /* rx pkind set req valid only for cgx mapped PFs */ + if (!is_cgx_config_permitted(rvu, pcifunc)) + return 0; + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, + rxpkind); + if (rc) + return rc; + } + + if (dir & PKIND_TX) { + /* Tx pkind set request valid if PCIFUNC has NIXLF attached */ + rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (rc) + return rc; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), + txpkind); + } + + pfvf->intf_mode = intf_mode; + return 0; +} + +int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu, struct npc_set_pkind *req, + struct msg_rsp *rsp) +{ + return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode, + req->dir, req->pkind, req->var_len_off, + req->var_len_off_mask, req->shift_dir); +} + int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu, struct msg_req *req, struct npc_mcam_read_base_rule_rsp *rsp) diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index aa543b29799e..656c68cfd7ec 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -492,6 +492,7 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw) dev_info(prestera_dev(sw), "using random base mac address\n"); } of_node_put(base_mac_np); + of_node_put(np); return 
prestera_hw_switch_mac_set(sw, sw->base_mac); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 00f63fbfe9b4..e06a6104e91f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -130,11 +130,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd) static void cmd_free_index(struct mlx5_cmd *cmd, int idx) { - unsigned long flags; - - spin_lock_irqsave(&cmd->alloc_lock, flags); + lockdep_assert_held(&cmd->alloc_lock); set_bit(idx, &cmd->bitmask); - spin_unlock_irqrestore(&cmd->alloc_lock, flags); } static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) @@ -144,17 +141,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) { + struct mlx5_cmd *cmd = ent->cmd; + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); if (!refcount_dec_and_test(&ent->refcnt)) - return; + goto out; if (ent->idx >= 0) { - struct mlx5_cmd *cmd = ent->cmd; - cmd_free_index(cmd, ent->idx); up(ent->page_queue ? &cmd->pages_sem : &cmd->sem); } cmd_free_ent(ent); +out: + spin_unlock_irqrestore(&cmd->alloc_lock, flags); } static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index d7576b6fa43b..7d56a927081d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -793,11 +793,14 @@ int mlx5_devlink_register(struct devlink *devlink) { int err; - err = devlink_params_register(devlink, mlx5_devlink_params, - ARRAY_SIZE(mlx5_devlink_params)); + err = devlink_register(devlink); if (err) return err; + err = devlink_params_register(devlink, mlx5_devlink_params, + ARRAY_SIZE(mlx5_devlink_params)); + if (err) + goto params_reg_err; mlx5_devlink_set_params_init_values(devlink); err = mlx5_devlink_auxdev_params_register(devlink); @@ -808,6 +811,7 @@ int mlx5_devlink_register(struct devlink *devlink) if (err) goto traps_reg_err; + devlink_params_publish(devlink); return 0; traps_reg_err: @@ -815,13 +819,17 @@ traps_reg_err: auxdev_reg_err: devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); +params_reg_err: + devlink_unregister(devlink); return err; } void mlx5_devlink_unregister(struct devlink *devlink) { + devlink_params_unpublish(devlink); mlx5_devlink_traps_unregister(devlink); mlx5_devlink_auxdev_params_unregister(devlink); devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); + devlink_unregister(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c index 60952b33b568..d2333310b56f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c @@ -60,37 +60,31 @@ static int parse_tunnel(struct mlx5e_priv *priv, void *headers_v) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); - struct flow_match_enc_keyid enc_keyid; struct flow_match_mpls match; void *misc2_c; void *misc2_v; - misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, - misc_parameters_2); - misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, - misc_parameters_2); - - if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) - return 0; - - if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) - return 0; - 
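The cmd.c hunk moves the final reference drop under cmd->alloc_lock so that clearing the index bitmap, releasing the semaphore and freeing the entry cannot race with a concurrent allocation; cmd_free_index() keeps only a lockdep assertion. A kernel-style sketch of that shape, with illustrative names rather than the mlx5 structures (the semaphore release is omitted):

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/refcount.h>
#include <linux/bitops.h>
#include <linux/slab.h>

struct cmd_pool {
	spinlock_t alloc_lock;
	unsigned long bitmask;		/* free-index bitmap */
};

struct cmd_entry {
	struct cmd_pool *pool;
	refcount_t refcnt;
	int idx;			/* -1 when no index is held */
};

/* Caller must hold pool->alloc_lock. */
static void pool_free_index(struct cmd_pool *pool, int idx)
{
	lockdep_assert_held(&pool->alloc_lock);
	set_bit(idx, &pool->bitmask);
}

static void entry_put(struct cmd_entry *ent)
{
	struct cmd_pool *pool = ent->pool;
	unsigned long flags;

	/* Drop the reference and, if it was the last one, free the index and
	 * the entry while still holding the allocator lock, so a concurrent
	 * allocation cannot observe a half-released entry.
	 */
	spin_lock_irqsave(&pool->alloc_lock, flags);
	if (!refcount_dec_and_test(&ent->refcnt))
		goto out;

	if (ent->idx >= 0)
		pool_free_index(pool, ent->idx);
	kfree(ent);
out:
	spin_unlock_irqrestore(&pool->alloc_lock, flags);
}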
- flow_rule_match_enc_keyid(rule, &enc_keyid); - - if (!enc_keyid.mask->keyid) - return 0; - if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) && !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP)) return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) + return -EOPNOTSUPP; + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) + return 0; + flow_rule_match_mpls(rule, &match); /* Only support matching the first LSE */ if (match.mask->used_lses != 1) return -EOPNOTSUPP; + misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters_2); + misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters_2); + MLX5_SET(fte_match_set_misc2, misc2_c, outer_first_mpls_over_udp.mpls_label, match.mask->ls[0].mpls_label); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 5120a59361e6..428881e0adcb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -127,6 +127,28 @@ out_disable: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } +static inline bool +mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5_wqe_eth_seg *eseg) +{ + u8 inner_ipproto; + + if (!mlx5e_ipsec_eseg_meta(eseg)) + return false; + + eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; + inner_ipproto = xfrm_offload(skb)->inner_ipproto; + if (inner_ipproto) { + eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM; + if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) + eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM; + } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; + sq->stats->csum_partial_inner++; + } + + return true; +} #else static inline void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, @@ -143,6 +165,13 @@ static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; static inline netdev_features_t mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features) { return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } + +static inline bool +mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5_wqe_eth_seg *eseg) +{ + return false; +} #endif /* CONFIG_MLX5_EN_IPSEC */ #endif /* __MLX5E_IPSEC_RXTX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index dc9b8718c3c1..2d3cd237355a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1754,7 +1754,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, if (size_read < 0) { netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n", __func__, size_read); - return 0; + return size_read; } i += size_read; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f075bb8ccd00..01301bee420c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4914,6 +4914,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *prof } netif_carrier_off(netdev); + netif_tx_disable(netdev); dev_net_set(netdev, mlx5_core_net(mdev)); return netdev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 
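mlx5e_ipsec_txwqe_build_eseg_csum() gets a matching static-inline stub in the !CONFIG_MLX5_EN_IPSEC branch so en_tx.c can call it unconditionally and fall through to the generic checksum path when it returns false. A generic sketch of that Kconfig-stub idiom; the option and type names here are invented for illustration:

#include <stdbool.h>

struct tx_queue;
struct packet;		/* hypothetical, not the kernel's sk_buff */
struct eth_segment;

#ifdef CONFIG_FOO_OFFLOAD
/* Real implementation lives with the offload-specific code. */
bool foo_build_csum(struct tx_queue *sq, struct packet *pkt,
		    struct eth_segment *eseg);
#else
/* Stub keeps the call site free of #ifdefs: "not handled, fall through". */
static inline bool foo_build_csum(struct tx_queue *sq, struct packet *pkt,
				  struct eth_segment *eseg)
{
	return false;
}
#endif

/*
 * Call site stays identical whether or not the option is enabled:
 *
 *	if (foo_build_csum(sq, pkt, eseg))
 *		return;
 *	// generic checksum path
 */

After inlining, the disabled case costs nothing and the #ifdef stays confined to the header.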
b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0015545d5235..d2de1e6c514c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -987,7 +987,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, } /* True when explicitly set via priv flag, or XDP prog is loaded */ - if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) + if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) || + get_cqe_tls_offload(cqe)) goto csum_unnecessary; /* CQE csum doesn't cover padding octets in short ethernet diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 188994d091c5..7fd33b356cc8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -38,6 +38,7 @@ #include "en/txrx.h" #include "ipoib/ipoib.h" #include "en_accel/en_accel.h" +#include "en_accel/ipsec_rxtx.h" #include "en/ptp.h" static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) @@ -213,30 +214,13 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz); } -static void -ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, - struct mlx5_wqe_eth_seg *eseg) -{ - struct xfrm_offload *xo = xfrm_offload(skb); - - eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; - if (xo->inner_ipproto) { - eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM; - } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { - eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; - sq->stats->csum_partial_inner++; - } -} - static inline void mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_accel_tx_state *accel, struct mlx5_wqe_eth_seg *eseg) { - if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) { - ipsec_txwqe_build_eseg_csum(sq, skb, eseg); + if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg))) return; - } if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f3f23fdc2022..3194cdcd2f63 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2784,10 +2784,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) return false; - if (mlx5_core_is_ecpf_esw_manager(esw->dev) || - mlx5_ecpf_vport_exists(esw->dev)) - return false; - return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index fe501ba88bea..00834c914dc6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2041,6 +2041,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) fte->node.del_hw_func = NULL; up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); + } else { + up_write_ref_node(&fte->node, false); } kfree(handle); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c index 30282d86e6b9..cb0a48d374a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -126,6 +126,10 @@ static void 
mlx5_lag_fib_route_event(struct mlx5_lag *ldev, return; } + /* Handle multipath entry with lower priority value */ + if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority) + return; + /* Handle add/replace event */ nhs = fib_info_num_path(fi); if (nhs == 1) { @@ -135,12 +139,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev); if (i < 0) - i = MLX5_LAG_NORMAL_AFFINITY; - else - ++i; + return; + i++; mlx5_lag_set_port_affinity(ldev, i); } + + mp->mfi = fi; return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 29b7297a836a..4ed740994279 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -516,7 +516,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) /* Check log_max_qp from HCA caps to set in current profile */ if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) { - prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp); + prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp)); } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) { mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n", prof->log_max_qp, @@ -1541,7 +1541,6 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); pci_save_state(pdev); - devlink_register(devlink); if (!mlx5_core_is_mp_slave(dev)) devlink_reload_enable(devlink); return 0; @@ -1564,7 +1563,6 @@ static void remove_one(struct pci_dev *pdev) struct devlink *devlink = priv_to_devlink(dev); devlink_reload_disable(devlink); - devlink_unregister(devlink); mlx5_crdump_disable(dev); mlx5_drain_health_wq(dev); mlx5_uninit_one(dev); @@ -1762,10 +1760,12 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ + { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ { PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */ + { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */ { 0, } }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 7b16a1188aab..fd79860de723 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -433,35 +433,12 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev, struct mlx5_module_eeprom_query_params *params, u8 *data) { - u8 module_id; int err; err = mlx5_query_module_num(dev, ¶ms->module_number); if (err) return err; - err = mlx5_query_module_id(dev, params->module_number, &module_id); - if (err) - return err; - - switch (module_id) { - case MLX5_MODULE_ID_SFP: - if (params->page > 0) - return -EINVAL; - break; - case MLX5_MODULE_ID_QSFP: - case MLX5_MODULE_ID_QSFP28: - case MLX5_MODULE_ID_QSFP_PLUS: - if (params->page > 3) - return 
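In handle_hca_cap() the "maximum supported" profile value is now clamped with min_t(), i.e. at most log_max_qp = 17 (128K QPs) unless the HCA advertises less. min_t() is simply a min() that forces both operands to one type first; a trivial userspace illustration:

#include <stdio.h>

/* Userspace stand-in for the kernel's min_t(type, a, b). */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned char hca_cap_log_max_qp = 24;	/* example capability from firmware */
	unsigned char prof_log_max_qp;

	/* Cap the profile default at 2^17 QPs, or lower if the device
	 * advertises less than that.
	 */
	prof_log_max_qp = min_t(unsigned char, 17, hca_cap_log_max_qp);
	printf("log_max_qp = %u (up to %u QPs)\n", prof_log_max_qp,
	       1U << prof_log_max_qp);
	return 0;
}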
-EINVAL; - break; - case MLX5_MODULE_ID_DSFP: - break; - default: - mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); - return -EINVAL; - } - if (params->i2c_address != MLX5_I2C_ADDR_HIGH && params->i2c_address != MLX5_I2C_ADDR_LOW) { mlx5_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index 3cf272fa2164..052f48068dc1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -46,7 +46,6 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err); goto init_one_err; } - devlink_register(devlink); devlink_reload_enable(devlink); return 0; @@ -66,7 +65,6 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev) devlink = priv_to_devlink(sf_dev->mdev); devlink_reload_disable(devlink); - devlink_unregister(devlink); mlx5_uninit_one(sf_dev->mdev); iounmap(sf_dev->mdev->iseg); mlx5_mdev_uninit(sf_dev->mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c index 66c24767e3b0..8ad8d73e17f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c @@ -4,7 +4,6 @@ #include "dr_types.h" #define DR_ICM_MODIFY_HDR_ALIGN_BASE 64 -#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024) struct mlx5dr_icm_pool { enum mlx5dr_icm_type icm_type; @@ -136,37 +135,35 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr) kvfree(icm_mr); } -static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk) +static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy) { - chunk->ste_arr = kvzalloc(chunk->num_of_entries * - sizeof(chunk->ste_arr[0]), GFP_KERNEL); - if (!chunk->ste_arr) - return -ENOMEM; - - chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries * - DR_STE_SIZE_REDUCED, GFP_KERNEL); - if (!chunk->hw_ste_arr) - goto out_free_ste_arr; - - chunk->miss_list = kvmalloc(chunk->num_of_entries * - sizeof(chunk->miss_list[0]), GFP_KERNEL); - if (!chunk->miss_list) - goto out_free_hw_ste_arr; + /* We support only one type of STE size, both for ConnectX-5 and later + * devices. Once the support for match STE which has a larger tag is + * added (32B instead of 16B), the STE size for devices later than + * ConnectX-5 needs to account for that. 
+ */ + return DR_STE_SIZE_REDUCED; +} - return 0; +static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset) +{ + struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem; + int index = offset / DR_STE_SIZE; -out_free_hw_ste_arr: - kvfree(chunk->hw_ste_arr); -out_free_ste_arr: - kvfree(chunk->ste_arr); - return -ENOMEM; + chunk->ste_arr = &buddy->ste_arr[index]; + chunk->miss_list = &buddy->miss_list[index]; + chunk->hw_ste_arr = buddy->hw_ste_arr + + index * dr_icm_buddy_get_ste_size(buddy); } static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk) { - kvfree(chunk->miss_list); - kvfree(chunk->hw_ste_arr); - kvfree(chunk->ste_arr); + struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem; + + memset(chunk->hw_ste_arr, 0, + chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy)); + memset(chunk->ste_arr, 0, + chunk->num_of_entries * sizeof(chunk->ste_arr[0])); } static enum mlx5dr_icm_type @@ -189,6 +186,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk, kvfree(chunk); } +static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy) +{ + int num_of_entries = + mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz); + + buddy->ste_arr = kvcalloc(num_of_entries, + sizeof(struct mlx5dr_ste), GFP_KERNEL); + if (!buddy->ste_arr) + return -ENOMEM; + + /* Preallocate full STE size on non-ConnectX-5 devices since + * we need to support both full and reduced with the same cache. + */ + buddy->hw_ste_arr = kvcalloc(num_of_entries, + dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL); + if (!buddy->hw_ste_arr) + goto free_ste_arr; + + buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL); + if (!buddy->miss_list) + goto free_hw_ste_arr; + + return 0; + +free_hw_ste_arr: + kvfree(buddy->hw_ste_arr); +free_ste_arr: + kvfree(buddy->ste_arr); + return -ENOMEM; +} + +static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy) +{ + kvfree(buddy->ste_arr); + kvfree(buddy->hw_ste_arr); + kvfree(buddy->miss_list); +} + static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool) { struct mlx5dr_icm_buddy_mem *buddy; @@ -208,11 +243,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool) buddy->icm_mr = icm_mr; buddy->pool = pool; + if (pool->icm_type == DR_ICM_TYPE_STE) { + /* Reduce allocations by preallocating and reusing the STE structures */ + if (dr_icm_buddy_init_ste_cache(buddy)) + goto err_cleanup_buddy; + } + /* add it to the -start- of the list in order to search in it first */ list_add(&buddy->list_node, &pool->buddy_mem_list); return 0; +err_cleanup_buddy: + mlx5dr_buddy_cleanup(buddy); err_free_buddy: kvfree(buddy); free_mr: @@ -234,6 +277,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy) mlx5dr_buddy_cleanup(buddy); + if (buddy->pool->icm_type == DR_ICM_TYPE_STE) + dr_icm_buddy_cleanup_ste_cache(buddy); + kvfree(buddy); } @@ -261,34 +307,30 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool, chunk->byte_size = mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type); chunk->seg = seg; + chunk->buddy_mem = buddy_mem_pool; - if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) { - mlx5dr_err(pool->dmn, - "Failed to init ste arrays (order: %d)\n", - chunk_size); - goto out_free_chunk; - } + if (pool->icm_type == DR_ICM_TYPE_STE) + dr_icm_chunk_ste_init(chunk, offset); buddy_mem_pool->used_memory += chunk->byte_size; - chunk->buddy_mem = buddy_mem_pool; INIT_LIST_HEAD(&chunk->chunk_list); /* chunk now is part of the 
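The dr_icm_pool.c rework above allocates the STE bookkeeping once per buddy (dr_icm_buddy_init_ste_cache) and lets each chunk point into those arrays at offset/DR_STE_SIZE instead of performing three allocations per chunk. A reduced kernel-style sketch of the pattern, with placeholder names and sizes:

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/types.h>

#define ENTRY_SIZE	64	/* placeholder granularity, not DR_STE_SIZE */

struct ste_meta {		/* placeholder per-entry bookkeeping */
	void *owner;
};

struct buddy_cache {
	struct ste_meta *ste_arr;
	struct list_head *miss_list;
	u8 *hw_ste_arr;
	unsigned int num_entries;
	unsigned int hw_ste_sz;
};

struct chunk {
	struct buddy_cache *buddy;
	struct ste_meta *ste_arr;	/* slice of buddy->ste_arr */
	struct list_head *miss_list;
	u8 *hw_ste_arr;
	unsigned int num_entries;
};

static int buddy_cache_init(struct buddy_cache *b, unsigned int num_entries,
			    unsigned int hw_ste_sz)
{
	b->num_entries = num_entries;
	b->hw_ste_sz = hw_ste_sz;
	b->ste_arr = kvcalloc(num_entries, sizeof(*b->ste_arr), GFP_KERNEL);
	b->hw_ste_arr = kvcalloc(num_entries, hw_ste_sz, GFP_KERNEL);
	b->miss_list = kvcalloc(num_entries, sizeof(*b->miss_list), GFP_KERNEL);
	if (!b->ste_arr || !b->hw_ste_arr || !b->miss_list) {
		kvfree(b->ste_arr);
		kvfree(b->hw_ste_arr);
		kvfree(b->miss_list);
		return -ENOMEM;
	}
	return 0;
}

/* Chunks allocate nothing of their own; they index into the buddy's arrays. */
static void chunk_attach(struct chunk *c, struct buddy_cache *b,
			 unsigned int byte_offset, unsigned int num_entries)
{
	unsigned int index = byte_offset / ENTRY_SIZE;

	c->buddy = b;
	c->num_entries = num_entries;
	c->ste_arr = &b->ste_arr[index];
	c->miss_list = &b->miss_list[index];
	c->hw_ste_arr = b->hw_ste_arr + index * b->hw_ste_sz;
}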
used_list */ list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list); return chunk; - -out_free_chunk: - kvfree(chunk); - return NULL; } static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool) { - if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL) - return true; + int allow_hot_size; + + /* sync when hot memory reaches half of the pool size */ + allow_hot_size = + mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, + pool->icm_type) / 2; - return false; + return pool->hot_memory_size > allow_hot_size; } static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index b5409cc021d3..a19e8157c100 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -13,18 +13,6 @@ static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec) return (spec->dmac_47_16 || spec->dmac_15_0); } -static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec) -{ - return (spec->src_ip_127_96 || spec->src_ip_95_64 || - spec->src_ip_63_32 || spec->src_ip_31_0); -} - -static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec) -{ - return (spec->dst_ip_127_96 || spec->dst_ip_95_64 || - spec->dst_ip_63_32 || spec->dst_ip_31_0); -} - static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec) { return (spec->ip_protocol || spec->frag || spec->tcp_flags || @@ -480,11 +468,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, &mask, inner, rx); if (outer_ipv == DR_RULE_IPV6) { - if (dr_mask_is_dst_addr_set(&mask.outer)) + if (DR_MASK_IS_DST_IP_SET(&mask.outer)) mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++], &mask, inner, rx); - if (dr_mask_is_src_addr_set(&mask.outer)) + if (DR_MASK_IS_SRC_IP_SET(&mask.outer)) mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++], &mask, inner, rx); @@ -580,11 +568,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, &mask, inner, rx); if (inner_ipv == DR_RULE_IPV6) { - if (dr_mask_is_dst_addr_set(&mask.inner)) + if (DR_MASK_IS_DST_IP_SET(&mask.inner)) mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++], &mask, inner, rx); - if (dr_mask_is_src_addr_set(&mask.inner)) + if (DR_MASK_IS_SRC_IP_SET(&mask.inner)) mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++], &mask, inner, rx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 1cdfe4fccc7a..01246a1ae7d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -602,12 +602,34 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx, used_hw_action_num); } +static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn, + struct mlx5dr_match_spec *spec) +{ + if (spec->ip_version) { + if (spec->ip_version != 0xf) { + mlx5dr_err(dmn, + "Partial ip_version mask with src/dst IP is not supported\n"); + return -EINVAL; + } + } else if (spec->ethertype != 0xffff && + (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) { + mlx5dr_err(dmn, + "Partial/no ethertype mask with src/dst IP is not supported\n"); + return -EINVAL; + } + + return 0; +} + int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, u8 match_criteria, struct mlx5dr_match_param *mask, struct mlx5dr_match_param *value) { - if (!value && 
(match_criteria & DR_MATCHER_CRITERIA_MISC)) { + if (value) + return 0; + + if (match_criteria & DR_MATCHER_CRITERIA_MISC) { if (mask->misc.source_port && mask->misc.source_port != 0xffff) { mlx5dr_err(dmn, "Partial mask source_port is not supported\n"); @@ -621,6 +643,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, } } + if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) && + dr_ste_build_pre_check_spec(dmn, &mask->outer)) + return -EINVAL; + + if ((match_criteria & DR_MATCHER_CRITERIA_INNER) && + dr_ste_build_pre_check_spec(dmn, &mask->inner)) + return -EINVAL; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index b20e8aabb861..3d4e035698dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -740,6 +740,16 @@ struct mlx5dr_match_param { (_misc3)->icmpv4_code || \ (_misc3)->icmpv4_header_data) +#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \ + (_spec)->src_ip_95_64 || \ + (_spec)->src_ip_63_32 || \ + (_spec)->src_ip_31_0) + +#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \ + (_spec)->dst_ip_95_64 || \ + (_spec)->dst_ip_63_32 || \ + (_spec)->dst_ip_31_0) + struct mlx5dr_esw_caps { u64 drop_icm_address_rx; u64 drop_icm_address_tx; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index c5a8b1601999..5ef199543479 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -160,6 +160,11 @@ struct mlx5dr_icm_buddy_mem { * sync_ste command sets them free. */ struct list_head hot_list; + + /* Memory optimisation */ + struct mlx5dr_ste *ste_arr; + struct list_head *miss_list; + u8 *hw_ste_arr; }; int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy, diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index 939b692ffc33..ce843ea91464 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client, return 0; errout: + mutex_destroy(&mlxsw_i2c->cmd.lock); i2c_set_clientdata(client, NULL); return err; diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig index 93df3049cdc0..1b632cdd7630 100644 --- a/drivers/net/ethernet/micrel/Kconfig +++ b/drivers/net/ethernet/micrel/Kconfig @@ -39,6 +39,7 @@ config KS8851 config KS8851_MLL tristate "Micrel KS8851 MLL" depends on HAS_IOMEM + depends on PTP_1588_CLOCK_OPTIONAL select MII select CRC32 select EEPROM_93CX6 diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig index 7bdbb2d09a14..cc5e48e1bb4c 100644 --- a/drivers/net/ethernet/microchip/sparx5/Kconfig +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -4,6 +4,8 @@ config SPARX5_SWITCH depends on HAS_IOMEM depends on OF depends on ARCH_SPARX5 || COMPILE_TEST + depends on PTP_1588_CLOCK_OPTIONAL + depends on BRIDGE || BRIDGE=n select PHYLINK select PHY_SPARX5_SERDES select RESET_CONTROLLER diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c index 7436f62fa152..174ad95e746a 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c @@ 
-420,6 +420,8 @@ static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5) db_hw->dataptr = phys; db_hw->status = 0; db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL); + if (!db) + return -ENOMEM; db->cpu_addr = cpu_addr; list_add_tail(&db->list, &tx->db_list); } diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index dc7e5ea6ec15..148d431fcde4 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -145,9 +145,9 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap) skb_put(skb, byte_cnt - ETH_FCS_LEN); eth_skb_pad(skb); skb->protocol = eth_type_trans(skb, netdev); - netif_rx(skb); netdev->stats.rx_bytes += skb->len; netdev->stats.rx_packets++; + netif_rx(skb); } static int sparx5_inject(struct sparx5 *sparx5, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c index 4ce490a25f33..8e56ffa1c4f7 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c @@ -58,16 +58,6 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid, struct sparx5 *sparx5 = port->sparx5; int ret; - /* Make the port a member of the VLAN */ - set_bit(port->portno, sparx5->vlan_mask[vid]); - ret = sparx5_vlant_set_mask(sparx5, vid); - if (ret) - return ret; - - /* Default ingress vlan classification */ - if (pvid) - port->pvid = vid; - /* Untagged egress vlan classification */ if (untagged && port->vid != vid) { if (port->vid) { @@ -79,6 +69,16 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid, port->vid = vid; } + /* Make the port a member of the VLAN */ + set_bit(port->portno, sparx5->vlan_mask[vid]); + ret = sparx5_vlant_set_mask(sparx5, vid); + if (ret) + return ret; + + /* Default ingress vlan classification */ + if (pvid) + port->pvid = vid; + sparx5_vlan_port_apply(sparx5, port); return 0; diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index b6a73d151dec..8dd8c7f425d2 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -28,7 +28,7 @@ config MSCC_OCELOT_SWITCH depends on BRIDGE || BRIDGE=n depends on NET_SWITCHDEV depends on HAS_IOMEM - depends on OF_NET + depends on OF select MSCC_OCELOT_SWITCH_LIB select GENERIC_PHY help diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 6aad0953e8fe..a59300d9e000 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1932,6 +1932,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port, val = BIT(port); ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC); + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4); + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6); } static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port, diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index afa062c5f82d..f1323af99b0c 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -54,6 +54,12 @@ static int ocelot_chain_to_block(int chain, bool ingress) */ static int ocelot_chain_to_lookup(int chain) { + /* Backwards compatibility with older, single-chain tc-flower + * offload support in Ocelot + */ + if (chain == 0) + return 0; + return (chain 
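The sparx5_packet.c hunk reads skb->len for the statistics before calling netif_rx(): once the skb is handed to the stack the driver no longer owns it and must not touch it. The general shape, as a short sketch rather than the driver's exact code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void deliver_rx(struct net_device *ndev, struct sk_buff *skb)
{
	/* Read everything still needed from the skb first ... */
	ndev->stats.rx_bytes += skb->len;
	ndev->stats.rx_packets++;

	/* ... then hand ownership to the stack. netif_rx() may free the skb
	 * or queue it elsewhere, so no dereference after this point.
	 */
	netif_rx(skb);
}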
/ VCAP_LOOKUP) % 10; } @@ -62,7 +68,15 @@ static int ocelot_chain_to_lookup(int chain) */ static int ocelot_chain_to_pag(int chain) { - int lookup = ocelot_chain_to_lookup(chain); + int lookup; + + /* Backwards compatibility with older, single-chain tc-flower + * offload support in Ocelot + */ + if (chain == 0) + return 0; + + lookup = ocelot_chain_to_lookup(chain); /* calculate PAG value as chain index relative to the first PAG */ return chain - VCAP_IS2_CHAIN(lookup, 0); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index c1a75b08ced7..052696ce5096 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -2900,11 +2900,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, status = myri10ge_xmit(curr, dev); if (status != 0) { dev_kfree_skb_any(curr); - if (segs != NULL) { - curr = segs; - segs = next; + skb_list_walk_safe(next, curr, next) { curr->next = NULL; - dev_kfree_skb_any(segs); + dev_kfree_skb_any(curr); } goto drop; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 6521675be85c..babd374333f3 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, int port, bool mod) { struct nfp_flower_priv *priv = app->priv; - int ida_idx = NFP_MAX_MAC_INDEX, err; struct nfp_tun_offloaded_mac *entry; + int ida_idx = -1, err; u16 nfp_mac_idx = 0; entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); @@ -997,7 +997,7 @@ err_remove_hash: err_free_entry: kfree(entry); err_free_ida: - if (ida_idx != NFP_MAX_MAC_INDEX) + if (ida_idx != -1) ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); return err; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index c910fa2f40a4..919140522885 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1469,6 +1469,7 @@ static int lpc_eth_drv_resume(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct netdata_local *pldat; + int ret; if (device_may_wakeup(&pdev->dev)) disable_irq_wake(ndev->irq); @@ -1478,7 +1479,9 @@ static int lpc_eth_drv_resume(struct platform_device *pdev) pldat = netdev_priv(ndev); /* Enable interface clock */ - clk_enable(pldat->clk); + ret = clk_enable(pldat->clk); + if (ret) + return ret; /* Reset and initialize */ __lpc_eth_reset(pldat); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index 7e296fa71b36..40fa5bce2ac2 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -331,6 +331,9 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_deregister_lifs; } + mod_timer(&ionic->watchdog_timer, + round_jiffies(jiffies + ionic->watchdog_period)); + return 0; err_out_deregister_lifs: @@ -348,7 +351,6 @@ err_out_port_reset: err_out_reset: ionic_reset(ionic); err_out_teardown: - del_timer_sync(&ionic->watchdog_timer); pci_clear_master(pdev); /* Don't fail the probe for these errors, keep * the hw interface around for inspection diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 
0d6858ab511c..b778d8264bca 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -122,9 +122,6 @@ int ionic_dev_setup(struct ionic *ionic) idev->fw_generation = IONIC_FW_STS_F_GENERATION & ioread8(&idev->dev_info_regs->fw_status); - mod_timer(&ionic->watchdog_timer, - round_jiffies(jiffies + ionic->watchdog_period)); - idev->db_pages = bar->vaddr; idev->phy_db_pages = bar->bus_addr; @@ -132,6 +129,16 @@ int ionic_dev_setup(struct ionic *ionic) } /* Devcmd Interface */ +bool ionic_is_fw_running(struct ionic_dev *idev) +{ + u8 fw_status = ioread8(&idev->dev_info_regs->fw_status); + + /* firmware is useful only if the running bit is set and + * fw_status != 0xff (bad PCI read) + */ + return (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING); +} + int ionic_heartbeat_check(struct ionic *ionic) { struct ionic_dev *idev = &ionic->idev; @@ -155,13 +162,10 @@ do_check_time: goto do_check_time; } - /* firmware is useful only if the running bit is set and - * fw_status != 0xff (bad PCI read) - * If fw_status is not ready don't bother with the generation. - */ fw_status = ioread8(&idev->dev_info_regs->fw_status); - if (fw_status == 0xff || !(fw_status & IONIC_FW_STS_F_RUNNING)) { + /* If fw_status is not ready don't bother with the generation */ + if (!ionic_is_fw_running(idev)) { fw_status_ready = false; } else { fw_generation = fw_status & IONIC_FW_STS_F_GENERATION; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 8311086fb1f4..922bb6c9e01d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -357,5 +357,6 @@ void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start); void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, unsigned int stop_index); int ionic_heartbeat_check(struct ionic *ionic); +bool ionic_is_fw_running(struct ionic_dev *idev); #endif /* _IONIC_DEV_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 6f07bf509efe..480f85bc17f9 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -328,10 +328,10 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) static void ionic_dev_cmd_clean(struct ionic *ionic) { - union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs; + struct ionic_dev *idev = &ionic->idev; - iowrite32(0, ®s->doorbell); - memset_io(®s->cmd, 0, sizeof(regs->cmd)); + iowrite32(0, &idev->dev_cmd_regs->doorbell); + memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd)); } int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds) @@ -488,6 +488,9 @@ int ionic_reset(struct ionic *ionic) struct ionic_dev *idev = &ionic->idev; int err; + if (!ionic_is_fw_running(idev)) + return 0; + mutex_lock(&ionic->dev_cmd_lock); ionic_dev_cmd_reset(idev); err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); @@ -560,15 +563,17 @@ int ionic_port_init(struct ionic *ionic) int ionic_port_reset(struct ionic *ionic) { struct ionic_dev *idev = &ionic->idev; - int err; + int err = 0; if (!idev->port_info) return 0; - mutex_lock(&ionic->dev_cmd_lock); - ionic_dev_cmd_port_reset(idev); - err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); - mutex_unlock(&ionic->dev_cmd_lock); + if (ionic_is_fw_running(idev)) { + mutex_lock(&ionic->dev_cmd_lock); + 
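ionic_is_fw_running() treats an all-ones fw_status as a failed PCI read and only then checks the RUNNING bit; ionic_reset() and ionic_port_reset() use it to skip device commands that dead firmware can never acknowledge. A condensed sketch of the guard, with illustrative register constants:

#include <linux/io.h>
#include <linux/bits.h>
#include <linux/types.h>

#define FW_STS_RUNNING	BIT(0)	/* placeholder for IONIC_FW_STS_F_RUNNING */

static bool fw_running(void __iomem *fw_status_reg)
{
	u8 fw_status = ioread8(fw_status_reg);

	/* 0xff typically means the read hit a surprise-removed or dead
	 * device, so no individual bit in it can be trusted.
	 */
	return fw_status != 0xff && (fw_status & FW_STS_RUNNING);
}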
ionic_dev_cmd_port_reset(idev); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + } dma_free_coherent(ionic->dev, idev->port_info_sz, idev->port_info, idev->port_info_pa); @@ -576,9 +581,6 @@ int ionic_port_reset(struct ionic *ionic) idev->port_info = NULL; idev->port_info_pa = 0; - if (err) - dev_err(ionic->dev, "Failed to reset port\n"); - return err; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index ed2b6fe5a78d..3eb05376e7c3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -2982,12 +2982,16 @@ static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; struct qed_filter_accept_flags *flags = ¶ms->accept_flags; struct qed_public_vf_info *vf_info; + u16 tlv_mask; + + tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) | + BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN); /* Untrusted VFs can't even be trusted to know that fact. * Simply indicate everything is configured fine, and trace * configuration 'behind their back'. */ - if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM))) + if (!(*tlvs & tlv_mask)) return 0; vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); @@ -3004,6 +3008,13 @@ static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, flags->tx_accept_filter &= ~mask; } + if (params->update_accept_any_vlan_flg) { + vf_info->accept_any_vlan = params->accept_any_vlan; + + if (vf_info->forced_vlan && !vf_info->is_trusted_configured) + params->accept_any_vlan = false; + } + return 0; } @@ -3778,11 +3789,11 @@ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) return found; } -static void qed_iov_get_link(struct qed_hwfn *p_hwfn, - u16 vfid, - struct qed_mcp_link_params *p_params, - struct qed_mcp_link_state *p_link, - struct qed_mcp_link_capabilities *p_caps) +static int qed_iov_get_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *p_params, + struct qed_mcp_link_state *p_link, + struct qed_mcp_link_capabilities *p_caps) { struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, vfid, @@ -3790,7 +3801,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, struct qed_bulletin_content *p_bulletin; if (!p_vf) - return; + return -EINVAL; p_bulletin = p_vf->bulletin.p_virt; @@ -3800,6 +3811,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); if (p_caps) __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); + return 0; } static int @@ -4658,6 +4670,7 @@ static int qed_get_vf_config(struct qed_dev *cdev, struct qed_public_vf_info *vf_info; struct qed_mcp_link_state link; u32 tx_rate; + int ret; /* Sanitize request */ if (IS_VF(cdev)) @@ -4671,7 +4684,9 @@ static int qed_get_vf_config(struct qed_dev *cdev, vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); - qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); + ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); + if (ret) + return ret; /* Fill information about VF */ ivi->vf = vf_id; @@ -4687,6 +4702,7 @@ static int qed_get_vf_config(struct qed_dev *cdev, tx_rate = vf_info->tx_rate; ivi->max_tx_rate = tx_rate ? 
tx_rate : link.speed; ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id); + ivi->trusted = vf_info->is_trusted_request; return 0; } @@ -5117,6 +5133,12 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) params.update_ctl_frame_check = 1; params.mac_chk_en = !vf_info->is_trusted_configured; + params.update_accept_any_vlan_flg = 0; + + if (vf_info->accept_any_vlan && vf_info->forced_vlan) { + params.update_accept_any_vlan_flg = 1; + params.accept_any_vlan = vf_info->accept_any_vlan; + } if (vf_info->rx_accept_mode & mask) { flags->update_rx_mode_config = 1; @@ -5132,13 +5154,20 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) if (!vf_info->is_trusted_configured) { flags->rx_accept_filter &= ~mask; flags->tx_accept_filter &= ~mask; + params.accept_any_vlan = false; } if (flags->update_rx_mode_config || flags->update_tx_mode_config || - params.update_ctl_frame_check) + params.update_ctl_frame_check || + params.update_accept_any_vlan_flg) { + DP_VERBOSE(hwfn, QED_MSG_IOV, + "vport update config for %s VF[abs 0x%x rel 0x%x]\n", + vf_info->is_trusted_configured ? "trusted" : "untrusted", + vf->abs_vf_id, vf->relative_vf_id); qed_sp_vport_update(hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); + } } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index eacd6457f195..7ff23ef8ccc1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -62,6 +62,7 @@ struct qed_public_vf_info { bool is_trusted_request; u8 rx_accept_mode; u8 tx_accept_mode; + bool accept_any_vlan; }; struct qed_iov_vf_init_params { diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 72a38d53d33f..e2a5a6a373cb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -513,6 +513,9 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) p_iov->bulletin.size, &p_iov->bulletin.phys, GFP_KERNEL); + if (!p_iov->bulletin.p_virt) + goto free_pf2vf_reply; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n", p_iov->bulletin.p_virt, @@ -552,6 +555,10 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) return rc; +free_pf2vf_reply: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union pfvf_tlvs), + p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); free_vf2pf_request: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(union vfpf_tlvs), diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 999abcfe3310..17f895250e04 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -747,6 +747,9 @@ qede_build_skb(struct qede_rx_queue *rxq, buf = page_address(bd->data) + bd->page_offset; skb = build_skb(buf, rxq->rx_buf_seg_size); + if (unlikely(!skb)) + return NULL; + skb_reserve(skb, pad); skb_put(skb, len); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 5d79ee4370bc..7519773eaca6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -51,7 +51,7 @@ static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb) if (dcb && dcb->ops->get_hw_capability) return dcb->ops->get_hw_capability(dcb); - return 0; + return -EOPNOTSUPP; } static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb) @@ -65,7 +65,7 @@ static inline int qlcnic_dcb_attach(struct 
qlcnic_dcb *dcb) if (dcb && dcb->ops->attach) return dcb->ops->attach(dcb); - return 0; + return -EOPNOTSUPP; } static inline int @@ -74,7 +74,7 @@ qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf) if (dcb && dcb->ops->query_hw_capability) return dcb->ops->query_hw_capability(dcb, buf); - return 0; + return -EOPNOTSUPP; } static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb) @@ -89,7 +89,7 @@ qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type) if (dcb && dcb->ops->query_cee_param) return dcb->ops->query_cee_param(dcb, buf, type); - return 0; + return -EOPNOTSUPP; } static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) @@ -97,7 +97,7 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) if (dcb && dcb->ops->get_cee_cfg) return dcb->ops->get_cee_cfg(dcb); - return 0; + return -EOPNOTSUPP; } static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 6781aa636d58..1b415fe6f9b9 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -2282,18 +2282,18 @@ static int __init sxgbe_cmdline_opt(char *str) char *opt; if (!str || !*str) - return -EINVAL; + return 1; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "eee_timer:", 10)) { if (kstrtoint(opt + 10, 0, &eee_timer)) goto err; } } - return 0; + return 1; err: pr_err("%s: ERROR broken module parameter conversion\n", __func__); - return -EINVAL; + return 1; } __setup("sxgbeeth=", sxgbe_cmdline_opt); diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 3dbea028b325..1f8cfd806008 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -763,6 +763,85 @@ void efx_remove_channels(struct efx_nic *efx) kfree(efx->xdp_tx_queues); } +static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, + struct efx_tx_queue *tx_queue) +{ + if (xdp_queue_number >= efx->xdp_tx_queue_count) + return -EINVAL; + + netif_dbg(efx, drv, efx->net_dev, + "Channel %u TXQ %u is XDP %u, HW %u\n", + tx_queue->channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); + efx->xdp_tx_queues[xdp_queue_number] = tx_queue; + return 0; +} + +static void efx_set_xdp_channels(struct efx_nic *efx) +{ + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + unsigned int next_queue = 0; + int xdp_queue_number = 0; + int rc; + + /* We need to mark which channels really have RX and TX + * queues, and adjust the TX queue numbers if we have separate + * RX-only and TX-only channels. 
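sxgbe_cmdline_opt() now returns 1 on every path. For __setup() handlers, a non-zero return tells the boot-parameter parser the option was consumed; returning 0 (or a negative errno) makes the kernel treat it as unhandled and pass it along, typically ending up in init's argument or environment list with an "unknown parameter" complaint. A minimal sketch of the convention, with a hypothetical option name:

#include <linux/init.h>
#include <linux/kernel.h>

static int example_feature_disabled;	/* hypothetical module-level flag */

static int __init setup_example_disable(char *str)
{
	example_feature_disabled = 1;

	/* Return 1: "option recognized and consumed". Returning 0 would make
	 * the parser treat example_disable= as an unknown parameter.
	 */
	return 1;
}
__setup("example_disable", setup_example_disable);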
+ */ + efx_for_each_channel(channel, efx) { + if (channel->channel < efx->tx_channel_offset) + continue; + + if (efx_channel_is_xdp_tx(channel)) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->queue = next_queue++; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, + tx_queue); + if (rc == 0) + xdp_queue_number++; + } + } else { + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->queue = next_queue++; + netif_dbg(efx, drv, efx->net_dev, + "Channel %u TXQ %u is HW %u\n", + channel->channel, tx_queue->label, + tx_queue->queue); + } + + /* If XDP is borrowing queues from net stack, it must + * use the queue with no csum offload, which is the + * first one of the channel + * (note: tx_queue_by_type is not initialized yet) + */ + if (efx->xdp_txq_queues_mode == + EFX_XDP_TX_QUEUES_BORROWED) { + tx_queue = &channel->tx_queue[0]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, + tx_queue); + if (rc == 0) + xdp_queue_number++; + } + } + } + WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number != efx->xdp_tx_queue_count); + WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number > efx->xdp_tx_queue_count); + + /* If we have more CPUs than assigned XDP TX queues, assign the already + * existing queues to the exceeding CPUs + */ + next_queue = 0; + while (xdp_queue_number < efx->xdp_tx_queue_count) { + tx_queue = efx->xdp_tx_queues[next_queue++]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) + xdp_queue_number++; + } +} + int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) { struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; @@ -837,6 +916,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) efx_init_napi_channel(efx->channel[i]); } + efx_set_xdp_channels(efx); out: /* Destroy unused channel structures */ for (i = 0; i < efx->n_channels; i++) { @@ -872,26 +952,9 @@ rollback: goto out; } -static inline int -efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, - struct efx_tx_queue *tx_queue) -{ - if (xdp_queue_number >= efx->xdp_tx_queue_count) - return -EINVAL; - - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", - tx_queue->channel->channel, tx_queue->label, - xdp_queue_number, tx_queue->queue); - efx->xdp_tx_queues[xdp_queue_number] = tx_queue; - return 0; -} - int efx_set_channels(struct efx_nic *efx) { - struct efx_tx_queue *tx_queue; struct efx_channel *channel; - unsigned int next_queue = 0; - int xdp_queue_number; int rc; efx->tx_channel_offset = @@ -909,61 +972,14 @@ int efx_set_channels(struct efx_nic *efx) return -ENOMEM; } - /* We need to mark which channels really have RX and TX - * queues, and adjust the TX queue numbers if we have separate - * RX-only and TX-only channels. 
- */ - xdp_queue_number = 0; efx_for_each_channel(channel, efx) { if (channel->channel < efx->n_rx_channels) channel->rx_queue.core_index = channel->channel; else channel->rx_queue.core_index = -1; - - if (channel->channel >= efx->tx_channel_offset) { - if (efx_channel_is_xdp_tx(channel)) { - efx_for_each_channel_tx_queue(tx_queue, channel) { - tx_queue->queue = next_queue++; - rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); - if (rc == 0) - xdp_queue_number++; - } - } else { - efx_for_each_channel_tx_queue(tx_queue, channel) { - tx_queue->queue = next_queue++; - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n", - channel->channel, tx_queue->label, - tx_queue->queue); - } - - /* If XDP is borrowing queues from net stack, it must use the queue - * with no csum offload, which is the first one of the channel - * (note: channel->tx_queue_by_type is not initialized yet) - */ - if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { - tx_queue = &channel->tx_queue[0]; - rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); - if (rc == 0) - xdp_queue_number++; - } - } - } } - WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && - xdp_queue_number != efx->xdp_tx_queue_count); - WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && - xdp_queue_number > efx->xdp_tx_queue_count); - /* If we have more CPUs than assigned XDP TX queues, assign the already - * existing queues to the exceeding CPUs - */ - next_queue = 0; - while (xdp_queue_number < efx->xdp_tx_queue_count) { - tx_queue = efx->xdp_tx_queues[next_queue++]; - rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); - if (rc == 0) - xdp_queue_number++; - } + efx_set_xdp_channels(efx); rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); if (rc) @@ -1107,7 +1123,7 @@ void efx_start_channels(struct efx_nic *efx) struct efx_rx_queue *rx_queue; struct efx_channel *channel; - efx_for_each_channel(channel, efx) { + efx_for_each_channel_rev(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) { efx_init_tx_queue(tx_queue); atomic_inc(&efx->active_queues); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index be6bfd6b7ec7..50baf62b2cbc 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -163,9 +163,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ spin_lock_bh(&mcdi->iface_lock); ++mcdi->seqno; + seqno = mcdi->seqno & SEQ_MASK; spin_unlock_bh(&mcdi->iface_lock); - seqno = mcdi->seqno & SEQ_MASK; xflags = 0; if (mcdi->mode == MCDI_MODE_EVENTS) xflags |= MCDI_HEADER_XFLAGS_EVREQ; diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 633ca77a26fd..b925de9b4302 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -166,6 +166,9 @@ static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) struct efx_nic *efx = rx_queue->efx; int i; + if (unlikely(!rx_queue->page_ring)) + return; + /* Unmap and release the pages in the recycle ring. Remove the ring. 
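In mcdi.c the sequence-number snapshot moves inside iface_lock, so the increment and the read happen atomically with respect to other command issuers; reading after the unlock could pick up a later caller's increment. A generic sketch:

#include <linux/spinlock.h>
#include <linux/types.h>

struct iface {
	spinlock_t lock;
	unsigned int seqno;
};

static u8 next_seqno(struct iface *iface)
{
	u8 seq;

	/* Increment and snapshot under the same lock; a read taken after the
	 * unlock could observe another caller's increment instead.
	 */
	spin_lock_bh(&iface->lock);
	seq = ++iface->seqno & 0x0f;	/* SEQ_MASK-style wrap, width illustrative */
	spin_unlock_bh(&iface->lock);

	return seq;
}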
*/ for (i = 0; i <= rx_queue->page_ptr_mask; i++) { struct page *page = rx_queue->page_ring[i]; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index d16e031e95f4..6983799e1c05 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -443,6 +443,9 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, if (unlikely(!tx_queue)) return -EINVAL; + if (!tx_queue->initialised) + return -EINVAL; + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu); diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index d530cde2b864..9bc8281b7f5b 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -101,6 +101,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "shutting down TX queue %d\n", tx_queue->queue); + tx_queue->initialised = false; + if (!tx_queue->buffer) return; diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c index cd478d2cd871..00f6d347eaf7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c @@ -57,10 +57,6 @@ #define TSE_PCS_USE_SGMII_ENA BIT(0) #define TSE_PCS_IF_USE_SGMII 0x03 -#define SGMII_ADAPTER_CTRL_REG 0x00 -#define SGMII_ADAPTER_DISABLE 0x0001 -#define SGMII_ADAPTER_ENABLE 0x0000 - #define AUTONEGO_LINK_TIMER 20 static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs) @@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, unsigned int speed) { void __iomem *tse_pcs_base = pcs->tse_pcs_base; - void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; u32 val; - writew(SGMII_ADAPTER_ENABLE, - sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); - pcs->autoneg = phy_dev->autoneg; if (phy_dev->autoneg == AUTONEG_ENABLE) { diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h index 442812c0a4bd..694ac25ef426 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h @@ -10,6 +10,10 @@ #include <linux/phy.h> #include <linux/timer.h> +#define SGMII_ADAPTER_CTRL_REG 0x00 +#define SGMII_ADAPTER_ENABLE 0x0000 +#define SGMII_ADAPTER_DISABLE 0x0001 + struct tse_pcs { struct device *dev; void __iomem *tse_pcs_base; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index b7c2579c963b..ac9e6c7a33b5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -18,9 +18,6 @@ #include "altr_tse_pcs.h" -#define SGMII_ADAPTER_CTRL_REG 0x00 -#define SGMII_ADAPTER_DISABLE 0x0001 - #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 @@ -62,16 +59,14 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) { struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; void __iomem *splitter_base = dwmac->splitter_base; - void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base; void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base; struct device *dev = dwmac->dev; struct net_device *ndev = dev_get_drvdata(dev); struct phy_device *phy_dev = ndev->phydev; u32 val; - if 
((tse_pcs_base) && (sgmii_adapter_base)) - writew(SGMII_ADAPTER_DISABLE, - sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + writew(SGMII_ADAPTER_DISABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); if (splitter_base) { val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG); @@ -93,7 +88,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); } - if (tse_pcs_base && sgmii_adapter_base) + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + if (phy_dev) tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 873b9e3e5da2..05b5371ca036 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -334,8 +334,8 @@ void stmmac_set_ethtool_ops(struct net_device *netdev); int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags); void stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); -int stmmac_open(struct net_device *dev); -int stmmac_release(struct net_device *dev); +int stmmac_xdp_open(struct net_device *dev); +void stmmac_xdp_release(struct net_device *dev); int stmmac_resume(struct device *dev); int stmmac_suspend(struct device *dev); int stmmac_dvr_remove(struct device *dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index a7ec9f4d46ce..d68ef72dcdde 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -71,9 +71,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) writel(value, ioaddr + PTP_TCR); /* wait for present system time initialize to complete */ - return readl_poll_timeout(ioaddr + PTP_TCR, value, + return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value, !(value & PTP_TCR_TSINIT), - 10000, 100000); + 10, 100000); } static int config_addend(void __iomem *ioaddr, u32 addend) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e6af26b2dcb8..9376c4e28626 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2268,6 +2268,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) stmmac_stop_tx(priv, priv->ioaddr, chan); } +static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); + u32 chan; + + for (chan = 0; chan < dma_csr_ch; chan++) { + struct stmmac_channel *ch = &priv->channel[chan]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + spin_unlock_irqrestore(&ch->lock, flags); + } +} + /** * stmmac_start_all_dma - start all RX and TX DMA channels * @priv: driver private structure @@ -2903,8 +2920,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) stmmac_axi(priv, priv->ioaddr, priv->plat->axi); /* DMA CSR Channel configuration */ - for (chan = 0; chan < dma_csr_ch; chan++) + for (chan = 0; chan < dma_csr_ch; chan++) { stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + } /* DMA RX Channel Configuration 
*/ for (chan = 0; chan < rx_channels_count; chan++) { @@ -3667,7 +3686,7 @@ static int stmmac_request_irq(struct net_device *dev) * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. */ -int stmmac_open(struct net_device *dev) +static int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); int mode = priv->plat->phy_interface; @@ -3756,6 +3775,7 @@ int stmmac_open(struct net_device *dev) stmmac_enable_all_queues(priv); netif_tx_start_all_queues(priv->dev); + stmmac_enable_all_dma_irq(priv); return 0; @@ -3791,7 +3811,7 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) * Description: * This is the stop entry point of the driver. */ -int stmmac_release(struct net_device *dev) +static int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); u32 chan; @@ -6456,6 +6476,143 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) spin_unlock_irqrestore(&ch->lock, flags); } +void stmmac_xdp_release(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; + + /* Disable NAPI process */ + stmmac_disable_all_queues(priv); + + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->tx_queue[chan].txtimer); + + /* Free the IRQ lines */ + stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); + + /* Stop TX/RX DMA channels */ + stmmac_stop_all_dma(priv); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv); + + /* Disable the MAC Rx/Tx */ + stmmac_mac_set(priv, priv->ioaddr, false); + + /* set trans_start so we don't get spurious + * watchdogs during reset + */ + netif_trans_update(dev); + netif_carrier_off(dev); +} + +int stmmac_xdp_open(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_cnt, tx_cnt); + struct stmmac_rx_queue *rx_q; + struct stmmac_tx_queue *tx_q; + u32 buf_size; + bool sph_en; + u32 chan; + int ret; + + ret = alloc_dma_desc_resources(priv); + if (ret < 0) { + netdev_err(dev, "%s: DMA descriptors allocation failed\n", + __func__); + goto dma_desc_error; + } + + ret = init_dma_desc_rings(dev, GFP_KERNEL); + if (ret < 0) { + netdev_err(dev, "%s: DMA descriptors initialization failed\n", + __func__); + goto init_error; + } + + /* DMA CSR Channel configuration */ + for (chan = 0; chan < dma_csr_ch; chan++) { + stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + } + + /* Adjust Split header */ + sph_en = (priv->hw->rx_csum > 0) && priv->sph; + + /* DMA RX Channel Configuration */ + for (chan = 0; chan < rx_cnt; chan++) { + rx_q = &priv->rx_queue[chan]; + + stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + rx_q->dma_rx_phy, chan); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->buf_alloc_num * + sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, chan); + + if (rx_q->xsk_pool && rx_q->buf_alloc_num) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + rx_q->queue_index); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_buf_sz, + rx_q->queue_index); + } + + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + } + + /* DMA TX Channel Configuration */ + for (chan = 0; chan < tx_cnt; chan++) { + tx_q = &priv->tx_queue[chan]; + + stmmac_init_tx_chan(priv, 
priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, chan); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy; + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, chan); + + hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + tx_q->txtimer.function = stmmac_tx_timer; + } + + /* Enable the MAC Rx/Tx */ + stmmac_mac_set(priv, priv->ioaddr, true); + + /* Start Rx & Tx DMA Channels */ + stmmac_start_all_dma(priv); + + ret = stmmac_request_irq(dev); + if (ret) + goto irq_error; + + /* Enable NAPI process*/ + stmmac_enable_all_queues(priv); + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + stmmac_enable_all_dma_irq(priv); + + return 0; + +irq_error: + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->tx_queue[chan].txtimer); + + stmmac_hw_teardown(dev); +init_error: + free_dma_desc_resources(priv); +dma_desc_error: + return ret; +} + int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) { struct stmmac_priv *priv = netdev_priv(dev); @@ -7320,6 +7477,7 @@ int stmmac_resume(struct device *dev) stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); stmmac_enable_all_queues(priv); + stmmac_enable_all_dma_irq(priv); mutex_unlock(&priv->lock); rtnl_unlock(); @@ -7336,7 +7494,7 @@ static int __init stmmac_cmdline_opt(char *str) char *opt; if (!str || !*str) - return -EINVAL; + return 1; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "debug:", 6)) { if (kstrtoint(opt + 6, 0, &debug)) @@ -7367,11 +7525,11 @@ static int __init stmmac_cmdline_opt(char *str) goto err; } } - return 0; + return 1; err: pr_err("%s: ERROR broken module parameter conversion", __func__); - return -EINVAL; + return 1; } __setup("stmmaceth=", stmmac_cmdline_opt); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 5d29f336315b..11e1055e8260 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -431,8 +431,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) plat->phylink_node = np; /* Get max speed of operation from device tree */ - if (of_property_read_u32(np, "max-speed", &plat->max_speed)) - plat->max_speed = -1; + of_property_read_u32(np, "max-speed", &plat->max_speed); plat->bus_id = of_alias_get_id(np, "ethernet"); if (plat->bus_id < 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c index 2a616c6f7cd0..9d4d8c3dad0a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c @@ -119,7 +119,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, need_update = !!priv->xdp_prog != !!prog; if (if_running && need_update) - stmmac_release(dev); + stmmac_xdp_release(dev); old_prog = xchg(&priv->xdp_prog, prog); if (old_prog) @@ -129,7 +129,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv); if (if_running && need_update) - stmmac_open(dev); + stmmac_xdp_open(dev); return 0; } diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 62f81b0d14ed..b05ee2e0e305 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -3139,7 +3139,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, if (err) { printk(KERN_ERR "happymeal(PCI): Cannot register net device, " "aborting.\n"); - goto 
err_out_iounmap; + goto err_out_free_coherent; } pci_set_drvdata(pdev, hp); @@ -3172,6 +3172,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, return 0; +err_out_free_coherent: + dma_free_coherent(hp->dma_dev, PAGE_SIZE, + hp->happy_block, hp->hblock_dvma); + err_out_iounmap: iounmap(hp->gregs); diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c index 158c8d3793f4..b5bae6324970 100644 --- a/drivers/net/ethernet/ti/cpsw_ethtool.c +++ b/drivers/net/ethernet/ti/cpsw_ethtool.c @@ -364,11 +364,9 @@ int cpsw_ethtool_op_begin(struct net_device *ndev) struct cpsw_common *cpsw = priv->cpsw; int ret; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) cpsw_err(priv, drv, "ethtool begin failed %d\n", ret); - pm_runtime_put_noidle(cpsw->dev); - } return ret; } diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 43222a34cba0..f9514518700e 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -568,7 +568,9 @@ int cpts_register(struct cpts *cpts) for (i = 0; i < CPTS_MAX_EVENTS; i++) list_add(&cpts->pool_data[i].list, &cpts->pool); - clk_enable(cpts->refclk); + err = clk_enable(cpts->refclk); + if (err) + return err; cpts_write32(cpts, CPTS_EN, control); cpts_write32(cpts, TS_PEND_EN, int_enable); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 463094ced104..2ab29efa6b6e 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1427,6 +1427,8 @@ static int temac_probe(struct platform_device *pdev) lp->indirect_lock = devm_kmalloc(&pdev->dev, sizeof(*lp->indirect_lock), GFP_KERNEL); + if (!lp->indirect_lock) + return -ENOMEM; spin_lock_init(lp->indirect_lock); } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 2169417210c2..fbbbcfe0e891 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -857,46 +857,53 @@ static void axienet_recv(struct net_device *ndev) while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { dma_addr_t phys; - tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; - /* Ensure we see complete descriptor update */ dma_rmb(); - phys = desc_get_phys_addr(lp, cur_p); - dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size, - DMA_FROM_DEVICE); skb = cur_p->skb; cur_p->skb = NULL; - length = cur_p->app4 & 0x0000FFFF; - - skb_put(skb, length); - skb->protocol = eth_type_trans(skb, ndev); - /*skb_checksum_none_assert(skb);*/ - skb->ip_summed = CHECKSUM_NONE; - - /* if we're doing Rx csum offload, set it up */ - if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { - csumstatus = (cur_p->app2 & - XAE_FULL_CSUM_STATUS_MASK) >> 3; - if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) || - (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* skb could be NULL if a previous pass already received the + * packet for this slot in the ring, but failed to refill it + * with a newly allocated buffer. In this case, don't try to + * receive it again. 
+ */ + if (likely(skb)) { + length = cur_p->app4 & 0x0000FFFF; + + phys = desc_get_phys_addr(lp, cur_p); + dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size, + DMA_FROM_DEVICE); + + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, ndev); + /*skb_checksum_none_assert(skb);*/ + skb->ip_summed = CHECKSUM_NONE; + + /* if we're doing Rx csum offload, set it up */ + if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { + csumstatus = (cur_p->app2 & + XAE_FULL_CSUM_STATUS_MASK) >> 3; + if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED || + csumstatus == XAE_IP_UDP_CSUM_VALIDATED) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && + skb->protocol == htons(ETH_P_IP) && + skb->len > 64) { + skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); + skb->ip_summed = CHECKSUM_COMPLETE; } - } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && - skb->protocol == htons(ETH_P_IP) && - skb->len > 64) { - skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); - skb->ip_summed = CHECKSUM_COMPLETE; - } - netif_rx(skb); + netif_rx(skb); - size += length; - packets++; + size += length; + packets++; + } new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); if (!new_skb) - return; + break; phys = dma_map_single(ndev->dev.parent, new_skb->data, lp->max_frm_size, @@ -905,7 +912,7 @@ static void axienet_recv(struct net_device *ndev) if (net_ratelimit()) netdev_err(ndev, "RX DMA mapping error\n"); dev_kfree_skb(new_skb); - return; + break; } desc_set_phys_addr(lp, phys, cur_p); @@ -913,6 +920,11 @@ static void axienet_recv(struct net_device *ndev) cur_p->status = 0; cur_p->skb = new_skb; + /* Only update tail_p to mark this slot as usable after it has + * been successfully refilled. + */ + tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; + if (++lp->rx_bd_ci >= lp->rx_bd_num) lp->rx_bd_ci = 0; cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; @@ -2115,15 +2127,14 @@ static int axienet_probe(struct platform_device *pdev) if (ret) goto cleanup_clk; - lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); - if (lp->phy_node) { - ret = axienet_mdio_setup(lp); - if (ret) - dev_warn(&pdev->dev, - "error registering MDIO bus: %d\n", ret); - } + ret = axienet_mdio_setup(lp); + if (ret) + dev_warn(&pdev->dev, + "error registering MDIO bus: %d\n", ret); + if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { + lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!lp->phy_node) { dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); ret = -EINVAL; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index b780aad3550a..5524ac4fae80 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1185,7 +1185,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) if (rc) { dev_err(dev, "Cannot register network device, aborting\n"); - goto error; + goto put_node; } dev_info(dev, @@ -1193,6 +1193,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) (unsigned long __force)ndev->mem_start, lp->base_addr, ndev->irq); return 0; +put_node: + of_node_put(lp->phy_node); error: free_netdev(ndev); return rc; diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 6192244b304a..36a9fbb70402 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -306,7 +306,6 @@ static void sp_setup(struct net_device *dev) { /* Finish 
setting up the DEVICE info. */ dev->netdev_ops = &sp_netdev_ops; - dev->needs_free_netdev = true; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &ax25_header_ops; @@ -669,14 +668,16 @@ static void sixpack_close(struct tty_struct *tty) */ netif_stop_queue(sp->dev); + unregister_netdev(sp->dev); + del_timer_sync(&sp->tx_t); del_timer_sync(&sp->resync_t); - /* Free all 6pack frame buffers. */ + /* Free all 6pack frame buffers after unreg. */ kfree(sp->rbuff); kfree(sp->xbuff); - unregister_netdev(sp->dev); + free_netdev(sp->dev); } /* Perform I/O control on an active 6pack channel. */ diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 35728f1af6a6..763d435a9564 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -31,6 +31,8 @@ #define AX_MTU 236 +/* some arch define END as assembly function ending, just undef it */ +#undef END /* SLIP/KISS protocol characters. */ #define END 0300 /* indicates end of frame */ #define ESC 0333 /* indicates byte stuffing */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 382bebc2420d..bdfcf75f0827 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1586,6 +1586,9 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, pcpu_sum = kvmalloc_array(num_possible_cpus(), sizeof(struct netvsc_ethtool_pcpu_stats), GFP_KERNEL); + if (!pcpu_sum) + return; + netvsc_get_pcpu_stats(dev, pcpu_sum); for_each_present_cpu(cpu) { struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu]; diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 7d67f41387f5..4f5ef8a9a9a8 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -100,6 +100,7 @@ struct at86rf230_local { unsigned long cal_timeout; bool is_tx; bool is_tx_from_off; + bool was_tx; u8 tx_retry; struct sk_buff *tx_skb; struct at86rf230_state_change tx; @@ -343,7 +344,11 @@ at86rf230_async_error_recover_complete(void *context) if (ctx->free) kfree(ctx); - ieee802154_wake_queue(lp->hw); + if (lp->was_tx) { + lp->was_tx = 0; + dev_kfree_skb_any(lp->tx_skb); + ieee802154_wake_queue(lp->hw); + } } static void @@ -352,7 +357,11 @@ at86rf230_async_error_recover(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - lp->is_tx = 0; + if (lp->is_tx) { + lp->was_tx = 1; + lp->is_tx = 0; + } + at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, at86rf230_async_error_recover_complete); } diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 97fbe850de9b..96592a20c61f 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2977,8 +2977,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw) ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND; ca8210_hw->phy->cca_ed_level = -9800; ca8210_hw->phy->symbol_duration = 16; - ca8210_hw->phy->lifs_period = 40; - ca8210_hw->phy->sifs_period = 12; + ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration; + ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration; ca8210_hw->flags = IEEE802154_HW_AFILT | IEEE802154_HW_OMIT_CKSUM | diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig index d037682fb7ad..6782c2cbf542 100644 --- a/drivers/net/ipa/Kconfig +++ b/drivers/net/ipa/Kconfig @@ -2,7 +2,9 @@ config QCOM_IPA tristate "Qualcomm IPA support" depends on NET && QCOM_SMEM depends on ARCH_QCOM || 
COMPILE_TEST + depends on INTERCONNECT depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST) + depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n select QCOM_MDT_LOADER if ARCH_QCOM select QCOM_SCM select QCOM_QMI_HELPERS diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c index b1c6c0fcb654..f2989aac47a6 100644 --- a/drivers/net/ipa/ipa_power.c +++ b/drivers/net/ipa/ipa_power.c @@ -11,6 +11,8 @@ #include <linux/pm_runtime.h> #include <linux/bitops.h> +#include "linux/soc/qcom/qcom_aoss.h" + #include "ipa.h" #include "ipa_power.h" #include "ipa_endpoint.h" @@ -64,6 +66,7 @@ enum ipa_power_flag { * struct ipa_power - IPA power management information * @dev: IPA device pointer * @core: IPA core clock + * @qmp: QMP handle for AOSS communication * @spinlock: Protects modem TX queue enable/disable * @flags: Boolean state flags * @interconnect_count: Number of elements in interconnect[] @@ -72,6 +75,7 @@ enum ipa_power_flag { struct ipa_power { struct device *dev; struct clk *core; + struct qmp *qmp; spinlock_t spinlock; /* used with STOPPED/STARTED power flags */ DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT); u32 interconnect_count; @@ -382,6 +386,47 @@ void ipa_power_modem_queue_active(struct ipa *ipa) clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags); } +static int ipa_power_retention_init(struct ipa_power *power) +{ + struct qmp *qmp = qmp_get(power->dev); + + if (IS_ERR(qmp)) { + if (PTR_ERR(qmp) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + /* We assume any other error means it's not defined/needed */ + qmp = NULL; + } + power->qmp = qmp; + + return 0; +} + +static void ipa_power_retention_exit(struct ipa_power *power) +{ + qmp_put(power->qmp); + power->qmp = NULL; +} + +/* Control register retention on power collapse */ +void ipa_power_retention(struct ipa *ipa, bool enable) +{ + static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }"; + struct ipa_power *power = ipa->power; + char buf[36]; /* Exactly enough for fmt[]; size a multiple of 4 */ + int ret; + + if (!power->qmp) + return; /* Not needed on this platform */ + + (void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0'); + + ret = qmp_send(power->qmp, buf, sizeof(buf)); + if (ret) + dev_err(power->dev, "error %d sending QMP %sable request\n", + ret, enable ? 
"en" : "dis"); +} + int ipa_power_setup(struct ipa *ipa) { int ret; @@ -438,12 +483,18 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data) if (ret) goto err_kfree; + ret = ipa_power_retention_init(power); + if (ret) + goto err_interconnect_exit; + pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(dev); pm_runtime_enable(dev); return power; +err_interconnect_exit: + ipa_interconnect_exit(power); err_kfree: kfree(power); err_clk_put: @@ -460,6 +511,7 @@ void ipa_power_exit(struct ipa_power *power) pm_runtime_disable(dev); pm_runtime_dont_use_autosuspend(dev); + ipa_power_retention_exit(power); ipa_interconnect_exit(power); kfree(power); clk_put(clk); diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h index 2151805d7fbb..6f84f057a209 100644 --- a/drivers/net/ipa/ipa_power.h +++ b/drivers/net/ipa/ipa_power.h @@ -41,6 +41,13 @@ void ipa_power_modem_queue_wake(struct ipa *ipa); void ipa_power_modem_queue_active(struct ipa *ipa); /** + * ipa_power_retention() - Control register retention on power collapse + * @ipa: IPA pointer + * @enable: Whether retention should be enabled or disabled + */ +void ipa_power_retention(struct ipa *ipa, bool enable); + +/** * ipa_power_setup() - Set up IPA power management * @ipa: IPA pointer * diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c index 856e55a080a7..fe11910518d9 100644 --- a/drivers/net/ipa/ipa_uc.c +++ b/drivers/net/ipa/ipa_uc.c @@ -11,6 +11,7 @@ #include "ipa.h" #include "ipa_uc.h" +#include "ipa_power.h" /** * DOC: The IPA embedded microcontroller @@ -154,6 +155,7 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id) case IPA_UC_RESPONSE_INIT_COMPLETED: if (ipa->uc_powered) { ipa->uc_loaded = true; + ipa_power_retention(ipa, true); pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); ipa->uc_powered = false; @@ -184,6 +186,9 @@ void ipa_uc_deconfig(struct ipa *ipa) ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1); ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0); + if (ipa->uc_loaded) + ipa_power_retention(ipa, false); + if (!ipa->uc_powered) return; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 35f46ad040b0..a9a515cf5a46 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -460,8 +460,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) return RX_HANDLER_CONSUMED; *pskb = skb; eth = eth_hdr(skb); - if (macvlan_forward_source(skb, port, eth->h_source)) + if (macvlan_forward_source(skb, port, eth->h_source)) { + kfree_skb(skb); return RX_HANDLER_CONSUMED; + } src = macvlan_hash_lookup(port, eth->h_source); if (src && src->mode != MACVLAN_MODE_VEPA && src->mode != MACVLAN_MODE_BRIDGE) { @@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) return RX_HANDLER_PASS; } - if (macvlan_forward_source(skb, port, eth->h_source)) + if (macvlan_forward_source(skb, port, eth->h_source)) { + kfree_skb(skb); return RX_HANDLER_CONSUMED; + } if (macvlan_passthru(port)) vlan = list_first_or_null_rcu(&port->vlans, struct macvlan_dev, list); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 694e2f5dbbe5..39801c31e507 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -133,11 +133,17 @@ static void macvtap_setup(struct net_device *dev) dev->tx_queue_len = TUN_READQ_SIZE; } +static struct net *macvtap_link_net(const struct net_device *dev) +{ + return dev_net(macvlan_dev_real_dev(dev)); +} + static struct 
rtnl_link_ops macvtap_link_ops __read_mostly = { .kind = "macvtap", .setup = macvtap_setup, .newlink = macvtap_newlink, .dellink = macvtap_dellink, + .get_link_net = macvtap_link_net, .priv_size = sizeof(struct macvtap_dev), }; diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig index d8f966cedc89..dc71657d9184 100644 --- a/drivers/net/mctp/Kconfig +++ b/drivers/net/mctp/Kconfig @@ -3,6 +3,36 @@ if MCTP menu "MCTP Device Drivers" +config MCTP_SERIAL + tristate "MCTP serial transport" + depends on TTY + select CRC_CCITT + help + This driver provides an MCTP-over-serial interface, through a + serial line-discipline, as defined by DMTF specification "DSP0253 - + MCTP Serial Transport Binding". By attaching the ldisc to a serial + device, we get a new net device to transport MCTP packets. + + This allows communication with external MCTP endpoints which use + serial as their transport. It can also be used as an easy way to + provide MCTP connectivity between virtual machines, by forwarding + data between simple virtual serial devices. + + Say y here if you need to connect to MCTP endpoints over serial. To + compile as a module, use m; the module will be called mctp-serial. + +config MCTP_TRANSPORT_I2C + tristate "MCTP SMBus/I2C transport" + # i2c-mux is optional, but we must build as a module if i2c-mux is a module + depends on I2C_MUX || !I2C_MUX + depends on I2C + depends on I2C_SLAVE + select MCTP_FLOWS + help + Provides a driver to access MCTP devices over SMBus/I2C transport, + from DMTF specification DSP0237. A MCTP protocol network device is + created for each I2C bus that has been assigned a mctp-i2c device. + endmenu endif diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile index e69de29bb2d1..1ca3e6028f77 100644 --- a/drivers/net/mctp/Makefile +++ b/drivers/net/mctp/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o +obj-$(CONFIG_MCTP_TRANSPORT_I2C) += mctp-i2c.o diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c new file mode 100644 index 000000000000..baf7afac7857 --- /dev/null +++ b/drivers/net/mctp/mctp-i2c.c @@ -0,0 +1,1082 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Management Controller Transport Protocol (MCTP) + * Implements DMTF specification + * "DSP0237 Management Component Transport Protocol (MCTP) SMBus/I2C + * Transport Binding" + * https://www.dmtf.org/sites/default/files/standards/documents/DSP0237_1.2.0.pdf + * + * A netdev is created for each I2C bus that handles MCTP. In the case of an I2C + * mux topology a single I2C client is attached to the root of the mux topology, + * shared between all mux I2C busses underneath. For non-mux cases an I2C client + * is attached per netdev. + * + * mctp-i2c-controller.yml devicetree binding has further details. 
+ * + * Copyright (c) 2022 Code Construct + * Copyright (c) 2022 Google + */ + +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/i2c.h> +#include <linux/i2c-mux.h> +#include <linux/if_arp.h> +#include <net/mctp.h> +#include <net/mctpdevice.h> + +/* byte_count is limited to u8 */ +#define MCTP_I2C_MAXBLOCK 255 +/* One byte is taken by source_slave */ +#define MCTP_I2C_MAXMTU (MCTP_I2C_MAXBLOCK - 1) +#define MCTP_I2C_MINMTU (64 + 4) +/* Allow space for dest_address, command, byte_count, data, PEC */ +#define MCTP_I2C_BUFSZ (3 + MCTP_I2C_MAXBLOCK + 1) +#define MCTP_I2C_MINLEN 8 +#define MCTP_I2C_COMMANDCODE 0x0f +#define MCTP_I2C_TX_WORK_LEN 100 +/* Sufficient for 64kB at min mtu */ +#define MCTP_I2C_TX_QUEUE_LEN 1100 + +#define MCTP_I2C_OF_PROP "mctp-controller" + +enum { + MCTP_I2C_FLOW_STATE_NEW = 0, + MCTP_I2C_FLOW_STATE_ACTIVE, +}; + +/* List of all struct mctp_i2c_client + * Lock protects driver_clients and also prevents adding/removing adapters + * during mctp_i2c_client probe/remove. + */ +static DEFINE_MUTEX(driver_clients_lock); +static LIST_HEAD(driver_clients); + +struct mctp_i2c_client; + +/* The netdev structure. One of these per I2C adapter. */ +struct mctp_i2c_dev { + struct net_device *ndev; + struct i2c_adapter *adapter; + struct mctp_i2c_client *client; + struct list_head list; /* For mctp_i2c_client.devs */ + + size_t rx_pos; + u8 rx_buffer[MCTP_I2C_BUFSZ]; + struct completion rx_done; + + struct task_struct *tx_thread; + wait_queue_head_t tx_wq; + struct sk_buff_head tx_queue; + u8 tx_scratch[MCTP_I2C_BUFSZ]; + + /* A fake entry in our tx queue to perform an unlock operation */ + struct sk_buff unlock_marker; + + /* Spinlock protects i2c_lock_count, release_count, allow_rx */ + spinlock_t lock; + int i2c_lock_count; + int release_count; + /* Indicates that the netif is ready to receive incoming packets */ + bool allow_rx; + +}; + +/* The i2c client structure. One per hardware i2c bus at the top of the + * mux tree, shared by multiple netdevs + */ +struct mctp_i2c_client { + struct i2c_client *client; + u8 lladdr; + + struct mctp_i2c_dev *sel; + struct list_head devs; + spinlock_t sel_lock; /* Protects sel and devs */ + + struct list_head list; /* For driver_clients */ +}; + +/* Header on the wire. */ +struct mctp_i2c_hdr { + u8 dest_slave; + u8 command; + /* Count of bytes following byte_count, excluding PEC */ + u8 byte_count; + u8 source_slave; +}; + +static int mctp_i2c_recv(struct mctp_i2c_dev *midev); +static int mctp_i2c_slave_cb(struct i2c_client *client, + enum i2c_slave_event event, u8 *val); +static void mctp_i2c_ndo_uninit(struct net_device *dev); +static int mctp_i2c_ndo_open(struct net_device *dev); + +static struct i2c_adapter *mux_root_adapter(struct i2c_adapter *adap) +{ +#if IS_ENABLED(CONFIG_I2C_MUX) + return i2c_root_adapter(&adap->dev); +#else + /* In non-mux config all i2c adapters are root adapters */ + return adap; +#endif +} + +/* Creates a new i2c slave device attached to the root adapter. + * Sets up the slave callback. + * Must be called with a client on a root adapter. 
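The on-wire layout the new driver uses follows directly from the mctp_i2c_hdr structure and defines above and the PEC handling later in this file: destination slave address (8-bit write form), the 0x0f MCTP command code, a byte count that covers the source slave address plus the MCTP packet, the source slave address with its low bit set, the packet data, and a trailing SMBus PEC computed over the whole frame including the destination address. The sketch below is a minimal, self-contained illustration of that framing under those assumptions; the helper names are hypothetical and the open-coded CRC-8 merely stands in for the kernel's i2c_smbus_pec().

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* SMBus PEC: CRC-8 with polynomial x^8 + x^2 + x + 1 (0x07), MSB first,
 * seeded with the caller-supplied value (0 here), matching what the driver
 * gets from i2c_smbus_pec().
 */
static uint8_t smbus_pec(uint8_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : crc << 1;
	}
	return crc;
}

/* Hypothetical helper: lay out one MCTP-over-SMBus frame as the driver does.
 * dst/src are 7-bit I2C addresses; pkt/len is the MCTP packet.
 * Returns the frame length, or 0 if it cannot be represented.
 */
static size_t mctp_i2c_frame(uint8_t *out, size_t outsz,
			     uint8_t dst, uint8_t src,
			     const uint8_t *pkt, size_t len)
{
	size_t total = 4 + len + 1;		/* header + data + PEC */

	if (len + 1 > 255 || total > outsz)	/* byte_count is a single byte */
		return 0;

	out[0] = dst << 1;			/* dest_slave, write address */
	out[1] = 0x0f;				/* MCTP_I2C_COMMANDCODE */
	out[2] = len + 1;			/* byte_count: source_slave + data */
	out[3] = (src << 1) | 0x01;		/* source_slave with low bit set */
	memcpy(&out[4], pkt, len);
	out[4 + len] = smbus_pec(0, out, 4 + len);	/* PEC over everything */
	return total;
}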
+ */ +static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client) +{ + struct mctp_i2c_client *mcli = NULL; + struct i2c_adapter *root = NULL; + int rc; + + if (client->flags & I2C_CLIENT_TEN) { + dev_err(&client->dev, "failed, MCTP requires a 7-bit I2C address, addr=0x%x\n", + client->addr); + rc = -EINVAL; + goto err; + } + + root = mux_root_adapter(client->adapter); + if (!root) { + dev_err(&client->dev, "failed to find root adapter\n"); + rc = -ENOENT; + goto err; + } + if (root != client->adapter) { + dev_err(&client->dev, + "A mctp-i2c-controller client cannot be placed on an I2C mux adapter.\n" + " It should be placed on the mux tree root adapter\n" + " then set mctp-controller property on adapters to attach\n"); + rc = -EINVAL; + goto err; + } + + mcli = kzalloc(sizeof(*mcli), GFP_KERNEL); + if (!mcli) { + rc = -ENOMEM; + goto err; + } + spin_lock_init(&mcli->sel_lock); + INIT_LIST_HEAD(&mcli->devs); + INIT_LIST_HEAD(&mcli->list); + mcli->lladdr = client->addr & 0xff; + mcli->client = client; + i2c_set_clientdata(client, mcli); + + rc = i2c_slave_register(mcli->client, mctp_i2c_slave_cb); + if (rc < 0) { + dev_err(&client->dev, "i2c register failed %d\n", rc); + mcli->client = NULL; + i2c_set_clientdata(client, NULL); + goto err; + } + + return mcli; +err: + if (mcli) { + if (mcli->client) + i2c_unregister_device(mcli->client); + kfree(mcli); + } + return ERR_PTR(rc); +} + +static void mctp_i2c_free_client(struct mctp_i2c_client *mcli) +{ + int rc; + + WARN_ON(!mutex_is_locked(&driver_clients_lock)); + WARN_ON(!list_empty(&mcli->devs)); + WARN_ON(mcli->sel); /* sanity check, no locking */ + + rc = i2c_slave_unregister(mcli->client); + /* Leak if it fails, we can't propagate errors upwards */ + if (rc < 0) + dev_err(&mcli->client->dev, "i2c unregister failed %d\n", rc); + else + kfree(mcli); +} + +/* Switch the mctp i2c device to receive responses. 
+ * Call with sel_lock held + */ +static void __mctp_i2c_device_select(struct mctp_i2c_client *mcli, + struct mctp_i2c_dev *midev) +{ + assert_spin_locked(&mcli->sel_lock); + if (midev) + dev_hold(midev->ndev); + if (mcli->sel) + dev_put(mcli->sel->ndev); + mcli->sel = midev; +} + +/* Switch the mctp i2c device to receive responses */ +static void mctp_i2c_device_select(struct mctp_i2c_client *mcli, + struct mctp_i2c_dev *midev) +{ + unsigned long flags; + + spin_lock_irqsave(&mcli->sel_lock, flags); + __mctp_i2c_device_select(mcli, midev); + spin_unlock_irqrestore(&mcli->sel_lock, flags); +} + +static int mctp_i2c_slave_cb(struct i2c_client *client, + enum i2c_slave_event event, u8 *val) +{ + struct mctp_i2c_client *mcli = i2c_get_clientdata(client); + struct mctp_i2c_dev *midev = NULL; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&mcli->sel_lock, flags); + midev = mcli->sel; + if (midev) + dev_hold(midev->ndev); + spin_unlock_irqrestore(&mcli->sel_lock, flags); + + if (!midev) + return 0; + + switch (event) { + case I2C_SLAVE_WRITE_RECEIVED: + if (midev->rx_pos < MCTP_I2C_BUFSZ) { + midev->rx_buffer[midev->rx_pos] = *val; + midev->rx_pos++; + } else { + midev->ndev->stats.rx_over_errors++; + } + + break; + case I2C_SLAVE_WRITE_REQUESTED: + /* dest_slave as first byte */ + midev->rx_buffer[0] = mcli->lladdr << 1; + midev->rx_pos = 1; + break; + case I2C_SLAVE_STOP: + rc = mctp_i2c_recv(midev); + break; + default: + break; + } + + dev_put(midev->ndev); + return rc; +} + +/* Processes incoming data that has been accumulated by the slave cb */ +static int mctp_i2c_recv(struct mctp_i2c_dev *midev) +{ + struct net_device *ndev = midev->ndev; + struct mctp_i2c_hdr *hdr; + struct mctp_skb_cb *cb; + struct sk_buff *skb; + unsigned long flags; + u8 pec, calc_pec; + size_t recvlen; + int status; + + /* + 1 for the PEC */ + if (midev->rx_pos < MCTP_I2C_MINLEN + 1) { + ndev->stats.rx_length_errors++; + return -EINVAL; + } + /* recvlen excludes PEC */ + recvlen = midev->rx_pos - 1; + + hdr = (void *)midev->rx_buffer; + if (hdr->command != MCTP_I2C_COMMANDCODE) { + ndev->stats.rx_dropped++; + return -EINVAL; + } + + if (hdr->byte_count + offsetof(struct mctp_i2c_hdr, source_slave) != recvlen) { + ndev->stats.rx_length_errors++; + return -EINVAL; + } + + pec = midev->rx_buffer[midev->rx_pos - 1]; + calc_pec = i2c_smbus_pec(0, midev->rx_buffer, recvlen); + if (pec != calc_pec) { + ndev->stats.rx_crc_errors++; + return -EINVAL; + } + + skb = netdev_alloc_skb(ndev, recvlen); + if (!skb) { + ndev->stats.rx_dropped++; + return -ENOMEM; + } + + skb->protocol = htons(ETH_P_MCTP); + skb_put_data(skb, midev->rx_buffer, recvlen); + skb_reset_mac_header(skb); + skb_pull(skb, sizeof(struct mctp_i2c_hdr)); + skb_reset_network_header(skb); + + cb = __mctp_cb(skb); + cb->halen = 1; + cb->haddr[0] = hdr->source_slave >> 1; + + /* We need to ensure that the netif is not used once netdev + * unregister occurs + */ + spin_lock_irqsave(&midev->lock, flags); + if (midev->allow_rx) { + reinit_completion(&midev->rx_done); + spin_unlock_irqrestore(&midev->lock, flags); + + status = netif_rx(skb); + complete(&midev->rx_done); + } else { + status = NET_RX_DROP; + spin_unlock_irqrestore(&midev->lock, flags); + } + + if (status == NET_RX_SUCCESS) { + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += recvlen; + } else { + ndev->stats.rx_dropped++; + } + return 0; +} + +enum mctp_i2c_flow_state { + MCTP_I2C_TX_FLOW_INVALID, + MCTP_I2C_TX_FLOW_NONE, + MCTP_I2C_TX_FLOW_NEW, + MCTP_I2C_TX_FLOW_EXISTING, +}; + +static 
enum mctp_i2c_flow_state +mctp_i2c_get_tx_flow_state(struct mctp_i2c_dev *midev, struct sk_buff *skb) +{ + enum mctp_i2c_flow_state state; + struct mctp_sk_key *key; + struct mctp_flow *flow; + unsigned long flags; + + flow = skb_ext_find(skb, SKB_EXT_MCTP); + if (!flow) + return MCTP_I2C_TX_FLOW_NONE; + + key = flow->key; + if (!key) + return MCTP_I2C_TX_FLOW_NONE; + + spin_lock_irqsave(&key->lock, flags); + /* If the key is present but invalid, we're unlikely to be able + * to handle the flow at all; just drop now + */ + if (!key->valid) { + state = MCTP_I2C_TX_FLOW_INVALID; + + } else if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_NEW) { + key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE; + state = MCTP_I2C_TX_FLOW_NEW; + } else { + state = MCTP_I2C_TX_FLOW_EXISTING; + } + + spin_unlock_irqrestore(&key->lock, flags); + + return state; +} + +/* We're not contending with ourselves here; we only need to exclude other + * i2c clients from using the bus. refcounts are simply to prevent + * recursive locking. + */ +static void mctp_i2c_lock_nest(struct mctp_i2c_dev *midev) +{ + unsigned long flags; + bool lock; + + spin_lock_irqsave(&midev->lock, flags); + lock = midev->i2c_lock_count == 0; + midev->i2c_lock_count++; + spin_unlock_irqrestore(&midev->lock, flags); + + if (lock) + i2c_lock_bus(midev->adapter, I2C_LOCK_SEGMENT); +} + +static void mctp_i2c_unlock_nest(struct mctp_i2c_dev *midev) +{ + unsigned long flags; + bool unlock; + + spin_lock_irqsave(&midev->lock, flags); + if (!WARN_ONCE(midev->i2c_lock_count == 0, "lock count underflow!")) + midev->i2c_lock_count--; + unlock = midev->i2c_lock_count == 0; + spin_unlock_irqrestore(&midev->lock, flags); + + if (unlock) + i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT); +} + +/* Unlocks the bus if was previously locked, used for cleanup */ +static void mctp_i2c_unlock_reset(struct mctp_i2c_dev *midev) +{ + unsigned long flags; + bool unlock; + + spin_lock_irqsave(&midev->lock, flags); + unlock = midev->i2c_lock_count > 0; + midev->i2c_lock_count = 0; + spin_unlock_irqrestore(&midev->lock, flags); + + if (unlock) + i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT); +} + +static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb) +{ + struct net_device_stats *stats = &midev->ndev->stats; + enum mctp_i2c_flow_state fs; + struct mctp_i2c_hdr *hdr; + struct i2c_msg msg = {0}; + u8 *pecp; + int rc; + + fs = mctp_i2c_get_tx_flow_state(midev, skb); + + hdr = (void *)skb_mac_header(skb); + /* Sanity check that packet contents matches skb length, + * and can't exceed MCTP_I2C_BUFSZ + */ + if (skb->len != hdr->byte_count + 3) { + dev_warn_ratelimited(&midev->adapter->dev, + "Bad tx length %d vs skb %u\n", + hdr->byte_count + 3, skb->len); + return; + } + + if (skb_tailroom(skb) >= 1) { + /* Linear case with space, we can just append the PEC */ + skb_put(skb, 1); + } else { + /* Otherwise need to copy the buffer */ + skb_copy_bits(skb, 0, midev->tx_scratch, skb->len); + hdr = (void *)midev->tx_scratch; + } + + pecp = (void *)&hdr->source_slave + hdr->byte_count; + *pecp = i2c_smbus_pec(0, (u8 *)hdr, hdr->byte_count + 3); + msg.buf = (void *)&hdr->command; + /* command, bytecount, data, pec */ + msg.len = 2 + hdr->byte_count + 1; + msg.addr = hdr->dest_slave >> 1; + + switch (fs) { + case MCTP_I2C_TX_FLOW_NONE: + /* no flow: full lock & unlock */ + mctp_i2c_lock_nest(midev); + mctp_i2c_device_select(midev->client, midev); + rc = __i2c_transfer(midev->adapter, &msg, 1); + mctp_i2c_unlock_nest(midev); + break; + + case MCTP_I2C_TX_FLOW_NEW: 
+ /* new flow: lock, tx, but don't unlock; that will happen + * on flow release + */ + mctp_i2c_lock_nest(midev); + mctp_i2c_device_select(midev->client, midev); + fallthrough; + + case MCTP_I2C_TX_FLOW_EXISTING: + /* existing flow: we already have the lock; just tx */ + rc = __i2c_transfer(midev->adapter, &msg, 1); + break; + + case MCTP_I2C_TX_FLOW_INVALID: + return; + } + + if (rc < 0) { + dev_warn_ratelimited(&midev->adapter->dev, + "__i2c_transfer failed %d\n", rc); + stats->tx_errors++; + } else { + stats->tx_bytes += skb->len; + stats->tx_packets++; + } +} + +static void mctp_i2c_flow_release(struct mctp_i2c_dev *midev) +{ + unsigned long flags; + bool unlock; + + spin_lock_irqsave(&midev->lock, flags); + if (midev->release_count > midev->i2c_lock_count) { + WARN_ONCE(1, "release count overflow"); + midev->release_count = midev->i2c_lock_count; + } + + midev->i2c_lock_count -= midev->release_count; + unlock = midev->i2c_lock_count == 0 && midev->release_count > 0; + midev->release_count = 0; + spin_unlock_irqrestore(&midev->lock, flags); + + if (unlock) + i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT); +} + +static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned int len) +{ + struct mctp_i2c_hdr *hdr; + struct mctp_hdr *mhdr; + u8 lldst, llsrc; + + if (len > MCTP_I2C_MAXMTU) + return -EMSGSIZE; + + lldst = *((u8 *)daddr); + llsrc = *((u8 *)saddr); + + skb_push(skb, sizeof(struct mctp_i2c_hdr)); + skb_reset_mac_header(skb); + hdr = (void *)skb_mac_header(skb); + mhdr = mctp_hdr(skb); + hdr->dest_slave = (lldst << 1) & 0xff; + hdr->command = MCTP_I2C_COMMANDCODE; + hdr->byte_count = len + 1; + hdr->source_slave = ((llsrc << 1) & 0xff) | 0x01; + mhdr->ver = 0x01; + + return 0; +} + +static int mctp_i2c_tx_thread(void *data) +{ + struct mctp_i2c_dev *midev = data; + struct sk_buff *skb; + unsigned long flags; + + for (;;) { + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&midev->tx_queue.lock, flags); + skb = __skb_dequeue(&midev->tx_queue); + if (netif_queue_stopped(midev->ndev)) + netif_wake_queue(midev->ndev); + spin_unlock_irqrestore(&midev->tx_queue.lock, flags); + + if (skb == &midev->unlock_marker) { + mctp_i2c_flow_release(midev); + + } else if (skb) { + mctp_i2c_xmit(midev, skb); + kfree_skb(skb); + + } else { + wait_event_idle(midev->tx_wq, + !skb_queue_empty(&midev->tx_queue) || + kthread_should_stop()); + } + } + + return 0; +} + +static netdev_tx_t mctp_i2c_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct mctp_i2c_dev *midev = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&midev->tx_queue.lock, flags); + if (skb_queue_len(&midev->tx_queue) >= MCTP_I2C_TX_WORK_LEN) { + netif_stop_queue(dev); + spin_unlock_irqrestore(&midev->tx_queue.lock, flags); + netdev_err(dev, "BUG! 
Tx Ring full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + __skb_queue_tail(&midev->tx_queue, skb); + if (skb_queue_len(&midev->tx_queue) == MCTP_I2C_TX_WORK_LEN) + netif_stop_queue(dev); + spin_unlock_irqrestore(&midev->tx_queue.lock, flags); + + wake_up(&midev->tx_wq); + return NETDEV_TX_OK; +} + +static void mctp_i2c_release_flow(struct mctp_dev *mdev, + struct mctp_sk_key *key) + +{ + struct mctp_i2c_dev *midev = netdev_priv(mdev->dev); + unsigned long flags; + + spin_lock_irqsave(&midev->lock, flags); + midev->release_count++; + spin_unlock_irqrestore(&midev->lock, flags); + + /* Ensure we have a release operation queued, through the fake + * marker skb + */ + spin_lock(&midev->tx_queue.lock); + if (!midev->unlock_marker.next) + __skb_queue_tail(&midev->tx_queue, &midev->unlock_marker); + spin_unlock(&midev->tx_queue.lock); + + wake_up(&midev->tx_wq); +} + +static const struct net_device_ops mctp_i2c_ops = { + .ndo_start_xmit = mctp_i2c_start_xmit, + .ndo_uninit = mctp_i2c_ndo_uninit, + .ndo_open = mctp_i2c_ndo_open, +}; + +static const struct header_ops mctp_i2c_headops = { + .create = mctp_i2c_header_create, +}; + +static const struct mctp_netdev_ops mctp_i2c_mctp_ops = { + .release_flow = mctp_i2c_release_flow, +}; + +static void mctp_i2c_net_setup(struct net_device *dev) +{ + dev->type = ARPHRD_MCTP; + + dev->mtu = MCTP_I2C_MAXMTU; + dev->min_mtu = MCTP_I2C_MINMTU; + dev->max_mtu = MCTP_I2C_MAXMTU; + dev->tx_queue_len = MCTP_I2C_TX_QUEUE_LEN; + + dev->hard_header_len = sizeof(struct mctp_i2c_hdr); + dev->addr_len = 1; + + dev->netdev_ops = &mctp_i2c_ops; + dev->header_ops = &mctp_i2c_headops; +} + +/* Populates the mctp_i2c_dev priv struct for a netdev. + * Returns an error pointer on failure. + */ +static struct mctp_i2c_dev *mctp_i2c_midev_init(struct net_device *dev, + struct mctp_i2c_client *mcli, + struct i2c_adapter *adap) +{ + struct mctp_i2c_dev *midev = netdev_priv(dev); + unsigned long flags; + + midev->tx_thread = kthread_create(mctp_i2c_tx_thread, midev, + "%s/tx", dev->name); + if (IS_ERR(midev->tx_thread)) + return ERR_CAST(midev->tx_thread); + + midev->ndev = dev; + get_device(&adap->dev); + midev->adapter = adap; + get_device(&mcli->client->dev); + midev->client = mcli; + INIT_LIST_HEAD(&midev->list); + spin_lock_init(&midev->lock); + midev->i2c_lock_count = 0; + midev->release_count = 0; + init_completion(&midev->rx_done); + complete(&midev->rx_done); + init_waitqueue_head(&midev->tx_wq); + skb_queue_head_init(&midev->tx_queue); + + /* Add to the parent mcli */ + spin_lock_irqsave(&mcli->sel_lock, flags); + list_add(&midev->list, &mcli->devs); + /* Select a device by default */ + if (!mcli->sel) + __mctp_i2c_device_select(mcli, midev); + spin_unlock_irqrestore(&mcli->sel_lock, flags); + + /* Start the worker thread */ + wake_up_process(midev->tx_thread); + + return midev; +} + +/* Counterpart of mctp_i2c_midev_init */ +static void mctp_i2c_midev_free(struct mctp_i2c_dev *midev) +{ + struct mctp_i2c_client *mcli = midev->client; + unsigned long flags; + + if (midev->tx_thread) { + kthread_stop(midev->tx_thread); + midev->tx_thread = NULL; + } + + /* Unconditionally unlock on close */ + mctp_i2c_unlock_reset(midev); + + /* Remove the netdev from the parent i2c client. 
*/ + spin_lock_irqsave(&mcli->sel_lock, flags); + list_del(&midev->list); + if (mcli->sel == midev) { + struct mctp_i2c_dev *first; + + first = list_first_entry_or_null(&mcli->devs, struct mctp_i2c_dev, list); + __mctp_i2c_device_select(mcli, first); + } + spin_unlock_irqrestore(&mcli->sel_lock, flags); + + skb_queue_purge(&midev->tx_queue); + put_device(&midev->adapter->dev); + put_device(&mcli->client->dev); +} + +/* Stops, unregisters, and frees midev */ +static void mctp_i2c_unregister(struct mctp_i2c_dev *midev) +{ + unsigned long flags; + + /* Stop tx thread prior to unregister, it uses netif_() functions */ + kthread_stop(midev->tx_thread); + midev->tx_thread = NULL; + + /* Prevent any new rx in mctp_i2c_recv(), let any pending work finish */ + spin_lock_irqsave(&midev->lock, flags); + midev->allow_rx = false; + spin_unlock_irqrestore(&midev->lock, flags); + wait_for_completion(&midev->rx_done); + + mctp_unregister_netdev(midev->ndev); + /* midev has been freed now by mctp_i2c_ndo_uninit callback */ + + free_netdev(midev->ndev); +} + +static void mctp_i2c_ndo_uninit(struct net_device *dev) +{ + struct mctp_i2c_dev *midev = netdev_priv(dev); + + /* Perform cleanup here to ensure that mcli->sel isn't holding + * a reference that would prevent unregister_netdevice() + * from completing. + */ + mctp_i2c_midev_free(midev); +} + +static int mctp_i2c_ndo_open(struct net_device *dev) +{ + struct mctp_i2c_dev *midev = netdev_priv(dev); + unsigned long flags; + + /* i2c rx handler can only pass packets once the netdev is registered */ + spin_lock_irqsave(&midev->lock, flags); + midev->allow_rx = true; + spin_unlock_irqrestore(&midev->lock, flags); + + return 0; +} + +static int mctp_i2c_add_netdev(struct mctp_i2c_client *mcli, + struct i2c_adapter *adap) +{ + struct mctp_i2c_dev *midev = NULL; + struct net_device *ndev = NULL; + struct i2c_adapter *root; + unsigned long flags; + char namebuf[30]; + int rc; + + root = mux_root_adapter(adap); + if (root != mcli->client->adapter) { + dev_err(&mcli->client->dev, + "I2C adapter %s is not a child bus of %s\n", + mcli->client->adapter->name, root->name); + return -EINVAL; + } + + WARN_ON(!mutex_is_locked(&driver_clients_lock)); + snprintf(namebuf, sizeof(namebuf), "mctpi2c%d", adap->nr); + ndev = alloc_netdev(sizeof(*midev), namebuf, NET_NAME_ENUM, mctp_i2c_net_setup); + if (!ndev) { + dev_err(&mcli->client->dev, "alloc netdev failed\n"); + rc = -ENOMEM; + goto err; + } + dev_net_set(ndev, current->nsproxy->net_ns); + SET_NETDEV_DEV(ndev, &adap->dev); + dev_addr_set(ndev, &mcli->lladdr); + + midev = mctp_i2c_midev_init(ndev, mcli, adap); + if (IS_ERR(midev)) { + rc = PTR_ERR(midev); + midev = NULL; + goto err; + } + + rc = mctp_register_netdev(ndev, &mctp_i2c_mctp_ops); + if (rc < 0) { + dev_err(&mcli->client->dev, + "register netdev \"%s\" failed %d\n", + ndev->name, rc); + goto err; + } + + spin_lock_irqsave(&midev->lock, flags); + midev->allow_rx = false; + spin_unlock_irqrestore(&midev->lock, flags); + + return 0; +err: + if (midev) + mctp_i2c_midev_free(midev); + if (ndev) + free_netdev(ndev); + return rc; +} + +/* Removes any netdev for adap. 
mcli is the parent root i2c client */ +static void mctp_i2c_remove_netdev(struct mctp_i2c_client *mcli, + struct i2c_adapter *adap) +{ + struct mctp_i2c_dev *midev = NULL, *m = NULL; + unsigned long flags; + + WARN_ON(!mutex_is_locked(&driver_clients_lock)); + spin_lock_irqsave(&mcli->sel_lock, flags); + /* List size is limited by number of MCTP netdevs on a single hardware bus */ + list_for_each_entry(m, &mcli->devs, list) + if (m->adapter == adap) { + midev = m; + break; + } + spin_unlock_irqrestore(&mcli->sel_lock, flags); + + if (midev) + mctp_i2c_unregister(midev); +} + +/* Determines whether a device is an i2c adapter. + * Optionally returns the root i2c_adapter + */ +static struct i2c_adapter *mctp_i2c_get_adapter(struct device *dev, + struct i2c_adapter **ret_root) +{ + struct i2c_adapter *root, *adap; + + if (dev->type != &i2c_adapter_type) + return NULL; + adap = to_i2c_adapter(dev); + root = mux_root_adapter(adap); + WARN_ONCE(!root, "MCTP I2C failed to find root adapter for %s\n", + dev_name(dev)); + if (!root) + return NULL; + if (ret_root) + *ret_root = root; + return adap; +} + +/* Determines whether a device is an i2c adapter with the "mctp-controller" + * devicetree property set. If adap is not an OF node, returns match_no_of + */ +static bool mctp_i2c_adapter_match(struct i2c_adapter *adap, bool match_no_of) +{ + if (!adap->dev.of_node) + return match_no_of; + return of_property_read_bool(adap->dev.of_node, MCTP_I2C_OF_PROP); +} + +/* Called for each existing i2c device (adapter or client) when a + * new mctp-i2c client is probed. + */ +static int mctp_i2c_client_try_attach(struct device *dev, void *data) +{ + struct i2c_adapter *adap = NULL, *root = NULL; + struct mctp_i2c_client *mcli = data; + + adap = mctp_i2c_get_adapter(dev, &root); + if (!adap) + return 0; + if (mcli->client->adapter != root) + return 0; + /* Must either have mctp-controller property on the adapter, or + * be a root adapter if it's non-devicetree + */ + if (!mctp_i2c_adapter_match(adap, adap == root)) + return 0; + + return mctp_i2c_add_netdev(mcli, adap); +} + +static void mctp_i2c_notify_add(struct device *dev) +{ + struct mctp_i2c_client *mcli = NULL, *m = NULL; + struct i2c_adapter *root = NULL, *adap = NULL; + int rc; + + adap = mctp_i2c_get_adapter(dev, &root); + if (!adap) + return; + /* Check for mctp-controller property on the adapter */ + if (!mctp_i2c_adapter_match(adap, false)) + return; + + /* Find an existing mcli for adap's root */ + mutex_lock(&driver_clients_lock); + list_for_each_entry(m, &driver_clients, list) { + if (m->client->adapter == root) { + mcli = m; + break; + } + } + + if (mcli) { + rc = mctp_i2c_add_netdev(mcli, adap); + if (rc < 0) + dev_warn(dev, "Failed adding mctp-i2c net device\n"); + } + mutex_unlock(&driver_clients_lock); +} + +static void mctp_i2c_notify_del(struct device *dev) +{ + struct i2c_adapter *root = NULL, *adap = NULL; + struct mctp_i2c_client *mcli = NULL; + + adap = mctp_i2c_get_adapter(dev, &root); + if (!adap) + return; + + mutex_lock(&driver_clients_lock); + list_for_each_entry(mcli, &driver_clients, list) { + if (mcli->client->adapter == root) { + mctp_i2c_remove_netdev(mcli, adap); + break; + } + } + mutex_unlock(&driver_clients_lock); +} + +static int mctp_i2c_probe(struct i2c_client *client) +{ + struct mctp_i2c_client *mcli = NULL; + int rc; + + mutex_lock(&driver_clients_lock); + mcli = mctp_i2c_new_client(client); + if (IS_ERR(mcli)) { + rc = PTR_ERR(mcli); + mcli = NULL; + goto out; + } else { + list_add(&mcli->list, &driver_clients); + 
} + + /* Add a netdev for adapters that have a 'mctp-controller' property */ + i2c_for_each_dev(mcli, mctp_i2c_client_try_attach); + rc = 0; +out: + mutex_unlock(&driver_clients_lock); + return rc; +} + +static int mctp_i2c_remove(struct i2c_client *client) +{ + struct mctp_i2c_client *mcli = i2c_get_clientdata(client); + struct mctp_i2c_dev *midev = NULL, *tmp = NULL; + + mutex_lock(&driver_clients_lock); + list_del(&mcli->list); + /* Remove all child adapter netdevs */ + list_for_each_entry_safe(midev, tmp, &mcli->devs, list) + mctp_i2c_unregister(midev); + + mctp_i2c_free_client(mcli); + mutex_unlock(&driver_clients_lock); + /* Callers ignore return code */ + return 0; +} + +/* We look for a 'mctp-controller' property on I2C busses as they are + * added/deleted, creating/removing netdevs as required. + */ +static int mctp_i2c_notifier_call(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + mctp_i2c_notify_add(dev); + break; + case BUS_NOTIFY_DEL_DEVICE: + mctp_i2c_notify_del(dev); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block mctp_i2c_notifier = { + .notifier_call = mctp_i2c_notifier_call, +}; + +static const struct i2c_device_id mctp_i2c_id[] = { + { "mctp-i2c-interface", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(i2c, mctp_i2c_id); + +static const struct of_device_id mctp_i2c_of_match[] = { + { .compatible = "mctp-i2c-controller" }, + {}, +}; +MODULE_DEVICE_TABLE(of, mctp_i2c_of_match); + +static struct i2c_driver mctp_i2c_driver = { + .driver = { + .name = "mctp-i2c-interface", + .of_match_table = mctp_i2c_of_match, + }, + .probe_new = mctp_i2c_probe, + .remove = mctp_i2c_remove, + .id_table = mctp_i2c_id, +}; + +static __init int mctp_i2c_mod_init(void) +{ + int rc; + + pr_info("MCTP I2C interface driver\n"); + rc = i2c_add_driver(&mctp_i2c_driver); + if (rc < 0) + return rc; + rc = bus_register_notifier(&i2c_bus_type, &mctp_i2c_notifier); + if (rc < 0) { + i2c_del_driver(&mctp_i2c_driver); + return rc; + } + return 0; +} + +static __exit void mctp_i2c_mod_exit(void) +{ + int rc; + + rc = bus_unregister_notifier(&i2c_bus_type, &mctp_i2c_notifier); + if (rc < 0) + pr_warn("MCTP I2C could not unregister notifier, %d\n", rc); + i2c_del_driver(&mctp_i2c_driver); +} + +module_init(mctp_i2c_mod_init); +module_exit(mctp_i2c_mod_exit); + +MODULE_DESCRIPTION("MCTP I2C device"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Matt Johnston <matt@codeconstruct.com.au>"); diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c new file mode 100644 index 000000000000..62723a7faa2d --- /dev/null +++ b/drivers/net/mctp/mctp-serial.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Management Component Transport Protocol (MCTP) - serial transport + * binding. This driver is an implementation of the DMTF specificiation + * "DSP0253 - Management Component Transport Protocol (MCTP) Serial Transport + * Binding", available at: + * + * https://www.dmtf.org/sites/default/files/standards/documents/DSP0253_1.0.0.pdf + * + * This driver provides DSP0253-type MCTP-over-serial transport using a Linux + * tty device, by setting the N_MCTP line discipline on the tty. 
+ * + * Copyright (c) 2021 Code Construct + */ + +#include <linux/idr.h> +#include <linux/if_arp.h> +#include <linux/module.h> +#include <linux/skbuff.h> +#include <linux/tty.h> +#include <linux/workqueue.h> +#include <linux/crc-ccitt.h> + +#include <linux/mctp.h> +#include <net/mctp.h> +#include <net/pkt_sched.h> + +#define MCTP_SERIAL_MTU 68 /* base mtu (64) + mctp header */ +#define MCTP_SERIAL_FRAME_MTU (MCTP_SERIAL_MTU + 6) /* + serial framing */ + +#define MCTP_SERIAL_VERSION 0x1 /* DSP0253 defines a single version: 1 */ + +#define BUFSIZE MCTP_SERIAL_FRAME_MTU + +#define BYTE_FRAME 0x7e +#define BYTE_ESC 0x7d + +static DEFINE_IDA(mctp_serial_ida); + +enum mctp_serial_state { + STATE_IDLE, + STATE_START, + STATE_HEADER, + STATE_DATA, + STATE_ESCAPE, + STATE_TRAILER, + STATE_DONE, + STATE_ERR, +}; + +struct mctp_serial { + struct net_device *netdev; + struct tty_struct *tty; + + int idx; + + /* protects our rx & tx state machines; held during both paths */ + spinlock_t lock; + + struct work_struct tx_work; + enum mctp_serial_state txstate, rxstate; + u16 txfcs, rxfcs, rxfcs_rcvd; + unsigned int txlen, rxlen; + unsigned int txpos, rxpos; + unsigned char txbuf[BUFSIZE], + rxbuf[BUFSIZE]; +}; + +static bool needs_escape(unsigned char c) +{ + return c == BYTE_ESC || c == BYTE_FRAME; +} + +static int next_chunk_len(struct mctp_serial *dev) +{ + int i; + + /* either we have no bytes to send ... */ + if (dev->txpos == dev->txlen) + return 0; + + /* ... or the next byte to send is an escaped byte; requiring a + * single-byte chunk... + */ + if (needs_escape(dev->txbuf[dev->txpos])) + return 1; + + /* ... or we have one or more bytes up to the next escape - this chunk + * will be those non-escaped bytes, and does not include the escaped + * byte. + */ + for (i = 1; i + dev->txpos + 1 < dev->txlen; i++) { + if (needs_escape(dev->txbuf[dev->txpos + i + 1])) + break; + } + + return i; +} + +static int write_chunk(struct mctp_serial *dev, unsigned char *buf, int len) +{ + return dev->tty->ops->write(dev->tty, buf, len); +} + +static void mctp_serial_tx_work(struct work_struct *work) +{ + struct mctp_serial *dev = container_of(work, struct mctp_serial, + tx_work); + unsigned char c, buf[3]; + unsigned long flags; + int len, txlen; + + spin_lock_irqsave(&dev->lock, flags); + + /* txstate represents the next thing to send */ + switch (dev->txstate) { + case STATE_START: + dev->txpos = 0; + fallthrough; + case STATE_HEADER: + buf[0] = BYTE_FRAME; + buf[1] = MCTP_SERIAL_VERSION; + buf[2] = dev->txlen; + + if (!dev->txpos) + dev->txfcs = crc_ccitt(0, buf + 1, 2); + + txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos); + if (txlen <= 0) { + dev->txstate = STATE_ERR; + } else { + dev->txpos += txlen; + if (dev->txpos == 3) { + dev->txstate = STATE_DATA; + dev->txpos = 0; + } + } + break; + + case STATE_ESCAPE: + buf[0] = dev->txbuf[dev->txpos] & ~0x20; + txlen = write_chunk(dev, buf, 1); + if (txlen <= 0) { + dev->txstate = STATE_ERR; + } else { + dev->txpos += txlen; + if (dev->txpos == dev->txlen) { + dev->txstate = STATE_TRAILER; + dev->txpos = 0; + } + } + + break; + + case STATE_DATA: + len = next_chunk_len(dev); + if (len) { + c = dev->txbuf[dev->txpos]; + if (len == 1 && needs_escape(c)) { + buf[0] = BYTE_ESC; + buf[1] = c & ~0x20; + dev->txfcs = crc_ccitt_byte(dev->txfcs, c); + txlen = write_chunk(dev, buf, 2); + if (txlen == 2) + dev->txpos++; + else if (txlen == 1) + dev->txstate = STATE_ESCAPE; + else + dev->txstate = STATE_ERR; + } else { + txlen = write_chunk(dev, + dev->txbuf + 
dev->txpos, + len); + if (txlen <= 0) { + dev->txstate = STATE_ERR; + } else { + dev->txfcs = crc_ccitt(dev->txfcs, + dev->txbuf + + dev->txpos, + txlen); + dev->txpos += txlen; + } + } + if (dev->txstate == STATE_DATA && + dev->txpos == dev->txlen) { + dev->txstate = STATE_TRAILER; + dev->txpos = 0; + } + break; + } + dev->txstate = STATE_TRAILER; + dev->txpos = 0; + fallthrough; + + case STATE_TRAILER: + buf[0] = dev->txfcs >> 8; + buf[1] = dev->txfcs & 0xff; + buf[2] = BYTE_FRAME; + txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos); + if (txlen <= 0) { + dev->txstate = STATE_ERR; + } else { + dev->txpos += txlen; + if (dev->txpos == 3) { + dev->txstate = STATE_DONE; + dev->txpos = 0; + } + } + break; + default: + netdev_err_once(dev->netdev, "invalid tx state %d\n", + dev->txstate); + } + + if (dev->txstate == STATE_DONE) { + dev->netdev->stats.tx_packets++; + dev->netdev->stats.tx_bytes += dev->txlen; + dev->txlen = 0; + dev->txpos = 0; + clear_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags); + dev->txstate = STATE_IDLE; + spin_unlock_irqrestore(&dev->lock, flags); + + netif_wake_queue(dev->netdev); + } else { + spin_unlock_irqrestore(&dev->lock, flags); + } +} + +static netdev_tx_t mctp_serial_tx(struct sk_buff *skb, struct net_device *ndev) +{ + struct mctp_serial *dev = netdev_priv(ndev); + unsigned long flags; + + WARN_ON(dev->txstate != STATE_IDLE); + + if (skb->len > MCTP_SERIAL_MTU) { + dev->netdev->stats.tx_dropped++; + goto out; + } + + spin_lock_irqsave(&dev->lock, flags); + netif_stop_queue(dev->netdev); + skb_copy_bits(skb, 0, dev->txbuf, skb->len); + dev->txpos = 0; + dev->txlen = skb->len; + dev->txstate = STATE_START; + spin_unlock_irqrestore(&dev->lock, flags); + + set_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags); + schedule_work(&dev->tx_work); + +out: + kfree_skb(skb); + return NETDEV_TX_OK; +} + +static void mctp_serial_tty_write_wakeup(struct tty_struct *tty) +{ + struct mctp_serial *dev = tty->disc_data; + + schedule_work(&dev->tx_work); +} + +static void mctp_serial_rx(struct mctp_serial *dev) +{ + struct mctp_skb_cb *cb; + struct sk_buff *skb; + + if (dev->rxfcs != dev->rxfcs_rcvd) { + dev->netdev->stats.rx_dropped++; + dev->netdev->stats.rx_crc_errors++; + return; + } + + skb = netdev_alloc_skb(dev->netdev, dev->rxlen); + if (!skb) { + dev->netdev->stats.rx_dropped++; + return; + } + + skb->protocol = htons(ETH_P_MCTP); + skb_put_data(skb, dev->rxbuf, dev->rxlen); + skb_reset_network_header(skb); + + cb = __mctp_cb(skb); + cb->halen = 0; + + netif_rx_ni(skb); + dev->netdev->stats.rx_packets++; + dev->netdev->stats.rx_bytes += dev->rxlen; +} + +static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c) +{ + switch (dev->rxpos) { + case 0: + if (c == BYTE_FRAME) + dev->rxpos++; + else + dev->rxstate = STATE_ERR; + break; + case 1: + if (c == MCTP_SERIAL_VERSION) { + dev->rxpos++; + dev->rxfcs = crc_ccitt_byte(0, c); + } else { + dev->rxstate = STATE_ERR; + } + break; + case 2: + if (c > MCTP_SERIAL_FRAME_MTU) { + dev->rxstate = STATE_ERR; + } else { + dev->rxlen = c; + dev->rxpos = 0; + dev->rxstate = STATE_DATA; + dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c); + } + break; + } +} + +static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c) +{ + switch (dev->rxpos) { + case 0: + dev->rxfcs_rcvd = c << 8; + dev->rxpos++; + break; + case 1: + dev->rxfcs_rcvd |= c; + dev->rxpos++; + break; + case 2: + if (c != BYTE_FRAME) { + dev->rxstate = STATE_ERR; + } else { + mctp_serial_rx(dev); + dev->rxlen = 0; + dev->rxpos = 0; + 
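			/* frame handed up; wait for the next 0x7e flag
			 * to start a new header
			 */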
dev->rxstate = STATE_IDLE; + } + break; + } +} + +static void mctp_serial_push(struct mctp_serial *dev, unsigned char c) +{ + switch (dev->rxstate) { + case STATE_IDLE: + dev->rxstate = STATE_HEADER; + fallthrough; + case STATE_HEADER: + mctp_serial_push_header(dev, c); + break; + + case STATE_ESCAPE: + c |= 0x20; + fallthrough; + case STATE_DATA: + if (dev->rxstate != STATE_ESCAPE && c == BYTE_ESC) { + dev->rxstate = STATE_ESCAPE; + } else { + dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c); + dev->rxbuf[dev->rxpos] = c; + dev->rxpos++; + dev->rxstate = STATE_DATA; + if (dev->rxpos == dev->rxlen) { + dev->rxpos = 0; + dev->rxstate = STATE_TRAILER; + } + } + break; + + case STATE_TRAILER: + mctp_serial_push_trailer(dev, c); + break; + + case STATE_ERR: + if (c == BYTE_FRAME) + dev->rxstate = STATE_IDLE; + break; + + default: + netdev_err_once(dev->netdev, "invalid rx state %d\n", + dev->rxstate); + } +} + +static void mctp_serial_tty_receive_buf(struct tty_struct *tty, + const unsigned char *c, + const char *f, int len) +{ + struct mctp_serial *dev = tty->disc_data; + int i; + + if (!netif_running(dev->netdev)) + return; + + /* we don't (currently) use the flag bytes, just data. */ + for (i = 0; i < len; i++) + mctp_serial_push(dev, c[i]); +} + +static void mctp_serial_uninit(struct net_device *ndev) +{ + struct mctp_serial *dev = netdev_priv(ndev); + + cancel_work_sync(&dev->tx_work); +} + +static const struct net_device_ops mctp_serial_netdev_ops = { + .ndo_start_xmit = mctp_serial_tx, + .ndo_uninit = mctp_serial_uninit, +}; + +static void mctp_serial_setup(struct net_device *ndev) +{ + ndev->type = ARPHRD_MCTP; + + /* we limit at the fixed MTU, which is also the MCTP-standard + * baseline MTU, so is also our minimum + */ + ndev->mtu = MCTP_SERIAL_MTU; + ndev->max_mtu = MCTP_SERIAL_MTU; + ndev->min_mtu = MCTP_SERIAL_MTU; + + ndev->hard_header_len = 0; + ndev->addr_len = 0; + ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; + ndev->flags = IFF_NOARP; + ndev->netdev_ops = &mctp_serial_netdev_ops; + ndev->needs_free_netdev = true; +} + +static int mctp_serial_open(struct tty_struct *tty) +{ + struct mctp_serial *dev; + struct net_device *ndev; + char name[32]; + int idx, rc; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!tty->ops->write) + return -EOPNOTSUPP; + + idx = ida_alloc(&mctp_serial_ida, GFP_KERNEL); + if (idx < 0) + return idx; + + snprintf(name, sizeof(name), "mctpserial%d", idx); + ndev = alloc_netdev(sizeof(*dev), name, NET_NAME_ENUM, + mctp_serial_setup); + if (!ndev) { + rc = -ENOMEM; + goto free_ida; + } + + dev = netdev_priv(ndev); + dev->idx = idx; + dev->tty = tty; + dev->netdev = ndev; + dev->txstate = STATE_IDLE; + dev->rxstate = STATE_IDLE; + spin_lock_init(&dev->lock); + INIT_WORK(&dev->tx_work, mctp_serial_tx_work); + + rc = register_netdev(ndev); + if (rc) + goto free_netdev; + + tty->receive_room = 64 * 1024; + tty->disc_data = dev; + + return 0; + +free_netdev: + free_netdev(ndev); + +free_ida: + ida_free(&mctp_serial_ida, idx); + return rc; +} + +static void mctp_serial_close(struct tty_struct *tty) +{ + struct mctp_serial *dev = tty->disc_data; + int idx = dev->idx; + + unregister_netdev(dev->netdev); + ida_free(&mctp_serial_ida, idx); +} + +static struct tty_ldisc_ops mctp_ldisc = { + .owner = THIS_MODULE, + .num = N_MCTP, + .name = "mctp", + .open = mctp_serial_open, + .close = mctp_serial_close, + .receive_buf = mctp_serial_tty_receive_buf, + .write_wakeup = mctp_serial_tty_write_wakeup, +}; + +static int __init mctp_serial_init(void) +{ + return 
tty_register_ldisc(&mctp_ldisc); +} + +static void __exit mctp_serial_exit(void) +{ + tty_unregister_ldisc(&mctp_ldisc); +} + +module_init(mctp_serial_init); +module_exit(mctp_serial_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>"); +MODULE_DESCRIPTION("MCTP Serial transport"); diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c index 1becb1a731f6..1c1584fca632 100644 --- a/drivers/net/mdio/fwnode_mdio.c +++ b/drivers/net/mdio/fwnode_mdio.c @@ -43,6 +43,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio, int rc; rc = fwnode_irq_get(child, 0); + /* Don't wait forever if the IRQ provider doesn't become available, + * just fall back to poll mode + */ + if (rc == -EPROBE_DEFER) + rc = driver_deferred_probe_check_state(&phy->mdio.dev); if (rc == -EPROBE_DEFER) return rc; diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index e2273588c75b..944d005d2bd1 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -3,6 +3,7 @@ #include <linux/bitfield.h> #include <linux/delay.h> +#include <linux/reset.h> #include <linux/iopoll.h> #include <linux/mdio.h> #include <linux/module.h> @@ -21,6 +22,10 @@ #define ASPEED_MDIO_CTRL_OP GENMASK(27, 26) #define MDIO_C22_OP_WRITE 0b01 #define MDIO_C22_OP_READ 0b10 +#define MDIO_C45_OP_ADDR 0b00 +#define MDIO_C45_OP_WRITE 0b01 +#define MDIO_C45_OP_PREAD 0b10 +#define MDIO_C45_OP_READ 0b11 #define ASPEED_MDIO_CTRL_PHYAD GENMASK(25, 21) #define ASPEED_MDIO_CTRL_REGAD GENMASK(20, 16) #define ASPEED_MDIO_CTRL_MIIWDATA GENMASK(15, 0) @@ -37,36 +42,38 @@ struct aspeed_mdio { void __iomem *base; + struct reset_control *reset; }; -static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) +static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad, u8 regad, + u16 data) { struct aspeed_mdio *ctx = bus->priv; u32 ctrl; - u32 data; - int rc; - dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr, - regnum); - - /* Just clause 22 for the moment */ - if (regnum & MII_ADDR_C45) - return -EOPNOTSUPP; + dev_dbg(&bus->dev, "%s: st: %u op: %u, phyad: %u, regad: %u, data: %u\n", + __func__, st, op, phyad, regad, data); ctrl = ASPEED_MDIO_CTRL_FIRE - | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22) - | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_READ) - | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr) - | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum); + | FIELD_PREP(ASPEED_MDIO_CTRL_ST, st) + | FIELD_PREP(ASPEED_MDIO_CTRL_OP, op) + | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, phyad) + | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regad) + | FIELD_PREP(ASPEED_MDIO_DATA_MIIRDATA, data); iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); - rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, + return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, !(ctrl & ASPEED_MDIO_CTRL_FIRE), ASPEED_MDIO_INTERVAL_US, ASPEED_MDIO_TIMEOUT_US); - if (rc < 0) - return rc; +} + +static int aspeed_mdio_get_data(struct mii_bus *bus) +{ + struct aspeed_mdio *ctx = bus->priv; + u32 data; + int rc; rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data, data & ASPEED_MDIO_DATA_IDLE, @@ -78,31 +85,80 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) return FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data); } -static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) +static int aspeed_mdio_read_c22(struct mii_bus *bus, int addr, int regnum) { - struct aspeed_mdio *ctx = bus->priv; - u32 ctrl; + int rc; - 
dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n", - __func__, addr, regnum, val); + rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_READ, + addr, regnum, 0); + if (rc < 0) + return rc; + + return aspeed_mdio_get_data(bus); +} + +static int aspeed_mdio_write_c22(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_WRITE, + addr, regnum, val); +} + +static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int regnum) +{ + u8 c45_dev = (regnum >> 16) & 0x1F; + u16 c45_addr = regnum & 0xFFFF; + int rc; + + rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR, + addr, c45_dev, c45_addr); + if (rc < 0) + return rc; + + rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_READ, + addr, c45_dev, 0); + if (rc < 0) + return rc; + + return aspeed_mdio_get_data(bus); +} + +static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + u8 c45_dev = (regnum >> 16) & 0x1F; + u16 c45_addr = regnum & 0xFFFF; + int rc; + + rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR, + addr, c45_dev, c45_addr); + if (rc < 0) + return rc; + + return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_WRITE, + addr, c45_dev, val); +} + +static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr, + regnum); - /* Just clause 22 for the moment */ if (regnum & MII_ADDR_C45) - return -EOPNOTSUPP; + return aspeed_mdio_read_c45(bus, addr, regnum); - ctrl = ASPEED_MDIO_CTRL_FIRE - | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22) - | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_WRITE) - | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr) - | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum) - | FIELD_PREP(ASPEED_MDIO_CTRL_MIIWDATA, val); + return aspeed_mdio_read_c22(bus, addr, regnum); +} - iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); +static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n", + __func__, addr, regnum, val); - return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, - !(ctrl & ASPEED_MDIO_CTRL_FIRE), - ASPEED_MDIO_INTERVAL_US, - ASPEED_MDIO_TIMEOUT_US); + if (regnum & MII_ADDR_C45) + return aspeed_mdio_write_c45(bus, addr, regnum, val); + + return aspeed_mdio_write_c22(bus, addr, regnum, val); } static int aspeed_mdio_probe(struct platform_device *pdev) @@ -120,15 +176,23 @@ static int aspeed_mdio_probe(struct platform_device *pdev) if (IS_ERR(ctx->base)) return PTR_ERR(ctx->base); + ctx->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL); + if (IS_ERR(ctx->reset)) + return PTR_ERR(ctx->reset); + + reset_control_deassert(ctx->reset); + bus->name = DRV_NAME; snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id); bus->parent = &pdev->dev; bus->read = aspeed_mdio_read; bus->write = aspeed_mdio_write; + bus->probe_capabilities = MDIOBUS_C22_C45; rc = of_mdiobus_register(bus, pdev->dev.of_node); if (rc) { dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); + reset_control_assert(ctx->reset); return rc; } @@ -139,7 +203,11 @@ static int aspeed_mdio_probe(struct platform_device *pdev) static int aspeed_mdio_remove(struct platform_device *pdev) { - mdiobus_unregister(platform_get_drvdata(pdev)); + struct mii_bus *bus = (struct mii_bus *)platform_get_drvdata(pdev); + struct aspeed_mdio *ctx = bus->priv; + + reset_control_assert(ctx->reset); + 
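	/* the reset line is optional and shared; this balances the
	 * deassert done in probe and is a no-op if no reset was found
	 */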
mdiobus_unregister(bus); return 0; } diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 5f4cd24a0241..4eba5a91075c 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -200,7 +200,11 @@ static int ipq_mdio_reset(struct mii_bus *bus) if (ret) return ret; - return clk_prepare_enable(priv->mdio_clk); + ret = clk_prepare_enable(priv->mdio_clk); + if (ret == 0) + mdelay(10); + + return ret; } static int ipq4019_mdio_probe(struct platform_device *pdev) diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 17f98f609ec8..5070ca2f2637 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -76,6 +76,9 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum) u32 val; int ret; + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + ret = mscc_miim_wait_pending(bus); if (ret) goto out; @@ -105,6 +108,9 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id, struct mscc_miim_dev *miim = bus->priv; int ret; + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + ret = mscc_miim_wait_pending(bus); if (ret < 0) goto out; diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 4300261e2f9e..378ee779061c 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -623,14 +623,14 @@ static int nsim_fib6_rt_append(struct nsim_fib_data *data, if (err) goto err_fib6_rt_nh_del; - fib6_event->rt_arr[i]->trap = true; + WRITE_ONCE(fib6_event->rt_arr[i]->trap, true); } return 0; err_fib6_rt_nh_del: for (i--; i >= 0; i--) { - fib6_event->rt_arr[i]->trap = false; + WRITE_ONCE(fib6_event->rt_arr[i]->trap, false); nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]); } return err; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index bdac087058b2..5ae39d236b30 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -666,25 +666,7 @@ static int at803x_probe(struct phy_device *phydev) return ret; } - /* Some bootloaders leave the fiber page selected. - * Switch to the copper page, as otherwise we read - * the PHY capabilities from the fiber side. - */ - if (phydev->drv->phy_id == ATH8031_PHY_ID) { - phy_lock_mdio_bus(phydev); - ret = at803x_write_page(phydev, AT803X_PAGE_COPPER); - phy_unlock_mdio_bus(phydev); - if (ret) - goto err; - } - return 0; - -err: - if (priv->vddio) - regulator_disable(priv->vddio); - - return ret; } static void at803x_remove(struct phy_device *phydev) @@ -785,6 +767,22 @@ static int at803x_config_init(struct phy_device *phydev) { int ret; + if (phydev->drv->phy_id == ATH8031_PHY_ID) { + /* Some bootloaders leave the fiber page selected. + * Switch to the copper page, as otherwise we read + * the PHY capabilities from the fiber side. + */ + phy_lock_mdio_bus(phydev); + ret = at803x_write_page(phydev, AT803X_PAGE_COPPER); + phy_unlock_mdio_bus(phydev); + if (ret) + return ret; + + ret = at8031_pll_config(phydev); + if (ret < 0) + return ret; + } + /* The RX and TX delay default is: * after HW reset: RX delay enabled and TX delay disabled * after SW reset: RX delay enabled, while TX delay retains the @@ -814,12 +812,6 @@ static int at803x_config_init(struct phy_device *phydev) if (ret < 0) return ret; - if (phydev->drv->phy_id == ATH8031_PHY_ID) { - ret = at8031_pll_config(phydev); - if (ret < 0) - return ret; - } - /* Ar803x extended next page bit is enabled by default. 
Cisco * multigig switches read this bit and attempt to negotiate 10Gbps * rates even if the next page bit is disabled. This is incorrect diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index db26ff8ce7db..b330efb98209 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -11,6 +11,7 @@ */ #include "bcm-phy-lib.h" +#include <linux/delay.h> #include <linux/module.h> #include <linux/phy.h> #include <linux/brcmphy.h> @@ -553,6 +554,26 @@ static int brcm_fet_config_init(struct phy_device *phydev) if (err < 0) return err; + /* The datasheet indicates the PHY needs up to 1us to complete a reset, + * build some slack here. + */ + usleep_range(1000, 2000); + + /* The PHY requires 65 MDC clock cycles to complete a write operation + * and turnaround the line properly. + * + * We ignore -EIO here as the MDIO controller (e.g.: mdio-bcm-unimac) + * may flag the lack of turn-around as a read failure. This is + * particularly true with this combination since the MDIO controller + * only used 64 MDC cycles. This is not a critical failure in this + * specific case and it has no functional impact otherwise, so we let + * that one go through. If there is a genuine bus error, the next read + * of MII_BRCM_FET_INTREG will error out. + */ + err = phy_read(phydev, MII_BMCR); + if (err < 0 && err != -EIO) + return err; + reg = phy_read(phydev, MII_BRCM_FET_INTREG); if (reg < 0) return reg; diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 211b5476a6f5..ce17b2af3218 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -274,7 +274,7 @@ static int dp83822_config_intr(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_DP83822_MISR1, 0); + err = phy_write(phydev, MII_DP83822_MISR2, 0); if (err < 0) return err; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index cfda625dbea5..4d726ee03ce2 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1693,8 +1693,8 @@ static int marvell_suspend(struct phy_device *phydev) int err; /* Suspend the fiber mode first */ - if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, - phydev->supported)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) goto error; @@ -1728,8 +1728,8 @@ static int marvell_resume(struct phy_device *phydev) int err; /* Resume the fiber mode first */ - if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, - phydev->supported)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) goto error; diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c index b7a5ae20edd5..68ee434f9dea 100644 --- a/drivers/net/phy/mediatek-ge.c +++ b/drivers/net/phy/mediatek-ge.c @@ -55,9 +55,6 @@ static int mt7530_phy_config_init(struct phy_device *phydev) static int mt7531_phy_config_init(struct phy_device *phydev) { - if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL) - return -EINVAL; - mtk_gephy_config_init(phydev); /* PHY link down power saving enable */ diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index 7e7904fee1d9..73f7962a37d3 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -30,8 +30,12 @@ #define INTSRC_LINK_DOWN BIT(4) #define INTSRC_REMOTE_FAULT BIT(5) #define INTSRC_ANEG_COMPLETE BIT(6) +#define INTSRC_ENERGY_DETECT BIT(7) #define INTSRC_MASK 30 +#define 
INT_SOURCES (INTSRC_LINK_DOWN | INTSRC_ANEG_COMPLETE | \ + INTSRC_ENERGY_DETECT) + #define BANK_ANALOG_DSP 0 #define BANK_WOL 1 #define BANK_BIST 3 @@ -200,7 +204,6 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev) static int meson_gxl_config_intr(struct phy_device *phydev) { - u16 val; int ret; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { @@ -209,16 +212,9 @@ static int meson_gxl_config_intr(struct phy_device *phydev) if (ret) return ret; - val = INTSRC_ANEG_PR - | INTSRC_PARALLEL_FAULT - | INTSRC_ANEG_LP_ACK - | INTSRC_LINK_DOWN - | INTSRC_REMOTE_FAULT - | INTSRC_ANEG_COMPLETE; - ret = phy_write(phydev, INTSRC_MASK, val); + ret = phy_write(phydev, INTSRC_MASK, INT_SOURCES); } else { - val = 0; - ret = phy_write(phydev, INTSRC_MASK, val); + ret = phy_write(phydev, INTSRC_MASK, 0); /* Ack any pending IRQ */ ret = meson_gxl_ack_interrupt(phydev); @@ -237,10 +233,23 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev) return IRQ_NONE; } + irq_status &= INT_SOURCES; + if (irq_status == 0) return IRQ_NONE; - phy_trigger_machine(phydev); + /* Aneg-complete interrupt is used for link-up detection */ + if (phydev->autoneg == AUTONEG_ENABLE && + irq_status == INTSRC_ENERGY_DETECT) + return IRQ_HANDLED; + + /* Give PHY some time before MAC starts sending data. This works + * around an issue where network doesn't come up properly. + */ + if (!(irq_status & INTSRC_LINK_DOWN)) + phy_queue_state_machine(phydev, msecs_to_jiffies(100)); + else + phy_trigger_machine(phydev); return IRQ_HANDLED; } diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 6e32da28e138..f2e3a67198dd 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -2685,3 +2685,6 @@ MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl); MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver"); MODULE_AUTHOR("Nagaraju Lakkaraju"); MODULE_LICENSE("Dual MIT/GPL"); + +MODULE_FIRMWARE(MSCC_VSC8584_REVB_INT8051_FW); +MODULE_FIRMWARE(MSCC_VSC8574_REVB_INT8051_FW); diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index ef2c6a09eb0f..4369d6249e7b 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -75,6 +75,12 @@ static const struct sfp_quirk sfp_quirks[] = { .part = "MA5671A", .modes = sfp_quirk_2500basex, }, { + // Lantech 8330-262D-E can operate at 2500base-X, but + // incorrectly report 2500MBd NRZ in their EEPROM + .vendor = "Lantech", + .part = "8330-262D-E", + .modes = sfp_quirk_2500basex, + }, { .vendor = "UBNT", .part = "UF-INSTANT", .modes = sfp_quirk_ubnt_uf_instant, diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 5435b5689ce6..2a3892528ec3 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -469,7 +469,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue) spin_lock(&sl->lock); if (netif_queue_stopped(dev)) { - if (!netif_running(dev)) + if (!netif_running(dev) || !sl->tty) goto out; /* May be we must check transmitter timeout here ? diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h index c420e5948522..3d7f88b330c1 100644 --- a/drivers/net/slip/slip.h +++ b/drivers/net/slip/slip.h @@ -40,6 +40,8 @@ insmod -oslip_maxdev=nnn */ #define SL_MTU 296 /* 296; I am used to 600- FvK */ +/* some arch define END as assembly function ending, just undef it */ +#undef END /* SLIP protocol characters. 
*/ #define END 0300 /* indicates end of frame */ #define ESC 0333 /* indicates byte stuffing */ diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 8e3a28ba6b28..ba2ef5437e16 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1198,7 +1198,8 @@ static int tap_sendmsg(struct socket *sock, struct msghdr *m, struct xdp_buff *xdp; int i; - if (ctl && (ctl->type == TUN_MSG_PTR)) { + if (m->msg_controllen == sizeof(struct tun_msg_ctl) && + ctl && ctl->type == TUN_MSG_PTR) { for (i = 0; i < ctl->num; i++) { xdp = &((struct xdp_buff *)ctl->ptr)[i]; tap_get_user_xdp(q, xdp); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 45a67e72a02c..02de8d998bfa 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2489,7 +2489,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) if (!tun) return -EBADFD; - if (ctl && (ctl->type == TUN_MSG_PTR)) { + if (m->msg_controllen == sizeof(struct tun_msg_ctl) && + ctl && ctl->type == TUN_MSG_PTR) { struct tun_page tpage; int n = ctl->num; int flush = 0; diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 73b97f4cc1ec..e8d49886d695 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if (start_of_descs != desc_offset) goto err; - /* self check desc_offset from header*/ - if (desc_offset >= skb_len) + /* self check desc_offset from header and make sure that the + * bounds of the metadata array are inside the SKB + */ + if (pkt_count * 2 + desc_offset >= skb_len) goto err; + /* Packets must not overlap the metadata array */ + skb_trim(skb, desc_offset); + if (pkt_count == 0) goto err; diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h index 2a1e31defe71..4334aafab59a 100644 --- a/drivers/net/usb/asix.h +++ b/drivers/net/usb/asix.h @@ -192,8 +192,8 @@ extern const struct driver_info ax88172a_info; /* ASIX specific flags */ #define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */ -int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, - u16 size, void *data, int in_pm); +int __must_check asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, + u16 size, void *data, int in_pm); int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data, int in_pm); diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 9aa92076500a..f39188b7717a 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -11,8 +11,8 @@ #define AX_HOST_EN_RETRIES 30 -int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, - u16 size, void *data, int in_pm) +int __must_check asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, + u16 size, void *data, int in_pm) { int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16); @@ -27,9 +27,12 @@ int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); - if (unlikely(ret < 0)) + if (unlikely(ret < size)) { + ret = ret < 0 ? 
ret : -ENODATA; + netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n", index, ret); + } return ret; } @@ -79,7 +82,7 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm) 0, 0, 1, &smsr, in_pm); if (ret == -ENODEV) break; - else if (ret < sizeof(smsr)) + else if (ret < 0) continue; else if (smsr & AX_HOST_EN) break; @@ -579,8 +582,12 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc) return ret; } - asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id, - (__u16)loc, 2, &res, 1); + ret = asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id, + (__u16)loc, 2, &res, 1); + if (ret < 0) { + mutex_unlock(&dev->phy_mutex); + return ret; + } asix_set_hw_mii(dev, 1); mutex_unlock(&dev->phy_mutex); diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 30821f6a6d7a..bd8f8619ad6f 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -755,7 +755,12 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) priv->phy_addr = ret; priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10); - asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); + ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); + if (ret < 0) { + netdev_dbg(dev->net, "Failed to read STATMNGSTS_REG: %d\n", ret); + return ret; + } + chipcode &= AX_CHIPCODE_MASK; ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) : @@ -920,11 +925,21 @@ static int ax88178_reset(struct usbnet *dev) int gpio0 = 0; u32 phyid; - asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status, 0); + ret = asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status, 0); + if (ret < 0) { + netdev_dbg(dev->net, "Failed to read GPIOS: %d\n", ret); + return ret; + } + netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status); asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL, 0); - asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom, 0); + ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom, 0); + if (ret < 0) { + netdev_dbg(dev->net, "Failed to read EEPROM: %d\n", ret); + return ret; + } + asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL, 0); netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom); diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index eb3817d70f2b..9b4dfa3001d6 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -583,6 +583,11 @@ static const struct usb_device_id products[] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE +#define ZAURUS_FAKE_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* SA-1100 based Sharp Zaurus ("collie"), or compatible; * wire-incompatible with true CDC Ethernet implementations. * (And, it seems, needlessly so...) 
@@ -640,6 +645,13 @@ static const struct usb_device_id products[] = { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, /* reported with some C860 units */ .idProduct = 0x9050, /* C-860 */ ZAURUS_MASTER_INTERFACE, diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 82bb5ed94c48..c0b8b4aa78f3 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -659,6 +659,11 @@ static const struct usb_device_id mbim_devs[] = { .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, }, + /* Telit FN990 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, + }, + /* default entry */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_zlp, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index e303b522efb5..15f91d691bba 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1715,10 +1715,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) { struct sk_buff *skb; struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; - int len; + unsigned int len; int nframes; int x; - int offset; + unsigned int offset; union { struct usb_cdc_ncm_ndp16 *ndp16; struct usb_cdc_ncm_ndp32 *ndp32; @@ -1790,8 +1790,8 @@ next_ndp: break; } - /* sanity checking */ - if (((offset + len) > skb_in->len) || + /* sanity checking - watch out for integer wrap*/ + if ((offset > skb_in->len) || (len > skb_in->len - offset) || (len > ctx->rx_max) || (len < ETH_HLEN)) { netif_dbg(dev, rx_err, dev->net, "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n", diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 33ada2c59952..0c7f02ca6822 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1395,6 +1395,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ + {QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support*/ + {QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 026e7487c45b..eb0d325e92b7 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -84,9 +84,10 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) { - netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", - index, ret); + if (ret < 0) { + if (ret != -ENODEV) + netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", + index, ret); return ret; } @@ -116,7 +117,7 @@ static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 
index, ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) + if (ret < 0 && ret != -ENODEV) netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n", index, ret); @@ -159,6 +160,9 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev, do { ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm); if (ret < 0) { + /* Ignore -ENODEV error during disconnect() */ + if (ret == -ENODEV) + return 0; netdev_warn(dev->net, "Error reading MII_ACCESS\n"); return ret; } @@ -194,7 +198,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_); ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ADDR\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_ADDR\n"); goto done; } @@ -206,7 +211,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_DATA\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error reading MII_DATA\n"); goto done; } @@ -214,6 +220,10 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, done: mutex_unlock(&dev->phy_mutex); + + /* Ignore -ENODEV error during disconnect() */ + if (ret == -ENODEV) + return 0; return ret; } @@ -235,7 +245,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id, val = regval; ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_DATA\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_DATA\n"); goto done; } @@ -243,7 +254,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id, addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_); ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ADDR\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_ADDR\n"); goto done; } diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 6516a37893e2..0c50f24671da 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) - 4; - if (len > ETH_FRAME_LEN) + if (len > ETH_FRAME_LEN || len > skb->len) return 0; /* the last packet of current skb */ diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c index 8e717a0b559b..7984f2157d22 100644 --- a/drivers/net/usb/zaurus.c +++ b/drivers/net/usb/zaurus.c @@ -256,6 +256,11 @@ static const struct usb_device_id products [] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE +#define ZAURUS_FAKE_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* SA-1100 based Sharp Zaurus ("collie"), or compatible. 
*/ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO @@ -315,6 +320,13 @@ static const struct usb_device_id products [] = { .driver_info = ZAURUS_PXA_INFO, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, /* reported with some C860 units */ diff --git a/drivers/net/veth.c b/drivers/net/veth.c index f478fe7e2b82..64fa8e9c0a22 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -327,7 +327,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) rcu_read_lock(); rcv = rcu_dereference(priv->peer); - if (unlikely(!rcv)) { + if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) { kfree_skb(skb); goto drop; } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index b2242a082431..091dd7caf10c 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1265,6 +1265,7 @@ static int vrf_prepare_mac_header(struct sk_buff *skb, eth = (struct ethhdr *)skb->data; skb_reset_mac_header(skb); + skb_reset_mac_len(skb); /* we set the ethernet destination and the source addresses to the * address of the VRF device. @@ -1294,9 +1295,9 @@ static int vrf_prepare_mac_header(struct sk_buff *skb, */ static int vrf_add_mac_header_if_unset(struct sk_buff *skb, struct net_device *vrf_dev, - u16 proto) + u16 proto, struct net_device *orig_dev) { - if (skb_mac_header_was_set(skb)) + if (skb_mac_header_was_set(skb) && dev_has_header(orig_dev)) return 0; return vrf_prepare_mac_header(skb, vrf_dev, proto); @@ -1402,6 +1403,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, /* if packet is NDISC then keep the ingress interface */ if (!is_ndisc) { + struct net_device *orig_dev = skb->dev; + vrf_rx_stats(vrf_dev, skb->len); skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; @@ -1410,7 +1413,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, int err; err = vrf_add_mac_header_if_unset(skb, vrf_dev, - ETH_P_IPV6); + ETH_P_IPV6, + orig_dev); if (likely(!err)) { skb_push(skb, skb->mac_len); dev_queue_xmit_nit(skb, vrf_dev); @@ -1440,6 +1444,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, struct sk_buff *skb) { + struct net_device *orig_dev = skb->dev; + skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; IPCB(skb)->flags |= IPSKB_L3SLAVE; @@ -1460,7 +1466,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, if (!list_empty(&vrf_dev->ptype_all)) { int err; - err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP); + err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP, + orig_dev); if (likely(!err)) { skb_push(skb, skb->mac_len); dev_queue_xmit_nit(skb, vrf_dev); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 141635a35c28..129e270e9a7c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -711,11 +711,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, rd = kmalloc(sizeof(*rd), GFP_ATOMIC); if (rd == NULL) - return -ENOBUFS; + return -ENOMEM; if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) { kfree(rd); - return -ENOBUFS; + return -ENOMEM; } rd->remote_ip = *ip; diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c index 1de413b19e34..8084e7408c0a 100644 --- a/drivers/net/wireguard/queueing.c +++ b/drivers/net/wireguard/queueing.c @@ -4,6 +4,7 @@ */ #include 
"queueing.h" +#include <linux/skb_array.h> struct multicore_worker __percpu * wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) @@ -42,7 +43,7 @@ void wg_packet_queue_free(struct crypt_queue *queue, bool purge) { free_percpu(queue->worker); WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); - ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL); + ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL); } #define NEXT(skb) ((skb)->prev) diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index 6f07b949cb81..0414d7a6ce74 100644 --- a/drivers/net/wireguard/socket.c +++ b/drivers/net/wireguard/socket.c @@ -160,6 +160,7 @@ out: rcu_read_unlock_bh(); return ret; #else + kfree_skb(skb); return -EAFNOSUPPORT; #endif } @@ -241,7 +242,7 @@ int wg_socket_endpoint_from_skb(struct endpoint *endpoint, endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr; endpoint->src4.s_addr = ip_hdr(skb)->daddr; endpoint->src_if4 = skb->skb_iif; - } else if (skb->protocol == htons(ETH_P_IPV6)) { + } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) { endpoint->addr6.sin6_family = AF_INET6; endpoint->addr6.sin6_port = udp_hdr(skb)->source; endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr; @@ -284,7 +285,7 @@ void wg_socket_set_peer_endpoint(struct wg_peer *peer, peer->endpoint.addr4 = endpoint->addr4; peer->endpoint.src4 = endpoint->src4; peer->endpoint.src_if4 = endpoint->src_if4; - } else if (endpoint->addr.sa_family == AF_INET6) { + } else if (IS_ENABLED(CONFIG_IPV6) && endpoint->addr.sa_family == AF_INET6) { peer->endpoint.addr6 = endpoint->addr6; peer->endpoint.src6 = endpoint->src6; } else { diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 9513ab696fff..f79dd9a71690 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1556,11 +1556,11 @@ static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size) node = of_parse_phandle(dev->of_node, "memory-region", 0); if (node) { ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) { dev_err(dev, "failed to resolve msa fixed region\n"); return ret; } - of_node_put(node); ar->msa.paddr = r.start; ar->msa.mem_size = resource_size(&r); diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index 7d65c115669f..20b9aa8ddf7d 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -337,14 +337,15 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif, if (patterns[i].mask[j / 8] & BIT(j % 8)) bitmask[j] = 0xff; old_pattern.mask = bitmask; - new_pattern = old_pattern; if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) { - if (patterns[i].pkt_offset < ETH_HLEN) + if (patterns[i].pkt_offset < ETH_HLEN) { ath10k_wow_convert_8023_to_80211(&new_pattern, &old_pattern); - else + } else { + new_pattern = old_pattern; new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN; + } } if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE)) diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 3fb0aa000825..24bd0520926b 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -391,6 +391,8 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab) for (j = 0; j < irq_grp->num_irq; j++) free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); + + netif_napi_del(&irq_grp->napi); } } diff --git 
a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 3834be158705..07004564a3ec 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -2156,6 +2156,19 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (ret) ath11k_warn(ar->ab, "failed to update bcn template: %d\n", ret); + if (vif->bss_conf.he_support) { + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + WMI_VDEV_PARAM_BA_MODE, + WMI_BA_MODE_BUFFER_SIZE_256); + if (ret) + ath11k_warn(ar->ab, + "failed to set BA BUFFER SIZE 256 for vdev: %d\n", + arvif->vdev_id); + else + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "Set BA BUFFER SIZE 256 for VDEV: %d\n", + arvif->vdev_id); + } } if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { @@ -2191,14 +2204,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (arvif->is_up && vif->bss_conf.he_support && vif->bss_conf.he_oper.params) { - ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, - WMI_VDEV_PARAM_BA_MODE, - WMI_BA_MODE_BUFFER_SIZE_256); - if (ret) - ath11k_warn(ar->ab, - "failed to set BA BUFFER SIZE 256 for vdev: %d\n", - arvif->vdev_id); - param_id = WMI_VDEV_PARAM_HEOPS_0_31; param_value = vif->bss_conf.he_oper.params; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 49c0b1ad40a0..f2149241fb13 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -519,7 +519,7 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci, ret = 0; break; case ATH11K_MHI_POWER_ON: - ret = mhi_async_power_up(ab_pci->mhi_ctrl); + ret = mhi_sync_power_up(ab_pci->mhi_ctrl); break; case ATH11K_MHI_POWER_OFF: mhi_power_down(ab_pci->mhi_ctrl, true); diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 54ce08f1c6e0..353a2d669fcd 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -1382,6 +1382,11 @@ static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev) struct ath11k_base *ab = dev_get_drvdata(dev); int ret; + if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { + ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n"); + return 0; + } + ret = ath11k_core_suspend(ab); if (ret) ath11k_warn(ab, "failed to suspend core: %d\n", ret); @@ -1394,6 +1399,11 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev) struct ath11k_base *ab = dev_get_drvdata(dev); int ret; + if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { + ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n"); + return 0; + } + ret = ath11k_core_resume(ab); if (ret) ath11k_warn(ab, "failed to resume core: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c index 1fbc2c19848f..d444b3d70ba2 100644 --- a/drivers/net/wireless/ath/ath5k/eeprom.c +++ b/drivers/net/wireless/ath/ath5k/eeprom.c @@ -746,6 +746,9 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode, } } + if (idx == AR5K_EEPROM_N_PD_CURVES) + goto err_out; + ee->ee_pd_gains[mode] = 1; pd = &chinfo[pier].pd_curves[idx]; diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 510e61e97dbc..994ec48b2f66 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -30,6 +30,7 @@ 
static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, hdr->endpoint_id = epid; hdr->flags = flags; hdr->payload_len = cpu_to_be16(len); + memset(hdr->control, 0, sizeof(hdr->control)); status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb); @@ -272,6 +273,10 @@ int htc_connect_service(struct htc_target *target, conn_msg->dl_pipeid = endpoint->dl_pipeid; conn_msg->ul_pipeid = endpoint->ul_pipeid; + /* To prevent infoleak */ + conn_msg->svc_meta_len = 0; + conn_msg->pad = 0; + ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); if (ret) goto err; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 98090e40e1cf..e2791d45f5f5 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix) continue; txinfo = IEEE80211_SKB_CB(bf->bf_mpdu); - fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0]; + fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0]; if (fi->keyix == keyix) return true; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 5691bd6eb82c..6555abf02f18 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); BUILD_BUG_ON(sizeof(struct ath_frame_info) > - sizeof(tx_info->rate_driver_data)); - return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; + sizeof(tx_info->status.status_driver_data)); + return (struct ath_frame_info *) &tx_info->status.status_driver_data[0]; } static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) @@ -2501,6 +2501,16 @@ skip_tx_complete: spin_unlock_irqrestore(&sc->tx.txbuflock, flags); } +static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info) +{ + void *ptr = &tx_info->status; + + memset(ptr + sizeof(tx_info->status.rates), 0, + sizeof(tx_info->status) - + sizeof(tx_info->status.rates) - + sizeof(tx_info->status.status_driver_data)); +} + static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, int nframes, int nbad, int txok) @@ -2512,6 +2522,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_hw *ah = sc->sc_ah; u8 i, tx_rateindex; + ath_clear_tx_status(tx_info); + if (txok) tx_info->status.ack_signal = ts->ts_rssi; @@ -2526,6 +2538,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, tx_info->status.ampdu_len = nframes; tx_info->status.ampdu_ack_len = nframes - nbad; + tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; + + for (i = tx_rateindex + 1; i < hw->max_rates; i++) { + tx_info->status.rates[i].count = 0; + tx_info->status.rates[i].idx = -1; + } + if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) { /* @@ -2547,16 +2566,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, tx_info->status.rates[tx_rateindex].count = hw->max_rate_tries; } - - for (i = tx_rateindex + 1; i < hw->max_rates; i++) { - tx_info->status.rates[i].count = 0; - tx_info->status.rates[i].idx = -1; - } - - tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; - - /* we report airtime in ath_tx_count_airtime(), don't report twice */ - tx_info->status.tx_time = 0; } static void ath_tx_processq(struct ath_softc 
*sc, struct ath_txq *txq) diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index cca3b086aa70..a87476383c54 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -1915,7 +1915,7 @@ static int carl9170_parse_eeprom(struct ar9170 *ar) WARN_ON(!(tx_streams >= 1 && tx_streams <= IEEE80211_HT_MCS_TX_MAX_STREAMS)); - tx_params = (tx_streams - 1) << + tx_params |= (tx_streams - 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params; diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index b2400e2417a5..f15e7bd690b5 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -667,14 +667,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg, /* * Some users have reported their EEPROM programmed with - * 0x8000 or 0x0 set, this is not a supported regulatory - * domain but since we have more than one user with it we - * need a solution for them. We default to 0x64, which is - * the default Atheros world regulatory domain. + * 0x8000 set, this is not a supported regulatory domain + * but since we have more than one user with it we need + * a solution for them. We default to 0x64, which is the + * default Atheros world regulatory domain. */ static void ath_regd_sanitize(struct ath_regulatory *reg) { - if (reg->current_rd != COUNTRY_ERD_FLAG && reg->current_rd != 0) + if (reg->current_rd != COUNTRY_ERD_FLAG) return; printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n"); reg->current_rd = 0x64; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index cf9e1396bd04..d51a78330135 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1474,6 +1474,9 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, if (iris_node) { if (of_device_is_compatible(iris_node, "qcom,wcn3620")) wcn->rf_id = RF_IRIS_WCN3620; + if (of_device_is_compatible(iris_node, "qcom,wcn3660") || + of_device_is_compatible(iris_node, "qcom,wcn3660b")) + wcn->rf_id = RF_IRIS_WCN3660; if (of_device_is_compatible(iris_node, "qcom,wcn3680")) wcn->rf_id = RF_IRIS_WCN3680; of_node_put(iris_node); diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 428546a6047f..597f740f3c25 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -97,6 +97,7 @@ enum wcn36xx_ampdu_state { #define RF_UNKNOWN 0x0000 #define RF_IRIS_WCN3620 0x3620 +#define RF_IRIS_WCN3660 0x3660 #define RF_IRIS_WCN3680 0x3680 static inline void buff_to_be(u32 *buf, size_t len) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 0eb13e5df517..dcbe55b56e43 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -207,6 +207,8 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp, size = BRCMF_FW_MAX_NVRAM_SIZE; else size = data_len; + /* Add space for properties we may add */ + size += strlen(BRCMF_FW_DEFAULT_BOARDREV) + 1; /* Alloc for extra 0 byte + roundup by 4 + length field */ size += 1 + 3 + sizeof(u32); nvp->nvram = kzalloc(size, GFP_KERNEL); @@ -693,7 +695,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, { struct brcmf_fw_item *first = &req->items[0]; struct brcmf_fw 
*fwctx; - char *alt_path; + char *alt_path = NULL; int ret; brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev)); @@ -712,7 +714,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, fwctx->done = fw_cb; /* First try alternative board-specific path if any */ - alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type); + if (fwctx->req->board_type) + alt_path = brcm_alt_fw_path(first->path, + fwctx->req->board_type); if (alt_path) { ret = request_firmware_nowait(THIS_MODULE, true, alt_path, fwctx->dev, GFP_KERNEL, fwctx, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 8b149996fc00..3ff4997e1c97 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -12,6 +12,7 @@ #include <linux/interrupt.h> #include <linux/bcma/bcma.h> #include <linux/sched.h> +#include <linux/io.h> #include <asm/unaligned.h> #include <soc.h> @@ -59,6 +60,13 @@ BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie"); BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie"); BRCMF_FW_DEF(4371, "brcmfmac4371-pcie"); +/* firmware config files */ +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt"); +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt"); + +/* per-board firmware binaries */ +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.bin"); + static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602), BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C), @@ -448,47 +456,6 @@ brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset, static void -brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset, - void *srcaddr, u32 len) -{ - void __iomem *address = devinfo->tcm + mem_offset; - __le32 *src32; - __le16 *src16; - u8 *src8; - - if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) { - if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) { - src8 = (u8 *)srcaddr; - while (len) { - iowrite8(*src8, address); - address++; - src8++; - len--; - } - } else { - len = len / 2; - src16 = (__le16 *)srcaddr; - while (len) { - iowrite16(le16_to_cpu(*src16), address); - address += 2; - src16++; - len--; - } - } - } else { - len = len / 4; - src32 = (__le32 *)srcaddr; - while (len) { - iowrite32(le32_to_cpu(*src32), address); - address += 4; - src32++; - len--; - } - } -} - - -static void brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset, void *dstaddr, u32 len) { @@ -1348,6 +1315,18 @@ static void brcmf_pcie_down(struct device *dev) { } +static int brcmf_pcie_preinit(struct device *dev) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; + + brcmf_dbg(PCIE, "Enter\n"); + + brcmf_pcie_intr_enable(buspub->devinfo); + brcmf_pcie_hostready(buspub->devinfo); + + return 0; +} static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb) { @@ -1456,6 +1435,7 @@ static int brcmf_pcie_reset(struct device *dev) } static const struct brcmf_bus_ops brcmf_pcie_bus_ops = { + .preinit = brcmf_pcie_preinit, .txdata = brcmf_pcie_tx, .stop = brcmf_pcie_down, .txctl = brcmf_pcie_tx_ctlpkt, @@ -1563,8 +1543,8 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, return err; brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name); - brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase, - (void *)fw->data, fw->size); + 
memcpy_toio(devinfo->tcm + devinfo->ci->rambase, + (void *)fw->data, fw->size); resetintr = get_unaligned_le32(fw->data); release_firmware(fw); @@ -1578,7 +1558,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name); address = devinfo->ci->rambase + devinfo->ci->ramsize - nvram_len; - brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len); + memcpy_toio(devinfo->tcm + address, nvram, nvram_len); brcmf_fw_nvram_free(nvram); } else { brcmf_dbg(PCIE, "No matching NVRAM file found %s\n", @@ -1777,6 +1757,8 @@ static void brcmf_pcie_setup(struct device *dev, int ret, ret = brcmf_chip_get_raminfo(devinfo->ci); if (ret) { brcmf_err(bus, "Failed to get RAM info\n"); + release_firmware(fw); + brcmf_fw_nvram_free(nvram); goto fail; } @@ -1826,9 +1808,6 @@ static void brcmf_pcie_setup(struct device *dev, int ret, init_waitqueue_head(&devinfo->mbdata_resp_wait); - brcmf_pcie_intr_enable(devinfo); - brcmf_pcie_hostready(devinfo); - ret = brcmf_attach(&devinfo->pdev->dev); if (ret) goto fail; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 8effeb7a7269..f7961b22e051 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype { BRCMF_SDIO_FT_SUB, }; -#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) +#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu)) /* SDIO Pad drive strength to select value mappings */ struct sdiod_drive_str { @@ -629,7 +629,6 @@ BRCMF_FW_CLM_DEF(43752, "brcmfmac43752-sdio"); /* firmware config files */ MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.txt"); -MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt"); /* per-board firmware binaries */ MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.bin"); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 75e7665773c5..90fe4adca492 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -304,7 +304,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw) priv->is_open = 1; IWL_DEBUG_MAC80211(priv, "leave\n"); - return 0; + return ret; } static void iwlagn_mac_stop(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 1efac0b2a94d..9e00d1d7e146 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2021 Intel Corporation + * Copyright (C) 2019-2022 Intel Corporation */ #include <linux/uuid.h> #include "iwl-drv.h" @@ -814,10 +814,11 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) * only one using version 36, so skip this version entirely. 
*/ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 || - IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 || - (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && - ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == - CSR_HW_REV_TYPE_7265D)); + (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 && + fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) || + (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && + ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == + CSR_HW_REV_TYPE_7265D)); } IWL_EXPORT_SYMBOL(iwl_sar_geo_support); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 6dcafd0a3d4b..b00cf92c8965 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1532,8 +1532,6 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt, return -EBUSY; range->range_data_size = reg->dev_addr.size; - iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG, - DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK); for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) { prph_data = iwl_read_prph(fwrt->trans, (i % 2) ? DBGI_SRAM_TARGET_ACCESS_RDATA_MSB : diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 845a09d0daba..c8dff76ac03c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -319,6 +319,7 @@ enum { #define CSR_HW_REV_TYPE_2x00 (0x0000100) #define CSR_HW_REV_TYPE_105 (0x0000110) #define CSR_HW_REV_TYPE_135 (0x0000120) +#define CSR_HW_REV_TYPE_3160 (0x0000164) #define CSR_HW_REV_TYPE_7265D (0x0000210) #define CSR_HW_REV_TYPE_NONE (0x00001F0) #define CSR_HW_REV_TYPE_QNJ (0x0000360) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index b7f7b9c5b670..524b0ad87357 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1614,6 +1614,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) out_unbind: complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); + /* drv has just been freed by the release */ + failure = false; free: if (failure) iwl_dealloc_ucode(drv); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 475f951d4b1e..fc40cca096c2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -541,8 +541,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE | - IEEE80211_HE_MAC_CAP0_TWT_REQ, + IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index d0a7d58336a9..6c4f1c949541 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -350,8 +350,6 @@ #define WFPM_GP2 0xA030B4 /* DBGI SRAM Register details */ -#define DBGI_SRAM_TARGET_ACCESS_CFG 0x00A2E14C 
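A minimal sketch of the cleanup pattern behind the iwl-drv.c hunk above (the "failure = false" reset after device_release_driver()); demo_drv, demo_release and demo_load_done are hypothetical names, and nothing here is taken from iwlwifi beyond what the hunk itself shows:

#include <linux/slab.h>
#include <linux/types.h>

struct demo_drv {
	void *ucode;
};

static void demo_release(struct demo_drv *drv)
{
	kfree(drv->ucode);
	kfree(drv);			/* the context is gone after this call */
}

static void demo_load_done(struct demo_drv *drv, bool load_ok)
{
	bool failure = !load_ok;

	if (failure) {
		demo_release(drv);	/* frees drv, mirroring the driver release path */
		failure = false;	/* keep the shared cleanup below off the freed memory */
	}

	if (failure)
		kfree(drv->ucode);	/* shared error cleanup; must be skipped once drv was released */
}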
-#define DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK 0x10000 #define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB 0x00A2E154 #define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB 0x00A2E158 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index d3013a51a509..00ca17f3b263 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -2499,7 +2499,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) /* start pseudo D3 */ rtnl_lock(); + wiphy_lock(mvm->hw->wiphy); err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); + wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); if (err > 0) err = -EINVAL; @@ -2555,7 +2557,9 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); rtnl_lock(); + wiphy_lock(mvm->hw->wiphy); __iwl_mvm_resume(mvm, true); + wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); iwl_mvm_resume_tcm(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 5dc39fbb74d6..d398a06b2656 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -5,6 +5,7 @@ * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include <linux/vmalloc.h> +#include <linux/err.h> #include <linux/ieee80211.h> #include <linux/netdevice.h> @@ -2044,7 +2045,6 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) { struct dentry *bcast_dir __maybe_unused; - char buf[100]; spin_lock_init(&mvm->drv_stats_lock); @@ -2140,6 +2140,11 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) * Create a symlink with mac80211. It will be removed when mac80211 * exists (before the opmode exists which removes the target.) 
*/ - snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent); - debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf); + if (!IS_ERR(mvm->debugfs_dir)) { + char buf[100]; + + snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent); + debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, + buf); + } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 74404c96063b..6d439ae7b50b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1489,8 +1489,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) while (!sband && i < NUM_NL80211_BANDS) sband = mvm->hw->wiphy->bands[i++]; - if (WARN_ON_ONCE(!sband)) + if (WARN_ON_ONCE(!sband)) { + ret = -ENODEV; goto error; + } chan = &sband->channels[0]; @@ -1572,7 +1574,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ret = iwl_mvm_sar_init(mvm); if (ret == 0) ret = iwl_mvm_sar_geo_init(mvm); - else if (ret < 0) + if (ret < 0) goto error; iwl_mvm_tas_init(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 750217393f48..56c7a68a6491 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -295,7 +295,6 @@ static const u8 he_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, - [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 49c32a8132a0..c77d98c88811 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -238,7 +238,8 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm, */ mvm->fw_static_smps_request = req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE); - ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + ieee80211_iterate_interfaces(mvm->hw, + IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER, iwl_mvm_intf_dual_chain_req, NULL); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 035336a9e755..6d82725cb87d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ @@ -295,18 +295,31 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) * otherwise we might not be able to reuse this phy. 
*/ if (ctxt->ref == 0) { - struct ieee80211_channel *chan; + struct ieee80211_channel *chan = NULL; struct cfg80211_chan_def chandef; - struct ieee80211_supported_band *sband = NULL; - enum nl80211_band band = NL80211_BAND_2GHZ; + struct ieee80211_supported_band *sband; + enum nl80211_band band; + int channel; - while (!sband && band < NUM_NL80211_BANDS) - sband = mvm->hw->wiphy->bands[band++]; + for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { + sband = mvm->hw->wiphy->bands[band]; - if (WARN_ON(!sband)) - return; + if (!sband) + continue; + + for (channel = 0; channel < sband->n_channels; channel++) + if (!(sband->channels[channel].flags & + IEEE80211_CHAN_DISABLED)) { + chan = &sband->channels[channel]; + break; + } - chan = &sband->channels[0]; + if (chan) + break; + } + + if (WARN_ON(!chan)) + return; cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 5461bf399959..65e382756de6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1890,7 +1890,10 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm, IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; /* set fragmented ebs for fragmented scan on HB channels */ - if (iwl_mvm_is_scan_fragmented(params->hb_type)) + if ((!iwl_mvm_is_cdb_supported(mvm) && + iwl_mvm_is_scan_fragmented(params->type)) || + (iwl_mvm_is_cdb_supported(mvm) && + iwl_mvm_is_scan_fragmented(params->hb_type))) flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG; return flags; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 06fbd9ab37df..b5368cb57ca8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -271,15 +271,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, /* info->control is only relevant for non HW rate control */ if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - /* HT rate doesn't make sense for a non data frame */ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS && !ieee80211_is_data(fc), "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n", info->control.rates[0].flags, info->control.rates[0].idx, - le16_to_cpu(fc), sta ? mvmsta->sta_state : -1); + le16_to_cpu(fc), + sta ? 
iwl_mvm_sta_from_mac80211(sta)->sta_state : -1); rate_idx = info->control.rates[0].idx; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index bf0c32a74ca4..a9c19be29e92 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -408,8 +408,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; + return -EIO; } iwl_enable_rfkill_int(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index f252680f18e8..02da9cc8646c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1273,8 +1273,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; + return -EIO; } iwl_enable_rfkill_int(trans); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0adae76eb8df..0aeb1e1ec93f 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2336,6 +2336,15 @@ static void hw_scan_work(struct work_struct *work) if (req->ie_len) skb_put_data(probe, req->ie, req->ie_len); + if (!ieee80211_tx_prepare_skb(hwsim->hw, + hwsim->hw_scan_vif, + probe, + hwsim->tmp_chan->band, + NULL)) { + kfree_skb(probe); + continue; + } + local_bh_disable(); mac80211_hwsim_tx_frame(hwsim->hw, probe, hwsim->tmp_chan); @@ -3641,6 +3650,10 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, } txi->flags |= IEEE80211_TX_STAT_ACK; } + + if (hwsim_flags & HWSIM_TX_CTL_NO_ACK) + txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + ieee80211_tx_status_irqsafe(data2->hw, skb); return 0; out: diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 5e1c1506a4c6..7aecde35cb9a 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -465,6 +465,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) qbuf.addr = addr + offset; qbuf.len = len - offset; + qbuf.skip_unmap = false; mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL); frames++; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 4d01fd85283d..6e4d69715927 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -19,7 +19,7 @@ #define MT_MCU_RING_SIZE 32 #define MT_RX_BUF_SIZE 2048 -#define MT_SKB_HEAD_LEN 128 +#define MT_SKB_HEAD_LEN 256 #define MT_MAX_NON_AQL_PKT 16 #define MT_TXQ_FREE_THR 32 diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 8edea1e7a602..7f52a4a11cea 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -620,6 +620,9 @@ mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates); int i; + if (!sta_rates) + return; + spin_lock_bh(&dev->mt76.lock); for (i = 0; i < ARRAY_SIZE(msta->rates); i++) { msta->rates[i].idx = sta_rates->rate[i].idx; diff --git 
a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index f2704149834a..8f4a5d4929e0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1732,7 +1732,7 @@ mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy, struct mt7615_dev *dev = phy->dev; int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck; bool ext_phy = phy != &dev->phy; - u16 def_th = ofdm ? -98 : -110; + s16 def_th = ofdm ? -98 : -110; bool update = false; s8 *sensitivity; int signal; @@ -2000,6 +2000,14 @@ void mt7615_pm_power_save_work(struct work_struct *work) test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state)) goto out; + if (mutex_is_locked(&dev->mt76.mutex)) + /* if mt76 mutex is held we should not put the device + * to sleep since we are currently accessing device + * register map. We need to wait for the next power_save + * trigger. + */ + goto out; + if (time_is_after_jiffies(dev->pm.last_activity + delta)) { delta = dev->pm.last_activity + delta - jiffies; goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index fc266da54fe7..60a41d082961 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -682,6 +682,9 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates); int i; + if (!sta_rates) + return; + spin_lock_bh(&dev->mt76.lock); for (i = 0; i < ARRAY_SIZE(msta->rates); i++) { msta->rates[i].idx = sta_rates->rate[i].idx; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index d25b50e76932..017bd59c4ea8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -295,7 +295,7 @@ mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid, } if (sta_hdr) - sta_hdr->len = cpu_to_le16(sizeof(hdr)); + le16_add_cpu(&sta_hdr->len, sizeof(hdr)); return skb_put_data(nskb, &hdr, sizeof(hdr)); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index adf288e50e21..5cd0379d86de 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9); /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */ - mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf); + mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf); /* RG_SSUSB_CDR_BR_PE1D = 0x3 */ mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index ff613d705611..7691292526e0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -899,6 +899,7 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi, val = MT_TXD3_SN_VALID | FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); txwi[3] |= cpu_to_le32(val); + txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU); } val = FIELD_PREP(MT_TXD7_TYPE, fc_type) | diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 7440f2b443ec..e9d854e3293e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -1396,8 +1396,11 @@ mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, generic = (struct wtbl_generic *)tlv; if (sta) { + if (vif->type == NL80211_IFTYPE_STATION) + generic->partial_aid = cpu_to_le16(vif->bss_conf.aid); + else + generic->partial_aid = cpu_to_le16(sta->aid); memcpy(generic->peer_addr, sta->addr, ETH_ALEN); - generic->partial_aid = cpu_to_le16(sta->aid); generic->muar_idx = mvif->omac_idx; generic->qos = sta->wme; } else { @@ -1451,12 +1454,15 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA); + basic->aid = cpu_to_le16(sta->aid); break; case NL80211_IFTYPE_STATION: basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP); + basic->aid = cpu_to_le16(vif->bss_conf.aid); break; case NL80211_IFTYPE_ADHOC: basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC); + basic->aid = cpu_to_le16(sta->aid); break; default: WARN_ON(1); @@ -1464,7 +1470,6 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, } memcpy(basic->peer_addr, sta->addr, ETH_ALEN); - basic->aid = cpu_to_le16(sta->aid); basic->qos = sta->wme; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c index 30f3b3085c78..8d5e261cd10f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c @@ -130,23 +130,22 @@ mt7921_queues_acq(struct seq_file *s, void *data) mt7921_mutex_acquire(dev); - for (i = 0; i < 16; i++) { - int j, acs = i / 4, index = i % 4; + for (i = 0; i < 4; i++) { u32 ctrl, val, qlen = 0; + int j; - val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index)); - ctrl = BIT(31) | BIT(15) | (acs << 8); + val = mt76_rr(dev, MT_PLE_AC_QEMPTY(i)); + ctrl = BIT(31) | BIT(11) | (i << 24); for (j = 0; j < 32; j++) { if (val & BIT(j)) continue; - mt76_wr(dev, MT_PLE_FL_Q0_CTRL, - ctrl | (j + (index << 5))); + mt76_wr(dev, MT_PLE_FL_Q0_CTRL, ctrl | j); qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL, GENMASK(11, 0)); } - seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen); + seq_printf(s, "AC%d: queued=%d\n", i, qlen); } mt7921_mutex_release(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 04a288029c98..c093920a597d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -1550,6 +1550,14 @@ void mt7921_pm_power_save_work(struct work_struct *work) test_bit(MT76_HW_SCHED_SCANNING, &mphy->state)) goto out; + if (mutex_is_locked(&dev->mt76.mutex)) + /* if mt76 mutex is held we should not put the device + * to sleep since we are currently accessing device + * register map. We need to wait for the next power_save + * trigger. 
+ */ + goto out; + if (time_is_after_jiffies(dev->pm.last_activity + delta)) { delta = dev->pm.last_activity + delta - jiffies; goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 9eb90e6f0103..30252f408ddc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -224,6 +224,7 @@ static void mt7921_stop(struct ieee80211_hw *hw) cancel_delayed_work_sync(&dev->pm.ps_work); cancel_work_sync(&dev->pm.wake_work); + cancel_work_sync(&dev->reset_work); mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); mt7921_mutex_acquire(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h index 26fb11823762..41c2855e7a3d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h @@ -17,13 +17,12 @@ #define MT_PLE_BASE 0x8000 #define MT_PLE(ofs) (MT_PLE_BASE + (ofs)) -#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0) -#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4) -#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8) -#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc) +#define MT_PLE_FL_Q0_CTRL MT_PLE(0x3e0) +#define MT_PLE_FL_Q1_CTRL MT_PLE(0x3e4) +#define MT_PLE_FL_Q2_CTRL MT_PLE(0x3e8) +#define MT_PLE_FL_Q3_CTRL MT_PLE(0x3ec) -#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \ - ((n) << 2)) +#define MT_PLE_AC_QEMPTY(_n) MT_PLE(0x500 + 0x40 * (_n)) #define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2)) #define MT_MDP_BASE 0xf000 diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 0f5009c47cd0..f8409e93fe33 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -382,6 +382,8 @@ static int ray_config(struct pcmcia_device *link) goto failed; local->sram = ioremap(link->resource[2]->start, resource_size(link->resource[2])); + if (!local->sram) + goto failed; /*** Set up 16k window for shared memory (receive buffer) ***************/ link->resource[3]->flags |= @@ -396,6 +398,8 @@ static int ray_config(struct pcmcia_device *link) goto failed; local->rmem = ioremap(link->resource[3]->start, resource_size(link->resource[3])); + if (!local->rmem) + goto failed; /*** Set up window for attribute memory ***********************************/ link->resource[4]->flags |= @@ -410,6 +414,8 @@ static int ray_config(struct pcmcia_device *link) goto failed; local->amem = ioremap(link->resource[4]->start, resource_size(link->resource[4])); + if (!local->amem) + goto failed; dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram); dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index d24b7a7993aa..990360d75cb6 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -256,6 +256,7 @@ static void backend_disconnect(struct backend_info *be) unsigned int queue_index; xen_unregister_watchers(vif); + xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); #ifdef CONFIG_DEBUG_FS xenvif_debugfs_delif(vif); #endif /* CONFIG_DEBUG_FS */ @@ -675,7 +676,6 @@ static void hotplug_status_changed(struct xenbus_watch *watch, /* Not interested in this watch anymore. 
*/ unregister_hotplug_status_watch(be); - xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); } kfree(str); } @@ -824,15 +824,11 @@ static void connect(struct backend_info *be) xenvif_carrier_on(be->vif); unregister_hotplug_status_watch(be); - if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { - err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, - NULL, hotplug_status_changed, - "%s/%s", dev->nodename, - "hotplug-status"); - if (err) - goto err; + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL, + hotplug_status_changed, + "%s/%s", dev->nodename, "hotplug-status"); + if (!err) be->have_hotplug_status_watch = 1; - } netif_tx_wake_all_queues(be->vif->dev); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8126e08f11a9..2492a27467b4 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -424,14 +424,12 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue) queue->tx_link[id] = TX_LINK_NONE; skb = queue->tx_skbs[id]; queue->tx_skbs[id] = NULL; - if (unlikely(gnttab_query_foreign_access( - queue->grant_tx_ref[id]) != 0)) { + if (unlikely(!gnttab_end_foreign_access_ref( + queue->grant_tx_ref[id], GNTMAP_readonly))) { dev_alert(dev, "Grant still in use by backend domain\n"); goto err; } - gnttab_end_foreign_access_ref( - queue->grant_tx_ref[id], GNTMAP_readonly); gnttab_release_grant_reference( &queue->gref_tx_head, queue->grant_tx_ref[id]); queue->grant_tx_ref[id] = GRANT_INVALID_REF; @@ -842,6 +840,28 @@ static int xennet_close(struct net_device *dev) return 0; } +static void xennet_destroy_queues(struct netfront_info *info) +{ + unsigned int i; + + for (i = 0; i < info->netdev->real_num_tx_queues; i++) { + struct netfront_queue *queue = &info->queues[i]; + + if (netif_running(info->netdev)) + napi_disable(&queue->napi); + netif_napi_del(&queue->napi); + } + + kfree(info->queues); + info->queues = NULL; +} + +static void xennet_uninit(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + xennet_destroy_queues(np); +} + static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) { unsigned long flags; @@ -968,7 +988,6 @@ static int xennet_get_responses(struct netfront_queue *queue, struct device *dev = &queue->info->netdev->dev; struct bpf_prog *xdp_prog; struct xdp_buff xdp; - unsigned long ret; int slots = 1; int err = 0; u32 verdict; @@ -1010,8 +1029,13 @@ static int xennet_get_responses(struct netfront_queue *queue, goto next; } - ret = gnttab_end_foreign_access_ref(ref, 0); - BUG_ON(!ret); + if (!gnttab_end_foreign_access_ref(ref, 0)) { + dev_alert(dev, + "Grant still in use by backend domain\n"); + queue->info->broken = true; + dev_alert(dev, "Disabled for further use\n"); + return -EINVAL; + } gnttab_release_grant_reference(&queue->gref_rx_head, ref); @@ -1232,6 +1256,10 @@ static int xennet_poll(struct napi_struct *napi, int budget) &need_xdp_flush); if (unlikely(err)) { + if (queue->info->broken) { + spin_unlock(&queue->rx_lock); + return 0; + } err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); @@ -1611,6 +1639,7 @@ static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp) } static const struct net_device_ops xennet_netdev_ops = { + .ndo_uninit = xennet_uninit, .ndo_open = xennet_open, .ndo_stop = xennet_close, .ndo_start_xmit = xennet_start_xmit, @@ -1895,7 +1924,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_queue *queue, unsigned int feature_split_evtchn) { struct xen_netif_tx_sring *txs; - 
struct xen_netif_rx_sring *rxs; + struct xen_netif_rx_sring *rxs = NULL; grant_ref_t gref; int err; @@ -1915,21 +1944,21 @@ static int setup_netfront(struct xenbus_device *dev, err = xenbus_grant_ring(dev, txs, 1, &gref); if (err < 0) - goto grant_tx_ring_fail; + goto fail; queue->tx_ring_ref = gref; rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); - goto alloc_rx_ring_fail; + goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); err = xenbus_grant_ring(dev, rxs, 1, &gref); if (err < 0) - goto grant_rx_ring_fail; + goto fail; queue->rx_ring_ref = gref; if (feature_split_evtchn) @@ -1942,22 +1971,28 @@ static int setup_netfront(struct xenbus_device *dev, err = setup_netfront_single(queue); if (err) - goto alloc_evtchn_fail; + goto fail; return 0; /* If we fail to setup netfront, it is safe to just revoke access to * granted pages because backend is not accessing it at this point. */ -alloc_evtchn_fail: - gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); -grant_rx_ring_fail: - free_page((unsigned long)rxs); -alloc_rx_ring_fail: - gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); -grant_tx_ring_fail: - free_page((unsigned long)txs); -fail: + fail: + if (queue->rx_ring_ref != GRANT_INVALID_REF) { + gnttab_end_foreign_access(queue->rx_ring_ref, 0, + (unsigned long)rxs); + queue->rx_ring_ref = GRANT_INVALID_REF; + } else { + free_page((unsigned long)rxs); + } + if (queue->tx_ring_ref != GRANT_INVALID_REF) { + gnttab_end_foreign_access(queue->tx_ring_ref, 0, + (unsigned long)txs); + queue->tx_ring_ref = GRANT_INVALID_REF; + } else { + free_page((unsigned long)txs); + } return err; } @@ -2103,22 +2138,6 @@ error: return err; } -static void xennet_destroy_queues(struct netfront_info *info) -{ - unsigned int i; - - for (i = 0; i < info->netdev->real_num_tx_queues; i++) { - struct netfront_queue *queue = &info->queues[i]; - - if (netif_running(info->netdev)) - napi_disable(&queue->napi); - netif_napi_del(&queue->napi); - } - - kfree(info->queues); - info->queues = NULL; -} - static int xennet_create_page_pool(struct netfront_queue *queue) diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c index 16ceb763594f..90e30e2f1512 100644 --- a/drivers/nfc/port100.c +++ b/drivers/nfc/port100.c @@ -1612,7 +1612,9 @@ free_nfc_dev: nfc_digital_free_device(dev->nfc_digital_dev); error: + usb_kill_urb(dev->in_urb); usb_free_urb(dev->in_urb); + usb_kill_urb(dev->out_urb); usb_free_urb(dev->out_urb); usb_put_dev(dev->udev); diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index c8bdf078d111..0841e0e370a0 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -320,6 +320,11 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, return -ENOMEM; transaction->aid_len = skb->data[1]; + + /* Checking if the length of the AID is valid */ + if (transaction->aid_len > sizeof(transaction->aid)) + return -EINVAL; + memcpy(transaction->aid, &skb->data[2], transaction->aid_len); @@ -329,6 +334,11 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, return -EPROTO; transaction->params_len = skb->data[transaction->aid_len + 3]; + + /* Total size is allocated (skb->len - 2) minus fixed array members */ + if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction))) + return -EINVAL; + memcpy(transaction->params, skb->data + transaction->aid_len + 4, 
transaction->params_len); diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c index fede05151f69..4081fc538ff4 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen4.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c @@ -168,6 +168,18 @@ static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) return NTB_TOPO_NONE; } +static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) +{ + switch (ppd & SPR_PPD_TOPO_MASK) { + case SPR_PPD_TOPO_B2B_USD: + return NTB_TOPO_B2B_USD; + case SPR_PPD_TOPO_B2B_DSD: + return NTB_TOPO_B2B_DSD; + } + + return NTB_TOPO_NONE; +} + int gen4_init_dev(struct intel_ntb_dev *ndev) { struct pci_dev *pdev = ndev->ntb.pdev; @@ -183,7 +195,10 @@ int gen4_init_dev(struct intel_ntb_dev *ndev) } ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET); - ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1); + if (pdev_is_ICX(pdev)) + ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1); + else if (pdev_is_SPR(pdev)) + ndev->ntb.topo = spr_ppd_topo(ndev, ppd1); dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1, ntb_topo_string(ndev->ntb.topo)); if (ndev->ntb.topo == NTB_TOPO_NONE) diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h index 3fcd3fdce9ed..f91323eaf5ce 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen4.h +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h @@ -49,10 +49,14 @@ #define GEN4_PPD_CLEAR_TRN 0x0001 #define GEN4_PPD_LINKTRN 0x0008 #define GEN4_PPD_CONN_MASK 0x0300 +#define SPR_PPD_CONN_MASK 0x0700 #define GEN4_PPD_CONN_B2B 0x0200 #define GEN4_PPD_DEV_MASK 0x1000 #define GEN4_PPD_DEV_DSD 0x1000 #define GEN4_PPD_DEV_USD 0x0000 +#define SPR_PPD_DEV_MASK 0x4000 +#define SPR_PPD_DEV_DSD 0x4000 +#define SPR_PPD_DEV_USD 0x0000 #define GEN4_LINK_CTRL_LINK_DISABLE 0x0010 #define GEN4_SLOTSTS 0xb05a @@ -62,6 +66,10 @@ #define GEN4_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD) #define GEN4_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD) +#define SPR_PPD_TOPO_MASK (SPR_PPD_CONN_MASK | SPR_PPD_DEV_MASK) +#define SPR_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_USD) +#define SPR_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_DSD) + #define GEN4_DB_COUNT 32 #define GEN4_DB_LINK 32 #define GEN4_DB_LINK_BIT BIT_ULL(GEN4_DB_LINK) @@ -112,4 +120,12 @@ static inline int pdev_is_ICX(struct pci_dev *pdev) return 0; } +static inline int pdev_is_SPR(struct pci_dev *pdev) +{ + if (pdev_is_gen4(pdev) && + pdev->revision > PCI_DEVICE_REVISION_ICX_MAX) + return 1; + return 0; +} + #endif diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c index 4c6eb61a6ac6..ec9cb6c81eda 100644 --- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -419,8 +419,8 @@ static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev, enum ntb_width *width) { struct switchtec_dev *stdev = sndev->stdev; - - u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id); + u32 pff = + ioread32(&stdev->mmio_part_cfg_all[partition].vep_pff_inst_id); u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]); if (speed) @@ -840,7 +840,6 @@ static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev) u64 tpart_vec; int self; u64 part_map; - int bit; sndev->ntb.pdev = sndev->stdev->pdev; sndev->ntb.topo = NTB_TOPO_SWITCH; @@ -861,29 +860,28 @@ static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev) part_map = ioread64(&sndev->mmio_ntb->ep_map); part_map &= ~(1 << sndev->self_partition); - if (!ffs(tpart_vec)) { + if (!tpart_vec) { if 
(sndev->stdev->partition_count != 2) { dev_err(&sndev->stdev->dev, "ntb target partition not defined\n"); return -ENODEV; } - bit = ffs(part_map); - if (!bit) { + if (!part_map) { dev_err(&sndev->stdev->dev, "peer partition is not NT partition\n"); return -ENODEV; } - sndev->peer_partition = bit - 1; + sndev->peer_partition = __ffs64(part_map); } else { - if (ffs(tpart_vec) != fls(tpart_vec)) { + if (__ffs64(tpart_vec) != (fls64(tpart_vec) - 1)) { dev_err(&sndev->stdev->dev, "ntb driver only supports 1 pair of 1-1 ntb mapping\n"); return -ENODEV; } - sndev->peer_partition = ffs(tpart_vec) - 1; + sndev->peer_partition = __ffs64(tpart_vec); if (!(part_map & (1ULL << sndev->peer_partition))) { dev_err(&sndev->stdev->dev, "ntb target partition is not NT partition\n"); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 9ccf3d608799..70ad891a76ba 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -1025,6 +1025,9 @@ static unsigned long default_align(struct nd_region *nd_region) } } + if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX) + align = PAGE_SIZE; + mappings = max_t(u16, 1, nd_region->ndr_mappings); div_u64_rem(align, mappings, &remainder); if (remainder) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f8dd664b2eda..f2bb57615762 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -131,7 +131,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns) if (test_and_set_bit(NVME_NS_DEAD, &ns->flags)) return; - blk_set_queue_dying(ns->queue); + blk_mark_disk_dead(ns->disk); blk_mq_unquiesce_queue(ns->queue); set_capacity_and_notify(ns->disk, 0); @@ -1354,6 +1354,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, warn_str, cur->nidl); return -1; } + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) + return NVME_NIDT_EUI64_LEN; memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN); return NVME_NIDT_EUI64_LEN; case NVME_NIDT_NGUID: @@ -1362,6 +1364,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, warn_str, cur->nidl); return -1; } + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) + return NVME_NIDT_NGUID_LEN; memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN); return NVME_NIDT_NGUID_LEN; case NVME_NIDT_UUID: @@ -1370,6 +1374,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, warn_str, cur->nidl); return -1; } + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) + return NVME_NIDT_UUID_LEN; uuid_copy(&ids->uuid, data + sizeof(*cur)); return NVME_NIDT_UUID_LEN; case NVME_NIDT_CSI: @@ -1466,12 +1472,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, if ((*id)->ncap == 0) /* namespace not allocated or attached */ goto out_free_id; - if (ctrl->vs >= NVME_VS(1, 1, 0) && - !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) - memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64)); - if (ctrl->vs >= NVME_VS(1, 2, 0) && - !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) - memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid)); + + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { + dev_info(ctrl->device, + "Ignoring bogus Namespace Identifiers\n"); + } else { + if (ctrl->vs >= NVME_VS(1, 1, 0) && + !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) + memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64)); + if (ctrl->vs >= NVME_VS(1, 2, 0) && + !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) + memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid)); + } return 0; @@ -1674,13 +1686,6 @@ static void 
nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); } -static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) -{ - return !uuid_is_null(&ids->uuid) || - memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) || - memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); -} - static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) { return uuid_equal(&a->uuid, &b->uuid) && @@ -1845,9 +1850,6 @@ static void nvme_update_disk_info(struct gendisk *disk, nvme_config_discard(disk, ns); blk_queue_max_write_zeroes_sectors(disk->queue, ns->ctrl->max_zeroes_sectors); - - set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) || - test_bit(NVME_NS_FORCE_RO, &ns->flags)); } static inline bool nvme_first_scan(struct gendisk *disk) @@ -1908,18 +1910,23 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) goto out_unfreeze; } + set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) || + test_bit(NVME_NS_FORCE_RO, &ns->flags)); set_bit(NVME_NS_READY, &ns->flags); blk_mq_unfreeze_queue(ns->disk->queue); if (blk_queue_is_zoned(ns->queue)) { ret = nvme_revalidate_zones(ns); if (ret && !nvme_first_scan(ns->disk)) - goto out; + return ret; } if (nvme_ns_head_multipath(ns->head)) { blk_mq_freeze_queue(ns->head->disk->queue); nvme_update_disk_info(ns->head->disk, ns, id); + set_disk_ro(ns->head->disk, + (id->nsattr & NVME_NS_ATTR_RO) || + test_bit(NVME_NS_FORCE_RO, &ns->flags)); nvme_mpath_revalidate_paths(ns); blk_stack_limits(&ns->head->disk->queue->limits, &ns->queue->limits, 0); @@ -1929,16 +1936,16 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) return 0; out_unfreeze: - blk_mq_unfreeze_queue(ns->disk->queue); -out: /* * If probing fails due an unsupported feature, hide the block device, * but still allow other access. */ if (ret == -ENODEV) { ns->disk->flags |= GENHD_FL_HIDDEN; + set_bit(NVME_NS_READY, &ns->flags); ret = 0; } + blk_mq_unfreeze_queue(ns->disk->queue); return ret; } @@ -3517,15 +3524,20 @@ static const struct attribute_group *nvme_dev_attr_groups[] = { NULL, }; -static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, +static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns_head *h; - lockdep_assert_held(&subsys->lock); + lockdep_assert_held(&ctrl->subsys->lock); - list_for_each_entry(h, &subsys->nsheads, entry) { - if (h->ns_id != nsid) + list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { + /* + * Private namespaces can share NSIDs under some conditions. + * In that case we can't use the same ns_head for namespaces + * with the same NSID. 
+ */ + if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) continue; if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) return h; @@ -3534,16 +3546,24 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, return NULL; } -static int __nvme_check_ids(struct nvme_subsystem *subsys, - struct nvme_ns_head *new) +static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, + struct nvme_ns_ids *ids) { + bool has_uuid = !uuid_is_null(&ids->uuid); + bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid)); + bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); struct nvme_ns_head *h; lockdep_assert_held(&subsys->lock); list_for_each_entry(h, &subsys->nsheads, entry) { - if (nvme_ns_ids_valid(&new->ids) && - nvme_ns_ids_equal(&new->ids, &h->ids)) + if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid)) + return -EINVAL; + if (has_nguid && + memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0) + return -EINVAL; + if (has_eui64 && + memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0) return -EINVAL; } @@ -3642,7 +3662,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, head->ids = *ids; kref_init(&head->ref); - ret = __nvme_check_ids(ctrl->subsys, head); + ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &head->ids); if (ret) { dev_err(ctrl->device, "duplicate IDs for nsid %d\n", nsid); @@ -3685,7 +3705,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, int ret = 0; mutex_lock(&ctrl->subsys->lock); - head = nvme_find_ns_head(ctrl->subsys, nsid); + head = nvme_find_ns_head(ctrl, nsid); if (!head) { head = nvme_alloc_ns_head(ctrl, nsid, ids); if (IS_ERR(head)) { @@ -4187,7 +4207,14 @@ static void nvme_async_event_work(struct work_struct *work) container_of(work, struct nvme_ctrl, async_event_work); nvme_aen_uevent(ctrl); - ctrl->ops->submit_async_event(ctrl); + + /* + * The transport drivers must guarantee AER submission here is safe by + * flushing ctrl async_event_work after changing the controller state + * from LIVE and before freeing the admin queue. + */ + if (ctrl->state == NVME_CTRL_LIVE) + ctrl->ops->submit_async_event(ctrl); } static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 2f76969408b2..e9301b51db76 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -462,10 +462,11 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) /* * Add a multipath node if the subsystems supports multiple controllers. - * We also do this for private namespaces as the namespace sharing data could - * change after a rescan. + * We also do this for private namespaces as the namespace sharing flag + * could change after a rescan. 
*/ - if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath) + if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || + !nvme_is_unique_nsid(ctrl, head) || !multipath) return 0; head->disk = blk_alloc_disk(ctrl->numa_node); @@ -792,7 +793,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) { if (!head->disk) return; - blk_set_queue_dying(head->disk->queue); + blk_mark_disk_dead(head->disk); /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index ed79a6c7e804..f1e5c7564cae 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -144,6 +144,11 @@ enum nvme_quirks { * encoding the generation sequence number. */ NVME_QUIRK_SKIP_CID_GEN = (1 << 17), + + /* + * Reports garbage in the namespace identifiers (eui64, nguid, uuid). + */ + NVME_QUIRK_BOGUS_NID = (1 << 18), }; /* @@ -693,6 +698,25 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, return true; return __nvme_check_ready(ctrl, rq, queue_live); } + +/* + * NSID shall be unique for all shared namespaces, or if at least one of the + * following conditions is met: + * 1. Namespace Management is supported by the controller + * 2. ANA is supported by the controller + * 3. NVM Set are supported by the controller + * + * In other case, private namespace are not required to report a unique NSID. + */ +static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl, + struct nvme_ns_head *head) +{ + return head->shared || + (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) || + (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) || + (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS); +} + int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, void *buf, unsigned bufflen); int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index b925a5f4afc3..d7695bdbde8d 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3314,7 +3314,10 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS | - NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + NVME_QUIRK_DISABLE_WRITE_ZEROES | + NVME_QUIRK_BOGUS_NID, }, + { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ @@ -3352,6 +3355,10 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 0498801542eb..d51f52e296f5 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1192,6 +1192,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) struct nvme_rdma_ctrl, err_work); 
nvme_stop_keep_alive(&ctrl->ctrl); + flush_work(&ctrl->ctrl.async_event_work); nvme_rdma_teardown_io_queues(ctrl, false); nvme_start_queues(&ctrl->ctrl); nvme_rdma_teardown_admin_queue(ctrl, false); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index efa9037da53c..10882d3d554c 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -30,6 +30,44 @@ static int so_priority; module_param(so_priority, int, 0644); MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority"); +#ifdef CONFIG_DEBUG_LOCK_ALLOC +/* lockdep can detect a circular dependency of the form + * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock + * because dependencies are tracked for both nvme-tcp and user contexts. Using + * a separate class prevents lockdep from conflating nvme-tcp socket use with + * user-space socket API use. + */ +static struct lock_class_key nvme_tcp_sk_key[2]; +static struct lock_class_key nvme_tcp_slock_key[2]; + +static void nvme_tcp_reclassify_socket(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (WARN_ON_ONCE(!sock_allow_reclassification(sk))) + return; + + switch (sk->sk_family) { + case AF_INET: + sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME", + &nvme_tcp_slock_key[0], + "sk_lock-AF_INET-NVME", + &nvme_tcp_sk_key[0]); + break; + case AF_INET6: + sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME", + &nvme_tcp_slock_key[1], + "sk_lock-AF_INET6-NVME", + &nvme_tcp_sk_key[1]); + break; + default: + WARN_ON_ONCE(1); + } +} +#else +static void nvme_tcp_reclassify_socket(struct socket *sock) { } +#endif + enum nvme_tcp_send_state { NVME_TCP_SEND_CMD_PDU = 0, NVME_TCP_SEND_H2C_PDU, @@ -1436,6 +1474,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, goto err_destroy_mutex; } + nvme_tcp_reclassify_socket(queue->sock); + /* Single syn retry */ tcp_sock_set_syncnt(queue->sock->sk, 1); @@ -2105,6 +2145,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work) struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; nvme_stop_keep_alive(ctrl); + flush_work(&ctrl->async_event_work); nvme_tcp_teardown_io_queues(ctrl, false); /* unquiesce to fail fast pending requests */ nvme_start_queues(ctrl); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 9aecb83021a2..fb7840c73765 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -768,7 +768,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) if (config->wp_gpio) nvmem->wp_gpio = config->wp_gpio; - else + else if (!config->ignore_wp) nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp", GPIOD_OUT_HIGH); if (IS_ERR(nvmem->wp_gpio)) { diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 3dfeae8912df..80b5fd44ab1c 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -70,10 +70,6 @@ config OF_IRQ def_bool y depends on !SPARC && IRQ_DOMAIN -config OF_NET - depends on NETDEVICES - def_bool y - config OF_RESERVED_MEM def_bool OF_EARLY_FLATTREE diff --git a/drivers/of/Makefile b/drivers/of/Makefile index c13b982084a3..e0360a44306e 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile @@ -7,7 +7,6 @@ obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o obj-$(CONFIG_OF_PROMTREE) += pdt.o obj-$(CONFIG_OF_ADDRESS) += address.o obj-$(CONFIG_OF_IRQ) += irq.o -obj-$(CONFIG_OF_NET) += of_net.o obj-$(CONFIG_OF_UNITTEST) += unittest.o obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o obj-$(CONFIG_OF_RESOLVE) += resolver.o diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c deleted file mode 100644 index dbac3a172a11..000000000000 --- 
a/drivers/of/of_net.c +++ /dev/null @@ -1,145 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * OF helpers for network devices. - * - * Initially copied out of arch/powerpc/kernel/prom_parse.c - */ -#include <linux/etherdevice.h> -#include <linux/kernel.h> -#include <linux/of_net.h> -#include <linux/of_platform.h> -#include <linux/phy.h> -#include <linux/export.h> -#include <linux/device.h> -#include <linux/nvmem-consumer.h> - -/** - * of_get_phy_mode - Get phy mode for given device_node - * @np: Pointer to the given device_node - * @interface: Pointer to the result - * - * The function gets phy interface string from property 'phy-mode' or - * 'phy-connection-type'. The index in phy_modes table is set in - * interface and 0 returned. In case of error interface is set to - * PHY_INTERFACE_MODE_NA and an errno is returned, e.g. -ENODEV. - */ -int of_get_phy_mode(struct device_node *np, phy_interface_t *interface) -{ - const char *pm; - int err, i; - - *interface = PHY_INTERFACE_MODE_NA; - - err = of_property_read_string(np, "phy-mode", &pm); - if (err < 0) - err = of_property_read_string(np, "phy-connection-type", &pm); - if (err < 0) - return err; - - for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) - if (!strcasecmp(pm, phy_modes(i))) { - *interface = i; - return 0; - } - - return -ENODEV; -} -EXPORT_SYMBOL_GPL(of_get_phy_mode); - -static int of_get_mac_addr(struct device_node *np, const char *name, u8 *addr) -{ - struct property *pp = of_find_property(np, name, NULL); - - if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value)) { - memcpy(addr, pp->value, ETH_ALEN); - return 0; - } - return -ENODEV; -} - -static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr) -{ - struct platform_device *pdev = of_find_device_by_node(np); - struct nvmem_cell *cell; - const void *mac; - size_t len; - int ret; - - /* Try lookup by device first, there might be a nvmem_cell_lookup - * associated with a given device. - */ - if (pdev) { - ret = nvmem_get_mac_address(&pdev->dev, addr); - put_device(&pdev->dev); - return ret; - } - - cell = of_nvmem_cell_get(np, "mac-address"); - if (IS_ERR(cell)) - return PTR_ERR(cell); - - mac = nvmem_cell_read(cell, &len); - nvmem_cell_put(cell); - - if (IS_ERR(mac)) - return PTR_ERR(mac); - - if (len != ETH_ALEN || !is_valid_ether_addr(mac)) { - kfree(mac); - return -EINVAL; - } - - memcpy(addr, mac, ETH_ALEN); - kfree(mac); - - return 0; -} - -/** - * of_get_mac_address() - * @np: Caller's Device Node - * @addr: Pointer to a six-byte array for the result - * - * Search the device tree for the best MAC address to use. 'mac-address' is - * checked first, because that is supposed to contain to "most recent" MAC - * address. If that isn't set, then 'local-mac-address' is checked next, - * because that is the default address. If that isn't set, then the obsolete - * 'address' is checked, just in case we're using an old device tree. If any - * of the above isn't set, then try to get MAC address from nvmem cell named - * 'mac-address'. - * - * Note that the 'address' property is supposed to contain a virtual address of - * the register set, but some DTS files have redefined that property to be the - * MAC address. - * - * All-zero MAC addresses are rejected, because those could be properties that - * exist in the device tree, but were not set by U-Boot. For example, the - * DTS could define 'mac-address' and 'local-mac-address', with zero MAC - * addresses. Some older U-Boots only initialized 'local-mac-address'. 
In - * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists - * but is all zeros. - * - * Return: 0 on success and errno in case of error. -*/ -int of_get_mac_address(struct device_node *np, u8 *addr) -{ - int ret; - - if (!np) - return -ENODEV; - - ret = of_get_mac_addr(np, "mac-address", addr); - if (!ret) - return 0; - - ret = of_get_mac_addr(np, "local-mac-address", addr); - if (!ret) - return 0; - - ret = of_get_mac_addr(np, "address", addr); - if (!ret) - return 0; - - return of_get_mac_addr_nvmem(np, addr); -} -EXPORT_SYMBOL(of_get_mac_address); diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c index 596c185b5dda..b5f2f9f39392 100644 --- a/drivers/opp/debugfs.c +++ b/drivers/opp/debugfs.c @@ -10,6 +10,7 @@ #include <linux/debugfs.h> #include <linux/device.h> #include <linux/err.h> +#include <linux/of.h> #include <linux/init.h> #include <linux/limits.h> #include <linux/slab.h> @@ -131,9 +132,13 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend); debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate); debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate); + debugfs_create_u32("level", S_IRUGO, d, &opp->level); debugfs_create_ulong("clock_latency_ns", S_IRUGO, d, &opp->clock_latency_ns); + opp->of_name = of_node_full_name(opp->np); + debugfs_create_str("of_name", S_IRUGO, d, (char **)&opp->of_name); + opp_debug_create_supplies(opp, opp_table, d); opp_debug_create_bw(opp, opp_table, d); diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 407c3bfe51d9..45e3a55239a1 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -96,6 +96,7 @@ struct dev_pm_opp { #ifdef CONFIG_DEBUG_FS struct dentry *dentry; + const char *of_name; #endif }; diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 059566f54429..9be007c9420f 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usg_calls++; #endif - while(sg_dma_len(sglist) && nents--) { + while (nents && sg_dma_len(sglist)) { #ifdef CCIO_COLLECT_STATS ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; @@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ccio_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); ++sglist; + nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 952a92504df6..e33036281327 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -142,9 +142,8 @@ struct dino_device { struct pci_hba_data hba; /* 'C' inheritance - must be first */ spinlock_t dinosaur_pen; - unsigned long txn_addr; /* EIR addr to generate interrupt */ - u32 txn_data; /* EIR data assign to each dino */ u32 imr; /* IRQ's which are enabled */ + struct gsc_irq gsc_irq; int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */ #ifdef DINO_DEBUG unsigned int dino_irr0; /* save most recent IRQ line stat */ @@ -339,14 +338,43 @@ static void dino_unmask_irq(struct irq_data *d) if (tmp & DINO_MASK_IRQ(local_irq)) { DBG(KERN_WARNING "%s(): IRQ asserted! 
(ILR 0x%x)\n", __func__, tmp); - gsc_writel(dino_dev->txn_data, dino_dev->txn_addr); + gsc_writel(dino_dev->gsc_irq.txn_data, dino_dev->gsc_irq.txn_addr); } } +#ifdef CONFIG_SMP +static int dino_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, + bool force) +{ + struct dino_device *dino_dev = irq_data_get_irq_chip_data(d); + struct cpumask tmask; + int cpu_irq; + u32 eim; + + if (!cpumask_and(&tmask, dest, cpu_online_mask)) + return -EINVAL; + + cpu_irq = cpu_check_affinity(d, &tmask); + if (cpu_irq < 0) + return cpu_irq; + + dino_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq); + eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data; + __raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0); + + irq_data_update_effective_affinity(d, &tmask); + + return IRQ_SET_MASK_OK; +} +#endif + static struct irq_chip dino_interrupt_type = { .name = "GSC-PCI", .irq_unmask = dino_unmask_irq, .irq_mask = dino_mask_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = dino_set_affinity_irq, +#endif }; @@ -806,7 +834,6 @@ static int __init dino_common_init(struct parisc_device *dev, { int status; u32 eim; - struct gsc_irq gsc_irq; struct resource *res; pcibios_register_hba(&dino_dev->hba); @@ -821,10 +848,8 @@ static int __init dino_common_init(struct parisc_device *dev, ** still only has 11 IRQ input lines - just map some of them ** to a different processor. */ - dev->irq = gsc_alloc_irq(&gsc_irq); - dino_dev->txn_addr = gsc_irq.txn_addr; - dino_dev->txn_data = gsc_irq.txn_data; - eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; + dev->irq = gsc_alloc_irq(&dino_dev->gsc_irq); + eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data; /* ** Dino needs a PA "IRQ" to get a processor's attention. diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c index ed9371acf37e..ec175ae99873 100644 --- a/drivers/parisc/gsc.c +++ b/drivers/parisc/gsc.c @@ -135,10 +135,41 @@ static void gsc_asic_unmask_irq(struct irq_data *d) */ } +#ifdef CONFIG_SMP +static int gsc_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, + bool force) +{ + struct gsc_asic *gsc_dev = irq_data_get_irq_chip_data(d); + struct cpumask tmask; + int cpu_irq; + + if (!cpumask_and(&tmask, dest, cpu_online_mask)) + return -EINVAL; + + cpu_irq = cpu_check_affinity(d, &tmask); + if (cpu_irq < 0) + return cpu_irq; + + gsc_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq); + gsc_dev->eim = ((u32) gsc_dev->gsc_irq.txn_addr) | gsc_dev->gsc_irq.txn_data; + + /* switch IRQ's for devices below LASI/WAX to other CPU */ + gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR); + + irq_data_update_effective_affinity(d, &tmask); + + return IRQ_SET_MASK_OK; +} +#endif + + static struct irq_chip gsc_asic_interrupt_type = { .name = "GSC-ASIC", .irq_unmask = gsc_asic_unmask_irq, .irq_mask = gsc_asic_mask_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = gsc_set_affinity_irq, +#endif }; int gsc_assign_irq(struct irq_chip *type, void *data) diff --git a/drivers/parisc/gsc.h b/drivers/parisc/gsc.h index 86abad3fa215..73cbd0bb1975 100644 --- a/drivers/parisc/gsc.h +++ b/drivers/parisc/gsc.h @@ -31,6 +31,7 @@ struct gsc_asic { int version; int type; int eim; + struct gsc_irq gsc_irq; int global_irq[32]; }; diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c index 4e4fd12c2112..6ef621adb63a 100644 --- a/drivers/parisc/lasi.c +++ b/drivers/parisc/lasi.c @@ -163,7 +163,6 @@ static int __init lasi_init_chip(struct parisc_device *dev) { extern void (*chassis_power_off)(void); struct gsc_asic *lasi; - struct 
gsc_irq gsc_irq; int ret; lasi = kzalloc(sizeof(*lasi), GFP_KERNEL); @@ -185,7 +184,7 @@ static int __init lasi_init_chip(struct parisc_device *dev) lasi_init_irq(lasi); /* the IRQ lasi should use */ - dev->irq = gsc_alloc_irq(&gsc_irq); + dev->irq = gsc_alloc_irq(&lasi->gsc_irq); if (dev->irq < 0) { printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__); @@ -193,9 +192,9 @@ static int __init lasi_init_chip(struct parisc_device *dev) return -EBUSY; } - lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; + lasi->eim = ((u32) lasi->gsc_irq.txn_addr) | lasi->gsc_irq.txn_data; - ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi); + ret = request_irq(lasi->gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi); if (ret < 0) { kfree(lasi); return ret; diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index e60690d38d67..374b9199878d 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, spin_unlock_irqrestore(&ioc->res_lock, flags); #endif - while (sg_dma_len(sglist) && nents--) { + while (nents && sg_dma_len(sglist)) { sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); @@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usingle_calls--; /* kluge since call is unmap_sg() */ #endif ++sglist; + nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c index 5b6df1516235..73a2b01f8d9c 100644 --- a/drivers/parisc/wax.c +++ b/drivers/parisc/wax.c @@ -68,7 +68,6 @@ static int __init wax_init_chip(struct parisc_device *dev) { struct gsc_asic *wax; struct parisc_device *parent; - struct gsc_irq gsc_irq; int ret; wax = kzalloc(sizeof(*wax), GFP_KERNEL); @@ -85,7 +84,7 @@ static int __init wax_init_chip(struct parisc_device *dev) wax_init_irq(wax); /* the IRQ wax should use */ - dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ); + dev->irq = gsc_claim_irq(&wax->gsc_irq, WAX_GSC_IRQ); if (dev->irq < 0) { printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__); @@ -93,9 +92,9 @@ static int __init wax_init_chip(struct parisc_device *dev) return -EBUSY; } - wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; + wax->eim = ((u32) wax->gsc_irq.txn_addr) | wax->gsc_irq.txn_data; - ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "wax", wax); + ret = request_irq(wax->gsc_irq.irq, gsc_asic_intr, 0, "wax", wax); if (ret < 0) { kfree(wax); return ret; diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 46935695cfb9..8d0d1f61c650 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -160,9 +160,12 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, * write happen to have any RW1C (write-one-to-clear) bits set, we * just inadvertently cleared something we shouldn't have. 
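The sba_unmap_sg() change here (and the matching ccio_unmap_sg() change earlier) reorders the loop test so the remaining-entries count is checked before the scatterlist entry is read, and only decremented once an entry has actually been unmapped. A small standalone illustration of why the ordering matters (made-up data, not the driver code):

#include <stdio.h>

/* Walk at most nents entries, stopping early at a zero length. Checking the
 * count first guarantees lens[nents] is never read; the old form
 * "while (len(p) && nents--)" evaluated the length of the entry one past the
 * end when every entry was valid. */
static int count_mapped(const unsigned int *lens, int nents)
{
	int done = 0;

	while (nents && *lens) {
		done++;
		lens++;
		nents--;
	}
	return done;
}

int main(void)
{
	unsigned int lens[3] = { 4096, 8192, 4096 };

	/* All three entries are valid; the loop stops because nents reaches
	 * zero, not because it peeked at a nonexistent fourth entry. */
	printf("unmapped %d entries\n", count_mapped(lens, 3));
	return 0;
}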
*/ - dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n", - size, pci_domain_nr(bus), bus->number, - PCI_SLOT(devfn), PCI_FUNC(devfn), where); + if (!bus->unsafe_warn) { + dev_warn(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n", + size, pci_domain_nr(bus), bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn), where); + bus->unsafe_warn = 1; + } mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); tmp = readl(addr) & mask; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 80fc98acf097..2c05f2f7d1c0 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -779,9 +779,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci) /* Start LTSSM. */ imx6_pcie_ltssm_enable(dev); - ret = dw_pcie_wait_for_link(pci); - if (ret) - goto err_reset_phy; + dw_pcie_wait_for_link(pci); if (pci->link_gen == 2) { /* Allow Gen2 mode after the link is up. */ @@ -817,11 +815,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci) } /* Make sure link training is finished as well! */ - ret = dw_pcie_wait_for_link(pci); - if (ret) { - dev_err(dev, "Failed to bring link up!\n"); - goto err_reset_phy; - } + dw_pcie_wait_for_link(pci); } else { dev_info(dev, "Link: Gen2 disabled\n"); } diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index a945f0c0e73d..3254f60d1713 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -671,10 +671,11 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci) if (!pci->atu_base) { struct resource *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); - if (res) + if (res) { pci->atu_size = resource_size(res); - pci->atu_base = devm_ioremap_resource(dev, res); - if (IS_ERR(pci->atu_base)) + pci->atu_base = devm_ioremap_resource(dev, res); + } + if (!pci->atu_base || IS_ERR(pci->atu_base)) pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; } diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c index 00cde9a248b5..78d002be4f82 100644 --- a/drivers/pci/controller/dwc/pcie-fu740.c +++ b/drivers/pci/controller/dwc/pcie-fu740.c @@ -181,10 +181,59 @@ static int fu740_pcie_start_link(struct dw_pcie *pci) { struct device *dev = pci->dev; struct fu740_pcie *afp = dev_get_drvdata(dev); + u8 cap_exp = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + int ret; + u32 orig, tmp; + + /* + * Force 2.5GT/s when starting the link, due to some devices not + * probing at higher speeds. This happens with the PCIe switch + * on the Unmatched board when U-Boot has not initialised the PCIe. + * The fix in U-Boot is to force 2.5GT/s, which then gets cleared + * by the soft reset done by this driver. 
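pci_generic_config_write32() can only issue 32-bit writes, so a 1- or 2-byte config write becomes a read-modify-write of the whole dword, and any RW1C (write-one-to-clear) status bits that read back as 1 get written back as 1 and cleared, which is why the code above now warns once per bus. A quick standalone model of the mask arithmetic used in that sub-dword path (illustrative values; 4-byte writes go straight to the hardware):

#include <stdint.h>
#include <stdio.h>

/* Emulate a 32-bit-only config write of 'size' bytes (1 or 2) at byte offset
 * 'where' within the dword: keep the untouched bytes from the old value and
 * merge in the new ones, as the driver's RMW path does. */
static uint32_t write32_rmw(uint32_t old, int where, int size, uint32_t val)
{
	uint32_t mask = ~(((1u << (size * 8)) - 1) << ((where & 0x3) * 8));

	return (old & mask) | (val << ((where & 0x3) * 8));
}

int main(void)
{
	/* Dword whose top byte holds a pending RW1C status bit. */
	uint32_t reg = 0x80000012;
	uint32_t out = write32_rmw(reg, 0, 1, 0x34); /* 1-byte write of 0x34 */

	/* The untouched status byte is written back as 1, which a real RW1C
	 * register would interpret as "clear me". */
	printf("written back: 0x%08x\n", out);
	return 0;
}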
+ */ + dev_dbg(dev, "cap_exp at %x\n", cap_exp); + dw_pcie_dbi_ro_wr_en(pci); + + tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP); + orig = tmp & PCI_EXP_LNKCAP_SLS; + tmp &= ~PCI_EXP_LNKCAP_SLS; + tmp |= PCI_EXP_LNKCAP_SLS_2_5GB; + dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp); /* Enable LTSSM */ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE); - return 0; + + ret = dw_pcie_wait_for_link(pci); + if (ret) { + dev_err(dev, "error: link did not start\n"); + goto err; + } + + tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP); + if ((tmp & PCI_EXP_LNKCAP_SLS) != orig) { + dev_dbg(dev, "changing speed back to original\n"); + + tmp &= ~PCI_EXP_LNKCAP_SLS; + tmp |= orig; + dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp); + + tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + tmp |= PORT_LOGIC_SPEED_CHANGE; + dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); + + ret = dw_pcie_wait_for_link(pci); + if (ret) { + dev_err(dev, "error: link did not start at new speed\n"); + goto err; + } + } + + ret = 0; +err: + WARN_ON(ret); /* we assume that errors will be very rare */ + dw_pcie_dbi_ro_wr_dis(pci); + return ret; } static int fu740_pcie_host_init(struct pcie_port *pp) diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 85323cbc4888..6277b3f3031a 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -844,7 +844,9 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, case PCI_EXP_RTSTA: { u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG); u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG); - *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16); + *value = msglog >> 16; + if (isr0 & PCIE_MSG_PM_PME_MASK) + *value |= PCI_EXP_RTSTA_PME; return PCI_BRIDGE_EMUL_HANDLED; } @@ -1177,7 +1179,7 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data, msg->address_lo = lower_32_bits(msi_msg); msg->address_hi = upper_32_bits(msi_msg); - msg->data = data->irq; + msg->data = data->hwirq; } static int advk_msi_set_affinity(struct irq_data *irq_data, @@ -1194,15 +1196,11 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain, int hwirq, i; mutex_lock(&pcie->msi_used_lock); - hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM, - 0, nr_irqs, 0); - if (hwirq >= MSI_IRQ_NUM) { - mutex_unlock(&pcie->msi_used_lock); - return -ENOSPC; - } - - bitmap_set(pcie->msi_used, hwirq, nr_irqs); + hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM, + order_base_2(nr_irqs)); mutex_unlock(&pcie->msi_used_lock); + if (hwirq < 0) + return -ENOSPC; for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, hwirq + i, @@ -1220,7 +1218,7 @@ static void advk_msi_irq_domain_free(struct irq_domain *domain, struct advk_pcie *pcie = domain->host_data; mutex_lock(&pcie->msi_used_lock); - bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs); + bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs)); mutex_unlock(&pcie->msi_used_lock); } @@ -1381,7 +1379,6 @@ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) static void advk_pcie_handle_msi(struct advk_pcie *pcie) { u32 msi_val, msi_mask, msi_status, msi_idx; - u16 msi_data; msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); @@ -1391,13 +1388,9 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie) if (!(BIT(msi_idx) & msi_status)) continue; - /* - * msi_idx contains bits [4:0] of the msi_data and 
msi_data - * contains 16bit MSI interrupt number - */ advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG); - msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK; - generic_handle_irq(msi_data); + if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL) + dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx); } advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING, @@ -1537,8 +1530,7 @@ static int advk_pcie_probe(struct platform_device *pdev) * only PIO for issuing configuration transfers which does * not use PCIe window configuration. */ - if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 && - type != IORESOURCE_IO) + if (type != IORESOURCE_MEM && type != IORESOURCE_IO) continue; /* @@ -1546,8 +1538,7 @@ static int advk_pcie_probe(struct platform_device *pdev) * configuration is set to transparent memory access so it * does not need window configuration. */ - if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) && - entry->offset == 0) + if (type == IORESOURCE_MEM && entry->offset == 0) continue; /* diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 67c46e52c0dc..5b156c563e3a 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1899,8 +1899,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus) if (!hv_dev) continue; - if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY) - set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node); + if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY && + hv_dev->desc.virtual_numa_node < num_possible_nodes()) + /* + * The kernel may boot with some NUMA nodes offline + * (e.g. in a KDUMP kernel) or with NUMA disabled via + * "numa=off". In those cases, adjust the host provided + * NUMA node to a valid NUMA node used by the kernel. + */ + set_dev_node(&dev->dev, + numa_map_to_online_node( + hv_dev->desc.virtual_numa_node)); put_pcichild(hv_dev); } @@ -3139,6 +3148,15 @@ static int hv_pci_probe(struct hv_device *hdev, hbus->bridge->domain_nr = dom; #ifdef CONFIG_X86 hbus->sysdata.domain = dom; +#elif defined(CONFIG_ARM64) + /* + * Set the PCI bus parent to be the corresponding VMbus + * device. Then the VMbus device will be assigned as the + * ACPI companion in pcibios_root_bridge_prepare() and + * pci_dma_configure() will propagate device coherence + * information to devices created on the bus. 
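The aardvark multi-MSI allocation above switches to bitmap_find_free_region()/bitmap_release_region(), which hand out naturally aligned power-of-two blocks; order_base_2(nr_irqs) rounds the request up accordingly, since PCI multi-MSI vectors must be contiguous and power-of-two sized. A tiny sketch of that rounding (not the driver code):

#include <stdio.h>

/* ceil(log2(n)) for n >= 1, mirroring what the kernel's order_base_2()
 * computes for the region allocator. */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	/* A request for 3 MSI vectors is satisfied with an aligned block of
	 * 4 hardware IRQs (order 2). */
	for (unsigned int n = 1; n <= 5; n++)
		printf("nr_irqs=%u -> order=%u, block=%u\n",
		       n, order_base_2(n), 1u << order_base_2(n));
	return 0;
}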
+ */ + hbus->sysdata.parent = hdev->device.parent; #endif hbus->hdev = hdev; diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 2dc6890dbcaa..2a3bf82aa4e2 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -51,10 +51,14 @@ PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \ PCIE_CONF_ADDR_EN) #define PCIE_CONF_DATA_OFF 0x18fc +#define PCIE_INT_CAUSE_OFF 0x1900 +#define PCIE_INT_PM_PME BIT(28) #define PCIE_MASK_OFF 0x1910 #define PCIE_MASK_ENABLE_INTS 0x0f000000 #define PCIE_CTRL_OFF 0x1a00 #define PCIE_CTRL_X1_MODE 0x0001 +#define PCIE_CTRL_RC_MODE BIT(1) +#define PCIE_CTRL_MASTER_HOT_RESET BIT(24) #define PCIE_STAT_OFF 0x1a04 #define PCIE_STAT_BUS 0xff00 #define PCIE_STAT_DEV 0x1f0000 @@ -125,6 +129,11 @@ static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); } +static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port) +{ + return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8; +} + static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr) { u32 stat; @@ -213,18 +222,21 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) { - u32 cmd, mask; + u32 ctrl, cmd, mask; - /* Point PCIe unit MBUS decode windows to DRAM space. */ - mvebu_pcie_setup_wins(port); + /* Setup PCIe controller to Root Complex mode. */ + ctrl = mvebu_readl(port, PCIE_CTRL_OFF); + ctrl |= PCIE_CTRL_RC_MODE; + mvebu_writel(port, ctrl, PCIE_CTRL_OFF); - /* Master + slave enable. */ + /* Disable Root Bridge I/O space, memory space and bus mastering. */ cmd = mvebu_readl(port, PCIE_CMD_OFF); - cmd |= PCI_COMMAND_IO; - cmd |= PCI_COMMAND_MEMORY; - cmd |= PCI_COMMAND_MASTER; + cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); mvebu_writel(port, cmd, PCIE_CMD_OFF); + /* Point PCIe unit MBUS decode windows to DRAM space. */ + mvebu_pcie_setup_wins(port); + /* Enable interrupt lines A-D. */ mask = mvebu_readl(port, PCIE_MASK_OFF); mask |= PCIE_MASK_ENABLE_INTS; @@ -371,8 +383,7 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) /* Are the new iobase/iolimit values invalid? */ if (conf->iolimit < conf->iobase || - conf->iolimitupper < conf->iobaseupper || - !(conf->command & PCI_COMMAND_IO)) { + conf->iolimitupper < conf->iobaseupper) { mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, &port->iowin); return; @@ -409,8 +420,7 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) struct pci_bridge_emul_conf *conf = &port->bridge.conf; /* Are the new membase/memlimit values invalid? */ - if (conf->memlimit < conf->membase || - !(conf->command & PCI_COMMAND_MEMORY)) { + if (conf->memlimit < conf->membase) { mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, &port->memwin); return; @@ -431,6 +441,54 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) } static pci_bridge_emul_read_status_t +mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge, + int reg, u32 *value) +{ + struct mvebu_pcie_port *port = bridge->data; + + switch (reg) { + case PCI_COMMAND: + *value = mvebu_readl(port, PCIE_CMD_OFF); + break; + + case PCI_PRIMARY_BUS: { + /* + * From the whole 32bit register we support reading from HW only + * secondary bus number which is mvebu local bus number. + * Other bits are retrieved only from emulated config buffer. 
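The new mvebu_pcie_get_local_bus_nr() and the existing mvebu_pcie_set_local_bus_nr() are plain field accessors on the PCIE_STAT register: the local bus number sits in bits [15:8] and the local device number in bits [20:16]. A standalone sketch of that field handling (the register value below is made up):

#include <stdint.h>
#include <stdio.h>

#define STAT_BUS 0x0000ff00u	/* bits [15:8]  - local bus number */
#define STAT_DEV 0x001f0000u	/* bits [20:16] - local device number */

static uint8_t get_local_bus(uint32_t stat)
{
	return (stat & STAT_BUS) >> 8;
}

static uint32_t set_local_bus(uint32_t stat, uint8_t bus)
{
	return (stat & ~STAT_BUS) | ((uint32_t)bus << 8);
}

int main(void)
{
	uint32_t stat = 0x00010500;	/* made-up: local dev 1, local bus 5 */

	printf("bus=%u\n", get_local_bus(stat));
	stat = set_local_bus(stat, 0);	/* probe resets the local bus to 0 */
	printf("stat=0x%08x bus=%u\n", stat, get_local_bus(stat));
	return 0;
}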
+ */ + __le32 *cfgspace = (__le32 *)&bridge->conf; + u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]); + val &= ~0xff00; + val |= mvebu_pcie_get_local_bus_nr(port) << 8; + *value = val; + break; + } + + case PCI_INTERRUPT_LINE: { + /* + * From the whole 32bit register we support reading from HW only + * one bit: PCI_BRIDGE_CTL_BUS_RESET. + * Other bits are retrieved only from emulated config buffer. + */ + __le32 *cfgspace = (__le32 *)&bridge->conf; + u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]); + if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET) + val |= PCI_BRIDGE_CTL_BUS_RESET << 16; + else + val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16); + *value = val; + break; + } + + default: + return PCI_BRIDGE_EMUL_NOT_HANDLED; + } + + return PCI_BRIDGE_EMUL_HANDLED; +} + +static pci_bridge_emul_read_status_t mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, int reg, u32 *value) { @@ -442,9 +500,7 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, break; case PCI_EXP_DEVCTL: - *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) & - ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | - PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL); break; case PCI_EXP_LNKCAP: @@ -468,6 +524,18 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, *value = mvebu_readl(port, PCIE_RC_RTSTA); break; + case PCI_EXP_DEVCAP2: + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2); + break; + + case PCI_EXP_DEVCTL2: + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2); + break; + + case PCI_EXP_LNKCTL2: + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2); + break; + default: return PCI_BRIDGE_EMUL_NOT_HANDLED; } @@ -484,26 +552,16 @@ mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, switch (reg) { case PCI_COMMAND: - { - if (!mvebu_has_ioport(port)) - conf->command &= ~PCI_COMMAND_IO; - - if ((old ^ new) & PCI_COMMAND_IO) - mvebu_pcie_handle_iobase_change(port); - if ((old ^ new) & PCI_COMMAND_MEMORY) - mvebu_pcie_handle_membase_change(port); + if (!mvebu_has_ioport(port)) { + conf->command = cpu_to_le16( + le16_to_cpu(conf->command) & ~PCI_COMMAND_IO); + new &= ~PCI_COMMAND_IO; + } + mvebu_writel(port, new, PCIE_CMD_OFF); break; - } case PCI_IO_BASE: - /* - * We keep bit 1 set, it is a read-only bit that - * indicates we support 32 bits addressing for the - * I/O - */ - conf->iobase |= PCI_IO_RANGE_TYPE_32; - conf->iolimit |= PCI_IO_RANGE_TYPE_32; mvebu_pcie_handle_iobase_change(port); break; @@ -516,7 +574,19 @@ mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, break; case PCI_PRIMARY_BUS: - mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus); + if (mask & 0xff00) + mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus); + break; + + case PCI_INTERRUPT_LINE: + if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) { + u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF); + if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16)) + ctrl |= PCIE_CTRL_MASTER_HOT_RESET; + else + ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET; + mvebu_writel(port, ctrl, PCIE_CTRL_OFF); + } break; default: @@ -532,13 +602,6 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, switch (reg) { case PCI_EXP_DEVCTL: - /* - * Armada370 data says these bits must always - * be zero when in root complex mode. 
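In the emulated-bridge write path above, each callback receives the old value, the new value and a mask describing which bytes of the 32-bit register were actually written; the "if (mask & 0xff00)" test is how the PCI_PRIMARY_BUS handler distinguishes a real update of the Secondary Bus Number byte from a write that only touched its neighbours. A small model of building and testing such a mask (helper name is made up, and this only approximates how pci-bridge-emul forms it):

#include <stdint.h>
#include <stdio.h>

/* Byte-enable mask for a 'size'-byte write at byte offset 'where' inside one
 * config dword. */
static uint32_t write_mask(int where, int size)
{
	return (size == 4 ? 0xffffffffu : ((1u << (size * 8)) - 1))
		<< ((where & 0x3) * 8);
}

int main(void)
{
	/* A 1-byte write at offset 0x19 (Secondary Bus Number) lands in the
	 * PCI_PRIMARY_BUS dword at 0x18, so the mask covers bits [15:8]. */
	uint32_t m = write_mask(0x19 - 0x18, 1);

	printf("mask=0x%08x, touches secondary bus: %s\n",
	       m, (m & 0xff00) ? "yes" : "no");
	return 0;
}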
- */ - new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | - PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); - mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL); break; @@ -555,12 +618,31 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, break; case PCI_EXP_RTSTA: - mvebu_writel(port, new, PCIE_RC_RTSTA); + /* + * PME Status bit in Root Status Register (PCIE_RC_RTSTA) + * is read-only and can be cleared only by writing 0b to the + * Interrupt Cause RW0C register (PCIE_INT_CAUSE_OFF). So + * clear PME via Interrupt Cause. + */ + if (new & PCI_EXP_RTSTA_PME) + mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF); + break; + + case PCI_EXP_DEVCTL2: + mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2); + break; + + case PCI_EXP_LNKCTL2: + mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2); + break; + + default: break; } } static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { + .read_base = mvebu_pci_bridge_emul_base_conf_read, .write_base = mvebu_pci_bridge_emul_base_conf_write, .read_pcie = mvebu_pci_bridge_emul_pcie_conf_read, .write_pcie = mvebu_pci_bridge_emul_pcie_conf_write, @@ -570,7 +652,7 @@ static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { * Initialize the configuration space of the PCI-to-PCI bridge * associated with the given PCIe interface. */ -static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) +static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) { struct pci_bridge_emul *bridge = &port->bridge; u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP); @@ -597,7 +679,7 @@ static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) bridge->data = port; bridge->ops = &mvebu_pci_bridge_emul_ops; - pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR); + return pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR); } static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys) @@ -1120,9 +1202,94 @@ static int mvebu_pcie_probe(struct platform_device *pdev) continue; } + ret = mvebu_pci_bridge_emul_init(port); + if (ret < 0) { + dev_err(dev, "%s: cannot init emulated bridge\n", + port->name); + devm_iounmap(dev, port->base); + port->base = NULL; + mvebu_pcie_powerdown(port); + continue; + } + + /* + * PCIe topology exported by mvebu hw is quite complicated. In + * reality has something like N fully independent host bridges + * where each host bridge has one PCIe Root Port (which acts as + * PCI Bridge device). Each host bridge has its own independent + * internal registers, independent access to PCI config space, + * independent interrupt lines, independent window and memory + * access configuration. But additionally there is some kind of + * peer-to-peer support between PCIe devices behind different + * host bridges limited just to forwarding of memory and I/O + * transactions (forwarding of error messages and config cycles + * is not supported). So we could say there are N independent + * PCIe Root Complexes. + * + * For this kind of setup DT should have been structured into + * N independent PCIe controllers / host bridges. But instead + * structure in past was defined to put PCIe Root Ports of all + * host bridges into one bus zero, like in classic multi-port + * Root Complex setup with just one host bridge. 
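The PCI_EXP_RTSTA write handler above clears PME not through the Root Status register, which is read-only here, but by writing 0 to the PME bit of the Interrupt Cause register, which is RW0C: bits written as 0 are cleared, bits written as 1 are left alone. A toy model of RW0C next to the more familiar RW1C behaviour (values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* RW1C: writing 1 to a bit clears it (typical status registers). */
static uint32_t rw1c_write(uint32_t reg, uint32_t val)
{
	return reg & ~val;
}

/* RW0C: writing 0 to a bit clears it (the Interrupt Cause register here). */
static uint32_t rw0c_write(uint32_t reg, uint32_t val)
{
	return reg & val;
}

int main(void)
{
	uint32_t pme = 1u << 28;	/* PCIE_INT_PM_PME */
	uint32_t cause = pme | 0x3;	/* PME pending plus two other causes */

	/* Writing ~PME clears only the PME bit and preserves the rest, which
	 * is what the driver does against PCIE_INT_CAUSE_OFF. */
	printf("rw0c: 0x%08x\n", rw0c_write(cause, ~pme));
	printf("rw1c: 0x%08x\n", rw1c_write(cause, pme));
	return 0;
}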
+ * + * This means that pci-mvebu.c driver provides "virtual" bus 0 + * on which registers all PCIe Root Ports (PCI Bridge devices) + * specified in DT by their BDF addresses and virtually routes + * PCI config access of each PCI bridge device to specific PCIe + * host bridge. + * + * Normally PCI Bridge should choose between Type 0 and Type 1 + * config requests based on primary and secondary bus numbers + * configured on the bridge itself. But because mvebu PCI Bridge + * does not have registers for primary and secondary bus numbers + * in its config space, it determinates type of config requests + * via its own custom way. + * + * There are two options how mvebu determinate type of config + * request. + * + * 1. If Secondary Bus Number Enable bit is not set or is not + * available (applies for pre-XP PCIe controllers) then Type 0 + * is used if target bus number equals Local Bus Number (bits + * [15:8] in register 0x1a04) and target device number differs + * from Local Device Number (bits [20:16] in register 0x1a04). + * Type 1 is used if target bus number differs from Local Bus + * Number. And when target bus number equals Local Bus Number + * and target device equals Local Device Number then request is + * routed to Local PCI Bridge (PCIe Root Port). + * + * 2. If Secondary Bus Number Enable bit is set (bit 7 in + * register 0x1a2c) then mvebu hw determinate type of config + * request like compliant PCI Bridge based on primary bus number + * which is configured via Local Bus Number (bits [15:8] in + * register 0x1a04) and secondary bus number which is configured + * via Secondary Bus Number (bits [7:0] in register 0x1a2c). + * Local PCI Bridge (PCIe Root Port) is available on primary bus + * as device with Local Device Number (bits [20:16] in register + * 0x1a04). + * + * Secondary Bus Number Enable bit is disabled by default and + * option 2. is not available on pre-XP PCIe controllers. Hence + * this driver always use option 1. + * + * Basically it means that primary and secondary buses shares + * one virtual number configured via Local Bus Number bits and + * Local Device Number bits determinates if accessing primary + * or secondary bus. Set Local Device Number to 1 and redirect + * all writes of PCI Bridge Secondary Bus Number register to + * Local Bus Number (bits [15:8] in register 0x1a04). + * + * So when accessing devices on buses behind secondary bus + * number it would work correctly. And also when accessing + * device 0 at secondary bus number via config space would be + * correctly routed to secondary bus. Due to issues described + * in mvebu_pcie_setup_hw(), PCI Bridges at primary bus (zero) + * are not accessed directly via PCI config space but rarher + * indirectly via kernel emulated PCI bridge driver. 
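The comment above describes how, with the Secondary Bus Number Enable bit left clear, the controller picks the config-cycle type purely from the Local Bus Number and Local Device Number fields (option 1). A compact sketch of that decision, with made-up inputs and the local device number set to 1 as the probe code does:

#include <stdio.h>

enum target { LOCAL_BRIDGE, TYPE0, TYPE1 };

/* Option 1: a target bus different from the local bus gives a Type 1 cycle;
 * the same bus but a different device gives a Type 0 cycle; matching bus and
 * device means the request is handled by the local Root Port itself. */
static enum target route(int bus, int dev, int local_bus, int local_dev)
{
	if (bus != local_bus)
		return TYPE1;
	if (dev != local_dev)
		return TYPE0;
	return LOCAL_BRIDGE;
}

int main(void)
{
	static const char *name[] = { "local bridge", "type 0", "type 1" };
	int local_bus = 0, local_dev = 1;

	printf("00:01.0 -> %s\n", name[route(0, 1, local_bus, local_dev)]);
	printf("00:00.0 -> %s\n", name[route(0, 0, local_bus, local_dev)]);
	printf("01:00.0 -> %s\n", name[route(1, 0, local_bus, local_dev)]);
	return 0;
}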
+ */ mvebu_pcie_setup_hw(port); mvebu_pcie_set_local_dev_nr(port, 1); - mvebu_pci_bridge_emul_init(port); + mvebu_pcie_set_local_bus_nr(port, 0); } pcie->nports = i; diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index 7d7d8970fdc2..4641e57487cf 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -466,7 +466,7 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) return 1; } - if ((size > SZ_1K) && (size < SZ_4G) && !(*ib_reg_mask & (1 << 0))) { + if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) { *ib_reg_mask |= (1 << 0); return 0; } @@ -480,28 +480,27 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) } static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, - struct resource_entry *entry, - u8 *ib_reg_mask) + struct of_pci_range *range, u8 *ib_reg_mask) { void __iomem *cfg_base = port->cfg_base; struct device *dev = port->dev; void __iomem *bar_addr; u32 pim_reg; - u64 cpu_addr = entry->res->start; - u64 pci_addr = cpu_addr - entry->offset; - u64 size = resource_size(entry->res); + u64 cpu_addr = range->cpu_addr; + u64 pci_addr = range->pci_addr; + u64 size = range->size; u64 mask = ~(size - 1) | EN_REG; u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64; u32 bar_low; int region; - region = xgene_pcie_select_ib_reg(ib_reg_mask, size); + region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size); if (region < 0) { dev_warn(dev, "invalid pcie dma-range config\n"); return; } - if (entry->res->flags & IORESOURCE_PREFETCH) + if (range->flags & IORESOURCE_PREFETCH) flags |= PCI_BASE_ADDRESS_MEM_PREFETCH; bar_low = pcie_bar_low_val((u32)cpu_addr, flags); @@ -532,13 +531,25 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port) { - struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port); - struct resource_entry *entry; + struct device_node *np = port->node; + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device *dev = port->dev; u8 ib_reg_mask = 0; - resource_list_for_each_entry(entry, &bridge->dma_ranges) - xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask); + if (of_pci_dma_range_parser_init(&parser, np)) { + dev_err(dev, "missing dma-ranges property\n"); + return -EINVAL; + } + + /* Get the dma-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.cpu_addr + range.size - 1; + dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + range.flags, range.cpu_addr, end, range.pci_addr); + xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask); + } return 0; } diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index 17c59b0d6978..21207df680cc 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -79,6 +79,9 @@ #define PCIE_ICMD_PM_REG 0x198 #define PCIE_TURN_OFF_LINK BIT(4) +#define PCIE_MISC_CTRL_REG 0x348 +#define PCIE_DISABLE_DVFSRC_VLT_REQ BIT(1) + #define PCIE_TRANS_TABLE_BASE_REG 0x800 #define PCIE_ATR_SRC_ADDR_MSB_OFFSET 0x4 #define PCIE_ATR_TRSL_ADDR_LSB_OFFSET 0x8 @@ -297,6 +300,11 @@ static int mtk_pcie_startup_port(struct mtk_pcie_port *port) val &= ~PCIE_INTX_ENABLE; writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG); + /* Disable DVFSRC voltage request */ + val = readl_relaxed(port->base + PCIE_MISC_CTRL_REG); + val |= PCIE_DISABLE_DVFSRC_VLT_REQ; + writel_relaxed(val, port->base + PCIE_MISC_CTRL_REG); + /* 
Assert all reset signals */ val = readl_relaxed(port->base + PCIE_RST_CTRL_REG); val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB; diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index 8f3131844e77..bfb13f358d07 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -52,10 +52,10 @@ struct rcar_msi { */ static void __iomem *pcie_base; /* - * Static copy of bus clock pointer, so we can check whether the clock - * is enabled or not. + * Static copy of PCIe device pointer, so we can check whether the + * device is runtime suspended or not. */ -static struct clk *pcie_bus_clk; +static struct device *pcie_dev; #endif /* Structure representing the PCIe interface */ @@ -794,7 +794,7 @@ static int rcar_pcie_get_resources(struct rcar_pcie_host *host) #ifdef CONFIG_ARM /* Cache static copy for L1 link state fixup hook on aarch32 */ pcie_base = pcie->base; - pcie_bus_clk = host->bus_clk; + pcie_dev = pcie->dev; #endif return 0; @@ -1064,7 +1064,7 @@ static int rcar_pcie_aarch32_abort_handler(unsigned long addr, spin_lock_irqsave(&pmsr_lock, flags); - if (!pcie_base || !__clk_is_enabled(pcie_bus_clk)) { + if (!pcie_base || pm_runtime_suspended(pcie_dev)) { ret = 1; goto unlock_exit; } diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 90d84d3bc868..5b833f00e980 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -285,7 +285,17 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test) if (ret) dev_err(dev, "Data transfer failed\n"); } else { - memcpy(dst_addr, src_addr, reg->size); + void *buf; + + buf = kzalloc(reg->size, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_map_addr; + } + + memcpy_fromio(buf, src_addr, reg->size); + memcpy_toio(dst_addr, buf, reg->size); + kfree(buf); } ktime_get_ts64(&end); pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma); @@ -441,7 +451,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test) if (!epf_test->dma_supported) { dev_err(dev, "Cannot transfer data using DMA\n"); ret = -EINVAL; - goto err_map_addr; + goto err_dma_map; } src_phys_addr = dma_map_single(dma_dev, buf, reg->size, diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index b0692f33b03a..8bedbc77fe95 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -98,6 +98,8 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout) if (slot_status & PCI_EXP_SLTSTA_CC) { pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); + ctrl->cmd_busy = 0; + smp_mb(); return 1; } msleep(10); @@ -1058,6 +1060,8 @@ static void quirk_cmd_compl(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110, + PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401, diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 0663762ea69d..4893b1e82403 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1811,6 +1811,18 @@ static void quirk_alder_ioapic(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic); #endif +static void 
quirk_no_msi(struct pci_dev *dev) +{ + pci_info(dev, "avoiding MSI to work around a hardware defect\n"); + dev->no_msi = 1; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4386, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4387, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4388, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4389, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438a, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438b, quirk_no_msi); + static void quirk_pcie_mch(struct pci_dev *pdev) { pdev->no_msi = 1; @@ -5344,11 +5356,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); */ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { - if ((pdev->device == 0x7312 && pdev->revision != 0x00) || - (pdev->device == 0x7340 && pdev->revision != 0xc5) || - (pdev->device == 0x7341 && pdev->revision != 0x00)) - return; - if (pdev->device == 0x15d8) { if (pdev->revision == 0xcf && pdev->subsystem_vendor == 0xea50 && @@ -5370,10 +5377,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); /* AMD Iceland dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); /* AMD Navi10 dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7310, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7318, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7319, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731a, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731b, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731e, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731f, quirk_amd_harvest_no_ats); /* AMD Navi14 dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats); /* AMD Raven platform iGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 295cc7952d0e..57d20cf3da7a 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -398,6 +398,9 @@ validate_group(struct perf_event *event) if (!validate_event(event->pmu, &fake_pmu, leader)) return -EINVAL; + if (event == leader) + return 0; + for_each_sibling_event(sibling, leader) { if (!validate_event(event->pmu, &fake_pmu, sibling)) return -EINVAL; @@ -487,12 +490,7 @@ __hw_perf_event_init(struct perf_event *event) local64_set(&hwc->period_left, hwc->sample_period); } - if (event->group_leader != event) { - if (validate_group(event) != 0) - return -EINVAL; - } - - return 0; + return validate_group(event); } static int armpmu_event_init(struct perf_event *event) diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 94ebc1ecace7..b1b2a55de77f 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -29,7 +29,7 @@ #define CNTL_OVER_MASK 0xFFFFFFFE #define CNTL_CSV_SHIFT 24 -#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT) +#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT) #define EVENT_CYCLES_ID 0 #define 
EVENT_CYCLES_COUNTER 0 diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 5b093badd0f6..f60e79fac202 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -736,7 +736,7 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster( { u64 mpidr; int cpu_cluster_id; - struct cluster_pmu *cluster = NULL; + struct cluster_pmu *cluster; /* * This assumes that the cluster_id is in MPIDR[aff1] for @@ -758,10 +758,10 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster( cluster->cluster_id); cpumask_set_cpu(cpu, &cluster->cluster_cpus); *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; - break; + return cluster; } - return cluster; + return NULL; } static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) diff --git a/drivers/phy/amlogic/phy-meson-gxl-usb2.c b/drivers/phy/amlogic/phy-meson-gxl-usb2.c index 2b3c0d730f20..db17c3448bfe 100644 --- a/drivers/phy/amlogic/phy-meson-gxl-usb2.c +++ b/drivers/phy/amlogic/phy-meson-gxl-usb2.c @@ -114,8 +114,10 @@ static int phy_meson_gxl_usb2_init(struct phy *phy) return ret; ret = clk_prepare_enable(priv->clk); - if (ret) + if (ret) { + reset_control_rearm(priv->reset); return ret; + } return 0; } @@ -125,6 +127,7 @@ static int phy_meson_gxl_usb2_exit(struct phy *phy) struct phy_meson_gxl_usb2_priv *priv = phy_get_drvdata(phy); clk_disable_unprepare(priv->clk); + reset_control_rearm(priv->reset); return 0; } diff --git a/drivers/phy/amlogic/phy-meson8b-usb2.c b/drivers/phy/amlogic/phy-meson8b-usb2.c index cf10bed40528..dd96763911b8 100644 --- a/drivers/phy/amlogic/phy-meson8b-usb2.c +++ b/drivers/phy/amlogic/phy-meson8b-usb2.c @@ -154,6 +154,7 @@ static int phy_meson8b_usb2_power_on(struct phy *phy) ret = clk_prepare_enable(priv->clk_usb_general); if (ret) { dev_err(&phy->dev, "Failed to enable USB general clock\n"); + reset_control_rearm(priv->reset); return ret; } @@ -161,6 +162,7 @@ static int phy_meson8b_usb2_power_on(struct phy *phy) if (ret) { dev_err(&phy->dev, "Failed to enable USB DDR clock\n"); clk_disable_unprepare(priv->clk_usb_general); + reset_control_rearm(priv->reset); return ret; } @@ -199,6 +201,7 @@ static int phy_meson8b_usb2_power_on(struct phy *phy) dev_warn(&phy->dev, "USB ID detect failed!\n"); clk_disable_unprepare(priv->clk_usb); clk_disable_unprepare(priv->clk_usb_general); + reset_control_rearm(priv->reset); return -EINVAL; } } @@ -218,6 +221,7 @@ static int phy_meson8b_usb2_power_off(struct phy *phy) clk_disable_unprepare(priv->clk_usb); clk_disable_unprepare(priv->clk_usb_general); + reset_control_rearm(priv->reset); /* power off the PHY by putting it into reset mode */ regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET, @@ -265,8 +269,9 @@ static int phy_meson8b_usb2_probe(struct platform_device *pdev) return PTR_ERR(priv->clk_usb); priv->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL); - if (PTR_ERR(priv->reset) == -EPROBE_DEFER) - return PTR_ERR(priv->reset); + if (IS_ERR(priv->reset)) + return dev_err_probe(&pdev->dev, PTR_ERR(priv->reset), + "Failed to get the reset line"); priv->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1); if (priv->dr_mode == USB_DR_MODE_UNKNOWN) { diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c index 9391ab42a12b..dd0f66288fbd 100644 --- a/drivers/phy/broadcom/phy-brcm-usb-init.c +++ b/drivers/phy/broadcom/phy-brcm-usb-init.c @@ -79,6 +79,7 @@ enum brcm_family_type { BRCM_FAMILY_3390A0, + BRCM_FAMILY_4908, 
BRCM_FAMILY_7250B0, BRCM_FAMILY_7271A0, BRCM_FAMILY_7364A0, @@ -96,6 +97,7 @@ enum brcm_family_type { static const char *family_names[BRCM_FAMILY_COUNT] = { USB_BRCM_FAMILY(3390A0), + USB_BRCM_FAMILY(4908), USB_BRCM_FAMILY(7250B0), USB_BRCM_FAMILY(7271A0), USB_BRCM_FAMILY(7364A0), @@ -203,6 +205,27 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK, ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */ }, + /* 4908 */ + [BRCM_FAMILY_4908] = { + 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */ + 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */ + 0, /* USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK */ + 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ + 0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */ + 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ + 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ + USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, + USB_CTRL_USB_PM_USB_PWRDN_MASK, + 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */ + 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */ + 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */ + 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */ + 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */ + 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */ + 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */ + 0, /* USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK */ + 0, /* USB_CTRL_SETUP ENDIAN bits */ + }, /* 7250b0 */ [BRCM_FAMILY_7250B0] = { USB_CTRL_SETUP_SCB1_EN_MASK, @@ -559,6 +582,7 @@ static void brcmusb_usb3_pll_54mhz(struct brcm_usb_init_params *params) */ switch (params->selected_family) { case BRCM_FAMILY_3390A0: + case BRCM_FAMILY_4908: case BRCM_FAMILY_7250B0: case BRCM_FAMILY_7366C0: case BRCM_FAMILY_74371A0: @@ -1004,6 +1028,18 @@ static const struct brcm_usb_init_ops bcm7445_ops = { .set_dual_select = usb_set_dual_select, }; +void brcm_usb_dvr_init_4908(struct brcm_usb_init_params *params) +{ + int fam; + + fam = BRCM_FAMILY_4908; + params->selected_family = fam; + params->usb_reg_bits_map = + &usb_reg_bits_map_table[fam][0]; + params->family_name = family_names[fam]; + params->ops = &bcm7445_ops; +} + void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params) { int fam; diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h index a39f30fa2e99..1ccb5ddab865 100644 --- a/drivers/phy/broadcom/phy-brcm-usb-init.h +++ b/drivers/phy/broadcom/phy-brcm-usb-init.h @@ -64,6 +64,7 @@ struct brcm_usb_init_params { bool suspend_with_clocks; }; +void brcm_usb_dvr_init_4908(struct brcm_usb_init_params *params); void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params); void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params); void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params); diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c index 116fb23aebd9..2cb3779fcdf8 100644 --- a/drivers/phy/broadcom/phy-brcm-usb.c +++ b/drivers/phy/broadcom/phy-brcm-usb.c @@ -18,6 +18,7 @@ #include <linux/soc/brcmstb/brcmstb.h> #include <dt-bindings/phy/phy.h> #include <linux/mfd/syscon.h> +#include <linux/suspend.h> #include "phy-brcm-usb-init.h" @@ -70,12 +71,35 @@ struct brcm_usb_phy_data { int init_count; int wake_irq; struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX]; + struct notifier_block pm_notifier; + bool pm_active; }; static s8 *node_reg_names[BRCM_REGS_MAX] = { "crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec" }; +static int brcm_pm_notifier(struct notifier_block *notifier, + unsigned long pm_event, + void *unused) +{ + struct brcm_usb_phy_data *priv = + 
container_of(notifier, struct brcm_usb_phy_data, pm_notifier); + + switch (pm_event) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + priv->pm_active = true; + break; + case PM_POST_RESTORE: + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + priv->pm_active = false; + break; + } + return NOTIFY_DONE; +} + static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id) { struct phy *gphy = dev_id; @@ -91,6 +115,9 @@ static int brcm_usb_phy_init(struct phy *gphy) struct brcm_usb_phy_data *priv = container_of(phy, struct brcm_usb_phy_data, phys[phy->id]); + if (priv->pm_active) + return 0; + /* * Use a lock to make sure a second caller waits until * the base phy is inited before using it. @@ -120,6 +147,9 @@ static int brcm_usb_phy_exit(struct phy *gphy) struct brcm_usb_phy_data *priv = container_of(phy, struct brcm_usb_phy_data, phys[phy->id]); + if (priv->pm_active) + return 0; + dev_dbg(&gphy->dev, "EXIT\n"); if (phy->id == BRCM_USB_PHY_2_0) brcm_usb_uninit_eohci(&priv->ini); @@ -253,6 +283,15 @@ static const struct attribute_group brcm_usb_phy_group = { .attrs = brcm_usb_phy_attrs, }; +static const struct match_chip_info chip_info_4908 = { + .init_func = &brcm_usb_dvr_init_4908, + .required_regs = { + BRCM_REGS_CTRL, + BRCM_REGS_XHCI_EC, + -1, + }, +}; + static const struct match_chip_info chip_info_7216 = { .init_func = &brcm_usb_dvr_init_7216, .required_regs = { @@ -288,7 +327,7 @@ static const struct match_chip_info chip_info_7445 = { static const struct of_device_id brcm_usb_dt_ids[] = { { .compatible = "brcm,bcm4908-usb-phy", - .data = &chip_info_7445, + .data = &chip_info_4908, }, { .compatible = "brcm,bcm7216-usb-phy", @@ -488,6 +527,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) if (err) return err; + priv->pm_notifier.notifier_call = brcm_pm_notifier; + register_pm_notifier(&priv->pm_notifier); + mutex_init(&priv->mutex); /* make sure invert settings are correct */ @@ -528,7 +570,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) static int brcm_usb_phy_remove(struct platform_device *pdev) { + struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev); + sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group); + unregister_pm_notifier(&priv->pm_notifier); return 0; } @@ -539,6 +584,7 @@ static int brcm_usb_phy_suspend(struct device *dev) struct brcm_usb_phy_data *priv = dev_get_drvdata(dev); if (priv->init_count) { + dev_dbg(dev, "SUSPEND\n"); priv->ini.wake_enabled = device_may_wakeup(dev); if (priv->phys[BRCM_USB_PHY_3_0].inited) brcm_usb_uninit_xhci(&priv->ini); @@ -578,6 +624,7 @@ static int brcm_usb_phy_resume(struct device *dev) * Uninitialize anything that wasn't previously initialized. 
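brcm_pm_notifier() above simply latches a pm_active flag around system sleep transitions so that phy init/exit calls arriving from the host-controller drivers during suspend or resume become no-ops. A stripped-down userspace model of that latch (event names shortened; not the kernel notifier API):

#include <stdbool.h>
#include <stdio.h>

enum pm_event { SUSPEND_PREPARE, POST_SUSPEND };

static bool pm_active;

/* Notifier analogue: set the flag when a sleep transition starts, clear it
 * once the system has fully resumed. */
static void pm_notifier(enum pm_event ev)
{
	if (ev == SUSPEND_PREPARE)
		pm_active = true;
	else if (ev == POST_SUSPEND)
		pm_active = false;
}

static void phy_init(void)
{
	if (pm_active) {		/* mirrors the early return above */
		puts("init skipped during suspend/resume");
		return;
	}
	puts("phy initialised");
}

int main(void)
{
	phy_init();			/* normal path */
	pm_notifier(SUSPEND_PREPARE);
	phy_init();			/* skipped while pm_active */
	pm_notifier(POST_SUSPEND);
	phy_init();			/* normal again */
	return 0;
}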
*/ if (priv->init_count) { + dev_dbg(dev, "RESUME\n"); if (priv->wake_irq >= 0) disable_irq_wake(priv->wake_irq); brcm_usb_init_common(&priv->ini); diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index 98a942c607a6..db39b0c4649a 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -1125,7 +1125,7 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc /* no efuse, ignore it */ if (!instance->efuse_intr && !instance->efuse_rx_imp && - !instance->efuse_rx_imp) { + !instance->efuse_tx_imp) { dev_warn(dev, "no u3 intr efuse, but dts enable it\n"); instance->efuse_sw_en = 0; break; diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c index ccb4045685cd..929e86d6558e 100644 --- a/drivers/phy/phy-core-mipi-dphy.c +++ b/drivers/phy/phy-core-mipi-dphy.c @@ -64,10 +64,10 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock, cfg->hs_trail = max(4 * 8 * ui, 60000 + 4 * 4 * ui); cfg->init = 100; - cfg->lpx = 60000; + cfg->lpx = 50000; cfg->ta_get = 5 * cfg->lpx; cfg->ta_go = 4 * cfg->lpx; - cfg->ta_sure = 2 * cfg->lpx; + cfg->ta_sure = cfg->lpx; cfg->wakeup = 1000; cfg->hs_clk_rate = hs_clk_rate; diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c index 837f3145bff6..f77c7139d7d6 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c @@ -1236,18 +1236,17 @@ FUNC_GROUP_DECL(SALT8, AA12); FUNC_GROUP_DECL(WDTRST4, AA12); #define AE12 196 -SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID, - SIG_DESC_SET(SCU438, 4)); +SIG_EXPR_LIST_DECL_SESG(AE12, FWSPIQ2, FWQSPI, SIG_DESC_SET(SCU438, 4)); SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4); -PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2), +PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIQ2), SIG_EXPR_LIST_PTR(AE12, GPIOY4)); #define AF12 197 -SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID, - SIG_DESC_SET(SCU438, 5)); +SIG_EXPR_LIST_DECL_SESG(AF12, FWSPIQ3, FWQSPI, SIG_DESC_SET(SCU438, 5)); SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5); -PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3), +PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIQ3), SIG_EXPR_LIST_PTR(AF12, GPIOY5)); +FUNC_GROUP_DECL(FWQSPI, AE12, AF12); #define AC12 198 SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6)); @@ -1520,9 +1519,8 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3)); PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7); GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4); -GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12); GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4); -FUNC_DECL_2(FWSPID, FWSPID, FWQSPID); +FUNC_DECL_1(FWSPID, FWSPID); FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4); FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8); /* @@ -1918,7 +1916,7 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = { ASPEED_PINCTRL_GROUP(FSI2), ASPEED_PINCTRL_GROUP(FWSPIABR), ASPEED_PINCTRL_GROUP(FWSPID), - ASPEED_PINCTRL_GROUP(FWQSPID), + ASPEED_PINCTRL_GROUP(FWQSPI), ASPEED_PINCTRL_GROUP(FWSPIWP), ASPEED_PINCTRL_GROUP(GPIT0), ASPEED_PINCTRL_GROUP(GPIT1), @@ -2160,6 +2158,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = { ASPEED_PINCTRL_FUNC(FSI2), ASPEED_PINCTRL_FUNC(FWSPIABR), ASPEED_PINCTRL_FUNC(FWSPID), + ASPEED_PINCTRL_FUNC(FWQSPI), ASPEED_PINCTRL_FUNC(FWSPIWP), ASPEED_PINCTRL_FUNC(GPIT0), ASPEED_PINCTRL_FUNC(GPIT1), diff --git a/drivers/pinctrl/bcm/Kconfig 
b/drivers/pinctrl/bcm/Kconfig index c9c5efc92731..5973a279e6b8 100644 --- a/drivers/pinctrl/bcm/Kconfig +++ b/drivers/pinctrl/bcm/Kconfig @@ -35,6 +35,7 @@ config PINCTRL_BCM63XX select PINCONF select GENERIC_PINCONF select GPIOLIB + select REGMAP select GPIO_REGMAP config PINCTRL_BCM6318 diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c index 0bcd19597e4a..3ddaeffc0415 100644 --- a/drivers/pinctrl/intel/pinctrl-tigerlake.c +++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c @@ -749,7 +749,6 @@ static const struct acpi_device_id tgl_pinctrl_acpi_match[] = { { "INT34C5", (kernel_ulong_t)&tgllp_soc_data }, { "INT34C6", (kernel_ulong_t)&tglh_soc_data }, { "INTC1055", (kernel_ulong_t)&tgllp_soc_data }, - { "INTC1057", (kernel_ulong_t)&tgllp_soc_data }, { } }; MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 5f7c421ab6e7..334cb85855a9 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1038,6 +1038,7 @@ int mtk_pctrl_init(struct platform_device *pdev, node = of_parse_phandle(np, "mediatek,pctl-regmap", 0); if (node) { pctl->regmap1 = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(pctl->regmap1)) return PTR_ERR(pctl->regmap1); } else if (regmap) { @@ -1051,6 +1052,7 @@ int mtk_pctrl_init(struct platform_device *pdev, node = of_parse_phandle(np, "mediatek,pctl-regmap", 1); if (node) { pctl->regmap2 = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(pctl->regmap2)) return PTR_ERR(pctl->regmap2); } diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c index 85db2e4377f0..02e2a259edd3 100644 --- a/drivers/pinctrl/mediatek/pinctrl-paris.c +++ b/drivers/pinctrl/mediatek/pinctrl-paris.c @@ -96,20 +96,16 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev, err = hw->soc->bias_get_combo(hw, desc, &pullup, &ret); if (err) goto out; + if (ret == MTK_PUPD_SET_R1R0_00) + ret = MTK_DISABLE; if (param == PIN_CONFIG_BIAS_DISABLE) { - if (ret == MTK_PUPD_SET_R1R0_00) - ret = MTK_DISABLE; + if (ret != MTK_DISABLE) + err = -EINVAL; } else if (param == PIN_CONFIG_BIAS_PULL_UP) { - /* When desire to get pull-up value, return - * error if current setting is pull-down - */ - if (!pullup) + if (!pullup || ret == MTK_DISABLE) err = -EINVAL; } else if (param == PIN_CONFIG_BIAS_PULL_DOWN) { - /* When desire to get pull-down value, return - * error if current setting is pull-up - */ - if (pullup) + if (pullup || ret == MTK_DISABLE) err = -EINVAL; } } else { @@ -188,8 +184,7 @@ out: } static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, - enum pin_config_param param, - enum pin_config_param arg) + enum pin_config_param param, u32 arg) { struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev); const struct mtk_pin_desc *desc; @@ -585,6 +580,9 @@ ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw, if (gpio >= hw->soc->npins) return -EINVAL; + if (mtk_is_virt_gpio(hw, gpio)) + return -EINVAL; + desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio]; pinmux = mtk_pctrl_get_pinmux(hw, gpio); if (pinmux >= hw->soc->nfuncs) @@ -719,10 +717,10 @@ static int mtk_pconf_group_get(struct pinctrl_dev *pctldev, unsigned group, unsigned long *config) { struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev); + struct mtk_pinctrl_group *grp = &hw->groups[group]; - *config = hw->groups[group].config; - - return 0; + /* 
One pin per group only */ + return mtk_pinconf_get(pctldev, grp->pin, config); } static int mtk_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group, @@ -738,8 +736,6 @@ static int mtk_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group, pinconf_to_config_argument(configs[i])); if (ret < 0) return ret; - - grp->config = configs[i]; } return 0; diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index 39828e9c3120..4757bf964d3c 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -1883,8 +1883,10 @@ static int nmk_pinctrl_probe(struct platform_device *pdev) } prcm_np = of_parse_phandle(np, "prcm", 0); - if (prcm_np) + if (prcm_np) { npct->prcm_base = of_iomap(prcm_np, 0); + of_node_put(prcm_np); + } if (!npct->prcm_base) { if (version == PINCTRL_NMK_STN8815) { dev_info(&pdev->dev, diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c index 855362978a13..5fd4a50a1ea6 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c +++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c @@ -91,7 +91,6 @@ struct npcm7xx_gpio { struct gpio_chip gc; int irqbase; int irq; - void *priv; struct irq_chip irq_chip; u32 pinctrl_id; int (*direction_input)(struct gpio_chip *chip, unsigned offset); @@ -240,7 +239,7 @@ static void npcmgpio_irq_handler(struct irq_desc *desc) chained_irq_enter(chip, desc); sts = ioread32(bank->base + NPCM7XX_GP_N_EVST); en = ioread32(bank->base + NPCM7XX_GP_N_EVEN); - dev_dbg(chip->parent_device, "==> got irq sts %.8x %.8x\n", sts, + dev_dbg(bank->gc.parent, "==> got irq sts %.8x %.8x\n", sts, en); sts &= en; @@ -255,33 +254,33 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type) gpiochip_get_data(irq_data_get_irq_chip_data(d)); unsigned int gpio = BIT(d->hwirq); - dev_dbg(d->chip->parent_device, "setirqtype: %u.%u = %u\n", gpio, + dev_dbg(bank->gc.parent, "setirqtype: %u.%u = %u\n", gpio, d->irq, type); switch (type) { case IRQ_TYPE_EDGE_RISING: - dev_dbg(d->chip->parent_device, "edge.rising\n"); + dev_dbg(bank->gc.parent, "edge.rising\n"); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; case IRQ_TYPE_EDGE_FALLING: - dev_dbg(d->chip->parent_device, "edge.falling\n"); + dev_dbg(bank->gc.parent, "edge.falling\n"); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; case IRQ_TYPE_EDGE_BOTH: - dev_dbg(d->chip->parent_device, "edge.both\n"); + dev_dbg(bank->gc.parent, "edge.both\n"); npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); break; case IRQ_TYPE_LEVEL_LOW: - dev_dbg(d->chip->parent_device, "level.low\n"); + dev_dbg(bank->gc.parent, "level.low\n"); npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; case IRQ_TYPE_LEVEL_HIGH: - dev_dbg(d->chip->parent_device, "level.high\n"); + dev_dbg(bank->gc.parent, "level.high\n"); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; default: - dev_dbg(d->chip->parent_device, "invalid irq type\n"); + dev_dbg(bank->gc.parent, "invalid irq type\n"); return -EINVAL; } @@ -303,7 +302,7 @@ static void npcmgpio_irq_ack(struct irq_data *d) gpiochip_get_data(irq_data_get_irq_chip_data(d)); unsigned int gpio = d->hwirq; - dev_dbg(d->chip->parent_device, "irq_ack: %u.%u\n", gpio, d->irq); + dev_dbg(bank->gc.parent, "irq_ack: %u.%u\n", gpio, d->irq); iowrite32(BIT(gpio), 
bank->base + NPCM7XX_GP_N_EVST); } @@ -315,7 +314,7 @@ static void npcmgpio_irq_mask(struct irq_data *d) unsigned int gpio = d->hwirq; /* Clear events */ - dev_dbg(d->chip->parent_device, "irq_mask: %u.%u\n", gpio, d->irq); + dev_dbg(bank->gc.parent, "irq_mask: %u.%u\n", gpio, d->irq); iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENC); } @@ -327,7 +326,7 @@ static void npcmgpio_irq_unmask(struct irq_data *d) unsigned int gpio = d->hwirq; /* Enable events */ - dev_dbg(d->chip->parent_device, "irq_unmask: %u.%u\n", gpio, d->irq); + dev_dbg(bank->gc.parent, "irq_unmask: %u.%u\n", gpio, d->irq); iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENS); } @@ -337,7 +336,7 @@ static unsigned int npcmgpio_irq_startup(struct irq_data *d) unsigned int gpio = d->hwirq; /* active-high, input, clear interrupt, enable interrupt */ - dev_dbg(d->chip->parent_device, "startup: %u.%u\n", gpio, d->irq); + dev_dbg(gc->parent, "startup: %u.%u\n", gpio, d->irq); npcmgpio_direction_input(gc, gpio); npcmgpio_irq_ack(d); npcmgpio_irq_unmask(d); @@ -952,7 +951,7 @@ static struct npcm7xx_func npcm7xx_funcs[] = { #define DRIVE_STRENGTH_HI_SHIFT 12 #define DRIVE_STRENGTH_MASK 0x0000FF00 -#define DS(lo, hi) (((lo) << DRIVE_STRENGTH_LO_SHIFT) | \ +#define DSTR(lo, hi) (((lo) << DRIVE_STRENGTH_LO_SHIFT) | \ ((hi) << DRIVE_STRENGTH_HI_SHIFT)) #define DSLO(x) (((x) >> DRIVE_STRENGTH_LO_SHIFT) & 0xF) #define DSHI(x) (((x) >> DRIVE_STRENGTH_HI_SHIFT) & 0xF) @@ -972,31 +971,31 @@ struct npcm7xx_pincfg { static const struct npcm7xx_pincfg pincfg[] = { /* PIN FUNCTION 1 FUNCTION 2 FUNCTION 3 FLAGS */ NPCM7XX_PINCFG(0, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(1, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(2, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(1, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(2, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(3, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(4, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, SLEW), NPCM7XX_PINCFG(5, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, SLEW), NPCM7XX_PINCFG(6, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, SLEW), NPCM7XX_PINCFG(7, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, SLEW), - NPCM7XX_PINCFG(8, lkgpo1, FLOCKR1, 4, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(9, lkgpo2, FLOCKR1, 8, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(10, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(11, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(8, lkgpo1, FLOCKR1, 4, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(9, lkgpo2, FLOCKR1, 8, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(10, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(11, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(12, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, SLEW), NPCM7XX_PINCFG(13, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, SLEW), NPCM7XX_PINCFG(14, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, SLEW), NPCM7XX_PINCFG(15, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, SLEW), - NPCM7XX_PINCFG(16, lkgpo0, FLOCKR1, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(17, pspi2, MFSEL3, 13, smb4den, I2CSEGSEL, 23, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(18, 
pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(19, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(20, hgpio0, MFSEL2, 24, smb15, MFSEL3, 8, smb4c, I2CSEGSEL, 15, 0), - NPCM7XX_PINCFG(21, hgpio1, MFSEL2, 25, smb15, MFSEL3, 8, smb4c, I2CSEGSEL, 15, 0), - NPCM7XX_PINCFG(22, hgpio2, MFSEL2, 26, smb14, MFSEL3, 7, smb4d, I2CSEGSEL, 16, 0), - NPCM7XX_PINCFG(23, hgpio3, MFSEL2, 27, smb14, MFSEL3, 7, smb4d, I2CSEGSEL, 16, 0), - NPCM7XX_PINCFG(24, hgpio4, MFSEL2, 28, ioxh, MFSEL3, 18, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(25, hgpio5, MFSEL2, 29, ioxh, MFSEL3, 18, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(16, lkgpo0, FLOCKR1, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(17, pspi2, MFSEL3, 13, smb4den, I2CSEGSEL, 23, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(18, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(19, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(20, smb4c, I2CSEGSEL, 15, smb15, MFSEL3, 8, none, NONE, 0, 0), + NPCM7XX_PINCFG(21, smb4c, I2CSEGSEL, 15, smb15, MFSEL3, 8, none, NONE, 0, 0), + NPCM7XX_PINCFG(22, smb4d, I2CSEGSEL, 16, smb14, MFSEL3, 7, none, NONE, 0, 0), + NPCM7XX_PINCFG(23, smb4d, I2CSEGSEL, 16, smb14, MFSEL3, 7, none, NONE, 0, 0), + NPCM7XX_PINCFG(24, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(25, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(26, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(27, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(28, smb4, MFSEL1, 1, none, NONE, 0, none, NONE, 0, 0), @@ -1012,12 +1011,12 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(39, smb3b, I2CSEGSEL, 11, none, NONE, 0, none, NONE, 0, SLEW), NPCM7XX_PINCFG(40, smb3b, I2CSEGSEL, 11, none, NONE, 0, none, NONE, 0, SLEW), NPCM7XX_PINCFG(41, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(42, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, DS(2, 4) | GPO), + NPCM7XX_PINCFG(42, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, DSTR(2, 4) | GPO), NPCM7XX_PINCFG(43, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, bmcuart1, MFSEL3, 24, 0), NPCM7XX_PINCFG(44, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, bmcuart1, MFSEL3, 24, 0), NPCM7XX_PINCFG(45, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(46, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DS(2, 8)), - NPCM7XX_PINCFG(47, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DS(2, 8)), + NPCM7XX_PINCFG(46, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DSTR(2, 8)), + NPCM7XX_PINCFG(47, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DSTR(2, 8)), NPCM7XX_PINCFG(48, uart2, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, GPO), NPCM7XX_PINCFG(49, uart2, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, 0), NPCM7XX_PINCFG(50, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0), @@ -1027,10 +1026,10 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(54, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(55, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(56, r1err, MFSEL1, 12, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(57, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DS(2, 4)), - NPCM7XX_PINCFG(58, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DS(2, 4)), - NPCM7XX_PINCFG(59, hgpio6, MFSEL2, 30, smb3d, I2CSEGSEL, 13, none, NONE, 0, 0), - 
NPCM7XX_PINCFG(60, hgpio7, MFSEL2, 31, smb3d, I2CSEGSEL, 13, none, NONE, 0, 0), + NPCM7XX_PINCFG(57, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), + NPCM7XX_PINCFG(58, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), + NPCM7XX_PINCFG(59, smb3d, I2CSEGSEL, 13, none, NONE, 0, none, NONE, 0, 0), + NPCM7XX_PINCFG(60, smb3d, I2CSEGSEL, 13, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(61, uart1, MFSEL1, 10, none, NONE, 0, none, NONE, 0, GPO), NPCM7XX_PINCFG(62, uart1, MFSEL1, 10, bmcuart1, MFSEL3, 24, none, NONE, 0, GPO), NPCM7XX_PINCFG(63, uart1, MFSEL1, 10, bmcuart1, MFSEL3, 24, none, NONE, 0, GPO), @@ -1051,19 +1050,19 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(77, fanin13, MFSEL2, 13, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(78, fanin14, MFSEL2, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(79, fanin15, MFSEL2, 15, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(80, pwm0, MFSEL2, 16, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(81, pwm1, MFSEL2, 17, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(82, pwm2, MFSEL2, 18, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(83, pwm3, MFSEL2, 19, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(84, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(85, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(86, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(80, pwm0, MFSEL2, 16, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(81, pwm1, MFSEL2, 17, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(82, pwm2, MFSEL2, 18, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(83, pwm3, MFSEL2, 19, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(84, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(85, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(86, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(87, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(88, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(89, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(90, r2err, MFSEL1, 15, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(91, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DS(2, 4)), - NPCM7XX_PINCFG(92, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DS(2, 4)), + NPCM7XX_PINCFG(91, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), + NPCM7XX_PINCFG(92, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), NPCM7XX_PINCFG(93, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, 0), NPCM7XX_PINCFG(94, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, 0), NPCM7XX_PINCFG(95, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, 0), @@ -1109,34 +1108,34 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(133, smb10, MFSEL4, 13, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(134, smb11, MFSEL4, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(135, smb11, MFSEL4, 14, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(136, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(137, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(138, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(139, sd1, MFSEL3, 12, none, NONE, 0, none, 
NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(140, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(136, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(137, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(138, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(139, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(140, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(141, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(142, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(142, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(143, sd1, MFSEL3, 12, sd1pwr, MFSEL4, 5, none, NONE, 0, 0), - NPCM7XX_PINCFG(144, pwm4, MFSEL2, 20, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(145, pwm5, MFSEL2, 21, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(146, pwm6, MFSEL2, 22, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(147, pwm7, MFSEL2, 23, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(148, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(149, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(150, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(151, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(152, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(144, pwm4, MFSEL2, 20, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(145, pwm5, MFSEL2, 21, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(146, pwm6, MFSEL2, 22, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(147, pwm7, MFSEL2, 23, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(148, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(149, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(150, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(151, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(152, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(153, mmcwp, FLOCKR1, 24, none, NONE, 0, none, NONE, 0, 0), /* Z1/A1 */ - NPCM7XX_PINCFG(154, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(154, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(155, mmccd, MFSEL3, 25, mmcrst, MFSEL4, 6, none, NONE, 0, 0), /* Z1/A1 */ - NPCM7XX_PINCFG(156, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(157, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(158, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - - NPCM7XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(161, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, DS(8, 12)), - NPCM7XX_PINCFG(162, serirq, NONE, 0, gpio, MFSEL1, 31, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(156, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(157, mmc, MFSEL3, 10, none, NONE, 0, 
none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(158, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + + NPCM7XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(161, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, DSTR(8, 12)), + NPCM7XX_PINCFG(162, serirq, NONE, 0, gpio, MFSEL1, 31, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(163, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, 0), NPCM7XX_PINCFG(164, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC), NPCM7XX_PINCFG(165, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC), @@ -1149,25 +1148,25 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(172, smb6, MFSEL3, 1, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(173, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(174, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(175, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(176, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(177, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(178, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(179, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(180, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(175, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(176, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(177, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(178, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(179, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(180, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(181, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(182, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(183, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(184, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO), - NPCM7XX_PINCFG(185, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO), - NPCM7XX_PINCFG(186, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(187, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(188, spi3quad, MFSEL4, 20, spi3cs2, MFSEL4, 18, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(189, spi3quad, MFSEL4, 20, spi3cs3, MFSEL4, 19, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(190, gpio, FLOCKR1, 20, nprd_smi, NONE, 0, none, NONE, 0, DS(2, 4)), - NPCM7XX_PINCFG(191, none, NONE, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), /* XX */ - - NPCM7XX_PINCFG(192, none, NONE, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), /* XX */ + NPCM7XX_PINCFG(183, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(184, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO), + NPCM7XX_PINCFG(185, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO), + NPCM7XX_PINCFG(186, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(187, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + 
NPCM7XX_PINCFG(188, spi3quad, MFSEL4, 20, spi3cs2, MFSEL4, 18, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(189, spi3quad, MFSEL4, 20, spi3cs3, MFSEL4, 19, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(190, gpio, FLOCKR1, 20, nprd_smi, NONE, 0, none, NONE, 0, DSTR(2, 4)), + NPCM7XX_PINCFG(191, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), /* XX */ + + NPCM7XX_PINCFG(192, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), /* XX */ NPCM7XX_PINCFG(193, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(194, smb0b, I2CSEGSEL, 0, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(195, smb0b, I2CSEGSEL, 0, none, NONE, 0, none, NONE, 0, 0), @@ -1178,11 +1177,11 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(200, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(201, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(202, smb0c, I2CSEGSEL, 1, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(203, faninx, MFSEL3, 3, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(203, faninx, MFSEL3, 3, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(204, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, SLEW), NPCM7XX_PINCFG(205, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, SLEW), - NPCM7XX_PINCFG(206, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(207, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DS(4, 8)), + NPCM7XX_PINCFG(206, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(207, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DSTR(4, 8)), NPCM7XX_PINCFG(208, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0), NPCM7XX_PINCFG(209, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0), NPCM7XX_PINCFG(210, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0), @@ -1194,20 +1193,20 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(216, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, 0), NPCM7XX_PINCFG(217, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, 0), NPCM7XX_PINCFG(218, wdog1, MFSEL3, 19, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(219, wdog2, MFSEL3, 20, none, NONE, 0, none, NONE, 0, DS(4, 8)), + NPCM7XX_PINCFG(219, wdog2, MFSEL3, 20, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), NPCM7XX_PINCFG(220, smb12, MFSEL3, 5, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(221, smb12, MFSEL3, 5, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(222, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(223, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(224, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, SLEW), - NPCM7XX_PINCFG(225, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO), - NPCM7XX_PINCFG(226, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO), - NPCM7XX_PINCFG(227, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(228, spixcs1, MFSEL4, 28, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(229, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(230, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(225, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO), + NPCM7XX_PINCFG(226, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO), + NPCM7XX_PINCFG(227, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 
0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(228, spixcs1, MFSEL4, 28, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(229, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(230, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(253, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC1 power */ NPCM7XX_PINCFG(254, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC2 power */ NPCM7XX_PINCFG(255, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* DACOSEL */ @@ -1650,7 +1649,7 @@ static int npcm7xx_get_groups_count(struct pinctrl_dev *pctldev) { struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev); - dev_dbg(npcm->dev, "group size: %d\n", ARRAY_SIZE(npcm7xx_groups)); + dev_dbg(npcm->dev, "group size: %zu\n", ARRAY_SIZE(npcm7xx_groups)); return ARRAY_SIZE(npcm7xx_groups); } diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index 2751e545efae..4c37e1f732c1 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -30,10 +30,10 @@ static const struct pin_config_item conf_items[] = { PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL, false), PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL, false), PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL, false), - PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL, false), + PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", "ohms", true), PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, - "input bias pull to pin specific state", NULL, false), - PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false), + "input bias pull to pin specific state", "ohms", true), + PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", "ohms", true), PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false), PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false), PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false), diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 2712f51eb238..fa6becca1788 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -119,6 +119,8 @@ struct ingenic_chip_info { unsigned int num_functions; const u32 *pull_ups, *pull_downs; + + const struct regmap_access_table *access_table; }; struct ingenic_pinctrl { @@ -2179,6 +2181,17 @@ static const struct function_desc x1000_functions[] = { { "mac", x1000_mac_groups, ARRAY_SIZE(x1000_mac_groups), }, }; +static const struct regmap_range x1000_access_ranges[] = { + regmap_reg_range(0x000, 0x400 - 4), + regmap_reg_range(0x700, 0x800 - 4), +}; + +/* shared with X1500 */ +static const struct regmap_access_table x1000_access_table = { + .yes_ranges = x1000_access_ranges, + .n_yes_ranges = ARRAY_SIZE(x1000_access_ranges), +}; + static const struct ingenic_chip_info x1000_chip_info = { .num_chips = 4, .reg_offset = 0x100, @@ -2189,6 +2202,7 @@ static const struct ingenic_chip_info x1000_chip_info = { .num_functions = ARRAY_SIZE(x1000_functions), .pull_ups = x1000_pull_ups, .pull_downs = x1000_pull_downs, + .access_table = &x1000_access_table, }; static int x1500_uart0_data_pins[] = { 0x4a, 0x4b, }; @@ -2300,6 +2314,7 @@ static const struct ingenic_chip_info x1500_chip_info = { .num_functions = ARRAY_SIZE(x1500_functions), 
.pull_ups = x1000_pull_ups, .pull_downs = x1000_pull_downs, + .access_table = &x1000_access_table, }; static const u32 x1830_pull_ups[4] = { @@ -2506,6 +2521,16 @@ static const struct function_desc x1830_functions[] = { { "mac", x1830_mac_groups, ARRAY_SIZE(x1830_mac_groups), }, }; +static const struct regmap_range x1830_access_ranges[] = { + regmap_reg_range(0x0000, 0x4000 - 4), + regmap_reg_range(0x7000, 0x8000 - 4), +}; + +static const struct regmap_access_table x1830_access_table = { + .yes_ranges = x1830_access_ranges, + .n_yes_ranges = ARRAY_SIZE(x1830_access_ranges), +}; + static const struct ingenic_chip_info x1830_chip_info = { .num_chips = 4, .reg_offset = 0x1000, @@ -2516,6 +2541,7 @@ static const struct ingenic_chip_info x1830_chip_info = { .num_functions = ARRAY_SIZE(x1830_functions), .pull_ups = x1830_pull_ups, .pull_downs = x1830_pull_downs, + .access_table = &x1830_access_table, }; static const u32 x2000_pull_ups[5] = { @@ -2969,6 +2995,17 @@ static const struct function_desc x2000_functions[] = { { "otg", x2000_otg_groups, ARRAY_SIZE(x2000_otg_groups), }, }; +static const struct regmap_range x2000_access_ranges[] = { + regmap_reg_range(0x000, 0x500 - 4), + regmap_reg_range(0x700, 0x800 - 4), +}; + +/* shared with X2100 */ +static const struct regmap_access_table x2000_access_table = { + .yes_ranges = x2000_access_ranges, + .n_yes_ranges = ARRAY_SIZE(x2000_access_ranges), +}; + static const struct ingenic_chip_info x2000_chip_info = { .num_chips = 5, .reg_offset = 0x100, @@ -2979,6 +3016,7 @@ static const struct ingenic_chip_info x2000_chip_info = { .num_functions = ARRAY_SIZE(x2000_functions), .pull_ups = x2000_pull_ups, .pull_downs = x2000_pull_downs, + .access_table = &x2000_access_table, }; static const u32 x2100_pull_ups[5] = { @@ -3189,6 +3227,7 @@ static const struct ingenic_chip_info x2100_chip_info = { .num_functions = ARRAY_SIZE(x2100_functions), .pull_ups = x2100_pull_ups, .pull_downs = x2100_pull_downs, + .access_table = &x2000_access_table, }; static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg) @@ -4168,7 +4207,12 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev) return PTR_ERR(base); regmap_config = ingenic_pinctrl_regmap_config; - regmap_config.max_register = chip_info->num_chips * chip_info->reg_offset; + if (chip_info->access_table) { + regmap_config.rd_table = chip_info->access_table; + regmap_config.wr_table = chip_info->access_table; + } else { + regmap_config.max_register = chip_info->num_chips * chip_info->reg_offset - 4; + } jzpc->map = devm_regmap_init_mmio(dev, base, &regmap_config); if (IS_ERR(jzpc->map)) { diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c index 49e32684dbb2..ecab6bf63dc6 100644 --- a/drivers/pinctrl/pinctrl-k210.c +++ b/drivers/pinctrl/pinctrl-k210.c @@ -482,7 +482,7 @@ static int k210_pinconf_get_drive(unsigned int max_strength_ua) { int i; - for (i = K210_PC_DRIVE_MAX; i; i--) { + for (i = K210_PC_DRIVE_MAX; i >= 0; i--) { if (k210_pinconf_drive_strength[i] <= max_strength_ua) return i; } @@ -527,7 +527,7 @@ static int k210_pinconf_set_param(struct pinctrl_dev *pctldev, case PIN_CONFIG_BIAS_PULL_UP: if (!arg) return -EINVAL; - val |= K210_PC_PD; + val |= K210_PC_PU; break; case PIN_CONFIG_DRIVE_STRENGTH: arg *= 1000; diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c index 072bccdea2a5..dfa374195694 100644 --- a/drivers/pinctrl/pinctrl-microchip-sgpio.c +++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c @@ -17,6 +17,8
@@ #include <linux/pinctrl/pinmux.h> #include <linux/platform_device.h> #include <linux/property.h> +#include <linux/reset.h> +#include <linux/spinlock.h> #include "core.h" #include "pinconf.h" @@ -114,6 +116,7 @@ struct sgpio_priv { u32 clock; u32 __iomem *regs; const struct sgpio_properties *properties; + spinlock_t lock; }; struct sgpio_port_addr { @@ -215,6 +218,7 @@ static void sgpio_output_set(struct sgpio_priv *priv, int value) { unsigned int bit = SGPIO_SRC_BITS * addr->bit; + unsigned long flags; u32 clr, set; switch (priv->properties->arch) { @@ -233,7 +237,10 @@ static void sgpio_output_set(struct sgpio_priv *priv, default: return; } + + spin_lock_irqsave(&priv->lock, flags); sgpio_clrsetbits(priv, REG_PORT_CONFIG, addr->port, clr, set); + spin_unlock_irqrestore(&priv->lock, flags); } static int sgpio_output_get(struct sgpio_priv *priv, @@ -561,10 +568,13 @@ static void microchip_sgpio_irq_settype(struct irq_data *data, struct sgpio_bank *bank = gpiochip_get_data(chip); unsigned int gpio = irqd_to_hwirq(data); struct sgpio_port_addr addr; + unsigned long flags; u32 ena; sgpio_pin_to_addr(bank->priv, gpio, &addr); + spin_lock_irqsave(&bank->priv->lock, flags); + /* Disable interrupt while changing type */ ena = sgpio_readl(bank->priv, REG_INT_ENABLE, addr.bit); sgpio_writel(bank->priv, ena & ~BIT(addr.port), REG_INT_ENABLE, addr.bit); @@ -581,6 +591,8 @@ static void microchip_sgpio_irq_settype(struct irq_data *data, /* Possibly re-enable interrupts */ sgpio_writel(bank->priv, ena, REG_INT_ENABLE, addr.bit); + + spin_unlock_irqrestore(&bank->priv->lock, flags); } static void microchip_sgpio_irq_setreg(struct irq_data *data, @@ -591,13 +603,16 @@ static void microchip_sgpio_irq_setreg(struct irq_data *data, struct sgpio_bank *bank = gpiochip_get_data(chip); unsigned int gpio = irqd_to_hwirq(data); struct sgpio_port_addr addr; + unsigned long flags; sgpio_pin_to_addr(bank->priv, gpio, &addr); + spin_lock_irqsave(&bank->priv->lock, flags); if (clear) sgpio_clrsetbits(bank->priv, reg, addr.bit, BIT(addr.port), 0); else sgpio_clrsetbits(bank->priv, reg, addr.bit, 0, BIT(addr.port)); + spin_unlock_irqrestore(&bank->priv->lock, flags); } static void microchip_sgpio_irq_mask(struct irq_data *data) @@ -803,6 +818,7 @@ static int microchip_sgpio_probe(struct platform_device *pdev) int div_clock = 0, ret, port, i, nbanks; struct device *dev = &pdev->dev; struct fwnode_handle *fwnode; + struct reset_control *reset; struct sgpio_priv *priv; struct clk *clk; u32 val; @@ -812,6 +828,12 @@ static int microchip_sgpio_probe(struct platform_device *pdev) return -ENOMEM; priv->dev = dev; + spin_lock_init(&priv->lock); + + reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch"); + if (IS_ERR(reset)) + return dev_err_probe(dev, PTR_ERR(reset), "Failed to get reset\n"); + reset_control_reset(reset); clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index dc52da94af0b..923ff21a44c0 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -2702,6 +2702,7 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev) node = of_parse_phandle(np, "rockchip,grf", 0); if (node) { info->regmap_base = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(info->regmap_base)) return PTR_ERR(info->regmap_base); } else { @@ -2738,6 +2739,7 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev) node = of_parse_phandle(np, "rockchip,pmu", 0); if (node) { 
info->regmap_pmu = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(info->regmap_pmu)) return PTR_ERR(info->regmap_pmu); } diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c index f29130957e49..bc17f3131de5 100644 --- a/drivers/pinctrl/renesas/core.c +++ b/drivers/pinctrl/renesas/core.c @@ -739,7 +739,7 @@ static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; } #ifdef DEBUG #define SH_PFC_MAX_REGS 300 -#define SH_PFC_MAX_ENUMS 3000 +#define SH_PFC_MAX_ENUMS 5000 static unsigned int sh_pfc_errors __initdata = 0; static unsigned int sh_pfc_warnings __initdata = 0; @@ -851,7 +851,8 @@ static void __init sh_pfc_check_cfg_reg(const char *drvname, sh_pfc_check_reg(drvname, cfg_reg->reg); if (cfg_reg->field_width) { - n = cfg_reg->reg_width / cfg_reg->field_width; + fw = cfg_reg->field_width; + n = (cfg_reg->reg_width / fw) << fw; /* Skip field checks (done at build time) */ goto check_enum_ids; } diff --git a/drivers/pinctrl/renesas/pfc-r8a77470.c b/drivers/pinctrl/renesas/pfc-r8a77470.c index e6e5487691c1..cf7153d06a95 100644 --- a/drivers/pinctrl/renesas/pfc-r8a77470.c +++ b/drivers/pinctrl/renesas/pfc-r8a77470.c @@ -2140,7 +2140,7 @@ static const unsigned int vin0_clk_mux[] = { VI0_CLK_MARK, }; /* - VIN1 ------------------------------------------------------------------- */ -static const union vin_data vin1_data_pins = { +static const union vin_data12 vin1_data_pins = { .data12 = { RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4), @@ -2150,7 +2150,7 @@ static const union vin_data vin1_data_pins = { RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16), }, }; -static const union vin_data vin1_data_mux = { +static const union vin_data12 vin1_data_mux = { .data12 = { VI1_DATA0_MARK, VI1_DATA1_MARK, VI1_DATA2_MARK, VI1_DATA3_MARK, diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c index fe5f6046fbd5..cc66f852ef7b 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c @@ -504,13 +504,11 @@ static const struct samsung_pin_ctrl exynos850_pin_ctrl[] __initconst = { /* pin-controller instance 0 ALIVE data */ .pin_banks = exynos850_pin_banks0, .nr_banks = ARRAY_SIZE(exynos850_pin_banks0), - .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, }, { /* pin-controller instance 1 CMGP data */ .pin_banks = exynos850_pin_banks1, .nr_banks = ARRAY_SIZE(exynos850_pin_banks1), - .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, }, { /* pin-controller instance 2 AUD data */ diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 2a0fc63516f1..463b9e578237 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -1002,6 +1002,16 @@ samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev) return &(of_data->ctrl[id]); } +static void samsung_banks_of_node_put(struct samsung_pinctrl_drv_data *d) +{ + struct samsung_pin_bank *bank; + unsigned int i; + + bank = d->pin_banks; + for (i = 0; i < d->nr_banks; ++i, ++bank) + of_node_put(bank->of_node); +} + /* retrieve the soc specific data */ static const struct samsung_pin_ctrl * samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, @@ -1116,19 +1126,19 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) if (ctrl->retention_data) { drvdata->retention_ctrl = ctrl->retention_data->init(drvdata, 
ctrl->retention_data); - if (IS_ERR(drvdata->retention_ctrl)) - return PTR_ERR(drvdata->retention_ctrl); + if (IS_ERR(drvdata->retention_ctrl)) { + ret = PTR_ERR(drvdata->retention_ctrl); + goto err_put_banks; + } } ret = samsung_pinctrl_register(pdev, drvdata); if (ret) - return ret; + goto err_put_banks; ret = samsung_gpiolib_register(pdev, drvdata); - if (ret) { - samsung_pinctrl_unregister(pdev, drvdata); - return ret; - } + if (ret) + goto err_unregister; if (ctrl->eint_gpio_init) ctrl->eint_gpio_init(drvdata); @@ -1138,6 +1148,12 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) platform_set_drvdata(pdev, drvdata); return 0; + +err_unregister: + samsung_pinctrl_unregister(pdev, drvdata); +err_put_banks: + samsung_banks_of_node_put(drvdata); + return ret; } /* diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 862c84efb718..ce3f9ea41511 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -36,6 +36,13 @@ #include "../core.h" #include "pinctrl-sunxi.h" +/* + * These lock classes tell lockdep that GPIO IRQs are in a different + * category than their parents, so it won't report false recursion. + */ +static struct lock_class_key sunxi_pinctrl_irq_lock_class; +static struct lock_class_key sunxi_pinctrl_irq_request_class; + static struct irq_chip sunxi_pinctrl_edge_irq_chip; static struct irq_chip sunxi_pinctrl_level_irq_chip; @@ -1551,6 +1558,8 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev, for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) { int irqno = irq_create_mapping(pctl->domain, i); + irq_set_lockdep_class(irqno, &sunxi_pinctrl_irq_lock_class, + &sunxi_pinctrl_irq_request_class); irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip, handle_edge_irq); irq_set_chip_data(irqno, pctl); diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile index f901d2e43166..88cbc434c06b 100644 --- a/drivers/platform/chrome/Makefile +++ b/drivers/platform/chrome/Makefile @@ -2,6 +2,7 @@ # tell define_trace.h where to find the cros ec trace header CFLAGS_cros_ec_trace.o:= -I$(src) +CFLAGS_cros_ec_sensorhub_ring.o:= -I$(src) obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o @@ -20,7 +21,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o -cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o +cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c index 98e37080f760..71948dade0e2 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c +++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c @@ -17,7 +17,8 @@ #include <linux/sort.h> #include <linux/slab.h> -#include "cros_ec_trace.h" +#define CREATE_TRACE_POINTS +#include "cros_ec_sensorhub_trace.h" /* Precision of fixed point for the m values from the filter */ #define M_PRECISION BIT(23) diff --git a/drivers/platform/chrome/cros_ec_sensorhub_trace.h b/drivers/platform/chrome/cros_ec_sensorhub_trace.h new file mode 100644 index 000000000000..57d9b4785969 --- /dev/null 
+++ b/drivers/platform/chrome/cros_ec_sensorhub_trace.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Trace events for the ChromeOS Sensorhub kernel module + * + * Copyright 2021 Google LLC. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cros_ec + +#if !defined(_CROS_EC_SENSORHUB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _CROS_EC_SENSORHUB_TRACE_H_ + +#include <linux/types.h> +#include <linux/platform_data/cros_ec_sensorhub.h> + +#include <linux/tracepoint.h> + +TRACE_EVENT(cros_ec_sensorhub_timestamp, + TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp, + s64 current_timestamp, s64 current_time), + TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp, + current_time), + TP_STRUCT__entry( + __field(u32, ec_sample_timestamp) + __field(u32, ec_fifo_timestamp) + __field(s64, fifo_timestamp) + __field(s64, current_timestamp) + __field(s64, current_time) + __field(s64, delta) + ), + TP_fast_assign( + __entry->ec_sample_timestamp = ec_sample_timestamp; + __entry->ec_fifo_timestamp = ec_fifo_timestamp; + __entry->fifo_timestamp = fifo_timestamp; + __entry->current_timestamp = current_timestamp; + __entry->current_time = current_time; + __entry->delta = current_timestamp - current_time; + ), + TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", + __entry->ec_sample_timestamp, + __entry->ec_fifo_timestamp, + __entry->fifo_timestamp, + __entry->current_timestamp, + __entry->current_time, + __entry->delta + ) +); + +TRACE_EVENT(cros_ec_sensorhub_data, + TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp, + s64 current_timestamp, s64 current_time), + TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time), + TP_STRUCT__entry( + __field(u32, ec_sensor_num) + __field(u32, ec_fifo_timestamp) + __field(s64, fifo_timestamp) + __field(s64, current_timestamp) + __field(s64, current_time) + __field(s64, delta) + ), + TP_fast_assign( + __entry->ec_sensor_num = ec_sensor_num; + __entry->ec_fifo_timestamp = ec_fifo_timestamp; + __entry->fifo_timestamp = fifo_timestamp; + __entry->current_timestamp = current_timestamp; + __entry->current_time = current_time; + __entry->delta = current_timestamp - current_time; + ), + TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", + __entry->ec_sensor_num, + __entry->ec_fifo_timestamp, + __entry->fifo_timestamp, + __entry->current_timestamp, + __entry->current_time, + __entry->delta + ) +); + +TRACE_EVENT(cros_ec_sensorhub_filter, + TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy), + TP_ARGS(state, dx, dy), + TP_STRUCT__entry( + __field(s64, dx) + __field(s64, dy) + __field(s64, median_m) + __field(s64, median_error) + __field(s64, history_len) + __field(s64, x) + __field(s64, y) + ), + TP_fast_assign( + __entry->dx = dx; + __entry->dy = dy; + __entry->median_m = state->median_m; + __entry->median_error = state->median_error; + __entry->history_len = state->history_len; + __entry->x = state->x_offset; + __entry->y = state->y_offset; + ), + TP_printk("dx: %12lld. 
dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld", + __entry->dx, + __entry->dy, + __entry->median_m, + __entry->median_error, + __entry->history_len, + __entry->x, + __entry->y + ) +); + + +#endif /* _CROS_EC_SENSORHUB_TRACE_H_ */ + +/* this part must be outside header guard */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cros_ec_sensorhub_trace + +#include <trace/define_trace.h> diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h index 7e7cfc98657a..9bb5cd2c98b8 100644 --- a/drivers/platform/chrome/cros_ec_trace.h +++ b/drivers/platform/chrome/cros_ec_trace.h @@ -15,7 +15,6 @@ #include <linux/types.h> #include <linux/platform_data/cros_ec_commands.h> #include <linux/platform_data/cros_ec_proto.h> -#include <linux/platform_data/cros_ec_sensorhub.h> #include <linux/tracepoint.h> @@ -71,100 +70,6 @@ TRACE_EVENT(cros_ec_request_done, __entry->retval) ); -TRACE_EVENT(cros_ec_sensorhub_timestamp, - TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp, - s64 current_timestamp, s64 current_time), - TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp, - current_time), - TP_STRUCT__entry( - __field(u32, ec_sample_timestamp) - __field(u32, ec_fifo_timestamp) - __field(s64, fifo_timestamp) - __field(s64, current_timestamp) - __field(s64, current_time) - __field(s64, delta) - ), - TP_fast_assign( - __entry->ec_sample_timestamp = ec_sample_timestamp; - __entry->ec_fifo_timestamp = ec_fifo_timestamp; - __entry->fifo_timestamp = fifo_timestamp; - __entry->current_timestamp = current_timestamp; - __entry->current_time = current_time; - __entry->delta = current_timestamp - current_time; - ), - TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", - __entry->ec_sample_timestamp, - __entry->ec_fifo_timestamp, - __entry->fifo_timestamp, - __entry->current_timestamp, - __entry->current_time, - __entry->delta - ) -); - -TRACE_EVENT(cros_ec_sensorhub_data, - TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp, - s64 current_timestamp, s64 current_time), - TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time), - TP_STRUCT__entry( - __field(u32, ec_sensor_num) - __field(u32, ec_fifo_timestamp) - __field(s64, fifo_timestamp) - __field(s64, current_timestamp) - __field(s64, current_time) - __field(s64, delta) - ), - TP_fast_assign( - __entry->ec_sensor_num = ec_sensor_num; - __entry->ec_fifo_timestamp = ec_fifo_timestamp; - __entry->fifo_timestamp = fifo_timestamp; - __entry->current_timestamp = current_timestamp; - __entry->current_time = current_time; - __entry->delta = current_timestamp - current_time; - ), - TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", - __entry->ec_sensor_num, - __entry->ec_fifo_timestamp, - __entry->fifo_timestamp, - __entry->current_timestamp, - __entry->current_time, - __entry->delta - ) -); - -TRACE_EVENT(cros_ec_sensorhub_filter, - TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy), - TP_ARGS(state, dx, dy), - TP_STRUCT__entry( - __field(s64, dx) - __field(s64, dy) - __field(s64, median_m) - __field(s64, median_error) - __field(s64, history_len) - __field(s64, x) - __field(s64, y) - ), - TP_fast_assign( - __entry->dx = dx; - __entry->dy = dy; - __entry->median_m = state->median_m; - __entry->median_error 
= state->median_error; - __entry->history_len = state->history_len; - __entry->x = state->x_offset; - __entry->y = state->y_offset; - ), - TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld", - __entry->dx, - __entry->dy, - __entry->median_m, - __entry->median_error, - __entry->history_len, - __entry->x, - __entry->y - ) -); - - #endif /* _CROS_EC_TRACE_H_ */ /* this part must be outside header guard */ diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c index 262a891eded3..4027c3ef90d7 100644 --- a/drivers/platform/chrome/cros_ec_typec.c +++ b/drivers/platform/chrome/cros_ec_typec.c @@ -1106,7 +1106,13 @@ static int cros_typec_probe(struct platform_device *pdev) return -ENOMEM; typec->dev = dev; + typec->ec = dev_get_drvdata(pdev->dev.parent); + if (!typec->ec) { + dev_err(dev, "couldn't find parent EC device\n"); + return -ENODEV; + } + platform_set_drvdata(pdev, typec); ret = cros_typec_get_cmd_version(typec); diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c index 90c1568ea4e0..3cc004c68bdb 100644 --- a/drivers/platform/surface/surface3_power.c +++ b/drivers/platform/surface/surface3_power.c @@ -233,14 +233,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix) } bix->last_full_charg_capacity = ret; - /* get serial number */ + /* + * Get serial number, on some devices (with unofficial replacement + * battery?) reading any of the serial number range addresses gets + * nacked in this case just leave the serial number empty. + */ ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO, sizeof(buf), buf); - if (ret != sizeof(buf)) { + if (ret == -EREMOTEIO) { + /* no serial number available */ + } else if (ret != sizeof(buf)) { dev_err(&client->dev, "Error reading serial no: %d\n", ret); return ret; + } else { + snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); } - snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); /* get cycle count */ ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT); diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c index a2d846c4a7ee..eac3e6b4ea11 100644 --- a/drivers/platform/x86/huawei-wmi.c +++ b/drivers/platform/x86/huawei-wmi.c @@ -470,10 +470,17 @@ static DEVICE_ATTR_RW(charge_control_thresholds); static int huawei_wmi_battery_add(struct power_supply *battery) { - device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold); - device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold); + int err = 0; - return 0; + err = device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold); + if (err) + return err; + + err = device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold); + if (err) + device_remove_file(&battery->dev, &dev_attr_charge_control_start_threshold); + + return err; } static int huawei_wmi_battery_remove(struct power_supply *battery) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index c9a85eb2e860..e8424e70d81d 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -596,7 +596,10 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, return ret; } -static DEFINE_MUTEX(punit_misc_dev_lock); +/* Lock to prevent module registration 
when already opened by user space */ +static DEFINE_MUTEX(punit_misc_dev_open_lock); +/* Lock to allow one share misc device for all ISST interace */ +static DEFINE_MUTEX(punit_misc_dev_reg_lock); static int misc_usage_count; static int misc_device_ret; static int misc_device_open; @@ -606,7 +609,7 @@ static int isst_if_open(struct inode *inode, struct file *file) int i, ret = 0; /* Fail open, if a module is going away */ - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); for (i = 0; i < ISST_IF_DEV_MAX; ++i) { struct isst_if_cmd_cb *cb = &punit_callbacks[i]; @@ -628,7 +631,7 @@ static int isst_if_open(struct inode *inode, struct file *file) } else { misc_device_open++; } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return ret; } @@ -637,7 +640,7 @@ static int isst_if_relase(struct inode *inode, struct file *f) { int i; - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); misc_device_open--; for (i = 0; i < ISST_IF_DEV_MAX; ++i) { struct isst_if_cmd_cb *cb = &punit_callbacks[i]; @@ -645,7 +648,7 @@ static int isst_if_relase(struct inode *inode, struct file *f) if (cb->registered) module_put(cb->owner); } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return 0; } @@ -662,6 +665,43 @@ static struct miscdevice isst_if_char_driver = { .fops = &isst_if_char_driver_ops, }; +static int isst_misc_reg(void) +{ + mutex_lock(&punit_misc_dev_reg_lock); + if (misc_device_ret) + goto unlock_exit; + + if (!misc_usage_count) { + misc_device_ret = isst_if_cpu_info_init(); + if (misc_device_ret) + goto unlock_exit; + + misc_device_ret = misc_register(&isst_if_char_driver); + if (misc_device_ret) { + isst_if_cpu_info_exit(); + goto unlock_exit; + } + } + misc_usage_count++; + +unlock_exit: + mutex_unlock(&punit_misc_dev_reg_lock); + + return misc_device_ret; +} + +static void isst_misc_unreg(void) +{ + mutex_lock(&punit_misc_dev_reg_lock); + if (misc_usage_count) + misc_usage_count--; + if (!misc_usage_count && !misc_device_ret) { + misc_deregister(&isst_if_char_driver); + isst_if_cpu_info_exit(); + } + mutex_unlock(&punit_misc_dev_reg_lock); +} + /** * isst_if_cdev_register() - Register callback for IOCTL * @device_type: The device type this callback handling. @@ -679,38 +719,31 @@ static struct miscdevice isst_if_char_driver = { */ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) { - if (misc_device_ret) - return misc_device_ret; + int ret; if (device_type >= ISST_IF_DEV_MAX) return -EINVAL; - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); + /* Device is already open, we don't want to add new callbacks */ if (misc_device_open) { - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return -EAGAIN; } - if (!misc_usage_count) { - int ret; - - misc_device_ret = misc_register(&isst_if_char_driver); - if (misc_device_ret) - goto unlock_exit; - - ret = isst_if_cpu_info_init(); - if (ret) { - misc_deregister(&isst_if_char_driver); - misc_device_ret = ret; - goto unlock_exit; - } - } memcpy(&punit_callbacks[device_type], cb, sizeof(*cb)); punit_callbacks[device_type].registered = 1; - misc_usage_count++; -unlock_exit: - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); - return misc_device_ret; + ret = isst_misc_reg(); + if (ret) { + /* + * No need of mutex as the misc device register failed + * as no one can open device yet. Hence no contention. 
+ */ + punit_callbacks[device_type].registered = 0; + return ret; + } + return 0; } EXPORT_SYMBOL_GPL(isst_if_cdev_register); @@ -725,16 +758,12 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register); */ void isst_if_cdev_unregister(int device_type) { - mutex_lock(&punit_misc_dev_lock); - misc_usage_count--; + isst_misc_unreg(); + mutex_lock(&punit_misc_dev_open_lock); punit_callbacks[device_type].registered = 0; if (device_type == ISST_IF_DEV_MBOX) isst_delete_hash(); - if (!misc_usage_count && !misc_device_ret) { - misc_deregister(&isst_if_char_driver); - isst_if_cpu_info_exit(); - } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); } EXPORT_SYMBOL_GPL(isst_if_cdev_unregister); diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 7ee010aa740a..404bdb4cbfae 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev, if (value > samsung->kbd_led.max_brightness) value = samsung->kbd_led.max_brightness; - else if (value < 0) - value = 0; samsung->kbd_led_wk = value; queue_work(samsung->led_workqueue, &samsung->kbd_led_work); diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 033f797861d8..c608078538a7 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -773,6 +773,21 @@ static const struct ts_dmi_data predia_basic_data = { .properties = predia_basic_props, }; +static const struct property_entry rwc_nanote_p8_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-y", 46), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1728), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data rwc_nanote_p8_data = { + .acpi_name = "MSSL1680:00", + .properties = rwc_nanote_p8_props, +}; + static const struct property_entry schneider_sct101ctm_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1715), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), @@ -1380,6 +1395,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* RWC NANOTE P8 */ + .driver_data = (void *)&rwc_nanote_p8_data, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Default string"), + DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"), + DMI_MATCH(DMI_PRODUCT_SKU, "0001") + }, + }, + { /* Schneider SCT101CTM */ .driver_data = (void *)&schneider_sct101ctm_data, .matches = { diff --git a/drivers/power/reset/gemini-poweroff.c b/drivers/power/reset/gemini-poweroff.c index 90e35c07240a..b7f7a8225f22 100644 --- a/drivers/power/reset/gemini-poweroff.c +++ b/drivers/power/reset/gemini-poweroff.c @@ -107,8 +107,8 @@ static int gemini_poweroff_probe(struct platform_device *pdev) return PTR_ERR(gpw->base); irq = platform_get_irq(pdev, 0); - if (!irq) - return -EINVAL; + if (irq < 0) + return irq; gpw->dev = dev; diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c index ff4b26b1ceca..b809fa5abbba 100644 --- a/drivers/power/supply/ab8500_chargalg.c +++ b/drivers/power/supply/ab8500_chargalg.c @@ -2019,11 +2019,11 @@ static int ab8500_chargalg_probe(struct platform_device *pdev) psy_cfg.drv_data = di; /* Initilialize safety timer */ - hrtimer_init(&di->safety_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); + hrtimer_init(&di->safety_timer, CLOCK_MONOTONIC, 
HRTIMER_MODE_REL); di->safety_timer.function = ab8500_chargalg_safety_timer_expired; /* Initilialize maintenance timer */ - hrtimer_init(&di->maintenance_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); + hrtimer_init(&di->maintenance_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); di->maintenance_timer.function = ab8500_chargalg_maintenance_timer_expired; diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 05fe9724ba50..57799a8079d4 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -2545,8 +2545,10 @@ static int ab8500_fg_sysfs_init(struct ab8500_fg *di) ret = kobject_init_and_add(&di->fg_kobject, &ab8500_fg_ktype, NULL, "battery"); - if (ret < 0) + if (ret < 0) { + kobject_put(&di->fg_kobject); dev_err(di->dev, "failed to create sysfs entry\n"); + } return ret; } diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c index 18a9db0df4b1..335e12cc5e2f 100644 --- a/drivers/power/supply/axp20x_battery.c +++ b/drivers/power/supply/axp20x_battery.c @@ -186,7 +186,6 @@ static int axp20x_battery_get_prop(struct power_supply *psy, union power_supply_propval *val) { struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy); - struct iio_channel *chan; int ret = 0, reg, val1; switch (psp) { @@ -266,12 +265,12 @@ static int axp20x_battery_get_prop(struct power_supply *psy, if (ret) return ret; - if (reg & AXP20X_PWR_STATUS_BAT_CHARGING) - chan = axp20x_batt->batt_chrg_i; - else - chan = axp20x_batt->batt_dischrg_i; - - ret = iio_read_channel_processed(chan, &val->intval); + if (reg & AXP20X_PWR_STATUS_BAT_CHARGING) { + ret = iio_read_channel_processed(axp20x_batt->batt_chrg_i, &val->intval); + } else { + ret = iio_read_channel_processed(axp20x_batt->batt_dischrg_i, &val1); + val->intval = -val1; + } if (ret) return ret; diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index b9553be9bed5..fb9db7f43895 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -41,11 +41,11 @@ #define VBUS_ISPOUT_CUR_LIM_1500MA 0x1 /* 1500mA */ #define VBUS_ISPOUT_CUR_LIM_2000MA 0x2 /* 2000mA */ #define VBUS_ISPOUT_CUR_NO_LIM 0x3 /* 2500mA */ -#define VBUS_ISPOUT_VHOLD_SET_MASK 0x31 +#define VBUS_ISPOUT_VHOLD_SET_MASK 0x38 #define VBUS_ISPOUT_VHOLD_SET_BIT_POS 0x3 #define VBUS_ISPOUT_VHOLD_SET_OFFSET 4000 /* 4000mV */ #define VBUS_ISPOUT_VHOLD_SET_LSB_RES 100 /* 100mV */ -#define VBUS_ISPOUT_VHOLD_SET_4300MV 0x3 /* 4300mV */ +#define VBUS_ISPOUT_VHOLD_SET_4400MV 0x4 /* 4400mV */ #define VBUS_ISPOUT_VBUS_PATH_DIS BIT(7) #define CHRG_CCCV_CC_MASK 0xf /* 4 bits */ @@ -744,6 +744,16 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info) ret = axp288_charger_vbus_path_select(info, true); if (ret < 0) return ret; + } else { + /* Set Vhold to the factory default / recommended 4.4V */ + val = VBUS_ISPOUT_VHOLD_SET_4400MV << VBUS_ISPOUT_VHOLD_SET_BIT_POS; + ret = regmap_update_bits(info->regmap, AXP20X_VBUS_IPSOUT_MGMT, + VBUS_ISPOUT_VHOLD_SET_MASK, val); + if (ret < 0) { + dev_err(&info->pdev->dev, "register(%x) write error(%d)\n", + AXP20X_VBUS_IPSOUT_MGMT, ret); + return ret; + } } /* Read current charge voltage and current limit */ diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index 35ff0c8fe96f..16c4876fe5af 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -39,6 +39,7 @@ #define BQ24190_REG_POC_CHG_CONFIG_DISABLE 0x0 #define 
BQ24190_REG_POC_CHG_CONFIG_CHARGE 0x1 #define BQ24190_REG_POC_CHG_CONFIG_OTG 0x2 +#define BQ24190_REG_POC_CHG_CONFIG_OTG_ALT 0x3 #define BQ24190_REG_POC_SYS_MIN_MASK (BIT(3) | BIT(2) | BIT(1)) #define BQ24190_REG_POC_SYS_MIN_SHIFT 1 #define BQ24190_REG_POC_SYS_MIN_MIN 3000 @@ -550,7 +551,11 @@ static int bq24190_vbus_is_enabled(struct regulator_dev *dev) pm_runtime_mark_last_busy(bdi->dev); pm_runtime_put_autosuspend(bdi->dev); - return ret ? ret : val == BQ24190_REG_POC_CHG_CONFIG_OTG; + if (ret) + return ret; + + return (val == BQ24190_REG_POC_CHG_CONFIG_OTG || + val == BQ24190_REG_POC_CHG_CONFIG_OTG_ALT); } static const struct regulator_ops bq24190_vbus_ops = { diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c index 6fa65d118ec1..b08f7d0c4181 100644 --- a/drivers/power/supply/sbs-charger.c +++ b/drivers/power/supply/sbs-charger.c @@ -18,6 +18,7 @@ #include <linux/interrupt.h> #include <linux/regmap.h> #include <linux/bitops.h> +#include <linux/devm-helpers.h> #define SBS_CHARGER_REG_SPEC_INFO 0x11 #define SBS_CHARGER_REG_STATUS 0x13 @@ -209,7 +210,12 @@ static int sbs_probe(struct i2c_client *client, if (ret) return dev_err_probe(&client->dev, ret, "Failed to request irq\n"); } else { - INIT_DELAYED_WORK(&chip->work, sbs_delayed_work); + ret = devm_delayed_work_autocancel(&client->dev, &chip->work, + sbs_delayed_work); + if (ret) + return dev_err_probe(&client->dev, ret, + "Failed to init work for polling\n"); + schedule_delayed_work(&chip->work, msecs_to_jiffies(SBS_CHARGER_POLL_TIME)); } @@ -220,15 +226,6 @@ static int sbs_probe(struct i2c_client *client, return 0; } -static int sbs_remove(struct i2c_client *client) -{ - struct sbs_info *chip = i2c_get_clientdata(client); - - cancel_delayed_work_sync(&chip->work); - - return 0; -} - #ifdef CONFIG_OF static const struct of_device_id sbs_dt_ids[] = { { .compatible = "sbs,sbs-charger" }, @@ -245,7 +242,6 @@ MODULE_DEVICE_TABLE(i2c, sbs_id); static struct i2c_driver sbs_driver = { .probe = sbs_probe, - .remove = sbs_remove, .id_table = sbs_id, .driver = { .name = "sbs-charger", diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c index e05cee457471..908cfd45d262 100644 --- a/drivers/power/supply/wm8350_power.c +++ b/drivers/power/supply/wm8350_power.c @@ -408,44 +408,112 @@ static const struct power_supply_desc wm8350_usb_desc = { * Initialisation *********************************************************************/ -static void wm8350_init_charger(struct wm8350 *wm8350) +static int wm8350_init_charger(struct wm8350 *wm8350) { + int ret; + /* register our interest in charger events */ - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, wm8350_charger_handler, 0, "Battery hot", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, + if (ret) + goto err; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, wm8350_charger_handler, 0, "Battery cold", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, + if (ret) + goto free_chg_bat_hot; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, wm8350_charger_handler, 0, "Battery fail", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO, + if (ret) + goto free_chg_bat_cold; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350_charger_handler, 0, "Charger timeout", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END, + if (ret) + goto free_chg_bat_fail; + + ret = wm8350_register_irq(wm8350, 
WM8350_IRQ_CHG_END, wm8350_charger_handler, 0, "Charge end", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START, + if (ret) + goto free_chg_to; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START, wm8350_charger_handler, 0, "Charge start", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, + if (ret) + goto free_chg_end; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350_charger_handler, 0, "Fast charge ready", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, + if (ret) + goto free_chg_start; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350_charger_handler, 0, "Battery <3.9V", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, + if (ret) + goto free_chg_fast_rdy; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350_charger_handler, 0, "Battery <3.1V", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, + if (ret) + goto free_chg_vbatt_lt_3p9; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350_charger_handler, 0, "Battery <2.85V", wm8350); + if (ret) + goto free_chg_vbatt_lt_3p1; /* and supply change events */ - wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB, wm8350_charger_handler, 0, "USB", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, + if (ret) + goto free_chg_vbatt_lt_2p85; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, wm8350_charger_handler, 0, "Wall", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB, + if (ret) + goto free_ext_usb_fb; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB, wm8350_charger_handler, 0, "Battery", wm8350); + if (ret) + goto free_ext_wall_fb; + + return 0; + +free_ext_wall_fb: + wm8350_free_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, wm8350); +free_ext_usb_fb: + wm8350_free_irq(wm8350, WM8350_IRQ_EXT_USB_FB, wm8350); +free_chg_vbatt_lt_2p85: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350); +free_chg_vbatt_lt_3p1: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350); +free_chg_vbatt_lt_3p9: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350); +free_chg_fast_rdy: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350); +free_chg_start: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350); +free_chg_end: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350); +free_chg_to: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350); +free_chg_bat_fail: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, wm8350); +free_chg_bat_cold: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, wm8350); +free_chg_bat_hot: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, wm8350); +err: + return ret; } static void free_charger_irq(struct wm8350 *wm8350) @@ -456,6 +524,7 @@ static void free_charger_irq(struct wm8350 *wm8350) wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350); + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350); diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c index 35799e6401c9..2f4b11b4dfcd 100644 --- a/drivers/pps/clients/pps-gpio.c +++ b/drivers/pps/clients/pps-gpio.c @@ -169,7 +169,7 @@ static int pps_gpio_probe(struct 
platform_device *pdev) /* GPIO setup */ ret = pps_gpio_setup(dev); if (ret) - return -EINVAL; + return ret; /* IRQ setup */ ret = gpiod_to_irq(data->gpio_pin); diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index f9b2d66b0443..8a652a367625 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -317,11 +317,18 @@ no_memory: } EXPORT_SYMBOL(ptp_clock_register); +static int unregister_vclock(struct device *dev, void *data) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + + ptp_vclock_unregister(info_to_vclock(ptp->info)); + return 0; +} + int ptp_clock_unregister(struct ptp_clock *ptp) { if (ptp_vclock_in_use(ptp)) { - pr_err("ptp: virtual clock in use\n"); - return -EBUSY; + device_for_each_child(&ptp->dev, NULL, unregister_vclock); } ptp->defunct = 1; diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 41b92dc2f011..9233bfedeb17 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -14,7 +14,7 @@ static ssize_t clock_name_show(struct device *dev, struct device_attribute *attr, char *page) { struct ptp_clock *ptp = dev_get_drvdata(dev); - return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name); + return sysfs_emit(page, "%s\n", ptp->info->name); } static DEVICE_ATTR_RO(clock_name); @@ -387,7 +387,7 @@ static ssize_t ptp_pin_show(struct device *dev, struct device_attribute *attr, mutex_unlock(&ptp->pincfg_mux); - return snprintf(page, PAGE_SIZE, "%u %u\n", func, chan); + return sysfs_emit(page, "%u %u\n", func, chan); } static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c index 8e461f3baa05..8cc8ae16553c 100644 --- a/drivers/pwm/pwm-lpc18xx-sct.c +++ b/drivers/pwm/pwm-lpc18xx-sct.c @@ -395,12 +395,6 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_LIMIT, BIT(lpc18xx_pwm->period_event)); - ret = pwmchip_add(&lpc18xx_pwm->chip); - if (ret < 0) { - dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret); - goto disable_pwmclk; - } - for (i = 0; i < lpc18xx_pwm->chip.npwm; i++) { struct lpc18xx_pwm_data *data; @@ -410,14 +404,12 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) GFP_KERNEL); if (!data) { ret = -ENOMEM; - goto remove_pwmchip; + goto disable_pwmclk; } pwm_set_chip_data(pwm, data); } - platform_set_drvdata(pdev, lpc18xx_pwm); - val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL); val &= ~LPC18XX_PWM_BIDIR; val &= ~LPC18XX_PWM_CTRL_HALT; @@ -425,10 +417,16 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) val |= LPC18XX_PWM_PRE(0); lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val); + ret = pwmchip_add(&lpc18xx_pwm->chip); + if (ret < 0) { + dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret); + goto disable_pwmclk; + } + + platform_set_drvdata(pdev, lpc18xx_pwm); + return 0; -remove_pwmchip: - pwmchip_remove(&lpc18xx_pwm->chip); disable_pwmclk: clk_disable_unprepare(lpc18xx_pwm->pwm_clk); return ret; diff --git a/drivers/regulator/atc260x-regulator.c b/drivers/regulator/atc260x-regulator.c index 05147d2c3842..485e58b264c0 100644 --- a/drivers/regulator/atc260x-regulator.c +++ b/drivers/regulator/atc260x-regulator.c @@ -292,6 +292,7 @@ enum atc2603c_reg_ids { .bypass_mask = BIT(5), \ .active_discharge_reg = ATC2603C_PMU_SWITCH_CTL, \ .active_discharge_mask = BIT(1), \ + .active_discharge_on = BIT(1), \ .owner = THIS_MODULE, \ } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index ca6caba8a191..46e76b5b21ef 
100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -6010,9 +6010,8 @@ core_initcall(regulator_init); static int regulator_late_cleanup(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); - const struct regulator_ops *ops = rdev->desc->ops; struct regulation_constraints *c = rdev->constraints; - int enabled, ret; + int ret; if (c && c->always_on) return 0; @@ -6025,14 +6024,8 @@ static int regulator_late_cleanup(struct device *dev, void *data) if (rdev->use_count) goto unlock; - /* If we can't read the status assume it's always on. */ - if (ops->is_enabled) - enabled = ops->is_enabled(rdev); - else - enabled = 1; - - /* But if reading the status failed, assume that it's off. */ - if (enabled <= 0) + /* If reading the status failed, assume that it's off. */ + if (_regulator_is_enabled(rdev) <= 0) goto unlock; if (have_full_constraints()) { diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 8e077792bddd..b6287f7e78f4 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -1268,8 +1268,10 @@ static int rpm_reg_probe(struct platform_device *pdev) for_each_available_child_of_node(dev->of_node, node) { vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL); - if (!vreg) + if (!vreg) { + of_node_put(node); return -ENOMEM; + } ret = rpm_regulator_init_vreg(vreg, dev, node, rpm, vreg_data); diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c index ee46bfbf5eee..991b4730d768 100644 --- a/drivers/regulator/rpi-panel-attiny-regulator.c +++ b/drivers/regulator/rpi-panel-attiny-regulator.c @@ -37,11 +37,24 @@ static const struct regmap_config attiny_regmap_config = { static int attiny_lcd_power_enable(struct regulator_dev *rdev) { unsigned int data; + int ret, i; regmap_write(rdev->regmap, REG_POWERON, 1); + msleep(80); + /* Wait for nPWRDWN to go low to indicate poweron is done. */ - regmap_read_poll_timeout(rdev->regmap, REG_PORTB, data, - data & BIT(0), 10, 1000000); + for (i = 0; i < 20; i++) { + ret = regmap_read(rdev->regmap, REG_PORTB, &data); + if (!ret) { + if (data & BIT(0)) + break; + } + usleep_range(10000, 12000); + } + usleep_range(10000, 12000); + + if (ret) + pr_err("%s: regmap_read_poll_timeout failed %d\n", __func__, ret); /* Default to the same orientation as the closed source * firmware used for the panel. 
Runtime rotation @@ -57,23 +70,34 @@ static int attiny_lcd_power_disable(struct regulator_dev *rdev) { regmap_write(rdev->regmap, REG_PWM, 0); regmap_write(rdev->regmap, REG_POWERON, 0); - udelay(1); + msleep(30); return 0; } static int attiny_lcd_power_is_enabled(struct regulator_dev *rdev) { unsigned int data; - int ret; + int ret, i; - ret = regmap_read(rdev->regmap, REG_POWERON, &data); + for (i = 0; i < 10; i++) { + ret = regmap_read(rdev->regmap, REG_POWERON, &data); + if (!ret) + break; + usleep_range(10000, 12000); + } if (ret < 0) return ret; if (!(data & BIT(0))) return 0; - ret = regmap_read(rdev->regmap, REG_PORTB, &data); + for (i = 0; i < 10; i++) { + ret = regmap_read(rdev->regmap, REG_PORTB, &data); + if (!ret) + break; + usleep_range(10000, 12000); + } + if (ret < 0) return ret; @@ -103,20 +127,32 @@ static int attiny_update_status(struct backlight_device *bl) { struct regmap *regmap = bl_get_data(bl); int brightness = bl->props.brightness; + int ret, i; if (bl->props.power != FB_BLANK_UNBLANK || bl->props.fb_blank != FB_BLANK_UNBLANK) brightness = 0; - return regmap_write(regmap, REG_PWM, brightness); + for (i = 0; i < 10; i++) { + ret = regmap_write(regmap, REG_PWM, brightness); + if (!ret) + break; + } + + return ret; } static int attiny_get_brightness(struct backlight_device *bl) { struct regmap *regmap = bl_get_data(bl); - int ret, brightness; + int ret, brightness, i; + + for (i = 0; i < 10; i++) { + ret = regmap_read(regmap, REG_PWM, &brightness); + if (!ret) + break; + } - ret = regmap_read(regmap, REG_PWM, &brightness); if (ret) return ret; @@ -166,7 +202,7 @@ static int attiny_i2c_probe(struct i2c_client *i2c, } regmap_write(regmap, REG_POWERON, 0); - mdelay(1); + msleep(30); config.dev = &i2c->dev; config.regmap = regmap; diff --git a/drivers/regulator/rtq2134-regulator.c b/drivers/regulator/rtq2134-regulator.c index f21e3f8b21f2..8e13dea354a2 100644 --- a/drivers/regulator/rtq2134-regulator.c +++ b/drivers/regulator/rtq2134-regulator.c @@ -285,6 +285,7 @@ static const unsigned int rtq2134_buck_ramp_delay_table[] = { .enable_mask = RTQ2134_VOUTEN_MASK, \ .active_discharge_reg = RTQ2134_REG_BUCK##_id##_CFG0, \ .active_discharge_mask = RTQ2134_ACTDISCHG_MASK, \ + .active_discharge_on = RTQ2134_ACTDISCHG_MASK, \ .ramp_reg = RTQ2134_REG_BUCK##_id##_RSPCFG, \ .ramp_mask = RTQ2134_RSPUP_MASK, \ .ramp_delay_table = rtq2134_buck_ramp_delay_table, \ diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c index cadea0344486..40befdd9dfa9 100644 --- a/drivers/regulator/wm8994-regulator.c +++ b/drivers/regulator/wm8994-regulator.c @@ -82,6 +82,35 @@ static const struct regulator_desc wm8994_ldo_desc[] = { .min_uV = 2400000, .uV_step = 100000, .enable_time = 3000, + .off_on_delay = 36000, + .owner = THIS_MODULE, + }, + { + .name = "LDO2", + .id = 2, + .type = REGULATOR_VOLTAGE, + .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1, + .vsel_reg = WM8994_LDO_2, + .vsel_mask = WM8994_LDO2_VSEL_MASK, + .ops = &wm8994_ldo2_ops, + .enable_time = 3000, + .off_on_delay = 36000, + .owner = THIS_MODULE, + }, +}; + +static const struct regulator_desc wm8958_ldo_desc[] = { + { + .name = "LDO1", + .id = 1, + .type = REGULATOR_VOLTAGE, + .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1, + .vsel_reg = WM8994_LDO_1, + .vsel_mask = WM8994_LDO1_VSEL_MASK, + .ops = &wm8994_ldo1_ops, + .min_uV = 2400000, + .uV_step = 100000, + .enable_time = 3000, .owner = THIS_MODULE, }, { @@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev) * regulator core and we 
need not worry about it on the * error path. */ - ldo->regulator = devm_regulator_register(&pdev->dev, - &wm8994_ldo_desc[id], - &config); + if (ldo->wm8994->type == WM8994) { + ldo->regulator = devm_regulator_register(&pdev->dev, + &wm8994_ldo_desc[id], + &config); + } else { + ldo->regulator = devm_regulator_register(&pdev->dev, + &wm8958_ldo_desc[id], + &config); + } + if (IS_ERR(ldo->regulator)) { ret = PTR_ERR(ldo->regulator); dev_err(wm8994->dev, "Failed to register LDO%d: %d\n", diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c index 8b0d8bbacd2e..c10d452c9703 100644 --- a/drivers/remoteproc/qcom_q6v5_adsp.c +++ b/drivers/remoteproc/qcom_q6v5_adsp.c @@ -406,6 +406,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp) } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) return ret; diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 423b31dfa574..ca1c7387776b 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -1624,18 +1624,20 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) * reserved memory regions from device's memory-region property. */ child = of_get_child_by_name(qproc->dev->of_node, "mba"); - if (!child) + if (!child) { node = of_parse_phandle(qproc->dev->of_node, "memory-region", 0); - else + } else { node = of_parse_phandle(child, "memory-region", 0); + of_node_put(child); + } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) { dev_err(qproc->dev, "unable to resolve mba region\n"); return ret; } - of_node_put(node); qproc->mba_phys = r.start; qproc->mba_size = resource_size(&r); @@ -1646,14 +1648,15 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) } else { child = of_get_child_by_name(qproc->dev->of_node, "mpss"); node = of_parse_phandle(child, "memory-region", 0); + of_node_put(child); } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) { dev_err(qproc->dev, "unable to resolve mpss region\n"); return ret; } - of_node_put(node); qproc->mpss_phys = qproc->mpss_reloc = r.start; qproc->mpss_size = resource_size(&r); diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index ebadc6c08e11..b17742eac9ff 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -501,6 +501,7 @@ static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss) } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) return ret; diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index b5a1e3b697d9..581930483ef8 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -76,7 +76,7 @@ static ssize_t rproc_coredump_write(struct file *filp, int ret, err = 0; char buf[20]; - if (count > sizeof(buf)) + if (count < 1 || count > sizeof(buf)) return -EINVAL; ret = copy_from_user(buf, user_buf, count); diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c index 1e8315038850..a8dde4606360 100644 --- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c +++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c @@ -121,7 +121,9 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(priv->rstc), "failed to get reset\n"); - reset_control_deassert(priv->rstc); + error = reset_control_deassert(priv->rstc); + if (error) + return error; priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops; 
priv->rcdev.of_reset_n_cells = 1; diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c index 24d3395964cc..4c5bba52b105 100644 --- a/drivers/reset/tegra/reset-bpmp.c +++ b/drivers/reset/tegra/reset-bpmp.c @@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc, struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc); struct mrq_reset_request request; struct tegra_bpmp_message msg; + int err; memset(&request, 0, sizeof(request)); request.cmd = command; @@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc, msg.tx.data = &request; msg.tx.size = sizeof(request); - return tegra_bpmp_transfer(bpmp, &msg); + err = tegra_bpmp_transfer(bpmp, &msg); + if (err) + return err; + if (msg.rx.ret) + return -EINVAL; + + return 0; } static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc, diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 9a2bd4947007..3ee17c4d7298 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -793,9 +793,13 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue); struct rtc_time tm; ktime_t now; + int err; + + err = __rtc_read_time(rtc, &tm); + if (err) + return err; timer->enabled = 1; - __rtc_read_time(rtc, &tm); now = rtc_tm_to_ktime(tm); /* Skip over expired timers */ @@ -809,7 +813,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) trace_rtc_timer_enqueue(timer); if (!next || ktime_before(timer->node.expires, next->expires)) { struct rtc_wkalrm alarm; - int err; alarm.time = rtc_ktime_to_tm(timer->node.expires); alarm.enabled = 1; diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index dc3f8b0dde98..b90a603d6b12 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -222,6 +222,8 @@ static inline void cmos_write_bank2(unsigned char val, unsigned char addr) static int cmos_read_time(struct device *dev, struct rtc_time *t) { + int ret; + /* * If pm_trace abused the RTC for storage, set the timespec to 0, * which tells the caller that this RTC value is unusable. @@ -229,7 +231,12 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t) if (!pm_trace_rtc_valid()) return -EIO; - mc146818_get_time(t); + ret = mc146818_get_time(t); + if (ret < 0) { + dev_err_ratelimited(dev, "unable to read current time\n"); + return ret; + } + return 0; } @@ -793,16 +800,14 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) rename_region(ports, dev_name(&cmos_rtc.rtc->dev)); - spin_lock_irq(&rtc_lock); - - /* Ensure that the RTC is accessible. Bit 6 must be 0! */ - if ((CMOS_READ(RTC_VALID) & 0x40) != 0) { - spin_unlock_irq(&rtc_lock); - dev_warn(dev, "not accessible\n"); + if (!mc146818_does_rtc_work()) { + dev_warn(dev, "broken or not accessible\n"); retval = -ENXIO; goto cleanup1; } + spin_lock_irq(&rtc_lock); + if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) { /* force periodic irq to CMOS reset default of 1024Hz; * diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index 2065842f775d..8abac672f3cb 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -8,10 +8,36 @@ #include <linux/acpi.h> #endif -unsigned int mc146818_get_time(struct rtc_time *time) +/* + * If the UIP (Update-in-progress) bit of the RTC is set for more then + * 10ms, the RTC is apparently broken or not present. 
+ */ +bool mc146818_does_rtc_work(void) +{ + int i; + unsigned char val; + unsigned long flags; + + for (i = 0; i < 10; i++) { + spin_lock_irqsave(&rtc_lock, flags); + val = CMOS_READ(RTC_FREQ_SELECT); + spin_unlock_irqrestore(&rtc_lock, flags); + + if ((val & RTC_UIP) == 0) + return true; + + mdelay(1); + } + + return false; +} +EXPORT_SYMBOL_GPL(mc146818_does_rtc_work); + +int mc146818_get_time(struct rtc_time *time) { unsigned char ctrl; unsigned long flags; + unsigned int iter_count = 0; unsigned char century = 0; bool retry; @@ -20,13 +46,13 @@ unsigned int mc146818_get_time(struct rtc_time *time) #endif again: - spin_lock_irqsave(&rtc_lock, flags); - /* Ensure that the RTC is accessible. Bit 6 must be 0! */ - if (WARN_ON_ONCE((CMOS_READ(RTC_VALID) & 0x40) != 0)) { - spin_unlock_irqrestore(&rtc_lock, flags); - memset(time, 0xff, sizeof(*time)); - return 0; + if (iter_count > 10) { + memset(time, 0, sizeof(*time)); + return -EIO; } + iter_count++; + + spin_lock_irqsave(&rtc_lock, flags); /* * Check whether there is an update in progress during which the @@ -116,7 +142,7 @@ again: time->tm_mon--; - return RTC_24H; + return 0; } EXPORT_SYMBOL_GPL(mc146818_get_time); @@ -176,8 +202,10 @@ int mc146818_set_time(struct rtc_time *time) if (yrs >= 100) yrs -= 100; - if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) - || RTC_ALWAYS_BCD) { + spin_lock_irqsave(&rtc_lock, flags); + save_control = CMOS_READ(RTC_CONTROL); + spin_unlock_irqrestore(&rtc_lock, flags); + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { sec = bin2bcd(sec); min = bin2bcd(min); hrs = bin2bcd(hrs); diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index e38ee8848385..bad6a5d9c683 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -350,9 +350,6 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) } } - if (!adev->irq[0]) - clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features); - device_init_wakeup(&adev->dev, true); ldata->rtc = devm_rtc_allocate_device(&adev->dev); if (IS_ERR(ldata->rtc)) { @@ -360,6 +357,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) goto out; } + if (!adev->irq[0]) + clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features); + ldata->rtc->ops = ops; ldata->rtc->range_min = vendor->range_min; ldata->rtc->range_max = vendor->range_max; diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c index 2018614f258f..6eaa9321c074 100644 --- a/drivers/rtc/rtc-wm8350.c +++ b/drivers/rtc/rtc-wm8350.c @@ -432,14 +432,21 @@ static int wm8350_rtc_probe(struct platform_device *pdev) return ret; } - wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350_rtc_update_handler, 0, "RTC Seconds", wm8350); + if (ret) + return ret; + wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC); - wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM, wm8350_rtc_alarm_handler, 0, "RTC Alarm", wm8350); + if (ret) { + wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350); + return ret; + } return 0; } diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index fa966e0db6ca..3a6f3af240fa 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -14,6 +14,7 @@ #define KMSG_COMPONENT "dasd" #include <linux/interrupt.h> +#include <linux/major.h> #include <linux/fs.h> #include <linux/blkpg.h> diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index b13b5c85f3de..75a5a4765f42 100644 --- 
a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -3370,13 +3370,11 @@ static int __init aha152x_setup(char *str) setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1; setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT; setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0; - if (ints[0] > 8) { /*}*/ + if (ints[0] > 8) printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>" "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n"); - } else { + else setup_count++; - return 0; - } return 1; } diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 5ae1e3f78910..e049cdb3c286 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -711,7 +711,7 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr, char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; bfa_get_adapter_serial_num(&bfad->bfa, serial_num); - return snprintf(buf, PAGE_SIZE, "%s\n", serial_num); + return sysfs_emit(buf, "%s\n", serial_num); } static ssize_t @@ -725,7 +725,7 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr, char model[BFA_ADAPTER_MODEL_NAME_LEN]; bfa_get_adapter_model(&bfad->bfa, model); - return snprintf(buf, PAGE_SIZE, "%s\n", model); + return sysfs_emit(buf, "%s\n", model); } static ssize_t @@ -805,7 +805,7 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "Invalid Model"); - return snprintf(buf, PAGE_SIZE, "%s\n", model_descr); + return sysfs_emit(buf, "%s\n", model_descr); } static ssize_t @@ -819,7 +819,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr, u64 nwwn; nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port); - return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn)); + return sysfs_emit(buf, "0x%llx\n", cpu_to_be64(nwwn)); } static ssize_t @@ -836,7 +836,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr, bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); strlcpy(symname, port_attr.port_cfg.sym_name.symname, BFA_SYMNAME_MAXLEN); - return snprintf(buf, PAGE_SIZE, "%s\n", symname); + return sysfs_emit(buf, "%s\n", symname); } static ssize_t @@ -850,14 +850,14 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr, char hw_ver[BFA_VERSION_LEN]; bfa_get_pci_chip_rev(&bfad->bfa, hw_ver); - return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver); + return sysfs_emit(buf, "%s\n", hw_ver); } static ssize_t bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION); + return sysfs_emit(buf, "%s\n", BFAD_DRIVER_VERSION); } static ssize_t @@ -871,7 +871,7 @@ bfad_im_optionrom_version_show(struct device *dev, char optrom_ver[BFA_VERSION_LEN]; bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver); - return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver); + return sysfs_emit(buf, "%s\n", optrom_ver); } static ssize_t @@ -885,7 +885,7 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr, char fw_ver[BFA_VERSION_LEN]; bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); - return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver); + return sysfs_emit(buf, "%s\n", fw_ver); } static ssize_t @@ -897,7 +897,7 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr, (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - return snprintf(buf, PAGE_SIZE, "%d\n", + return 
sysfs_emit(buf, "%d\n", bfa_get_nports(&bfad->bfa)); } @@ -905,7 +905,7 @@ static ssize_t bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME); + return sysfs_emit(buf, "%s\n", BFAD_DRIVER_NAME); } static ssize_t @@ -924,14 +924,14 @@ bfad_im_num_of_discovered_ports_show(struct device *dev, rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s), GFP_ATOMIC); if (rports == NULL) - return snprintf(buf, PAGE_SIZE, "Failed\n"); + return sysfs_emit(buf, "Failed\n"); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports); spin_unlock_irqrestore(&bfad->bfad_lock, flags); kfree(rports); - return snprintf(buf, PAGE_SIZE, "%d\n", nrports); + return sysfs_emit(buf, "%d\n", nrports); } static DEVICE_ATTR(serial_number, S_IRUGO, diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 5521469ce678..e16327a4b4c9 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) break; - if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) { + if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) { if (nopin->op_code == ISCSI_OP_NOOP_IN && nopin->itt == (u16) RESERVED_ITT) { printk(KERN_ALERT "bnx2i: Unsolicited " diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 1b5f3e143f07..2e5241d12dc3 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, struct iscsi_conn *conn = ep->conn->cls_conn->dd_data; /* Must suspend all rx queue activity for this ep */ - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); + set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); } /* CONN_DISCONNECT timeout may or may not be an issue depending * on what transcribed in TCP layer, different targets behave diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 8c7d4dda4cf2..4365d52c6430 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, conn 0x%p.\n", csk, conn); - if (unlikely(!conn || conn->suspend_rx)) { + if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) { log_debug(1 << CXGBI_DBG_PDU_RX, - "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n", + "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n", csk, conn, conn ? conn->id : 0xFF, - conn ? conn->suspend_rx : 0xFF); + conn ? 
conn->flags : 0xFF); return; } diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index f8afbfb468dc..d084a7db3925 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -604,7 +604,7 @@ out: FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, tag, sc, io_req, sg_count, cmd_trace, - (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc))); + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); /* if only we issued IO, will we have the io lock */ if (io_lock_acquired) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 3ab669dc806f..1f5e0688c0c8 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -527,7 +527,7 @@ MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)"); /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ static int prot_mask; -module_param(prot_mask, int, 0); +module_param(prot_mask, int, 0444); MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 "); static void debugfs_work_handler_v3_hw(struct work_struct *work); @@ -2392,17 +2392,25 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) return IRQ_WAKE_THREAD; } +static void hisi_sas_v3_free_vectors(void *data) +{ + struct pci_dev *pdev = data; + + pci_free_irq_vectors(pdev); +} + static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba) { int vectors; int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi; struct Scsi_Host *shost = hisi_hba->shost; + struct pci_dev *pdev = hisi_hba->pci_dev; struct irq_affinity desc = { .pre_vectors = BASE_VECTORS_V3_HW, }; min_msi = MIN_AFFINE_VECTORS_V3_HW; - vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev, + vectors = pci_alloc_irq_vectors_affinity(pdev, min_msi, max_msi, PCI_IRQ_MSI | PCI_IRQ_AFFINITY, @@ -2414,6 +2422,7 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba) hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW; shost->nr_hw_queues = hisi_hba->cq_nvecs; + devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev); return 0; } @@ -3959,6 +3968,54 @@ static const struct file_operations debugfs_bist_phy_v3_hw_fops = { .owner = THIS_MODULE, }; +static ssize_t debugfs_bist_cnt_v3_hw_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + unsigned int cnt; + int val; + + if (hisi_hba->debugfs_bist_enable) + return -EPERM; + + val = kstrtouint_from_user(buf, count, 0, &cnt); + if (val) + return val; + + if (cnt) + return -EINVAL; + + hisi_hba->debugfs_bist_cnt = 0; + return count; +} + +static int debugfs_bist_cnt_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + + seq_printf(s, "%u\n", hisi_hba->debugfs_bist_cnt); + + return 0; +} + +static int debugfs_bist_cnt_v3_hw_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, debugfs_bist_cnt_v3_hw_show, + inode->i_private); +} + +static const struct file_operations debugfs_bist_cnt_v3_hw_ops = { + .open = debugfs_bist_cnt_v3_hw_open, + .read = seq_read, + .write = debugfs_bist_cnt_v3_hw_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + static const struct { int value; char *name; @@ -4596,8 +4653,8 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba) debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry, hisi_hba, &debugfs_bist_phy_v3_hw_fops); - debugfs_create_u32("cnt", 0600, 
hisi_hba->debugfs_bist_dentry, - &hisi_hba->debugfs_bist_cnt); + debugfs_create_file("cnt", 0600, hisi_hba->debugfs_bist_dentry, + hisi_hba, &debugfs_bist_cnt_v3_hw_ops); debugfs_create_file("loopback_mode", 0600, hisi_hba->debugfs_bist_dentry, @@ -4763,7 +4820,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(dev, "%d hw queues\n", shost->nr_hw_queues); rc = scsi_add_host(shost, dev); if (rc) - goto err_out_free_irq_vectors; + goto err_out_debugfs; rc = sas_register_ha(sha); if (rc) @@ -4792,8 +4849,6 @@ err_out_hw_init: sas_unregister_ha(sha); err_out_register_ha: scsi_remove_host(shost); -err_out_free_irq_vectors: - pci_free_irq_vectors(pdev); err_out_debugfs: debugfs_exit_v3_hw(hisi_hba); err_out_ha: @@ -4821,7 +4876,6 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq); } - pci_free_irq_vectors(pdev); } static void hisi_sas_v3_remove(struct pci_dev *pdev) diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 10b6c6daaacd..d43bb18f58fd 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -36,7 +36,7 @@ #define IBMVSCSIS_VERSION "v0.2" -#define INITIAL_SRP_LIMIT 800 +#define INITIAL_SRP_LIMIT 1024 #define DEFAULT_MAX_SECTORS 256 #define MAX_TXU 1024 * 1024 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 841000445b9a..aa223db4cf53 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1701,6 +1701,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) if (cancel_delayed_work_sync(&ep->timeout_work)) { FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n"); fc_exch_release(ep); /* release from pending timer hold */ + return; } spin_lock_bh(&ep->ex_lock); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index cbc263ec9d66..0f2c7098f9d6 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, struct iscsi_task *task; itt_t itt; - if (session->state == ISCSI_STATE_TERMINATE) + if (session->state == ISCSI_STATE_TERMINATE || + !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags)) return NULL; if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) { @@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn) if (conn->stop_stage == 0) session->state = ISCSI_STATE_FAILED; - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); + set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); + set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); return true; } @@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task, * Do this after dropping the extra ref because if this was a requeue * it's removed from that list and cleanup_queued_task would miss it. */ - if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { + if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) { /* * Save the task and ref in case we weren't cleaning up this * task and get woken up again. 
@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) int rc = 0; spin_lock_bh(&conn->session->frwd_lock); - if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { + if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) { ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n"); spin_unlock_bh(&conn->session->frwd_lock); return -ENODATA; @@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) goto fault; } - if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { + if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) { reason = FAILURE_SESSION_IN_RECOVERY; sc->result = DID_REQUEUE << 16; goto fault; @@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error) void iscsi_suspend_queue(struct iscsi_conn *conn) { spin_lock_bh(&conn->session->frwd_lock); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); + set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); spin_unlock_bh(&conn->session->frwd_lock); } EXPORT_SYMBOL_GPL(iscsi_suspend_queue); @@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn) struct Scsi_Host *shost = conn->session->host; struct iscsi_host *ihost = shost_priv(shost); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); + set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); if (ihost->workq) flush_workqueue(ihost->workq); } @@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx); static void iscsi_start_tx(struct iscsi_conn *conn) { - clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); + clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); iscsi_conn_queue_work(conn); } @@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active) iscsi_suspend_tx(conn); spin_lock_bh(&session->frwd_lock); + clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags); + if (!is_active) { /* * if logout timed out before userspace could even send a PDU @@ -3312,6 +3315,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session, spin_lock_bh(&session->frwd_lock); if (is_leading) session->leadconn = conn; + + set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags); spin_unlock_bh(&session->frwd_lock); /* @@ -3324,8 +3329,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session, /* * Unblock xmitworker(), Login Phase will pass through. 
*/ - clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); - clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); + clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); + clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); return 0; } EXPORT_SYMBOL_GPL(iscsi_conn_bind); diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 2e9ffe3d1a55..883005757ddb 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb, */ conn->last_recv = jiffies; - if (unlikely(conn->suspend_rx)) { + if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) { ISCSI_DBG_TCP(conn, "Rx suspended!\n"); *status = ISCSI_TCP_SUSPENDED; return 0; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index a315715b3622..7e0cde710fc3 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -197,7 +197,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) task->total_xfer_len = qc->nbytes; task->num_scatter = qc->n_elem; task->data_dir = qc->dma_dir; - } else if (qc->tf.protocol == ATA_PROT_NODATA) { + } else if (!ata_is_data(qc->tf.protocol)) { task->data_dir = DMA_NONE; } else { for_each_sg(qc->sg, sg, qc->n_elem, si) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index f66ba64080a3..1044832b6054 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -593,6 +593,7 @@ struct lpfc_vport { #define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */ #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ #define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/ +#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */ #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ #define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 632b9cdabd14..9f3f7805f1f9 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1315,6 +1315,9 @@ lpfc_issue_lip(struct Scsi_Host *shost) pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; pmboxq->u.mb.mbxOwner = OWN_HOST; + if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME)) + vport->fc_flag &= ~FC_PT2PT_NO_NVME; + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); if ((mbxstatus == MBX_SUCCESS) && diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index f08ab8269f44..886006ad12a2 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1072,7 +1072,8 @@ stop_rr_fcf_flogi: /* FLOGI failed, so there is no fabric */ spin_lock_irq(shost->host_lock); - vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | + FC_PT2PT_NO_NVME); spin_unlock_irq(shost->host_lock); /* If private loop, then allow max outstanding els to be @@ -4587,6 +4588,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Added for Vendor specifc support * Just keep retrying for these Rsn / Exp codes */ + if ((vport->fc_flag & FC_PT2PT) && + cmd == ELS_CMD_NVMEPRLI) { + switch (stat.un.b.lsRjtRsnCode) { + case LSRJT_UNABLE_TPC: + case LSRJT_INVALID_CMD: + case LSRJT_LOGICAL_ERR: + case LSRJT_CMD_UNSUPPORTED: + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "0168 NVME PRLI LS_RJT " + "reason %x port doesn't " + "support NVME, disabling NVME\n", + 
stat.un.b.lsRjtRsnCode); + retry = 0; + vport->fc_flag |= FC_PT2PT_NO_NVME; + goto out_retry; + } + } switch (stat.un.b.lsRjtRsnCode) { case LSRJT_UNABLE_TPC: /* The driver has a VALID PLOGI but the rport has diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 3eebcae52784..16246526e4c1 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -15105,6 +15105,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev) psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); + /* Init cpu_map array */ + lpfc_cpu_map_array_init(phba); /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 7d717a4ac14d..fdf5e777bf11 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1961,8 +1961,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, * is configured try it. */ ndlp->nlp_fc4_type |= NLP_FC4_FCP; - if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) && + (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || + vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { ndlp->nlp_fc4_type |= NLP_FC4_NVME; /* We need to update the localport also */ lpfc_nvme_update_localport(vport); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2978c61dc586..68d8e55c1205 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -8147,6 +8147,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) struct lpfc_vport *vport = phba->pport; struct lpfc_dmabuf *mp; struct lpfc_rqb *rqbp; + u32 flg; /* Perform a PCI function reset to start from clean */ rc = lpfc_pci_function_reset(phba); @@ -8160,7 +8161,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) else { spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_SLI_ACTIVE; + flg = phba->sli.sli_flag; spin_unlock_irq(&phba->hbalock); + /* Allow a little time after setting SLI_ACTIVE for any polled + * MBX commands to complete via BSG. + */ + for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) { + msleep(20); + spin_lock_irq(&phba->hbalock); + flg = phba->sli.sli_flag; + spin_unlock_irq(&phba->hbalock); + } } lpfc_sli4_dip(phba); @@ -9744,7 +9755,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, "(%d):2541 Mailbox command x%x " "(x%x/x%x) failure: " "mqe_sta: x%x mcqe_sta: x%x/x%x " - "Data: x%x x%x\n,", + "Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, @@ -9778,7 +9789,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, "(%d):2597 Sync Mailbox command " "x%x (x%x/x%x) failure: " "mqe_sta: x%x mcqe_sta: x%x/x%x " - "Data: x%x x%x\n,", + "Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 7af2c23652b0..650210d2abb4 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2558,6 +2558,9 @@ struct megasas_instance_template { #define MEGASAS_IS_LOGICAL(sdev) \ ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) +#define MEGASAS_IS_LUN_VALID(sdev) \ + (((sdev)->lun == 0) ? 
1 : 0) + #define MEGASAS_DEV_INDEX(scp) \ (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ scp->device->id) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 39d8754e63ac..bb3f78013a13 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2126,6 +2126,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev) goto scan_target; } return -ENXIO; + } else if (!MEGASAS_IS_LUN_VALID(sdev)) { + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); + return -ENXIO; } scan_target: @@ -2156,6 +2159,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev) instance = megasas_lookup_instance(sdev->host->host_no); if (MEGASAS_IS_LOGICAL(sdev)) { + if (!MEGASAS_IS_LUN_VALID(sdev)) { + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); + return; + } ld_tgt_id = MEGASAS_TARGET_ID(sdev); instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED; if (megasas_dbg_lvl & LD_PD_DEBUG) diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 5af36c54cb59..3ef6b6edef46 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -1275,7 +1275,7 @@ static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) MPI3MR_MAX_SEG_LIST_SIZE, mrioc->req_qinfo[q_idx].q_segment_list, mrioc->req_qinfo[q_idx].q_segment_list_dma); - mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; + mrioc->req_qinfo[q_idx].q_segment_list = NULL; } } else size = mrioc->req_qinfo[q_idx].segment_qd * diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index 3cae8803383b..b2c650542bac 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -2204,6 +2204,8 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, scmd->result = DID_OK << 16; goto out_success; } + + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT || diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 81dab9b82f79..c38e68943205 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2011,9 +2011,10 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll) enable_irq(reply_q->os_irq); } } + + if (poll) + _base_process_reply_queue(reply_q); } - if (poll) - _base_process_reply_queue(reply_q); } /** @@ -5736,14 +5737,13 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) */ static int -mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz) +mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz) { - long reply_pool_end_address; + dma_addr_t end_address; - reply_pool_end_address = reply_pool_start_address + pool_sz; + end_address = start_address + pool_sz - 1; - if (upper_32_bits(reply_pool_start_address) == - upper_32_bits(reply_pool_end_address)) + if (upper_32_bits(start_address) == upper_32_bits(end_address)) return 1; else return 0; @@ -5804,7 +5804,7 @@ _base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) } if (!mpt3sas_check_same_4gb_region( - (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) { + ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) { ioc_err(ioc, "PCIE SGLs are not in same 4G !! 
pcie sgl (0x%p) dma = (0x%llx)\n", ioc->pcie_sg_lookup[i].pcie_sgl, (unsigned long long) @@ -5859,8 +5859,8 @@ _base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) GFP_KERNEL, &ctr->chain_buffer_dma); if (!ctr->chain_buffer) return -EAGAIN; - if (!mpt3sas_check_same_4gb_region((long) - ctr->chain_buffer, ioc->chain_segment_sz)) { + if (!mpt3sas_check_same_4gb_region( + ctr->chain_buffer_dma, ioc->chain_segment_sz)) { ioc_err(ioc, "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n", ctr->chain_buffer, @@ -5896,7 +5896,7 @@ _base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) GFP_KERNEL, &ioc->sense_dma); if (!ioc->sense) return -EAGAIN; - if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) { + if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) { dinitprintk(ioc, pr_err( "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n", ioc->sense, (unsigned long long) ioc->sense_dma)); @@ -5929,7 +5929,7 @@ _base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) &ioc->reply_dma); if (!ioc->reply) return -EAGAIN; - if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) { + if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) { dinitprintk(ioc, pr_err( "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n", ioc->reply, (unsigned long long) ioc->reply_dma)); @@ -5964,7 +5964,7 @@ _base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) GFP_KERNEL, &ioc->reply_free_dma); if (!ioc->reply_free) return -EAGAIN; - if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) { + if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) { dinitprintk(ioc, pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n", ioc->reply_free, (unsigned long long) ioc->reply_free_dma)); @@ -6003,7 +6003,7 @@ _base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc, GFP_KERNEL, &ioc->reply_post_free_array_dma); if (!ioc->reply_post_free_array) return -EAGAIN; - if (!mpt3sas_check_same_4gb_region((long)ioc->reply_post_free_array, + if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma, reply_post_free_array_sz)) { dinitprintk(ioc, pr_err( "Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n", @@ -6068,7 +6068,7 @@ base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz) * resources and set DMA mask to 32 and allocate. 
*/ if (!mpt3sas_check_same_4gb_region( - (long)ioc->reply_post[i].reply_post_free, sz)) { + ioc->reply_post[i].reply_post_free_dma, sz)) { dinitprintk(ioc, ioc_err(ioc, "bad Replypost free pool(0x%p)" "reply_post_free_dma = (0x%llx)\n", diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 0563078227de..a8dd14c91efd 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -394,10 +394,13 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t retry_count++; if (ioc->config_cmds.smid == smid) mpt3sas_base_free_smid(ioc, smid); - if ((ioc->shost_recovery) || (ioc->config_cmds.status & - MPT3_CMD_RESET) || ioc->pci_error_recovery) + if (ioc->config_cmds.status & MPT3_CMD_RESET) goto retry_config; - issue_host_reset = 1; + if (ioc->shost_recovery || ioc->pci_error_recovery) { + issue_host_reset = 0; + r = -EFAULT; + } else + issue_host_reset = 1; goto free_mem; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index c1f900c6ea00..af275ac42795 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -11035,6 +11035,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, { struct _sas_port *mpt3sas_port, *next; unsigned long flags; + int port_id; /* remove sibling ports attached to this expander */ list_for_each_entry_safe(mpt3sas_port, next, @@ -11055,6 +11056,8 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, mpt3sas_port->hba_port); } + port_id = sas_expander->port->port_id; + mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, sas_expander->sas_address_parent, sas_expander->port); @@ -11062,7 +11065,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", sas_expander->handle, (unsigned long long) sas_expander->sas_address, - sas_expander->port->port_id); + port_id); spin_lock_irqsave(&ioc->sas_node_lock, flags); list_del(&sas_expander->list); diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index f18dd9703595..f6f8ca3c8c7f 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -646,6 +646,7 @@ static struct pci_device_id mvs_pci_table[] = { { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, + { PCI_VDEVICE(TTI, 0x2640), chip_6440 }, { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, { PCI_VDEVICE(TTI, 0x2721), chip_9480 }, @@ -696,7 +697,7 @@ static struct pci_driver mvs_pci_driver = { static ssize_t driver_version_show(struct device *cdev, struct device_attribute *attr, char *buffer) { - return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION); + return sysfs_emit(buffer, "%s\n", DRV_VERSION); } static DEVICE_ATTR_RO(driver_version); @@ -744,7 +745,7 @@ static ssize_t interrupt_coalescing_store(struct device *cdev, static ssize_t interrupt_coalescing_show(struct device *cdev, struct device_attribute *attr, char *buffer) { - return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing); + return sysfs_emit(buffer, "%d\n", interrupt_coalescing); } static DEVICE_ATTR_RW(interrupt_coalescing); diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 880e1f356def..32fc450bf84b 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1767,7 +1767,6 @@ static void 
pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, } task = sas_alloc_slow_task(GFP_ATOMIC); - if (!task) { pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n"); return; @@ -1776,13 +1775,16 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, task->task_done = pm8001_task_done; res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); - if (res) + if (res) { + sas_free_task(task); return; + } ccb = &pm8001_ha->ccb_info[ccb_tag]; ccb->device = pm8001_ha_dev; ccb->ccb_tag = ccb_tag; ccb->task = task; + ccb->n_elem = 0; circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -1793,8 +1795,10 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, sizeof(task_abort), 0); - if (ret) + if (ret) { + sas_free_task(task); pm8001_tag_free(pm8001_ha, ccb_tag); + } } @@ -1844,6 +1848,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, ccb->device = pm8001_ha_dev; ccb->ccb_tag = ccb_tag; ccb->task = task; + ccb->n_elem = 0; pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; @@ -1860,7 +1865,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, sata_cmd.tag = cpu_to_le32(ccb_tag); sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9)); + sata_cmd.ncqtag_atap_dir_m = cpu_to_le32((0x1 << 7) | (0x5 << 9)); memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, @@ -2421,7 +2426,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) len = sizeof(struct pio_setup_fis); pm8001_dbg(pm8001_ha, IO, "PIO read len = %d\n", len); - } else if (t->ata_task.use_ncq) { + } else if (t->ata_task.use_ncq && + t->data_dir != DMA_NONE) { len = sizeof(struct set_dev_bits_fis); pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n", len); @@ -2695,7 +2701,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 tag = le32_to_cpu(psataPayload->tag); u32 port_id = le32_to_cpu(psataPayload->port_id); u32 dev_id = le32_to_cpu(psataPayload->device_id); - unsigned long flags; ccb = &pm8001_ha->ccb_info[tag]; @@ -2735,8 +2740,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; - if (pm8001_dev) - atomic_dec(&pm8001_dev->running_req); break; case IO_XFER_ERROR_BREAK: pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); @@ -2778,7 +2781,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_QUEUE_FULL; - pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); return; } break; @@ -2864,20 +2866,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->stat = SAS_OPEN_TO; break; } - spin_lock_irqsave(&t->task_state_lock, flags); - t->task_state_flags &= ~SAS_TASK_STATE_PENDING; - t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; - t->task_state_flags |= SAS_TASK_STATE_DONE; - if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { - spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_dbg(pm8001_ha, FAIL, - "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", - t, event, ts->resp, ts->stat); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - } else { - spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_ccb_task_free_done(pm8001_ha, t, 
ccb, tag); - } } /*See the comments for mpi_ssp_completion */ @@ -3728,12 +3716,11 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) mb(); if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) { - pm8001_tag_free(pm8001_ha, tag); sas_free_task(t); - /* clear the flag */ - pm8001_dev->id &= 0xBFFFFFFF; - } else + pm8001_dev->id &= ~NCQ_ABORT_ALL_FLAG; + } else { t->task_done(t); + } return 0; } @@ -4296,22 +4283,22 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; - if (task->data_dir == DMA_NONE) { + + if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) { ATAP = 0x04; /* no data*/ pm8001_dbg(pm8001_ha, IO, "no data\n"); } else if (likely(!task->ata_task.device_control_reg_update)) { - if (task->ata_task.dma_xfer) { + if (task->ata_task.use_ncq && + dev->sata_dev.class != ATA_DEV_ATAPI) { + ATAP = 0x07; /* FPDMA */ + pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); + } else if (task->ata_task.dma_xfer) { ATAP = 0x06; /* DMA */ pm8001_dbg(pm8001_ha, IO, "DMA\n"); } else { ATAP = 0x05; /* PIO*/ pm8001_dbg(pm8001_ha, IO, "PIO\n"); } - if (task->ata_task.use_ncq && - dev->sata_dev.class != ATA_DEV_ATAPI) { - ATAP = 0x07; /* FPDMA */ - pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); - } } if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); @@ -4501,6 +4488,9 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, SAS_ADDR_SIZE); rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + return rc; } @@ -4651,7 +4641,7 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); if (pm8001_ha->chip_id != chip_8001) - sspTMCmd.ds_ads_m = 0x08; + sspTMCmd.ds_ads_m = cpu_to_le32(0x08); circularQ = &pm8001_ha->inbnd_q_tbl[0]; ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, sizeof(sspTMCmd), 0); @@ -4913,6 +4903,11 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, ccb->ccb_tag = tag; rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, tag); + if (rc) { + kfree(fw_control_context); + pm8001_tag_free(pm8001_ha, tag); + } + return rc; } @@ -5017,6 +5012,9 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, payload.nds = cpu_to_le32(state); rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + return rc; } diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 32e60f0c3b14..5fb08acbc0e5 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -753,8 +753,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, res = -TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. 
*/ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + struct pm8001_ccb_info *ccb = task->lldd_task; + pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n", tmf->tmf); + + if (ccb) + ccb->task = NULL; goto ex_err; } @@ -826,10 +831,10 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, res = PM8001_CHIP_DISP->task_abort(pm8001_ha, pm8001_dev, flag, task_tag, ccb_tag); - if (res) { del_timer(&task->slow_task->timer); pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n"); + pm8001_tag_free(pm8001_ha, ccb_tag); goto ex_err; } wait_for_completion(&task->slow_task->completion); diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index ed13e0e044b7..04746df26c6c 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -66,18 +66,16 @@ int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value) } static void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset, - const void *destination, + __le32 *destination, u32 dw_count, u32 bus_base_number) { u32 index, value, offset; - u32 *destination1; - destination1 = (u32 *)destination; - for (index = 0; index < dw_count; index += 4, destination1++) { + for (index = 0; index < dw_count; index += 4, destination++) { offset = (soffset + index); if (offset < (64 * 1024)) { value = pm8001_cr32(pm8001_ha, bus_base_number, offset); - *destination1 = cpu_to_le32(value); + *destination = cpu_to_le32(value); } } return; @@ -767,6 +765,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; + /* Enable higher IQs and OQs, 32 to 63, bit 16 */ + if (pm8001_ha->max_q_num > 32) + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |= + 1 << 16; /* Disable end to end CRC checking */ pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); @@ -1028,6 +1030,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) if (0x0000 != gst_len_mpistate) return -EBUSY; + /* + * As per controller datasheet, after successful MPI + * initialization minimum 500ms delay is required before + * issuing commands. + */ + msleep(500); + return 0; } @@ -1202,9 +1211,11 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha) else page_code = THERMAL_PAGE_CODE_8H; - payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) | - (THERMAL_ENABLE << 8) | page_code; - payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8); + payload.cfg_pg[0] = + cpu_to_le32((THERMAL_LOG_ENABLE << 9) | + (THERMAL_ENABLE << 8) | page_code); + payload.cfg_pg[1] = + cpu_to_le32((LTEMPHIL << 24) | (RTEMPHIL << 8)); pm8001_dbg(pm8001_ha, DEV, "Setting up thermal config. 
cfg_pg 0 0x%x cfg_pg 1 0x%x\n", @@ -1244,43 +1255,41 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha) circularQ = &pm8001_ha->inbnd_q_tbl[0]; payload.tag = cpu_to_le32(tag); - SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE; - SASConfigPage.MST_MSI = 3 << 15; - SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO; - SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) | - (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER; - SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME; - - if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF) - SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF; - - - SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) | - SAS_OPNRJT_RTRY_INTVL; - SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16) - | SAS_COPNRJT_RTRY_TMO; - SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16) - | SAS_COPNRJT_RTRY_THR; - SASConfigPage.MAX_AIP = SAS_MAX_AIP; + SASConfigPage.pageCode = cpu_to_le32(SAS_PROTOCOL_TIMER_CONFIG_PAGE); + SASConfigPage.MST_MSI = cpu_to_le32(3 << 15); + SASConfigPage.STP_SSP_MCT_TMO = + cpu_to_le32((STP_MCT_TMO << 16) | SSP_MCT_TMO); + SASConfigPage.STP_FRM_TMO = + cpu_to_le32((SAS_MAX_OPEN_TIME << 24) | + (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER); + SASConfigPage.STP_IDLE_TMO = cpu_to_le32(STP_IDLE_TIME); + + SASConfigPage.OPNRJT_RTRY_INTVL = + cpu_to_le32((SAS_MFD << 16) | SAS_OPNRJT_RTRY_INTVL); + SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = + cpu_to_le32((SAS_DOPNRJT_RTRY_TMO << 16) | SAS_COPNRJT_RTRY_TMO); + SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = + cpu_to_le32((SAS_DOPNRJT_RTRY_THR << 16) | SAS_COPNRJT_RTRY_THR); + SASConfigPage.MAX_AIP = cpu_to_le32(SAS_MAX_AIP); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n", - SASConfigPage.pageCode); + le32_to_cpu(SASConfigPage.pageCode)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n", - SASConfigPage.MST_MSI); + le32_to_cpu(SASConfigPage.MST_MSI)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n", - SASConfigPage.STP_SSP_MCT_TMO); + le32_to_cpu(SASConfigPage.STP_SSP_MCT_TMO)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n", - SASConfigPage.STP_FRM_TMO); + le32_to_cpu(SASConfigPage.STP_FRM_TMO)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n", - SASConfigPage.STP_IDLE_TMO); + le32_to_cpu(SASConfigPage.STP_IDLE_TMO)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 0x%08x\n", - SASConfigPage.OPNRJT_RTRY_INTVL); + le32_to_cpu(SASConfigPage.OPNRJT_RTRY_INTVL)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n", - SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO); + le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n", - SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR); + le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n", - SASConfigPage.MAX_AIP); + le32_to_cpu(SASConfigPage.MAX_AIP)); memcpy(&payload.cfg_pg, &SASConfigPage, sizeof(SASProtocolTimerConfig_t)); @@ -1406,12 +1415,13 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) /* Currently only one key is used. New KEK index is 1. * Current KEK index is 1. Store KEK to NVRAM is 1. */ - payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) | - KEK_MGMT_SUBOP_KEYCARDUPDATE); + payload.new_curidx_ksop = + cpu_to_le32(((1 << 24) | (1 << 16) | (1 << 8) | + KEK_MGMT_SUBOP_KEYCARDUPDATE)); pm8001_dbg(pm8001_ha, DEV, "Saving Encryption info to flash. 
payload 0x%x\n", - payload.new_curidx_ksop); + le32_to_cpu(payload.new_curidx_ksop)); rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, sizeof(payload), 0); @@ -1734,10 +1744,11 @@ static void pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX - u32 mask; - mask = (u32)(1 << vec); - - pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF)); + if (vec < 32) + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec); + else + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U, + 1U << (vec - 32)); return; #endif pm80xx_chip_intx_interrupt_enable(pm8001_ha); @@ -1753,12 +1764,15 @@ static void pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX - u32 mask; - if (vec == 0xFF) - mask = 0xFFFFFFFF; + if (vec == 0xFF) { + /* disable all vectors 0-31, 32-63 */ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF); + } else if (vec < 32) + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec); else - mask = (u32)(1 << vec); - pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF)); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, + 1U << (vec - 32)); return; #endif pm80xx_chip_intx_interrupt_disable(pm8001_ha); @@ -1800,6 +1814,7 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, ccb->device = pm8001_ha_dev; ccb->ccb_tag = ccb_tag; ccb->task = task; + ccb->n_elem = 0; circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -1881,7 +1896,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, sata_cmd.tag = cpu_to_le32(ccb_tag); sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9)); + sata_cmd.ncqtag_atap_dir_m_dad = cpu_to_le32(((0x1 << 7) | (0x5 << 9))); memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, @@ -2184,9 +2199,9 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); if (t->slow_task) complete(&t->slow_task->completion); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); @@ -2517,7 +2532,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, len = sizeof(struct pio_setup_fis); pm8001_dbg(pm8001_ha, IO, "PIO read len = %d\n", len); - } else if (t->ata_task.use_ncq) { + } else if (t->ata_task.use_ncq && + t->data_dir != DMA_NONE) { len = sizeof(struct set_dev_bits_fis); pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n", len); @@ -2801,9 +2817,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); if (t->slow_task) complete(&t->slow_task->completion); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); spin_unlock_irqrestore(&circularQ->oq_lock, @@ -2828,7 +2844,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, u32 tag = le32_to_cpu(psataPayload->tag); u32 port_id = le32_to_cpu(psataPayload->port_id); u32 dev_id = le32_to_cpu(psataPayload->device_id); - unsigned long flags; ccb = 
&pm8001_ha->ccb_info[tag]; @@ -2866,8 +2881,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; - if (pm8001_dev) - atomic_dec(&pm8001_dev->running_req); break; case IO_XFER_ERROR_BREAK: pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); @@ -2916,11 +2929,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_QUEUE_FULL; - spin_unlock_irqrestore(&circularQ->oq_lock, - circularQ->lock_flags); - pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); - spin_lock_irqsave(&circularQ->oq_lock, - circularQ->lock_flags); return; } break; @@ -3020,24 +3028,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, ts->stat = SAS_OPEN_TO; break; } - spin_lock_irqsave(&t->task_state_lock, flags); - t->task_state_flags &= ~SAS_TASK_STATE_PENDING; - t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; - t->task_state_flags |= SAS_TASK_STATE_DONE; - if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { - spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_dbg(pm8001_ha, FAIL, - "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", - t, event, ts->resp, ts->stat); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - } else { - spin_unlock_irqrestore(&t->task_state_lock, flags); - spin_unlock_irqrestore(&circularQ->oq_lock, - circularQ->lock_flags); - pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); - spin_lock_irqsave(&circularQ->oq_lock, - circularQ->lock_flags); - } } /*See the comments for mpi_ssp_completion */ @@ -4406,13 +4396,15 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, struct ssp_ini_io_start_req ssp_cmd; u32 tag = ccb->ccb_tag; int ret; - u64 phys_addr, start_addr, end_addr; + u64 phys_addr, end_addr; u32 end_addr_high, end_addr_low; struct inbound_queue_table *circularQ; u32 q_index, cpu_id; u32 opc = OPC_INB_SSPINIIOSTART; + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + /* data address domain added for spcv; set to 0 by host, * used internally by controller * 0 for SAS 1.1 and SAS 2.0 compatible TLR @@ -4423,7 +4415,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); ssp_cmd.tag = cpu_to_le32(tag); if (task->ssp_task.enable_first_burst) - ssp_cmd.ssp_iu.efb_prio_attr |= 0x80; + ssp_cmd.ssp_iu.efb_prio_attr = 0x80; ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, @@ -4455,21 +4447,24 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.enc_esgl = cpu_to_le32(1<<31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.enc_addr_low = cpu_to_le32(lower_32_bits(dma_addr)); ssp_cmd.enc_addr_high = cpu_to_le32(upper_32_bits(dma_addr)); ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); ssp_cmd.enc_esgl = 0; + /* Check 4G Boundary */ - start_addr = cpu_to_le64(dma_addr); - end_addr = (start_addr + ssp_cmd.enc_len) - 1; - end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); - end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); - if (end_addr_high != ssp_cmd.enc_addr_high) { + end_addr = dma_addr + le32_to_cpu(ssp_cmd.enc_len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + + if 
(end_addr_high != le32_to_cpu(ssp_cmd.enc_addr_high)) { pm8001_dbg(pm8001_ha, FAIL, "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", - start_addr, ssp_cmd.enc_len, + dma_addr, + le32_to_cpu(ssp_cmd.enc_len), end_addr_high, end_addr_low); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); @@ -4478,7 +4473,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(lower_32_bits(phys_addr)); ssp_cmd.enc_addr_high = cpu_to_le32(upper_32_bits(phys_addr)); - ssp_cmd.enc_esgl = cpu_to_le32(1<<31); + ssp_cmd.enc_esgl = cpu_to_le32(1U<<31); } } else if (task->num_scatter == 0) { ssp_cmd.enc_addr_low = 0; @@ -4486,8 +4481,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); ssp_cmd.enc_esgl = 0; } + /* XTS mode. All other fields are 0 */ - ssp_cmd.key_cmode = 0x6 << 4; + ssp_cmd.key_cmode = cpu_to_le32(0x6 << 4); + /* set tweak values. Should be the start lba */ ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) | (task->ssp_task.cmd->cmnd[3] << 16) | @@ -4509,20 +4506,22 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.esgl = cpu_to_le32(1<<31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr)); ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; + /* Check 4G Boundary */ - start_addr = cpu_to_le64(dma_addr); - end_addr = (start_addr + ssp_cmd.len) - 1; - end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); - end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); - if (end_addr_high != ssp_cmd.addr_high) { + end_addr = dma_addr + le32_to_cpu(ssp_cmd.len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + if (end_addr_high != le32_to_cpu(ssp_cmd.addr_high)) { pm8001_dbg(pm8001_ha, FAIL, "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", - start_addr, ssp_cmd.len, + dma_addr, + le32_to_cpu(ssp_cmd.len), end_addr_high, end_addr_low); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); @@ -4556,7 +4555,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 q_index, cpu_id; struct sata_start_req sata_cmd; u32 hdr_tag, ncg_tag = 0; - u64 phys_addr, start_addr, end_addr; + u64 phys_addr, end_addr; u32 end_addr_high, end_addr_low; u32 ATAP = 0x0; u32 dir; @@ -4568,22 +4567,21 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num); circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; - if (task->data_dir == DMA_NONE) { + if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) { ATAP = 0x04; /* no data*/ pm8001_dbg(pm8001_ha, IO, "no data\n"); } else if (likely(!task->ata_task.device_control_reg_update)) { - if (task->ata_task.dma_xfer) { + if (task->ata_task.use_ncq && + dev->sata_dev.class != ATA_DEV_ATAPI) { + ATAP = 0x07; /* FPDMA */ + pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); + } else if (task->ata_task.dma_xfer) { ATAP = 0x06; /* DMA */ pm8001_dbg(pm8001_ha, IO, "DMA\n"); } else { ATAP = 0x05; /* PIO*/ pm8001_dbg(pm8001_ha, IO, "PIO\n"); } - if (task->ata_task.use_ncq && - dev->sata_dev.class != ATA_DEV_ATAPI) { - ATAP = 0x07; /* FPDMA */ - pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); - } } if (task->ata_task.use_ncq && 
pm8001_get_ncq_tag(task, &hdr_tag)) { task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); @@ -4617,32 +4615,38 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); phys_addr = ccb->ccb_dma_handle; - sata_cmd.enc_addr_low = lower_32_bits(phys_addr); - sata_cmd.enc_addr_high = upper_32_bits(phys_addr); + sata_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + sata_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); sata_cmd.enc_esgl = cpu_to_le32(1 << 31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); - sata_cmd.enc_addr_low = lower_32_bits(dma_addr); - sata_cmd.enc_addr_high = upper_32_bits(dma_addr); + + sata_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(dma_addr)); + sata_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); sata_cmd.enc_esgl = 0; + /* Check 4G Boundary */ - start_addr = cpu_to_le64(dma_addr); - end_addr = (start_addr + sata_cmd.enc_len) - 1; - end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); - end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); - if (end_addr_high != sata_cmd.enc_addr_high) { + end_addr = dma_addr + le32_to_cpu(sata_cmd.enc_len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + if (end_addr_high != le32_to_cpu(sata_cmd.enc_addr_high)) { pm8001_dbg(pm8001_ha, FAIL, "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", - start_addr, sata_cmd.enc_len, + dma_addr, + le32_to_cpu(sata_cmd.enc_len), end_addr_high, end_addr_low); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); phys_addr = ccb->ccb_dma_handle; sata_cmd.enc_addr_low = - lower_32_bits(phys_addr); + cpu_to_le32(lower_32_bits(phys_addr)); sata_cmd.enc_addr_high = - upper_32_bits(phys_addr); + cpu_to_le32(upper_32_bits(phys_addr)); sata_cmd.enc_esgl = cpu_to_le32(1 << 31); } @@ -4653,7 +4657,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.enc_esgl = 0; } /* XTS mode. All other fields are 0 */ - sata_cmd.key_index_mode = 0x6 << 4; + sata_cmd.key_index_mode = cpu_to_le32(0x6 << 4); + /* set tweak values. 
Should be the start lba */ sata_cmd.twk_val0 = cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) | @@ -4679,31 +4684,31 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, phys_addr = ccb->ccb_dma_handle; sata_cmd.addr_low = lower_32_bits(phys_addr); sata_cmd.addr_high = upper_32_bits(phys_addr); - sata_cmd.esgl = cpu_to_le32(1 << 31); + sata_cmd.esgl = cpu_to_le32(1U << 31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.addr_low = lower_32_bits(dma_addr); sata_cmd.addr_high = upper_32_bits(dma_addr); sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; + /* Check 4G Boundary */ - start_addr = cpu_to_le64(dma_addr); - end_addr = (start_addr + sata_cmd.len) - 1; - end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); - end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); + end_addr = dma_addr + le32_to_cpu(sata_cmd.len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); if (end_addr_high != sata_cmd.addr_high) { pm8001_dbg(pm8001_ha, FAIL, "The sg list address start_addr=0x%016llx data_len=0x%xend_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", - start_addr, sata_cmd.len, + dma_addr, + le32_to_cpu(sata_cmd.len), end_addr_high, end_addr_low); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); phys_addr = ccb->ccb_dma_handle; - sata_cmd.addr_low = - lower_32_bits(phys_addr); - sata_cmd.addr_high = - upper_32_bits(phys_addr); - sata_cmd.esgl = cpu_to_le32(1 << 31); + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1U << 31); } } else if (task->num_scatter == 0) { sata_cmd.addr_low = 0; @@ -4711,27 +4716,28 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; } + /* scsi cdb */ sata_cmd.atapi_scsi_cdb[0] = cpu_to_le32(((task->ata_task.atapi_packet[0]) | - (task->ata_task.atapi_packet[1] << 8) | - (task->ata_task.atapi_packet[2] << 16) | - (task->ata_task.atapi_packet[3] << 24))); + (task->ata_task.atapi_packet[1] << 8) | + (task->ata_task.atapi_packet[2] << 16) | + (task->ata_task.atapi_packet[3] << 24))); sata_cmd.atapi_scsi_cdb[1] = cpu_to_le32(((task->ata_task.atapi_packet[4]) | - (task->ata_task.atapi_packet[5] << 8) | - (task->ata_task.atapi_packet[6] << 16) | - (task->ata_task.atapi_packet[7] << 24))); + (task->ata_task.atapi_packet[5] << 8) | + (task->ata_task.atapi_packet[6] << 16) | + (task->ata_task.atapi_packet[7] << 24))); sata_cmd.atapi_scsi_cdb[2] = cpu_to_le32(((task->ata_task.atapi_packet[8]) | - (task->ata_task.atapi_packet[9] << 8) | - (task->ata_task.atapi_packet[10] << 16) | - (task->ata_task.atapi_packet[11] << 24))); + (task->ata_task.atapi_packet[9] << 8) | + (task->ata_task.atapi_packet[10] << 16) | + (task->ata_task.atapi_packet[11] << 24))); sata_cmd.atapi_scsi_cdb[3] = cpu_to_le32(((task->ata_task.atapi_packet[12]) | - (task->ata_task.atapi_packet[13] << 8) | - (task->ata_task.atapi_packet[14] << 16) | - (task->ata_task.atapi_packet[15] << 24))); + (task->ata_task.atapi_packet[13] << 8) | + (task->ata_task.atapi_packet[14] << 16) | + (task->ata_task.atapi_packet[15] << 24))); } /* Check for read log for failed drive and return */ @@ -4929,8 +4935,13 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.phyop_phyid = cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF)); - return pm8001_mpi_build_cmd(pm8001_ha, 
circularQ, opc, &payload, - sizeof(payload), 0); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + + return rc; } static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha) diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index d01cd829ef97..df9ce6ed52bf 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -772,11 +772,10 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, qedi_cmd->list_tmf_work = NULL; } } + spin_unlock_bh(&qedi_conn->tmf_work_lock); - if (!found) { - spin_unlock_bh(&qedi_conn->tmf_work_lock); + if (!found) goto check_cleanup_reqs; - } QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n", @@ -807,7 +806,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, qedi_cmd->state = CLEANUP_RECV; unlock: spin_unlock_bh(&conn->session->back_lock); - spin_unlock_bh(&qedi_conn->tmf_work_lock); wake_up_interruptible(&qedi_conn->wait_queue); return; diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index c5260429c637..04b40a6c1aff 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -859,6 +859,37 @@ static int qedi_task_xmit(struct iscsi_task *task) return qedi_iscsi_send_ioreq(task); } +static void qedi_offload_work(struct work_struct *work) +{ + struct qedi_endpoint *qedi_ep = + container_of(work, struct qedi_endpoint, offload_work); + struct qedi_ctx *qedi; + int wait_delay = 5 * HZ; + int ret; + + qedi = qedi_ep->qedi; + + ret = qedi_iscsi_offload_conn(qedi_ep); + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, + "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n", + qedi_ep->iscsi_cid, qedi_ep, ret); + qedi_ep->state = EP_STATE_OFLDCONN_FAILED; + return; + } + + ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait, + (qedi_ep->state == + EP_STATE_OFLDCONN_COMPL), + wait_delay); + if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) { + qedi_ep->state = EP_STATE_OFLDCONN_FAILED; + QEDI_ERR(&qedi->dbg_ctx, + "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n", + qedi_ep->iscsi_cid, qedi_ep); + } +} + static struct iscsi_endpoint * qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) @@ -907,6 +938,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, } qedi_ep = ep->dd_data; memset(qedi_ep, 0, sizeof(struct qedi_endpoint)); + INIT_WORK(&qedi_ep->offload_work, qedi_offload_work); qedi_ep->state = EP_STATE_IDLE; qedi_ep->iscsi_cid = (u32)-1; qedi_ep->qedi = qedi; @@ -1055,12 +1087,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) qedi_ep = ep->dd_data; qedi = qedi_ep->qedi; + flush_work(&qedi_ep->offload_work); + if (qedi_ep->state == EP_STATE_OFLDCONN_START) goto ep_exit_recover; - if (qedi_ep->state != EP_STATE_OFLDCONN_NONE) - flush_work(&qedi_ep->offload_work); - if (qedi_ep->conn) { qedi_conn = qedi_ep->conn; abrt_conn = qedi_conn->abrt_conn; @@ -1234,37 +1265,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid) return rc; } -static void qedi_offload_work(struct work_struct *work) -{ - struct qedi_endpoint *qedi_ep = - container_of(work, struct qedi_endpoint, offload_work); - struct qedi_ctx *qedi; - int wait_delay = 5 * HZ; - int ret; - - qedi = qedi_ep->qedi; - - ret = qedi_iscsi_offload_conn(qedi_ep); - if (ret) { - QEDI_ERR(&qedi->dbg_ctx, - "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n", - 
qedi_ep->iscsi_cid, qedi_ep, ret); - qedi_ep->state = EP_STATE_OFLDCONN_FAILED; - return; - } - - ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait, - (qedi_ep->state == - EP_STATE_OFLDCONN_COMPL), - wait_delay); - if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) { - qedi_ep->state = EP_STATE_OFLDCONN_FAILED; - QEDI_ERR(&qedi->dbg_ctx, - "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n", - qedi_ep->iscsi_cid, qedi_ep); - } -} - static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) { struct qedi_ctx *qedi; @@ -1380,7 +1380,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) qedi_ep->dst_addr, qedi_ep->dst_port); } - INIT_WORK(&qedi_ep->offload_work, qedi_offload_work); queue_work(qedi->offload_thread, &qedi_ep->offload_work); ret = 0; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index cb5f2ecb652d..d3534e7f2d21 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -555,7 +555,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN)) return -EINVAL; - if (IS_NOCACHE_VPD_TYPE(ha)) + if (!IS_NOCACHE_VPD_TYPE(ha)) goto skip; faddr = ha->flt_region_vpd << 2; @@ -745,7 +745,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x706f, "Issuing MPI reset.\n"); - if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + if (IS_QLA83XX(ha)) { uint32_t idc_control; qla83xx_idc_lock(vha, 0); @@ -1056,9 +1056,6 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon) continue; if (iter->type == 3 && !(IS_CNA_CAPABLE(ha))) continue; - if (iter->type == 0x27 && - (!IS_QLA27XX(ha) || !IS_QLA28XX(ha))) - continue; sysfs_remove_bin_file(&host->shost_gendev.kobj, iter->attr); diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 655cf5de604b..c636165be52b 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -29,7 +29,8 @@ void qla2x00_bsg_job_done(srb_t *sp, int res) "%s: sp hdl %x, result=%x bsg ptr %p\n", __func__, sp->handle, res, bsg_job); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); bsg_reply->result = res; bsg_job_done(bsg_job, bsg_reply->result, @@ -3010,6 +3011,7 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job) done: spin_unlock_irqrestore(&ha->hardware_lock, flags); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index be2eb75ee1a3..2ea35e47ea44 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -726,6 +726,11 @@ typedef struct srb { * code. */ void (*put_fn)(struct kref *kref); + + /* + * Report completion for asynchronous commands. 
+ */ + void (*async_done)(struct srb *sp, int res); } srb_t; #define GET_CMD_SP(sp) (sp->u.scmd.cmd) @@ -2886,7 +2891,11 @@ struct ct_fdmi2_hba_attributes { #define FDMI_PORT_SPEED_8GB 0x10 #define FDMI_PORT_SPEED_16GB 0x20 #define FDMI_PORT_SPEED_32GB 0x40 -#define FDMI_PORT_SPEED_64GB 0x80 +#define FDMI_PORT_SPEED_20GB 0x80 +#define FDMI_PORT_SPEED_40GB 0x100 +#define FDMI_PORT_SPEED_128GB 0x200 +#define FDMI_PORT_SPEED_64GB 0x400 +#define FDMI_PORT_SPEED_256GB 0x800 #define FDMI_PORT_SPEED_UNKNOWN 0x8000 #define FC_CLASS_2 0x04 @@ -4261,8 +4270,10 @@ struct qla_hw_data { #define QLA_ABTS_WAIT_ENABLED(_sp) \ (QLA_NVME_IOS(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw)) -#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) -#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) +#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) +#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) #define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) #define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ IS_QLA28XX(ha)) @@ -5427,4 +5438,8 @@ struct ql_vnd_tgt_stats_resp { #include "qla_gbl.h" #include "qla_dbg.h" #include "qla_inline.h" + +#define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \ + _fcport->disc_state == DSC_DELETED) + #endif diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c index a04693498dc0..a00fe88c6021 100644 --- a/drivers/scsi/qla2xxx/qla_edif.c +++ b/drivers/scsi/qla2xxx/qla_edif.c @@ -668,6 +668,11 @@ qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job) bsg_job->request_payload.sg_cnt, &appplogiok, sizeof(struct auth_complete_cmd)); + /* silent unaligned access warning */ + portid.b.domain = appplogiok.u.d_id.b.domain; + portid.b.area = appplogiok.u.d_id.b.area; + portid.b.al_pa = appplogiok.u.d_id.b.al_pa; + switch (appplogiok.type) { case PL_TYPE_WWPN: fcport = qla2x00_find_fcport_by_wwpn(vha, @@ -678,7 +683,7 @@ qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job) __func__, appplogiok.u.wwpn); break; case PL_TYPE_DID: - fcport = qla2x00_find_fcport_by_pid(vha, &appplogiok.u.d_id); + fcport = qla2x00_find_fcport_by_pid(vha, &portid); if (!fcport) ql_dbg(ql_dbg_edif, vha, 0x911d, "%s d_id lookup failed: %x\n", __func__, @@ -777,6 +782,11 @@ qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job) bsg_job->request_payload.sg_cnt, &appplogifail, sizeof(struct auth_complete_cmd)); + /* silent unaligned access warning */ + portid.b.domain = appplogifail.u.d_id.b.domain; + portid.b.area = appplogifail.u.d_id.b.area; + portid.b.al_pa = appplogifail.u.d_id.b.al_pa; + /* * TODO: edif: app has failed this plogi. Inform driver to * take any action (if any). 
@@ -788,7 +798,7 @@ qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job) SET_DID_STATUS(bsg_reply->result, DID_OK); break; case PL_TYPE_DID: - fcport = qla2x00_find_fcport_by_pid(vha, &appplogifail.u.d_id); + fcport = qla2x00_find_fcport_by_pid(vha, &portid); if (!fcport) ql_dbg(ql_dbg_edif, vha, 0x911d, "%s d_id lookup failed: %x\n", __func__, @@ -1253,6 +1263,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job) int result = 0; struct qla_sa_update_frame sa_frame; struct srb_iocb *iocb_cmd; + port_id_t portid; ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s entered, vha: 0x%p\n", __func__, vha); @@ -1276,7 +1287,12 @@ qla24xx_sadb_update(struct bsg_job *bsg_job) goto done; } - fcport = qla2x00_find_fcport_by_pid(vha, &sa_frame.port_id); + /* silent unaligned access warning */ + portid.b.domain = sa_frame.port_id.b.domain; + portid.b.area = sa_frame.port_id.b.area; + portid.b.al_pa = sa_frame.port_id.b.al_pa; + + fcport = qla2x00_find_fcport_by_pid(vha, &portid); if (fcport) { found = 1; if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY) @@ -2161,7 +2177,8 @@ edif_doorbell_show(struct device *dev, struct device_attribute *attr, static void qla_noop_sp_done(srb_t *sp, int res) { - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } /* diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 2c7e91bffb82..83912787fa2e 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -316,7 +316,8 @@ extern int qla2x00_start_sp(srb_t *); extern int qla24xx_dif_start_scsi(srb_t *); extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t); extern int qla2xxx_dif_start_scsi_mq(srb_t *); -extern void qla2x00_init_timer(srb_t *sp, unsigned long tmo); +extern void qla2x00_init_async_sp(srb_t *sp, unsigned long tmo, + void (*done)(struct srb *, int)); extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); @@ -332,6 +333,7 @@ extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *); extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *); extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e); +void qla2x00_sp_release(struct kref *kref); /* * Global Function Prototypes in qla_mbx.c source file. 
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index ebc8fdb0b43d..da7080c4bd00 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -529,7 +529,6 @@ static void qla2x00_async_sns_sp_done(srb_t *sp, int rc) if (!e) goto err2; - del_timer(&sp->u.iocb_cmd.timer); e->u.iosb.sp = sp; qla2x00_post_work(vha, e); return; @@ -556,8 +555,8 @@ err2: sp->u.iocb_cmd.u.ctarg.rsp = NULL; } - sp->free(sp); - + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return; } @@ -592,13 +591,15 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) if (!vha->flags.online) goto done; + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "rft_id"; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_sns_sp_done); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, @@ -638,8 +639,6 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - sp->done = qla2x00_async_sns_sp_done; ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - hdl=%x portid %06x.\n", @@ -653,7 +652,8 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) } return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -676,8 +676,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type) return (QLA_SUCCESS); } - return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), - FC4_TYPE_FCP_SCSI); + return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type); } static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, @@ -688,13 +687,15 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, srb_t *sp; struct ct_sns_pkt *ct_sns; + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "rff_id"; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_sns_sp_done); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, @@ -727,13 +728,11 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id); ct_req->req.rff_id.fc4_feature = fc4feature; - ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */ + ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */ sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - sp->done = qla2x00_async_sns_sp_done; ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - hdl=%x portid %06x feature %x type %x.\n", @@ -749,7 +748,8 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -779,13 +779,15 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, srb_t *sp; 
struct ct_sns_pkt *ct_sns; + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "rnid"; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_sns_sp_done); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, @@ -823,9 +825,6 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - sp->done = qla2x00_async_sns_sp_done; - ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - hdl=%x portid %06x\n", sp->name, sp->handle, d_id->b24); @@ -840,7 +839,8 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -886,13 +886,15 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha) srb_t *sp; struct ct_sns_pkt *ct_sns; + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "rsnn_nn"; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_sns_sp_done); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, @@ -936,9 +938,6 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha) sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - sp->done = qla2x00_async_sns_sp_done; - ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - hdl=%x.\n", sp->name, sp->handle); @@ -953,7 +952,8 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -2892,7 +2892,8 @@ static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res) qla24xx_handle_gpsc_event(vha, &ea); done: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) @@ -2904,6 +2905,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -2912,8 +2914,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) sp->name = "gpsc"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; - - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla24xx_async_gpsc_sp_done); /* CT_IU preamble */ ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD, @@ -2931,9 +2933,6 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - sp->done = qla24xx_async_gpsc_sp_done; - ql_dbg(ql_dbg_disc, vha, 0x205e, "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", sp->name, fcport->port_name, sp->handle, @@ -2946,7 +2945,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) 
return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -2995,7 +2995,8 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) break; } - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) @@ -3134,13 +3135,15 @@ static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res) if (res) { if (res == QLA_FUNCTION_TIMEOUT) { qla24xx_post_gpnid_work(sp->vha, &ea.id); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return; } } else if (sp->gen1) { /* There was another RSCN for this Nport ID */ qla24xx_post_gpnid_work(sp->vha, &ea.id); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return; } @@ -3161,7 +3164,8 @@ static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res) sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return; } @@ -3181,6 +3185,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) if (!vha->flags.online) goto done; + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; @@ -3189,14 +3194,16 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) sp->name = "gpnid"; sp->u.iocb_cmd.u.ctarg.id = *id; sp->gen1 = 0; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_gpnid_sp_done); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); list_for_each_entry(tsp, &vha->gpnid_list, elem) { if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) { tsp->gen1++; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); goto done; } } @@ -3237,9 +3244,6 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - sp->done = qla2x00_async_gpnid_sp_done; - ql_dbg(ql_dbg_disc, vha, 0x2067, "Async-%s hdl=%x ID %3phC.\n", sp->name, sp->handle, &ct_req->req.port_id.port_id); @@ -3269,8 +3273,8 @@ done_free_sp: sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; } - - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -3325,7 +3329,8 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res) ea.rc = res; qla24xx_handle_gffid_event(vha, &ea); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } /* Get FC4 Feature with Nport ID. 
*/ @@ -3338,6 +3343,7 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport) if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) return rval; @@ -3347,9 +3353,8 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport) sp->name = "gffid"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; - - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla24xx_async_gffid_sp_done); /* CT_IU preamble */ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD, @@ -3367,8 +3372,6 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport) sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->done = qla24xx_async_gffid_sp_done; - ql_dbg(ql_dbg_disc, vha, 0x2132, "Async-%s hdl=%x %8phC.\n", sp->name, sp->handle, fcport->port_name); @@ -3379,7 +3382,8 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; return rval; } @@ -3766,7 +3770,6 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) "Async done-%s res %x FC4Type %x\n", sp->name, res, sp->gen2); - del_timer(&sp->u.iocb_cmd.timer); sp->rc = res; if (res) { unsigned long flags; @@ -3891,9 +3894,8 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, sp->name = "gnnft"; sp->gen1 = vha->hw->base_qpair->chip_reset; sp->gen2 = fc4_type; - - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_gpnft_gnnft_sp_done); memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); @@ -3909,8 +3911,6 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->done = qla2x00_async_gpnft_gnnft_sp_done; - ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s hdl=%x FC4Type %x.\n", sp->name, sp->handle, ct_req->req.gpn_ft.port_type); @@ -3937,8 +3937,8 @@ done_free_sp: sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; } - - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; @@ -3990,9 +3990,12 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s: Performing FCP Scan\n", __func__); - if (sp) - sp->free(sp); /* should not happen */ + if (sp) { + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + } + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) { spin_lock_irqsave(&vha->work_lock, flags); @@ -4037,6 +4040,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; + /* ref: INIT */ qla2x00_rel_sp(sp); return rval; } @@ -4056,9 +4060,8 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->name = "gpnft"; sp->gen1 = vha->hw->base_qpair->chip_reset; sp->gen2 = fc4_type; - - 
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_gpnft_gnnft_sp_done); rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size; memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); @@ -4073,8 +4076,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->done = qla2x00_async_gpnft_gnnft_sp_done; - ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s hdl=%x FC4Type %x.\n", sp->name, sp->handle, ct_req->req.gpn_ft.port_type); @@ -4102,7 +4103,8 @@ done_free_sp: sp->u.iocb_cmd.u.ctarg.rsp = NULL; } - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; @@ -4166,7 +4168,8 @@ static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res) qla24xx_handle_gnnid_event(vha, &ea); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) @@ -4179,6 +4182,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) return rval; qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID); + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); if (!sp) goto done; @@ -4188,9 +4192,8 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) sp->name = "gnnid"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; - - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_gnnid_sp_done); /* CT_IU preamble */ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD, @@ -4209,8 +4212,6 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->done = qla2x00_async_gnnid_sp_done; - ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n", sp->name, fcport->port_name, @@ -4222,7 +4223,8 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; done: return rval; @@ -4296,7 +4298,8 @@ static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res) qla24xx_handle_gfpnid_event(vha, &ea); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) @@ -4308,6 +4311,7 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); if (!sp) goto done; @@ -4316,9 +4320,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) sp->name = "gfpnid"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; - - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_gfpnid_sp_done); /* CT_IU preamble */ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD, @@ -4337,8 +4340,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) sp->u.iocb_cmd.u.ctarg.rsp_size = 
GFPN_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->done = qla2x00_async_gfpnid_sp_done; - ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n", sp->name, fcport->port_name, @@ -4351,7 +4352,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 847a6e5d9cb0..af8df5a800c6 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -51,6 +51,9 @@ qla2x00_sp_timeout(struct timer_list *t) WARN_ON(irqs_disabled()); iocb = &sp->u.iocb_cmd; iocb->timeout(sp); + + /* ref: TMR */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } void qla2x00_sp_free(srb_t *sp) @@ -125,8 +128,13 @@ static void qla24xx_abort_iocb_timeout(void *data) } spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - if (sp->cmd_sp) + if (sp->cmd_sp) { + /* + * This done function should take care of + * original command ref: INIT + */ sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED); + } abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT); sp->done(sp, QLA_OS_TIMER_EXPIRED); @@ -140,11 +148,11 @@ static void qla24xx_abort_sp_done(srb_t *sp, int res) if (orig_sp) qla_wait_nvme_release_cmd_kref(orig_sp); - del_timer(&sp->u.iocb_cmd.timer); if (sp->flags & SRB_WAKEUP_ON_COMP) complete(&abt->u.abt.comp); else - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) @@ -154,6 +162,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) srb_t *sp; int rval = QLA_FUNCTION_FAILED; + /* ref: INIT for ABTS command */ sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, GFP_ATOMIC); if (!sp) @@ -167,23 +176,22 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) if (wait) sp->flags = SRB_WAKEUP_ON_COMP; - abt_iocb->timeout = qla24xx_abort_iocb_timeout; init_completion(&abt_iocb->u.abt.comp); /* FW can send 2 x ABTS's timeout/20s */ - qla2x00_init_timer(sp, 42); + qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done); + sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout; abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id); - sp->done = qla24xx_abort_sp_done; - ql_dbg(ql_dbg_async, vha, 0x507c, "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle, cmd_sp->type); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return rval; } @@ -191,7 +199,8 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) wait_for_completion(&abt_iocb->u.abt.comp); rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? 
QLA_SUCCESS : QLA_ERR_FROM_FW; - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } return rval; @@ -286,10 +295,13 @@ static void qla2x00_async_login_sp_done(srb_t *sp, int res) ea.iop[0] = lio->u.logio.iop[0]; ea.iop[1] = lio->u.logio.iop[1]; ea.sp = sp; + if (res) + ea.data[0] = MBS_COMMAND_ERROR; qla24xx_handle_plogi_done_event(vha, &ea); } - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int @@ -308,6 +320,7 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, return rval; } + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -320,12 +333,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, sp->name = "login"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_login_sp_done); lio = &sp->u.iocb_cmd; - lio->timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - - sp->done = qla2x00_async_login_sp_done; if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) { lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY; } else { @@ -359,7 +370,8 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; done: fcport->flags &= ~FCF_ASYNC_ACTIVE; @@ -371,29 +383,26 @@ static void qla2x00_async_logout_sp_done(srb_t *sp, int res) sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); sp->fcport->login_gen++; qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) { srb_t *sp; - struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; fcport->flags |= FCF_ASYNC_SENT; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_LOGOUT_CMD; sp->name = "logout"; - - lio = &sp->u.iocb_cmd; - lio->timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - - sp->done = qla2x00_async_logout_sp_done; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_logout_sp_done), ql_dbg(ql_dbg_disc, vha, 0x2070, "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n", @@ -407,7 +416,8 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); return rval; @@ -433,29 +443,26 @@ static void qla2x00_async_prlo_sp_done(srb_t *sp, int res) if (!test_bit(UNLOADING, &vha->dpc_flags)) qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport, lio->u.logio.data); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport) { srb_t *sp; - struct srb_iocb *lio; int rval; rval = QLA_FUNCTION_FAILED; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_PRLO_CMD; sp->name = "prlo"; - - lio = &sp->u.iocb_cmd; - lio->timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - - sp->done = qla2x00_async_prlo_sp_done; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 
+ qla2x00_async_prlo_sp_done); ql_dbg(ql_dbg_disc, vha, 0x2070, "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n", @@ -469,7 +476,8 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; @@ -552,10 +560,12 @@ static void qla2x00_async_adisc_sp_done(srb_t *sp, int res) ea.iop[1] = lio->u.logio.iop[1]; ea.fcport = sp->fcport; ea.sp = sp; + if (res) + ea.data[0] = MBS_COMMAND_ERROR; qla24xx_handle_adisc_event(vha, &ea); - - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int @@ -566,26 +576,34 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; + if (IS_SESSION_DELETED(fcport)) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC is being delete - not sending command.\n", + __func__, fcport->port_name); + fcport->flags &= ~FCF_ASYNC_ACTIVE; + return rval; + } + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; fcport->flags |= FCF_ASYNC_SENT; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_ADISC_CMD; sp->name = "adisc"; - - lio = &sp->u.iocb_cmd; - lio->timeout = qla2x00_async_iocb_timeout; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_adisc_sp_done); - sp->done = qla2x00_async_adisc_sp_done; - if (data[1] & QLA_LOGIO_LOGIN_RETRIED) + if (data[1] & QLA_LOGIO_LOGIN_RETRIED) { + lio = &sp->u.iocb_cmd; lio->u.logio.flags |= SRB_LOGIN_RETRIED; + } ql_dbg(ql_dbg_disc, vha, 0x206f, "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n", @@ -598,7 +616,8 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); qla2x00_post_async_adisc_work(vha, fcport, data); @@ -964,6 +983,9 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } break; + case ISP_CFG_NL: + qla24xx_fcport_handle_login(vha, fcport); + break; default: break; } @@ -1079,13 +1101,13 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) { srb_t *sp; - struct srb_iocb *mbx; int rval = QLA_FUNCTION_FAILED; unsigned long flags; u16 *mb; @@ -1110,6 +1132,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) vha->gnl.sent = 1; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -1118,10 +1141,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) sp->name = "gnlist"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; - - mbx = &sp->u.iocb_cmd; - mbx->timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla24xx_async_gnl_sp_done); mb = sp->u.iocb_cmd.u.mbx.out_mb; mb[0] = MBC_PORT_NODE_NAME_LIST; @@ -1133,8 +1154,6 @@ int qla24xx_async_gnl(struct 
scsi_qla_host *vha, fc_port_t *fcport) mb[8] = vha->gnl.size; mb[9] = vha->vp_idx; - sp->done = qla24xx_async_gnl_sp_done; - ql_dbg(ql_dbg_disc, vha, 0x20da, "Async-%s - OUT WWPN %8phC hndl %x\n", sp->name, fcport->port_name, sp->handle); @@ -1146,7 +1165,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT); return rval; @@ -1192,7 +1212,7 @@ done: dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, sp->u.iocb_cmd.u.mbx.in_dma); - sp->free(sp); + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport) @@ -1233,11 +1253,13 @@ static void qla2x00_async_prli_sp_done(srb_t *sp, int res) ea.sp = sp; if (res == QLA_OS_TIMER_EXPIRED) ea.data[0] = QLA_OS_TIMER_EXPIRED; + else if (res) + ea.data[0] = MBS_COMMAND_ERROR; qla24xx_handle_prli_done_event(vha, &ea); } - sp->free(sp); + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int @@ -1270,12 +1292,10 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) sp->type = SRB_PRLI_CMD; sp->name = "prli"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_prli_sp_done); lio = &sp->u.iocb_cmd; - lio->timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - - sp->done = qla2x00_async_prli_sp_done; lio->u.logio.flags = 0; if (NVME_TARGET(vha->hw, fcport)) @@ -1297,7 +1317,8 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; return rval; } @@ -1326,14 +1347,21 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) struct port_database_24xx *pd; struct qla_hw_data *ha = vha->hw; - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || - fcport->loop_id == FC_NO_LOOP_ID) { + if (IS_SESSION_DELETED(fcport)) { ql_log(ql_log_warn, vha, 0xffff, - "%s: %8phC - not sending command.\n", - __func__, fcport->port_name); + "%s: %8phC is being delete - not sending command.\n", + __func__, fcport->port_name); + fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } + if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC online %d flags %x - not sending command.\n", + __func__, fcport->port_name, vha->flags.online, fcport->flags); + goto done; + } + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -1345,10 +1373,8 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) sp->name = "gpdb"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; - - mbx = &sp->u.iocb_cmd; - mbx->timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla24xx_async_gpdb_sp_done); pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); if (pd == NULL) { @@ -1367,11 +1393,10 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) mb[9] = vha->vp_idx; mb[10] = opt; - mbx->u.mbx.in = pd; + mbx = &sp->u.iocb_cmd; + mbx->u.mbx.in = (void *)pd; mbx->u.mbx.in_dma = pd_dma; - sp->done = qla24xx_async_gpdb_sp_done; - ql_dbg(ql_dbg_disc, vha, 0x20dc, "Async-%s %8phC hndl %x opt %x\n", sp->name, fcport->port_name, sp->handle, opt); @@ -1385,7 +1410,7 @@ 
done_free_sp: if (pd) dma_pool_free(ha->s_dma_pool, pd, pd_dma); - sp->free(sp); + kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; done: fcport->flags &= ~FCF_ASYNC_ACTIVE; @@ -1557,6 +1582,11 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport) u8 login = 0; int rc; + ql_dbg(ql_dbg_disc, vha, 0x307b, + "%s %8phC DS %d LS %d lid %d retries=%d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->loop_id, fcport->login_retry); + if (qla_tgt_mode_enabled(vha)) return; @@ -1615,7 +1645,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) fcport->login_gen, fcport->loop_id, fcport->scan_state, fcport->fc4_type); - if (fcport->scan_state != QLA_FCPORT_FOUND) + if (fcport->scan_state != QLA_FCPORT_FOUND || + fcport->disc_state == DSC_DELETE_PEND) return 0; if ((fcport->loop_id != FC_NO_LOOP_ID) && @@ -1636,7 +1667,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw)) return 0; - if (fcport->flags & FCF_ASYNC_SENT) { + if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return 0; } @@ -1951,22 +1982,21 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, srb_t *sp; int rval = QLA_FUNCTION_FAILED; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; - tm_iocb = &sp->u.iocb_cmd; sp->type = SRB_TM_CMD; sp->name = "tmf"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), + qla2x00_tmf_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout; - tm_iocb->timeout = qla2x00_tmf_iocb_timeout; + tm_iocb = &sp->u.iocb_cmd; init_completion(&tm_iocb->u.tmf.comp); - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)); - tm_iocb->u.tmf.flags = flags; tm_iocb->u.tmf.lun = lun; - tm_iocb->u.tmf.data = tag; - sp->done = qla2x00_tmf_sp_done; ql_dbg(ql_dbg_taskm, vha, 0x802f, "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", @@ -1996,7 +2026,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, } done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; done: return rval; @@ -2055,13 +2086,6 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) qla24xx_post_gpdb_work(vha, ea->fcport, 0); break; default: - if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) && - (ea->iop[1] == 0x50000)) { /* reson 5=busy expl:0x0 */ - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP; - break; - } - sp = ea->sp; ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC priority %s, fc4type %x prev try %s\n", @@ -2205,12 +2229,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n", __func__, __LINE__, ea->fcport->port_name, ea->data[1]); - ea->fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED); - if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED) - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - else - qla2x00_mark_device_lost(vha, ea->fcport, 1); + qlt_schedule_sess_for_deletion(ea->fcport); break; case MBS_LOOP_ID_USED: /* data[1] = IO PARAM 1 = nport ID */ @@ -3453,6 +3472,14 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) struct rsp_que *rsp = ha->rsp_q_map[0]; struct qla2xxx_fw_dump *fw_dump; + if (ha->fw_dump) { + ql_dbg(ql_dbg_init, 
vha, 0x00bd, + "Firmware dump already allocated.\n"); + return; + } + + ha->fw_dumped = 0; + ha->fw_dump_cap_flags = 0; dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; req_q_size = rsp_q_size = 0; @@ -3463,7 +3490,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x11000 + 1) * sizeof(uint16_t); } else if (IS_FWI2_CAPABLE(ha)) { - if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (IS_QLA83XX(ha)) fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); else if (IS_QLA81XX(ha)) fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); @@ -3475,8 +3502,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x100000 + 1) * sizeof(uint32_t); if (ha->mqenable) { - if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && - !IS_QLA28XX(ha)) + if (!IS_QLA83XX(ha)) mq_size = sizeof(struct qla2xxx_mq_chain); /* * Allocate maximum buffer size for all queues - Q0. @@ -4037,8 +4063,7 @@ enable_82xx_npiv: ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version); - if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || - IS_QLA28XX(ha)) { + if (IS_QLA83XX(ha)) { ha->flags.fac_supported = 0; rval = QLA_SUCCESS; } @@ -5584,6 +5609,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) memcpy(fcport->node_name, new_fcport->node_name, WWN_SIZE); fcport->scan_state = QLA_FCPORT_FOUND; + if (fcport->login_retry == 0) { + fcport->login_retry = vha->hw->login_retry_count; + ql_dbg(ql_dbg_disc, vha, 0x2135, + "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", + fcport->port_name, fcport->loop_id, + fcport->login_retry); + } found++; break; } @@ -5717,6 +5749,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) if (atomic_read(&fcport->state) == FCS_ONLINE) return; + qla2x00_set_fcport_state(fcport, FCS_ONLINE); + rport_ids.node_name = wwn_to_u64(fcport->node_name); rport_ids.port_name = wwn_to_u64(fcport->port_name); rport_ids.port_id = fcport->d_id.b.domain << 16 | @@ -5824,6 +5858,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_reg_remote_port(vha, fcport); break; case MODE_TARGET: + qla2x00_set_fcport_state(fcport, FCS_ONLINE); if (!vha->vha_tgt.qla_tgt->tgt_stop && !vha->vha_tgt.qla_tgt->tgt_stopped) qlt_fc_port_added(vha, fcport); @@ -5838,8 +5873,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) break; } - qla2x00_set_fcport_state(fcport, FCS_ONLINE); - if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) { if (fcport->id_changed) { fcport->id_changed = 0; @@ -9370,7 +9403,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->rsp->req = qpair->req; qpair->rsp->qpair = qpair; /* init qpair to this cpu. Will adjust at run time. 
*/ - qla_cpu_update(qpair, smp_processor_id()); + qla_cpu_update(qpair, raw_smp_processor_id()); if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 5f3b7995cc8f..db17f7f410cd 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -184,6 +184,8 @@ static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha, sp->vha = vha; sp->qpair = qpair; sp->cmd_type = TYPE_SRB; + /* ref : INIT - normal flow */ + kref_init(&sp->cmd_kref); INIT_LIST_HEAD(&sp->elem); } diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 9d4ad1d2b00a..606228f4a8b5 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2560,11 +2560,38 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) } } -void qla2x00_init_timer(srb_t *sp, unsigned long tmo) +static void +qla2x00_async_done(struct srb *sp, int res) +{ + if (del_timer(&sp->u.iocb_cmd.timer)) { + /* + * Successfully cancelled the timeout handler + * ref: TMR + */ + if (kref_put(&sp->cmd_kref, qla2x00_sp_release)) + return; + } + sp->async_done(sp, res); +} + +void +qla2x00_sp_release(struct kref *kref) +{ + struct srb *sp = container_of(kref, struct srb, cmd_kref); + + sp->free(sp); +} + +void +qla2x00_init_async_sp(srb_t *sp, unsigned long tmo, + void (*done)(struct srb *sp, int res)) { timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); - sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; + sp->done = qla2x00_async_done; + sp->async_done = done; sp->free = qla2x00_sp_free; + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD) init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); sp->start_timer = 1; @@ -2651,7 +2678,9 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, return -ENOMEM; } - /* Alloc SRB structure */ + /* Alloc SRB structure + * ref: INIT + */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { kfree(fcport); @@ -2672,18 +2701,19 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, sp->type = SRB_ELS_DCMD; sp->name = "ELS_DCMD"; sp->fcport = fcport; - elsio->timeout = qla2x00_els_dcmd_iocb_timeout; - qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT); - init_completion(&sp->u.iocb_cmd.u.els_logo.comp); - sp->done = qla2x00_els_dcmd_sp_done; + qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT, + qla2x00_els_dcmd_sp_done); sp->free = qla2x00_els_dcmd_sp_free; + sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout; + init_completion(&sp->u.iocb_cmd.u.els_logo.comp); elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma, GFP_KERNEL); if (!elsio->u.els_logo.els_logo_pyld) { - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return QLA_FUNCTION_FAILED; } @@ -2706,7 +2736,8 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return QLA_FUNCTION_FAILED; } @@ -2717,7 +2748,8 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, wait_for_completion(&elsio->u.els_logo.comp); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return rval; } @@ -2850,7 +2882,6 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) sp->name, res, 
sp->handle, fcport->d_id.b24, fcport->port_name); fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); - del_timer(&sp->u.iocb_cmd.timer); if (sp->flags & SRB_WAKEUP_ON_COMP) complete(&lio->u.els_plogi.comp); @@ -2927,6 +2958,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); + break; } fallthrough; default: @@ -2936,9 +2968,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) fw_status[0], fw_status[1], fw_status[2]); fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_set_fcport_disc_state(fcport, - DSC_LOGIN_FAILED); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + qlt_schedule_sess_for_deletion(fcport); break; } break; @@ -2950,8 +2980,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) fw_status[0], fw_status[1], fw_status[2]); sp->fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + qlt_schedule_sess_for_deletion(fcport); break; } @@ -2960,7 +2989,8 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) struct srb_iocb *elsio = &sp->u.iocb_cmd; qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return; } e->u.iosb.sp = sp; @@ -2978,7 +3008,9 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, int rval = QLA_SUCCESS; void *ptr, *resp_ptr; - /* Alloc SRB structure */ + /* Alloc SRB structure + * ref: INIT + */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { ql_log(ql_log_info, vha, 0x70e6, @@ -2993,17 +3025,16 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, ql_dbg(ql_dbg_io, vha, 0x3073, "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24); - sp->type = SRB_ELS_DCMD; - sp->name = "ELS_DCMD"; - sp->fcport = fcport; - - elsio->timeout = qla2x00_els_dcmd2_iocb_timeout; if (wait) sp->flags = SRB_WAKEUP_ON_COMP; - qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2); + sp->type = SRB_ELS_DCMD; + sp->name = "ELS_DCMD"; + sp->fcport = fcport; + qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2, + qla2x00_els_dcmd2_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout; - sp->done = qla2x00_els_dcmd2_sp_done; elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE; ptr = elsio->u.els_plogi.els_plogi_pyld = @@ -3069,7 +3100,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, out: fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -3880,8 +3912,15 @@ qla2x00_start_sp(srb_t *sp) break; } - if (sp->start_timer) + if (sp->start_timer) { + /* ref: TMR timer ref + * this code should be just before start_iocbs function + * This will make sure that caller function don't to do + * kref_put even on failure + */ + kref_get(&sp->cmd_kref); add_timer(&sp->u.iocb_cmd.timer); + } wmb(); qla2x00_start_iocbs(vha, qp->req); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index b26f2699adb2..62d2f14848e7 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2494,6 +2494,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) iocb->u.tmf.data = QLA_FUNCTION_FAILED; } else if ((le16_to_cpu(sts->scsi_status) & SS_RESPONSE_INFO_LEN_VALID)) { + host_to_fcp_swap(sts->data, sizeof(sts->data)); if (le32_to_cpu(sts->rsp_data_len) < 4) { 
ql_log(ql_log_warn, fcport->vha, 0x503b, "Async-%s error - hdl=%x not enough response(%d).\n", diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index a6debeea3079..950c5903e799 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -9,6 +9,12 @@ #include <linux/delay.h> #include <linux/gfp.h> +#ifdef CONFIG_PPC +#define IS_PPCARCH true +#else +#define IS_PPCARCH false +#endif + static struct mb_cmd_name { uint16_t cmd; const char *str; @@ -728,6 +734,9 @@ again: vha->min_supported_speed = nv->min_supported_speed; } + + if (IS_PPCARCH) + mcp->mb[11] |= BIT_4; } if (ha->flags.exlogins_enabled) @@ -3029,8 +3038,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha) ha->orig_fw_iocb_count = mcp->mb[10]; if (ha->flags.npiv_supported) ha->max_npiv_vports = mcp->mb[11]; - if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || - IS_QLA28XX(ha)) + if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ha->fw_max_fcf_count = mcp->mb[12]; } @@ -5621,7 +5629,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha) mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) - mcp->in_mb |= MBX_3; + mcp->in_mb |= MBX_4|MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -6479,23 +6487,21 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) if (!vha->hw->flags.fw_started) goto done; + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; - sp->type = SRB_MB_IOCB; - sp->name = mb_to_str(mcp->mb[0]); - c = &sp->u.iocb_cmd; - c->timeout = qla2x00_async_iocb_timeout; init_completion(&c->u.mbx.comp); - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + sp->type = SRB_MB_IOCB; + sp->name = mb_to_str(mcp->mb[0]); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_mb_sp_done); memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); - sp->done = qla2x00_async_mb_sp_done; - rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1018, @@ -6527,7 +6533,8 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) } done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 1c024055f8c5..e6b5c4ccce97 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -965,6 +965,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) if (vp_index == 0 || vp_index >= ha->max_npiv_vports) return QLA_PARAMETER_ERROR; + /* ref: INIT */ sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL); if (!sp) return rval; @@ -972,9 +973,8 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) sp->type = SRB_CTRL_VP; sp->name = "ctrl_vp"; sp->comp = &comp; - sp->done = qla_ctrlvp_sp_done; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla_ctrlvp_sp_done); sp->u.iocb_cmd.u.ctrlvp.cmd = cmd; sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index; @@ -1008,6 +1008,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) break; } done: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return rval; } diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 350b0c4346fb..f726eb8449c5 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ 
b/drivers/scsi/qla2xxx/qla_mr.c @@ -1787,17 +1787,18 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) struct register_host_info *preg_hsi; struct new_utsname *p_sysid = NULL; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_FXIOCB_DCMD; sp->name = "fxdisc"; + qla2x00_init_async_sp(sp, FXDISC_TIMEOUT, + qla2x00_fxdisc_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_fxdisc_iocb_timeout; fdisc = &sp->u.iocb_cmd; - fdisc->timeout = qla2x00_fxdisc_iocb_timeout; - qla2x00_init_timer(sp, FXDISC_TIMEOUT); - switch (fx_type) { case FXDISC_GET_CONFIG_INFO: fdisc->u.fxiocb.flags = @@ -1898,7 +1899,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) } fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type); - sp->done = qla2x00_fxdisc_sp_done; rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) @@ -1974,7 +1974,8 @@ done_unmap_req: dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len, fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle); done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 877b2b625020..42b29f4fd937 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -35,6 +35,11 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) (fcport->nvme_flag & NVME_FLAG_REGISTERED)) return 0; + if (atomic_read(&fcport->state) == FCS_ONLINE) + return 0; + + qla2x00_set_fcport_state(fcport, FCS_ONLINE); + fcport->nvme_flag &= ~NVME_FLAG_RESETTING; memset(&req, 0, sizeof(struct nvme_fc_port_info)); @@ -165,6 +170,18 @@ out: qla2xxx_rel_qpair_sp(sp->qpair, sp); } +static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd) +{ + if (sp->flags & SRB_DMA_VALID) { + struct srb_iocb *nvme = &sp->u.iocb_cmd; + struct qla_hw_data *ha = sp->fcport->vha->hw; + + dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma, + fd->rqstlen, DMA_TO_DEVICE); + sp->flags &= ~SRB_DMA_VALID; + } +} + static void qla_nvme_release_ls_cmd_kref(struct kref *kref) { struct srb *sp = container_of(kref, struct srb, cmd_kref); @@ -181,6 +198,8 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref) spin_unlock_irqrestore(&priv->cmd_lock, flags); fd = priv->fd; + + qla_nvme_ls_unmap(sp, fd); fd->done(fd, priv->comp_status); out: qla2x00_rel_sp(sp); @@ -351,6 +370,8 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, fd->rqstlen, DMA_TO_DEVICE); + sp->flags |= SRB_DMA_VALID; + rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, @@ -358,6 +379,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, wake_up(&sp->nvme_ls_waitq); sp->priv = NULL; priv->sp = NULL; + qla_nvme_ls_unmap(sp, fd); qla2x00_rel_sp(sp); return rval; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8d87cfae9c59..77c0bf06f162 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -728,7 +728,8 @@ void qla2x00_sp_compl(srb_t *sp, int res) struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct completion *comp = sp->comp; - sp->free(sp); + /* kref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); cmd->result = res; CMD_SP(cmd) = NULL; cmd->scsi_done(cmd); @@ -819,7 +820,8 @@ void qla2xxx_qpair_sp_compl(srb_t *sp, int res) struct scsi_cmnd *cmd = GET_CMD_SP(sp); 
struct completion *comp = sp->comp; - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); cmd->result = res; CMD_SP(cmd) = NULL; cmd->scsi_done(cmd); @@ -919,6 +921,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto qc24_target_busy; sp = scsi_cmd_priv(cmd); + /* ref: INIT */ qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport); sp->u.scmd.cmd = cmd; @@ -938,7 +941,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) return 0; qc24_host_busy_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); qc24_target_busy: return SCSI_MLQUEUE_TARGET_BUSY; @@ -1008,6 +1012,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, goto qc24_target_busy; sp = scsi_cmd_priv(cmd); + /* ref: INIT */ qla2xxx_init_sp(sp, vha, qpair, fcport); sp->u.scmd.cmd = cmd; @@ -1026,7 +1031,8 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, return 0; qc24_host_busy_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); qc24_target_busy: return SCSI_MLQUEUE_TARGET_BUSY; @@ -3733,8 +3739,7 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha) if (ha->mqiobase) iounmap(ha->mqiobase); - if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) && - ha->msixbase) + if (ha->msixbase) iounmap(ha->msixbase); } } @@ -5512,6 +5517,11 @@ void qla2x00_relogin(struct scsi_qla_host *vha) ea.fcport = fcport; qla24xx_handle_relogin_event(vha, &ea); } else if (vha->hw->current_topology == + ISP_CFG_NL && + IS_QLA2XXX_MIDTYPE(vha->hw)) { + (void)qla24xx_fcport_handle_login(vha, + fcport); + } else if (vha->hw->current_topology == ISP_CFG_NL) { fcport->login_retry--; status = @@ -7635,7 +7645,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) switch (state) { case pci_channel_io_normal: - ha->flags.eeh_busy = 0; + qla_pci_set_eeh_busy(vha); if (ql2xmqsupport || ql2xnvmeenable) { set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); @@ -7676,9 +7686,16 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) "mmio enabled\n"); ha->pci_error_state = QLA_PCI_MMIO_ENABLED; + if (IS_QLA82XX(ha)) return PCI_ERS_RESULT_RECOVERED; + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, base_vha, 0x803f, + "During mmio enabled, PCI/Register disconnect still detected.\n"); + goto out; + } + spin_lock_irqsave(&ha->hardware_lock, flags); if (IS_QLA2100(ha) || IS_QLA2200(ha)){ stat = rd_reg_word(&reg->hccr); @@ -7700,6 +7717,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) "RISC paused -- mmio_enabled, Dumping firmware.\n"); qla2xxx_dump_fw(base_vha); } +out: /* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */ ql_dbg(ql_dbg_aer, base_vha, 0x600d, "mmio enabled returning.\n"); diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index a0aeba69513d..c092a6b1ced4 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -844,7 +844,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) ha->flt_region_nvram = start; break; case FLT_REG_IMG_PRI_27XX: - if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_img_status_pri = start; break; case FLT_REG_IMG_SEC_27XX: @@ -1356,7 +1356,7 @@ next: flash_data_addr(ha, faddr), le32_to_cpu(*dwptr)); if (ret) { ql_dbg(ql_dbg_user, vha, 0x7006, - "Failed slopw write %x (%x)\n", faddr, *dwptr); + "Failed slow write %x (%x)\n", faddr, *dwptr); break; } } diff --git 
a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 1aaa4238cb72..f5d32d830a9b 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -620,7 +620,7 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res) } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - sp->free(sp); + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, @@ -656,12 +656,10 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, sp->type = type; sp->name = "nack"; - - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_nack_sp_done); sp->u.iocb_cmd.u.nack.ntfy = ntfy; - sp->done = qla2x00_async_nack_sp_done; ql_dbg(ql_dbg_disc, vha, 0x20f4, "Async-%s %8phC hndl %x %s\n", @@ -674,7 +672,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, return rval; done_free_sp: - sp->free(sp); + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~FCF_ASYNC_SENT; return rval; @@ -3320,6 +3318,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, qpair->chip_reset); + res = 0; goto out_unmap_unlock; } @@ -7221,8 +7220,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) if (!QLA_TGT_MODE_ENABLED()) return; - if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || - IS_QLA28XX(ha)) { + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; } else { diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 26c13a953b97..b0a74b036cf4 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -435,8 +435,13 @@ qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha, { ql_dbg(ql_dbg_misc, vha, 0xd20a, "%s: reset risc [%lx]\n", __func__, *len); - if (buf) - WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS); + if (buf) { + if (qla24xx_soft_reset(vha->hw) != QLA_SUCCESS) { + ql_dbg(ql_dbg_async, vha, 0x5001, + "%s: unable to soft reset\n", __func__); + return INVALID_ENTRY; + } + } return qla27xx_next_entry(ent); } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 408d49c304b8..bb5a6e0fa49a 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -485,8 +485,13 @@ static void scsi_report_sense(struct scsi_device *sdev, if (sshdr->asc == 0x29) { evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED; - sdev_printk(KERN_WARNING, sdev, - "Power-on or device reset occurred\n"); + /* + * Do not print message if it is an expected side-effect + * of runtime PM. 
+ */ + if (!sdev->silence_suspend) + sdev_printk(KERN_WARNING, sdev, + "Power-on or device reset occurred\n"); } if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) { diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index fe22191522a3..9466474ff01b 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -198,6 +198,53 @@ static void scsi_unlock_floptical(struct scsi_device *sdev, SCSI_TIMEOUT, 3, NULL); } +static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, + unsigned int depth) +{ + int new_shift = sbitmap_calculate_shift(depth); + bool need_alloc = !sdev->budget_map.map; + bool need_free = false; + int ret; + struct sbitmap sb_backup; + + depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev)); + + /* + * realloc if new shift is calculated, which is caused by setting + * up one new default queue depth after calling ->slave_configure + */ + if (!need_alloc && new_shift != sdev->budget_map.shift) + need_alloc = need_free = true; + + if (!need_alloc) + return 0; + + /* + * Request queue has to be frozen for reallocating budget map, + * and here disk isn't added yet, so freezing is pretty fast + */ + if (need_free) { + blk_mq_freeze_queue(sdev->request_queue); + sb_backup = sdev->budget_map; + } + ret = sbitmap_init_node(&sdev->budget_map, + scsi_device_max_queue_depth(sdev), + new_shift, GFP_KERNEL, + sdev->request_queue->node, false, true); + if (!ret) + sbitmap_resize(&sdev->budget_map, depth); + + if (need_free) { + if (ret) + sdev->budget_map = sb_backup; + else + sbitmap_free(&sb_backup); + ret = 0; + blk_mq_unfreeze_queue(sdev->request_queue); + } + return ret; +} + /** * scsi_alloc_sdev - allocate and setup a scsi_Device * @starget: which target to allocate a &scsi_device for @@ -291,11 +338,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, * default device queue depth to figure out sbitmap shift * since we use this queue depth most of times. */ - if (sbitmap_init_node(&sdev->budget_map, - scsi_device_max_queue_depth(sdev), - sbitmap_calculate_shift(depth), - GFP_KERNEL, sdev->request_queue->node, - false, true)) { + if (scsi_realloc_sdev_budget_map(sdev, depth)) { put_device(&starget->dev); kfree(sdev); goto out; @@ -1001,6 +1044,13 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, } return SCSI_SCAN_NO_RESPONSE; } + + /* + * The queue_depth is often changed in ->slave_configure. + * Set up budget map again since memory consumption of + * the map depends on actual queue depth. 
+ */ + scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth); } if (sdev->scsi_level >= SCSI_3) diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 60e406bcf42a..a2524106206d 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -34,7 +34,7 @@ static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *); static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *); static void fc_bsg_remove(struct request_queue *); static void fc_bsg_goose_queue(struct fc_rport *); -static void fc_li_stats_update(struct fc_fn_li_desc *li_desc, +static void fc_li_stats_update(u16 event_type, struct fc_fpin_stats *stats); static void fc_delivery_stats_update(u32 reason_code, struct fc_fpin_stats *stats); @@ -670,42 +670,34 @@ fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn) EXPORT_SYMBOL(fc_find_rport_by_wwpn); static void -fc_li_stats_update(struct fc_fn_li_desc *li_desc, +fc_li_stats_update(u16 event_type, struct fc_fpin_stats *stats) { - stats->li += be32_to_cpu(li_desc->event_count); - switch (be16_to_cpu(li_desc->event_type)) { + stats->li++; + switch (event_type) { case FPIN_LI_UNKNOWN: - stats->li_failure_unknown += - be32_to_cpu(li_desc->event_count); + stats->li_failure_unknown++; break; case FPIN_LI_LINK_FAILURE: - stats->li_link_failure_count += - be32_to_cpu(li_desc->event_count); + stats->li_link_failure_count++; break; case FPIN_LI_LOSS_OF_SYNC: - stats->li_loss_of_sync_count += - be32_to_cpu(li_desc->event_count); + stats->li_loss_of_sync_count++; break; case FPIN_LI_LOSS_OF_SIG: - stats->li_loss_of_signals_count += - be32_to_cpu(li_desc->event_count); + stats->li_loss_of_signals_count++; break; case FPIN_LI_PRIM_SEQ_ERR: - stats->li_prim_seq_err_count += - be32_to_cpu(li_desc->event_count); + stats->li_prim_seq_err_count++; break; case FPIN_LI_INVALID_TX_WD: - stats->li_invalid_tx_word_count += - be32_to_cpu(li_desc->event_count); + stats->li_invalid_tx_word_count++; break; case FPIN_LI_INVALID_CRC: - stats->li_invalid_crc_count += - be32_to_cpu(li_desc->event_count); + stats->li_invalid_crc_count++; break; case FPIN_LI_DEVICE_SPEC: - stats->li_device_specific += - be32_to_cpu(li_desc->event_count); + stats->li_device_specific++; break; } } @@ -767,6 +759,7 @@ fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv) struct fc_rport *attach_rport = NULL; struct fc_host_attrs *fc_host = shost_to_fc_host(shost); struct fc_fn_li_desc *li_desc = (struct fc_fn_li_desc *)tlv; + u16 event_type = be16_to_cpu(li_desc->event_type); u64 wwpn; rport = fc_find_rport_by_wwpn(shost, @@ -775,7 +768,7 @@ fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv) (rport->roles & FC_PORT_ROLE_FCP_TARGET || rport->roles & FC_PORT_ROLE_NVME_TARGET)) { attach_rport = rport; - fc_li_stats_update(li_desc, &attach_rport->fpin_stats); + fc_li_stats_update(event_type, &attach_rport->fpin_stats); } if (be32_to_cpu(li_desc->pname_count) > 0) { @@ -789,14 +782,14 @@ fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv) rport->roles & FC_PORT_ROLE_NVME_TARGET)) { if (rport == attach_rport) continue; - fc_li_stats_update(li_desc, + fc_li_stats_update(event_type, &rport->fpin_stats); } } } if (fc_host->port_name == be64_to_cpu(li_desc->attached_wwpn)) - fc_li_stats_update(li_desc, &fc_host->fpin_stats); + fc_li_stats_update(event_type, &fc_host->fpin_stats); } /* diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 554b6f784223..bcdfcb25349a 
100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -86,6 +86,9 @@ struct iscsi_internal { struct transport_container session_cont; }; +static DEFINE_IDR(iscsi_ep_idr); +static DEFINE_MUTEX(iscsi_ep_idr_mutex); + static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ static struct workqueue_struct *iscsi_eh_timer_workq; @@ -169,6 +172,11 @@ struct device_attribute dev_attr_##_prefix##_##_name = \ static void iscsi_endpoint_release(struct device *dev) { struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); + + mutex_lock(&iscsi_ep_idr_mutex); + idr_remove(&iscsi_ep_idr, ep->id); + mutex_unlock(&iscsi_ep_idr_mutex); + kfree(ep); } @@ -181,7 +189,7 @@ static ssize_t show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); - return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id); + return sysfs_emit(buf, "%d\n", ep->id); } static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); @@ -194,48 +202,32 @@ static struct attribute_group iscsi_endpoint_group = { .attrs = iscsi_endpoint_attrs, }; -#define ISCSI_MAX_EPID -1 - -static int iscsi_match_epid(struct device *dev, const void *data) -{ - struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); - const uint64_t *epid = data; - - return *epid == ep->id; -} - struct iscsi_endpoint * iscsi_create_endpoint(int dd_size) { - struct device *dev; struct iscsi_endpoint *ep; - uint64_t id; - int err; - - for (id = 1; id < ISCSI_MAX_EPID; id++) { - dev = class_find_device(&iscsi_endpoint_class, NULL, &id, - iscsi_match_epid); - if (!dev) - break; - else - put_device(dev); - } - if (id == ISCSI_MAX_EPID) { - printk(KERN_ERR "Too many connections. Max supported %u\n", - ISCSI_MAX_EPID - 1); - return NULL; - } + int err, id; ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL); if (!ep) return NULL; + mutex_lock(&iscsi_ep_idr_mutex); + id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO); + if (id < 0) { + mutex_unlock(&iscsi_ep_idr_mutex); + printk(KERN_ERR "Could not allocate endpoint ID. 
Error %d.\n", + id); + goto free_ep; + } + mutex_unlock(&iscsi_ep_idr_mutex); + ep->id = id; ep->dev.class = &iscsi_endpoint_class; - dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id); + dev_set_name(&ep->dev, "ep-%d", id); err = device_register(&ep->dev); if (err) - goto free_ep; + goto free_id; err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group); if (err) @@ -249,6 +241,10 @@ unregister_dev: device_unregister(&ep->dev); return NULL; +free_id: + mutex_lock(&iscsi_ep_idr_mutex); + idr_remove(&iscsi_ep_idr, id); + mutex_unlock(&iscsi_ep_idr_mutex); free_ep: kfree(ep); return NULL; @@ -276,14 +272,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint); */ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) { - struct device *dev; + struct iscsi_endpoint *ep; - dev = class_find_device(&iscsi_endpoint_class, NULL, &handle, - iscsi_match_epid); - if (!dev) - return NULL; + mutex_lock(&iscsi_ep_idr_mutex); + ep = idr_find(&iscsi_ep_idr, handle); + if (!ep) + goto unlock; - return iscsi_dev_to_endpoint(dev); + get_device(&ep->dev); +unlock: + mutex_unlock(&iscsi_ep_idr_mutex); + return ep; } EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint); @@ -2221,10 +2220,10 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag) switch (flag) { case STOP_CONN_RECOVER: - conn->state = ISCSI_CONN_FAILED; + WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); break; case STOP_CONN_TERM: - conn->state = ISCSI_CONN_DOWN; + WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); break; default: iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n", @@ -2236,6 +2235,49 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag) ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n"); } +static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active) +{ + struct iscsi_cls_session *session = iscsi_conn_to_session(conn); + struct iscsi_endpoint *ep; + + ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n"); + WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); + + if (!conn->ep || !session->transport->ep_disconnect) + return; + + ep = conn->ep; + conn->ep = NULL; + + session->transport->unbind_conn(conn, is_active); + session->transport->ep_disconnect(ep); + ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n"); +} + +static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn, + struct iscsi_endpoint *ep, + bool is_active) +{ + /* Check if this was a conn error and the kernel took ownership */ + spin_lock_irq(&conn->lock); + if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { + spin_unlock_irq(&conn->lock); + iscsi_ep_disconnect(conn, is_active); + } else { + spin_unlock_irq(&conn->lock); + ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); + mutex_unlock(&conn->ep_mutex); + + flush_work(&conn->cleanup_work); + /* + * Userspace is now done with the EP so we can release the ref + * iscsi_cleanup_conn_work_fn took. + */ + iscsi_put_endpoint(ep); + mutex_lock(&conn->ep_mutex); + } +} + static int iscsi_if_stop_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) { @@ -2257,11 +2299,24 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport, iscsi_stop_conn(conn, flag); } else { /* + * For offload, when iscsid is restarted it won't know about + * existing endpoints so it can't do a ep_disconnect. We clean + * it up here for userspace. + */ + mutex_lock(&conn->ep_mutex); + if (conn->ep) + iscsi_if_disconnect_bound_ep(conn, conn->ep, true); + mutex_unlock(&conn->ep_mutex); + + /* * Figure out if it was the kernel or userspace initiating this. 
*/ + spin_lock_irq(&conn->lock); if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { + spin_unlock_irq(&conn->lock); iscsi_stop_conn(conn, flag); } else { + spin_unlock_irq(&conn->lock); ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); flush_work(&conn->cleanup_work); @@ -2270,31 +2325,14 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport, * Only clear for recovery to avoid extra cleanup runs during * termination. */ + spin_lock_irq(&conn->lock); clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags); + spin_unlock_irq(&conn->lock); } ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n"); return 0; } -static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active) -{ - struct iscsi_cls_session *session = iscsi_conn_to_session(conn); - struct iscsi_endpoint *ep; - - ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n"); - conn->state = ISCSI_CONN_FAILED; - - if (!conn->ep || !session->transport->ep_disconnect) - return; - - ep = conn->ep; - conn->ep = NULL; - - session->transport->unbind_conn(conn, is_active); - session->transport->ep_disconnect(ep); - ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n"); -} - static void iscsi_cleanup_conn_work_fn(struct work_struct *work) { struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn, @@ -2303,18 +2341,11 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work) mutex_lock(&conn->ep_mutex); /* - * If we are not at least bound there is nothing for us to do. Userspace - * will do a ep_disconnect call if offload is used, but will not be - * doing a stop since there is nothing to clean up, so we have to clear - * the cleanup bit here. + * Get a ref to the ep, so we don't release its ID until after + * userspace is done referencing it in iscsi_if_disconnect_bound_ep. */ - if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) { - ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n"); - clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags); - mutex_unlock(&conn->ep_mutex); - return; - } - + if (conn->ep) + get_device(&conn->ep->dev); iscsi_ep_disconnect(conn, false); if (system_state != SYSTEM_RUNNING) { @@ -2370,11 +2401,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) conn->dd_data = &conn[1]; mutex_init(&conn->ep_mutex); + spin_lock_init(&conn->lock); INIT_LIST_HEAD(&conn->conn_list); INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn); conn->transport = transport; conn->cid = cid; - conn->state = ISCSI_CONN_DOWN; + WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); /* this is released in the dev's release function */ if (!get_device(&session->dev)) @@ -2561,9 +2593,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) struct iscsi_uevent *ev; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev)); + unsigned long flags; + int state; - if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) - queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work); + spin_lock_irqsave(&conn->lock, flags); + /* + * Userspace will only do a stop call if we are at least bound. And, we + * only need to do the in kernel cleanup if in the UP state so cmds can + * be released to upper layers. If in other states just wait for + * userspace to avoid races that can leave the cleanup_work queued. 
+ */ + state = READ_ONCE(conn->state); + switch (state) { + case ISCSI_CONN_BOUND: + case ISCSI_CONN_UP: + if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, + &conn->flags)) { + queue_work(iscsi_conn_cleanup_workq, + &conn->cleanup_work); + } + break; + default: + ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n", + state); + break; + } + spin_unlock_irqrestore(&conn->lock, flags); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) @@ -2913,7 +2968,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) char *data = (char*)ev + sizeof(*ev); struct iscsi_cls_conn *conn; struct iscsi_cls_session *session; - int err = 0, value = 0; + int err = 0, value = 0, state; if (ev->u.set_param.len > PAGE_SIZE) return -EINVAL; @@ -2930,8 +2985,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) session->recovery_tmo = value; break; default: - if ((conn->state == ISCSI_CONN_BOUND) || - (conn->state == ISCSI_CONN_UP)) { + state = READ_ONCE(conn->state); + if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) { err = transport->set_param(conn, ev->u.set_param.param, data, ev->u.set_param.len); } else { @@ -3003,16 +3058,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, } mutex_lock(&conn->ep_mutex); - /* Check if this was a conn error and the kernel took ownership */ - if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { - ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); - mutex_unlock(&conn->ep_mutex); - - flush_work(&conn->cleanup_work); - goto put_ep; - } - - iscsi_ep_disconnect(conn, false); + iscsi_if_disconnect_bound_ep(conn, ep, false); mutex_unlock(&conn->ep_mutex); put_ep: iscsi_put_endpoint(ep); @@ -3715,24 +3761,17 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport, return -EINVAL; mutex_lock(&conn->ep_mutex); + spin_lock_irq(&conn->lock); if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { + spin_unlock_irq(&conn->lock); mutex_unlock(&conn->ep_mutex); ev->r.retcode = -ENOTCONN; return 0; } + spin_unlock_irq(&conn->lock); switch (nlh->nlmsg_type) { case ISCSI_UEVENT_BIND_CONN: - if (conn->ep) { - /* - * For offload boot support where iscsid is restarted - * during the pivot root stage, the ep will be intact - * here when the new iscsid instance starts up and - * reconnects. 
- */ - iscsi_ep_disconnect(conn, true); - } - session = iscsi_session_lookup(ev->u.b_conn.sid); if (!session) { err = -EINVAL; @@ -3743,7 +3782,7 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport, ev->u.b_conn.transport_eph, ev->u.b_conn.is_leading); if (!ev->r.retcode) - conn->state = ISCSI_CONN_BOUND; + WRITE_ONCE(conn->state, ISCSI_CONN_BOUND); if (ev->r.retcode || !transport->ep_connect) break; @@ -3762,7 +3801,8 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport, case ISCSI_UEVENT_START_CONN: ev->r.retcode = transport->start_conn(conn); if (!ev->r.retcode) - conn->state = ISCSI_CONN_UP; + WRITE_ONCE(conn->state, ISCSI_CONN_UP); + break; case ISCSI_UEVENT_SEND_PDU: pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev); @@ -4070,10 +4110,11 @@ static ssize_t show_conn_state(struct device *dev, { struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); const char *state = "unknown"; + int conn_state = READ_ONCE(conn->state); - if (conn->state >= 0 && - conn->state < ARRAY_SIZE(connection_state_names)) - state = connection_state_names[conn->state]; + if (conn_state >= 0 && + conn_state < ARRAY_SIZE(connection_state_names)) + state = connection_state_names[conn_state]; return sysfs_emit(buf, "%s\n", state); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 78ead3369779..a713babaee0f 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -48,6 +48,7 @@ #include <linux/blkpg.h> #include <linux/blk-pm.h> #include <linux/delay.h> +#include <linux/major.h> #include <linux/mutex.h> #include <linux/string_helpers.h> #include <linux/async.h> @@ -3627,7 +3628,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) return 0; if (sdkp->WCE && sdkp->media_present) { - sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); + if (!sdkp->device->silence_suspend) + sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); ret = sd_sync_cache(sdkp, &sshdr); if (ret) { @@ -3649,7 +3651,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) } if (sdkp->device->manage_start_stop) { - sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); + if (!sdkp->device->silence_suspend) + sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); /* an error is not worth aborting a system sleep */ ret = sd_start_stop_device(sdkp, 0); if (ignore_stop_errors) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 8f05248920e8..3c98f08dc25d 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -31,6 +31,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */ #include <linux/errno.h> #include <linux/mtio.h> #include <linux/ioctl.h> +#include <linux/major.h> #include <linux/slab.h> #include <linux/fcntl.h> #include <linux/init.h> diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index a5453f5e87c3..2e690d8a3444 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -7653,6 +7653,21 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) return pqi_revert_to_sis_mode(ctrl_info); } +static void pqi_perform_lockup_action(void) +{ + switch (pqi_lockup_action) { + case PANIC: + panic("FATAL: Smart Family Controller lockup detected"); + break; + case REBOOT: + emergency_restart(); + break; + case NONE: + default: + break; + } +} + static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) { int rc; @@ -7677,8 +7692,15 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) * commands. 
*/ rc = sis_wait_for_ctrl_ready(ctrl_info); - if (rc) + if (rc) { + if (reset_devices) { + dev_err(&ctrl_info->pci_dev->dev, + "kdump init failed with error %d\n", rc); + pqi_lockup_action = REBOOT; + pqi_perform_lockup_action(); + } return rc; + } /* * Get the controller properties. This allows us to determine @@ -8402,21 +8424,6 @@ static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int de return pqi_ctrl_init_resume(ctrl_info); } -static void pqi_perform_lockup_action(void) -{ - switch (pqi_lockup_action) { - case PANIC: - panic("FATAL: Smart Family Controller lockup detected"); - break; - case REBOOT: - emergency_restart(); - break; - case NONE: - default: - break; - } -} - static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, .status = SAM_STAT_CHECK_CONDITION, diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 1203374828b9..652cd81d7775 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -44,6 +44,7 @@ #include <linux/cdrom.h> #include <linux/interrupt.h> #include <linux/init.h> +#include <linux/major.h> #include <linux/blkdev.h> #include <linux/blk-pm.h> #include <linux/mutex.h> @@ -578,7 +579,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, scsi_autopm_get_device(sdev); - if (ret != CDROMCLOSETRAY && ret != CDROMEJECT) { + if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) { ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); if (ret != -ENOSYS) goto put; diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index ddd00efc4882..fbdb5124d7f7 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -41,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi, int result; unsigned char *buffer; - buffer = kmalloc(32, GFP_KERNEL); + buffer = kzalloc(32, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -55,10 +55,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi, cgc.data_direction = DMA_FROM_DEVICE; result = sr_do_ioctl(cd, &cgc); + if (result) + goto err; tochdr->cdth_trk0 = buffer[2]; tochdr->cdth_trk1 = buffer[3]; +err: kfree(buffer); return result; } @@ -71,7 +74,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi, int result; unsigned char *buffer; - buffer = kmalloc(32, GFP_KERNEL); + buffer = kzalloc(32, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -86,6 +89,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi, cgc.data_direction = DMA_FROM_DEVICE; result = sr_do_ioctl(cd, &cgc); + if (result) + goto err; tocentry->cdte_ctrl = buffer[5] & 0xf; tocentry->cdte_adr = buffer[5] >> 4; @@ -98,6 +103,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi, tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8) + buffer[10]) << 8) + buffer[11]; +err: kfree(buffer); return result; } @@ -384,7 +390,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn) { Scsi_CD *cd = cdi->handle; struct packet_command cgc; - char *buffer = kmalloc(32, GFP_KERNEL); + char *buffer = kzalloc(32, GFP_KERNEL); int result; if (!buffer) @@ -400,10 +406,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn) cgc.data_direction = DMA_FROM_DEVICE; cgc.timeout = IOCTL_TIMEOUT; result = sr_do_ioctl(cd, &cgc); + if (result) + goto err; memcpy(mcn->medium_catalog_number, buffer + 9, 13); mcn->medium_catalog_number[13] = 0; +err: kfree(buffer); return result; } diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index ae8636d3780b..9933722acfd9 100644 --- 
a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -32,6 +32,7 @@ static const char *verstr = "20160209"; #include <linux/slab.h> #include <linux/errno.h> #include <linux/mtio.h> +#include <linux/major.h> #include <linux/cdrom.h> #include <linux/ioctl.h> #include <linux/fcntl.h> diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index f76692053ca1..e892b9feffb1 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -428,6 +428,12 @@ static int ufs_intel_adl_init(struct ufs_hba *hba) return ufs_intel_common_init(hba); } +static int ufs_intel_mtl_init(struct ufs_hba *hba) +{ + hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN; + return ufs_intel_common_init(hba); +} + static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { .name = "intel-pci", .init = ufs_intel_common_init, @@ -465,6 +471,16 @@ static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = { .device_reset = ufs_intel_device_reset, }; +static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = { + .name = "intel-pci", + .init = ufs_intel_mtl_init, + .exit = ufs_intel_common_exit, + .hce_enable_notify = ufs_intel_hce_enable_notify, + .link_startup_notify = ufs_intel_link_startup_notify, + .resume = ufs_intel_resume, + .device_reset = ufs_intel_device_reset, +}; + #ifdef CONFIG_PM_SLEEP static int ufshcd_pci_restore(struct device *dev) { @@ -579,6 +595,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops }, { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { } /* terminate list */ }; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index f489954e4632..b55e0a07363f 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -125,8 +125,9 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs); enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, - UFSHCD_CMD_PER_LUN = 32, - UFSHCD_CAN_QUEUE = 32, + UFSHCD_NUM_RESERVED = 1, + UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED, + UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED, }; /* UFSHCD error handling flags */ @@ -357,7 +358,7 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, enum ufs_trace_str_t str_t) { - u64 lba; + u64 lba = 0; u8 opcode = 0, group_id = 0; u32 intr, doorbell; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; @@ -374,7 +375,6 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, return; opcode = cmd->cmnd[0]; - lba = scsi_get_lba(cmd); if (opcode == READ_10 || opcode == WRITE_10) { /* @@ -382,6 +382,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, */ transfer_len = be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len); + lba = scsi_get_lba(cmd); if (opcode == WRITE_10) group_id = lrbp->cmd->cmnd[6]; } else if (opcode == UNMAP) { @@ -389,6 +390,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, * The number of Bytes to be unmapped beginning with the lba. 
*/ transfer_len = blk_rq_bytes(rq); + lba = scsi_get_lba(cmd); } intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); @@ -575,7 +577,12 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba) "INVALID MODE", }; - dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", + /* + * Using dev_dbg to avoid messages during runtime PM to avoid + * never-ending cycles of messages written back to storage by user space + * causing runtime resume, causing more messages and so on. + */ + dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", __func__, hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, @@ -2185,6 +2192,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; hba->nutmrs = ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; + hba->reserved_slot = hba->nutrs - 1; /* Read crypto capabilities */ err = ufshcd_hba_init_crypto_capabilities(hba); @@ -2910,30 +2918,15 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, int timeout) { - struct request_queue *q = hba->cmd_queue; DECLARE_COMPLETION_ONSTACK(wait); - struct request *req; + const u32 tag = hba->reserved_slot; struct ufshcd_lrb *lrbp; int err; - int tag; - down_read(&hba->clk_scaling_lock); + /* Protects use of hba->reserved_slot. */ + lockdep_assert_held(&hba->dev_cmd.lock); - /* - * Get free slot, sleep if slots are unavailable. - * Even though we use wait_event() which sleeps indefinitely, - * the maximum wait time is bounded by SCSI request timeout. - */ - req = blk_get_request(q, REQ_OP_DRV_OUT, 0); - if (IS_ERR(req)) { - err = PTR_ERR(req); - goto out_unlock; - } - tag = req->tag; - WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); - /* Set the timeout such that the SCSI error handler is not activated. */ - req->timeout = msecs_to_jiffies(2 * timeout); - blk_mq_start_request(req); + down_read(&hba->clk_scaling_lock); lrbp = &hba->lrb[tag]; WARN_ON(lrbp->cmd); @@ -2951,8 +2944,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); out: - blk_put_request(req); -out_unlock: up_read(&hba->clk_scaling_lock); return err; } @@ -4982,6 +4973,12 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) pm_runtime_get_noresume(&sdev->sdev_gendev); else if (ufshcd_is_rpm_autosuspend_allowed(hba)) sdev->rpm_autosuspend = 1; + /* + * Do not print messages during runtime PM to avoid never-ending cycles + * of messages written back to storage by user space causing runtime + * resume, causing more messages and so on. + */ + sdev->silence_suspend = 1; ufshcd_crypto_setup_rq_keyslot_manager(hba, q); @@ -6640,28 +6637,16 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, enum query_opcode desc_op) { - struct request_queue *q = hba->cmd_queue; DECLARE_COMPLETION_ONSTACK(wait); - struct request *req; + const u32 tag = hba->reserved_slot; struct ufshcd_lrb *lrbp; int err = 0; - int tag; u8 upiu_flags; - down_read(&hba->clk_scaling_lock); + /* Protects use of hba->reserved_slot. 
*/ + lockdep_assert_held(&hba->dev_cmd.lock); - req = blk_get_request(q, REQ_OP_DRV_OUT, 0); - if (IS_ERR(req)) { - err = PTR_ERR(req); - goto out_unlock; - } - tag = req->tag; - WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); - - if (unlikely(test_bit(tag, &hba->outstanding_reqs))) { - err = -EBUSY; - goto out; - } + down_read(&hba->clk_scaling_lock); lrbp = &hba->lrb[tag]; WARN_ON(lrbp->cmd); @@ -6730,9 +6715,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); -out: - blk_put_request(req); -out_unlock: up_read(&hba->clk_scaling_lock); return err; } @@ -7229,7 +7211,13 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || !hba->vreg_info.vccq2) { - dev_err(hba->dev, + /* + * Using dev_dbg to avoid messages during runtime PM to avoid + * never-ending cycles of messages written back to storage by + * user space causing runtime resume, causing more messages and + * so on. + */ + dev_dbg(hba->dev, "%s: Regulator capability was not set, actvIccLevel=%d", __func__, icc_level); goto out; @@ -9423,8 +9411,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Configure LRB */ ufshcd_host_memory_configure(hba); - host->can_queue = hba->nutrs; - host->cmd_per_lun = hba->nutrs; + host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; + host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED; host->max_id = UFSHCD_MAX_ID; host->max_lun = UFS_MAX_LUNS; host->max_channel = UFSHCD_MAX_CHANNEL; diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 07ada6676c3b..d470a52ff24c 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -725,6 +725,7 @@ struct ufs_hba_monitor { * @capabilities: UFS Controller Capabilities * @nutrs: Transfer Request Queue depth supported by controller * @nutmrs: Task Management Queue depth supported by controller + * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock. 
* @ufs_version: UFS Version to which controller complies * @vops: pointer to variant specific operations * @priv: pointer to variant specific private data @@ -813,6 +814,7 @@ struct ufs_hba { u32 capabilities; int nutrs; int nutmrs; + u32 reserved_slot; u32 ufs_version; const struct ufs_hba_variant_ops *vops; struct ufs_hba_variant_params *vps; diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index a86d0cc50de2..f7eaf64293a4 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -870,12 +870,6 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) struct ufshpb_region *rgn, *victim_rgn = NULL; list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) { - if (!rgn) { - dev_err(&hpb->sdev_ufs_lu->sdev_dev, - "%s: no region allocated\n", - __func__); - return NULL; - } if (ufshpb_check_srgns_issue_state(hpb, rgn)) continue; @@ -891,6 +885,11 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) break; } + if (!victim_rgn) + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "%s: no region allocated\n", + __func__); + return victim_rgn; } diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index 0204e314b482..17b8c8884087 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -233,12 +233,11 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info, return; for (i = 0; i < shadow->nr_grants; i++) { - if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) { + if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) { shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME "grant still in use by backend\n"); BUG(); } - gnttab_end_foreign_access(shadow->gref[i], 0, 0UL); } kfree(shadow->sg); diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c index 27b9e2baab1a..7acf9193a9e8 100644 --- a/drivers/scsi/zorro7xx.c +++ b/drivers/scsi/zorro7xx.c @@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z) scsi_remove_host(host); NCR_700_release(host); + if (host->base > 0x01000000) + iounmap(hostdata->base); kfree(hostdata); free_irq(host->irq, host); zorro_release_device(z); diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig index 1914f33f0b3b..09bf81b295fd 100644 --- a/drivers/soc/aspeed/Kconfig +++ b/drivers/soc/aspeed/Kconfig @@ -108,6 +108,13 @@ config ASPEED_VGA_SHAREDMEM specification, BIOS will transfer whole SMBIOS table to VGA memory, and BMC can get the table from VGA memory through this driver. +config ASPEED_SBC + bool "ASPEED Secure Boot Controller driver" + default MACH_ASPEED_G6 + help + Say yes to provide information about the secure boot controller in + debugfs. 
+ endmenu endif diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile index 0ff455f0d0d1..6b498ad1537a 100644 --- a/drivers/soc/aspeed/Makefile +++ b/drivers/soc/aspeed/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o obj-$(CONFIG_ASPEED_UART_ROUTING) += aspeed-uart-routing.o obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o +obj-$(CONFIG_ASPEED_SBC) += aspeed-sbc.o obj-$(CONFIG_ASPEED_XDMA) += aspeed-xdma.o obj-$(CONFIG_ASPEED_VGA_SHAREDMEM) += aspeed-vga-sharedmem.o obj-$(CONFIG_ASPEED_MCTP) += aspeed-mctp.o diff --git a/drivers/soc/aspeed/aspeed-sbc.c b/drivers/soc/aspeed/aspeed-sbc.c new file mode 100644 index 000000000000..be4497b418c4 --- /dev/null +++ b/drivers/soc/aspeed/aspeed-sbc.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright 2022 IBM Corp. */ + +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/debugfs.h> + +#define SEC_STATUS 0x14 +#define ABR_IMAGE_SOURCE BIT(13) +#define OTP_PROTECTED BIT(8) +#define LOW_SEC_KEY BIT(7) +#define SECURE_BOOT BIT(6) +#define UART_BOOT BIT(5) + +struct sbe { + u8 abr_image; + u8 low_security_key; + u8 otp_protected; + u8 secure_boot; + u8 invert; + u8 uart_boot; +}; + +static struct sbe sbe; + +static int __init aspeed_sbc_init(void) +{ + struct device_node *np; + void __iomem *base; + struct dentry *sbc_dir; + u32 security_status; + + /* AST2600 only */ + np = of_find_compatible_node(NULL, NULL, "aspeed,ast2600-sbc"); + if (!of_device_is_available(np)) + return -ENODEV; + + base = of_iomap(np, 0); + if (!base) { + of_node_put(np); + return -ENODEV; + } + + security_status = readl(base + SEC_STATUS); + + iounmap(base); + of_node_put(np); + + sbe.abr_image = !!(security_status & ABR_IMAGE_SOURCE); + sbe.low_security_key = !!(security_status & LOW_SEC_KEY); + sbe.otp_protected = !!(security_status & OTP_PROTECTED); + sbe.secure_boot = !!(security_status & SECURE_BOOT); + /* Invert the bit, as 1 is boot from SPI/eMMC */ + sbe.uart_boot = !(security_status & UART_BOOT); + + pr_info("AST2600 secure boot %s\n", sbe.secure_boot ? "enabled" : "disabled"); + + sbc_dir = debugfs_create_dir("sbc", arch_debugfs_dir); + if (IS_ERR(sbc_dir)) + return PTR_ERR(sbc_dir); + + debugfs_create_u8("abr_image", 0444, sbc_dir, &sbe.abr_image); + debugfs_create_u8("low_security_key", 0444, sbc_dir, &sbe.low_security_key); + debugfs_create_u8("otp_protected", 0444, sbc_dir, &sbe.otp_protected); + debugfs_create_u8("uart_boot", 0444, sbc_dir, &sbe.uart_boot); + debugfs_create_u8("secure_boot", 0444, sbc_dir, &sbe.secure_boot); + + return 0; +} + +subsys_initcall(aspeed_sbc_init); diff --git a/drivers/soc/aspeed/aspeed-xdma.c b/drivers/soc/aspeed/aspeed-xdma.c index 48cfe30c90ad..579937ee3745 100644 --- a/drivers/soc/aspeed/aspeed-xdma.c +++ b/drivers/soc/aspeed/aspeed-xdma.c @@ -253,6 +253,9 @@ struct aspeed_xdma_client { u32 size; }; +#define CREATE_TRACE_POINTS +#include <trace/events/xdma.h> + static u32 aspeed_xdma_readl(struct aspeed_xdma *ctx, u8 reg) { u32 v = readl(ctx->base + reg); @@ -448,6 +451,7 @@ static int aspeed_xdma_start(struct aspeed_xdma *ctx, unsigned int num_cmds, ctx->upstream = upstream; for (i = 0; i < num_cmds; ++i) { + trace_xdma_start(ctx, &cmds[i]); /* * Use memcpy_toio here to get some barriers before starting * the operation. 
The command(s) need to be in physical memory @@ -490,6 +494,8 @@ static irqreturn_t aspeed_xdma_irq(int irq, void *arg) spin_lock(&ctx->engine_lock); status = aspeed_xdma_readl(ctx, ctx->chip->regs.status); + trace_xdma_irq(status); + if (status & ctx->chip->status_bits.ds_dirty) { aspeed_xdma_done(ctx, true); } else { @@ -514,6 +520,8 @@ static void aspeed_xdma_reset(struct aspeed_xdma *ctx) { unsigned long flags; + trace_xdma_reset(ctx); + reset_control_assert(ctx->reset); usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US, XDMA_ENGINE_SETUP_TIME_MAX_US); @@ -544,7 +552,7 @@ static irqreturn_t aspeed_xdma_pcie_irq(int irq, void *arg) { struct aspeed_xdma *ctx = arg; - dev_dbg(ctx->dev, "PCI-E reset requested.\n"); + trace_xdma_perst(ctx); spin_lock(&ctx->engine_lock); if (ctx->in_reset) { @@ -682,6 +690,7 @@ static void aspeed_xdma_vma_close(struct vm_area_struct *vma) gen_pool_free(client->ctx->pool, (unsigned long)client->virt, client->size); + trace_xdma_unmap(client); client->virt = NULL; client->phys = 0; @@ -706,6 +715,7 @@ static int aspeed_xdma_mmap(struct file *file, struct vm_area_struct *vma) client->virt = gen_pool_dma_alloc(ctx->pool, client->size, &client->phys); if (!client->virt) { + trace_xdma_mmap_error(client, 0UL); client->phys = 0; client->size = 0; return -ENOMEM; @@ -725,12 +735,14 @@ static int aspeed_xdma_mmap(struct file *file, struct vm_area_struct *vma) gen_pool_free(ctx->pool, (unsigned long)client->virt, client->size); + trace_xdma_mmap_error(client, vma->vm_start); client->virt = NULL; client->phys = 0; client->size = 0; return rc; } + trace_xdma_mmap(client); dev_dbg(ctx->dev, "mmap: v[%08lx] to p[%08x], s[%08x]\n", vma->vm_start, (u32)client->phys, client->size); @@ -776,9 +788,11 @@ static int aspeed_xdma_release(struct inode *inode, struct file *file) if (reset) aspeed_xdma_reset(ctx); - if (client->virt) + if (client->virt) { gen_pool_free(ctx->pool, (unsigned long)client->virt, client->size); + trace_xdma_unmap(client); + } kfree(client); kobject_put(&ctx->kobj); diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c index d5e9a5f2c087..75eabfb916cb 100644 --- a/drivers/soc/fsl/guts.c +++ b/drivers/soc/fsl/guts.c @@ -28,7 +28,6 @@ struct fsl_soc_die_attr { static struct guts *guts; static struct soc_device_attribute soc_dev_attr; static struct soc_device *soc_dev; -static struct device_node *root; /* SoC die attribute definition for QorIQ platform */ @@ -138,7 +137,7 @@ static u32 fsl_guts_get_svr(void) static int fsl_guts_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; + struct device_node *root, *np = pdev->dev.of_node; struct device *dev = &pdev->dev; struct resource *res; const struct fsl_soc_die_attr *soc_die; @@ -161,8 +160,14 @@ static int fsl_guts_probe(struct platform_device *pdev) root = of_find_node_by_path("/"); if (of_property_read_string(root, "model", &machine)) of_property_read_string_index(root, "compatible", 0, &machine); - if (machine) - soc_dev_attr.machine = machine; + if (machine) { + soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); + if (!soc_dev_attr.machine) { + of_node_put(root); + return -ENOMEM; + } + } + of_node_put(root); svr = fsl_guts_get_svr(); soc_die = fsl_soc_die_match(svr, fsl_soc_die); @@ -197,7 +202,6 @@ static int fsl_guts_probe(struct platform_device *pdev) static int fsl_guts_remove(struct platform_device *dev) { soc_device_unregister(soc_dev); - of_node_put(root); return 0; } diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c index 
e277c827bdf3..a5e2d0e5ab51 100644 --- a/drivers/soc/fsl/qe/qe_io.c +++ b/drivers/soc/fsl/qe/qe_io.c @@ -35,6 +35,8 @@ int par_io_init(struct device_node *np) if (ret) return ret; par_io = ioremap(res.start, resource_size(&res)); + if (!par_io) + return -ENOMEM; if (!of_property_read_u32(np, "num-ports", &num_ports)) num_par_io_ports = num_ports; diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index b762bc40f56b..afd2fd74802d 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -443,6 +443,9 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no pd->genpd.power_off = scpsys_power_off; pd->genpd.power_on = scpsys_power_on; + if (MTK_SCPD_CAPS(pd, MTK_SCPD_ACTIVE_WAKEUP)) + pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP; + if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF)) pm_genpd_init(&pd->genpd, NULL, true); else diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c index f1875dc31ae2..85f82e195ef8 100644 --- a/drivers/soc/qcom/ocmem.c +++ b/drivers/soc/qcom/ocmem.c @@ -206,6 +206,7 @@ struct ocmem *of_get_ocmem(struct device *dev) ocmem = platform_get_drvdata(pdev); if (!ocmem) { dev_err(dev, "Cannot get ocmem\n"); + put_device(&pdev->dev); return ERR_PTR(-ENODEV); } return ocmem; diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index 536c3e4114fb..8583c1e558ae 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -8,10 +8,12 @@ #include <linux/io.h> #include <linux/mailbox_client.h> #include <linux/module.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/thermal.h> #include <linux/slab.h> +#include <linux/soc/qcom/qcom_aoss.h> #define QMP_DESC_MAGIC 0x0 #define QMP_DESC_VERSION 0x4 @@ -223,11 +225,14 @@ static bool qmp_message_empty(struct qmp *qmp) * * Return: 0 on success, negative errno on failure */ -static int qmp_send(struct qmp *qmp, const void *data, size_t len) +int qmp_send(struct qmp *qmp, const void *data, size_t len) { long time_left; int ret; + if (WARN_ON(IS_ERR_OR_NULL(qmp) || !data)) + return -EINVAL; + if (WARN_ON(len + sizeof(u32) > qmp->size)) return -EINVAL; @@ -261,6 +266,7 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len) return ret; } +EXPORT_SYMBOL(qmp_send); static int qmp_qdss_clk_prepare(struct clk_hw *hw) { @@ -519,6 +525,55 @@ static void qmp_cooling_devices_remove(struct qmp *qmp) thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev); } +/** + * qmp_get() - get a qmp handle from a device + * @dev: client device pointer + * + * Return: handle to qmp device on success, ERR_PTR() on failure + */ +struct qmp *qmp_get(struct device *dev) +{ + struct platform_device *pdev; + struct device_node *np; + struct qmp *qmp; + + if (!dev || !dev->of_node) + return ERR_PTR(-EINVAL); + + np = of_parse_phandle(dev->of_node, "qcom,qmp", 0); + if (!np) + return ERR_PTR(-ENODEV); + + pdev = of_find_device_by_node(np); + of_node_put(np); + if (!pdev) + return ERR_PTR(-EINVAL); + + qmp = platform_get_drvdata(pdev); + + if (!qmp) { + put_device(&pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } + return qmp; +} +EXPORT_SYMBOL(qmp_get); + +/** + * qmp_put() - release a qmp handle + * @qmp: qmp handle obtained from qmp_get() + */ +void qmp_put(struct qmp *qmp) +{ + /* + * Match get_device() inside of_find_device_by_node() in + * qmp_get() + */ + if (!IS_ERR_OR_NULL(qmp)) + put_device(qmp->dev); +} +EXPORT_SYMBOL(qmp_put); 
+ static int qmp_probe(struct platform_device *pdev) { struct resource *res; @@ -548,7 +603,7 @@ static int qmp_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT, + ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0, "aoss-qmp", qmp); if (ret < 0) { dev_err(&pdev->dev, "failed to request interrupt\n"); @@ -615,6 +670,7 @@ static struct platform_driver qmp_driver = { .driver = { .name = "qcom_aoss_qmp", .of_match_table = qmp_dt_match, + .suppress_bind_attrs = true, }, .probe = qmp_probe, .remove = qmp_remove, diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c index dbf494e92574..9f07274b0d28 100644 --- a/drivers/soc/qcom/rpmpd.c +++ b/drivers/soc/qcom/rpmpd.c @@ -546,6 +546,9 @@ static int rpmpd_probe(struct platform_device *pdev) data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains), GFP_KERNEL); + if (!data->domains) + return -ENOMEM; + data->num_domains = num; for (i = 0; i < num; i++) { diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 09abd17065ba..8b3ff44fd901 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -449,9 +449,9 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) return PTR_ERR(m3_ipc->ipc_mem_base); irq = platform_get_irq(pdev, 0); - if (!irq) { + if (irq < 0) { dev_err(&pdev->dev, "no irq resource\n"); - return -ENXIO; + return irq; } ret = devm_request_irq(dev, irq, wkup_m3_txev_handler, diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c index 0ca2a3e3a02e..747983743a14 100644 --- a/drivers/soundwire/dmi-quirks.c +++ b/drivers/soundwire/dmi-quirks.c @@ -59,7 +59,7 @@ static const struct dmi_system_id adr_remap_quirk_table[] = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Conv"), }, .driver_data = (void *)intel_tgl_bios, }, diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 78037ffdb09b..f72d36654ac2 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -448,8 +448,8 @@ static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable) /* Clear wake status */ wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS); - wake_sts |= (SDW_SHIM_WAKEEN_ENABLE << link_id); - intel_writew(shim, SDW_SHIM_WAKESTS_STATUS, wake_sts); + wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id); + intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts); } mutex_unlock(sdw->link_res->shim_lock); } diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 92d9610df1fd..938017a60c8e 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op) static bool atmel_qspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { + if (!spi_mem_default_supports_op(mem, op)) + return false; + if (atmel_qspi_find_mode(op) < 0) return false; diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index ae8c86be7786..bd7c7fc73961 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -1033,7 +1033,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem, addr = op->addr.val; len = op->data.nbytes; - if (bcm_qspi_bspi_ver_three(qspi) == true) { + if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) { /* * The address coming into this function is a raw flash offset. 
* But for BSPI <= V3, we need to convert it to a remapped BSPI @@ -1052,7 +1052,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem, len < 4) mspi_read = true; - if (mspi_read) + if (!has_bspi(qspi) || mspi_read) return bcm_qspi_mspi_exec_mem_op(spi, op); ret = bcm_qspi_bspi_set_mode(qspi, op, 0); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 101cc71bffa7..75680eecd2f7 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -18,6 +18,7 @@ #include <linux/iopoll.h> #include <linux/jiffies.h> #include <linux/kernel.h> +#include <linux/log2.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/of.h> @@ -93,12 +94,6 @@ struct cqspi_driver_platdata { #define CQSPI_TIMEOUT_MS 500 #define CQSPI_READ_TIMEOUT_MS 10 -/* Instruction type */ -#define CQSPI_INST_TYPE_SINGLE 0 -#define CQSPI_INST_TYPE_DUAL 1 -#define CQSPI_INST_TYPE_QUAD 2 -#define CQSPI_INST_TYPE_OCTAL 3 - #define CQSPI_DUMMY_CLKS_PER_BYTE 8 #define CQSPI_DUMMY_BYTES_MAX 4 #define CQSPI_DUMMY_CLKS_MAX 31 @@ -322,10 +317,6 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr) static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata, const struct spi_mem_op *op) { - f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE; - f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE; - f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; - /* * For an op to be DTR, cmd phase along with every other non-empty * phase should have dtr field set to 1. If an op phase has zero @@ -335,32 +326,23 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata, (!op->addr.nbytes || op->addr.dtr) && (!op->data.nbytes || op->data.dtr); - switch (op->data.buswidth) { - case 0: - break; - case 1: - f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; - break; - case 2: - f_pdata->data_width = CQSPI_INST_TYPE_DUAL; - break; - case 4: - f_pdata->data_width = CQSPI_INST_TYPE_QUAD; - break; - case 8: - f_pdata->data_width = CQSPI_INST_TYPE_OCTAL; - break; - default: - return -EINVAL; - } + f_pdata->inst_width = 0; + if (op->cmd.buswidth) + f_pdata->inst_width = ilog2(op->cmd.buswidth); + + f_pdata->addr_width = 0; + if (op->addr.buswidth) + f_pdata->addr_width = ilog2(op->addr.buswidth); + + f_pdata->data_width = 0; + if (op->data.buswidth) + f_pdata->data_width = ilog2(op->data.buswidth); /* Right now we only support 8-8-8 DTR mode. */ if (f_pdata->dtr) { switch (op->cmd.buswidth) { case 0: - break; case 8: - f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL; break; default: return -EINVAL; @@ -368,9 +350,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata, switch (op->addr.buswidth) { case 0: - break; case 8: - f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL; break; default: return -EINVAL; @@ -378,9 +358,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata, switch (op->data.buswidth) { case 0: - break; case 8: - f_pdata->data_width = CQSPI_INST_TYPE_OCTAL; break; default: return -EINVAL; @@ -1248,9 +1226,24 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem, all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr && !op->data.dtr; - /* Mixed DTR modes not supported. */ - if (!(all_true || all_false)) + if (all_true) { + /* Right now we only support 8-8-8 DTR mode. 
*/ + if (op->cmd.nbytes && op->cmd.buswidth != 8) + return false; + if (op->addr.nbytes && op->addr.buswidth != 8) + return false; + if (op->data.nbytes && op->data.buswidth != 8) + return false; + } else if (all_false) { + /* Only 1-1-X ops are supported without DTR */ + if (op->cmd.nbytes && op->cmd.buswidth > 1) + return false; + if (op->addr.nbytes && op->addr.buswidth > 1) + return false; + } else { + /* Mixed DTR modes are not supported. */ return false; + } if (all_true) return spi_mem_dtr_supports_op(mem, op); diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c index b6c7467f0b59..d403a7a3021d 100644 --- a/drivers/spi/spi-fsi.c +++ b/drivers/spi/spi-fsi.c @@ -25,6 +25,7 @@ #define SPI_FSI_BASE 0x70000 #define SPI_FSI_INIT_TIMEOUT_MS 1000 +#define SPI_FSI_STATUS_TIMEOUT_MS 100 #define SPI_FSI_MAX_RX_SIZE 8 #define SPI_FSI_MAX_TX_SIZE 40 @@ -299,6 +300,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, struct spi_transfer *transfer) { int rc = 0; + unsigned long end; u64 status = 0ULL; if (transfer->tx_buf) { @@ -315,10 +317,14 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, if (rc) return rc; + end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS); do { rc = fsi_spi_status(ctx, &status, "TX"); if (rc) return rc; + + if (time_after(jiffies, end)) + return -ETIMEDOUT; } while (status & SPI_FSI_STATUS_TDR_FULL); sent += nb; @@ -329,10 +335,14 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, u8 *rx = transfer->rx_buf; while (transfer->len > recv) { + end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS); do { rc = fsi_spi_status(ctx, &status, "RX"); if (rc) return rc; + + if (time_after(jiffies, end)) + return -ETIMEDOUT; } while (!(status & SPI_FSI_STATUS_RDR_FULL)); rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in); diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 753bd313e6fd..2ca19b01948a 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -43,8 +43,11 @@ #define SPI_CFG1_PACKET_LOOP_OFFSET 8 #define SPI_CFG1_PACKET_LENGTH_OFFSET 16 #define SPI_CFG1_GET_TICK_DLY_OFFSET 29 +#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1 30 #define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000 +#define SPI_CFG1_GET_TICK_DLY_MASK_V1 0xc0000000 + #define SPI_CFG1_CS_IDLE_MASK 0xff #define SPI_CFG1_PACKET_LOOP_MASK 0xff00 #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000 @@ -346,9 +349,15 @@ static int mtk_spi_prepare_message(struct spi_master *master, /* tick delay */ reg_val = readl(mdata->base + SPI_CFG1_REG); - reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK; - reg_val |= ((chip_config->tick_delay & 0x7) - << SPI_CFG1_GET_TICK_DLY_OFFSET); + if (mdata->dev_comp->enhance_timing) { + reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK; + reg_val |= ((chip_config->tick_delay & 0x7) + << SPI_CFG1_GET_TICK_DLY_OFFSET); + } else { + reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1; + reg_val |= ((chip_config->tick_delay & 0x3) + << SPI_CFG1_GET_TICK_DLY_OFFSET_V1); + } writel(reg_val, mdata->base + SPI_CFG1_REG); /* set hw cs timing */ diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c index 5c93730615f8..6d203477c04b 100644 --- a/drivers/spi/spi-mtk-nor.c +++ b/drivers/spi/spi-mtk-nor.c @@ -909,7 +909,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev) static int __maybe_unused mtk_nor_resume(struct device *dev) { - return pm_runtime_force_resume(dev); + struct spi_controller *ctlr = dev_get_drvdata(dev); + struct mtk_nor *sp = spi_controller_get_devdata(ctlr); + int ret; + + ret = pm_runtime_force_resume(dev); + if (ret) + return ret; + 
+ mtk_nor_init(sp); + + return 0; } static const struct dev_pm_ops mtk_nor_pm_ops = { diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index 45889947afed..03fce4493aa7 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -304,25 +304,21 @@ static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf, writel(data, mxic->regs + TXD(nbytes % 4)); + ret = readl_poll_timeout(mxic->regs + INT_STS, sts, + sts & INT_TX_EMPTY, 0, USEC_PER_SEC); + if (ret) + return ret; + + ret = readl_poll_timeout(mxic->regs + INT_STS, sts, + sts & INT_RX_NOT_EMPTY, 0, + USEC_PER_SEC); + if (ret) + return ret; + + data = readl(mxic->regs + RXD); if (rxbuf) { - ret = readl_poll_timeout(mxic->regs + INT_STS, sts, - sts & INT_TX_EMPTY, 0, - USEC_PER_SEC); - if (ret) - return ret; - - ret = readl_poll_timeout(mxic->regs + INT_STS, sts, - sts & INT_RX_NOT_EMPTY, 0, - USEC_PER_SEC); - if (ret) - return ret; - - data = readl(mxic->regs + RXD); data >>= (8 * (4 - nbytes)); memcpy(rxbuf + pos, &data, nbytes); - WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY); - } else { - readl(mxic->regs + RXD); } WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY); diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 2e134eb4bd2c..6502fda6243e 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -76,14 +76,23 @@ static bool lpss_dma_filter(struct dma_chan *chan, void *param) return true; } +static void lpss_dma_put_device(void *dma_dev) +{ + pci_dev_put(dma_dev); +} + static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) { struct pci_dev *dma_dev; + int ret; c->num_chipselect = 1; c->max_clk_rate = 50000000; dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); + ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev); + if (ret) + return ret; if (c->tx_param) { struct dw_dma_slave *slave = c->tx_param; @@ -107,8 +116,9 @@ static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) { - struct pci_dev *dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0)); struct dw_dma_slave *tx, *rx; + struct pci_dev *dma_dev; + int ret; switch (PCI_FUNC(dev->devfn)) { case 0: @@ -133,6 +143,11 @@ static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) return -ENODEV; } + dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0)); + ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev); + if (ret) + return ret; + tx = c->tx_param; tx->dma_dev = &dma_dev->dev; diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 553b6b9d0222..c6a1bb09be05 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -585,6 +585,12 @@ static int rockchip_spi_slave_abort(struct spi_controller *ctlr) { struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); + if (atomic_read(&rs->state) & RXDMA) + dmaengine_terminate_sync(ctlr->dma_rx); + if (atomic_read(&rs->state) & TXDMA) + dmaengine_terminate_sync(ctlr->dma_tx); + atomic_set(&rs->state, 0); + spi_enable_chip(rs, false); rs->slave_abort = true; spi_finalize_current_transfer(ctlr); @@ -654,7 +660,7 @@ static int rockchip_spi_probe(struct platform_device *pdev) struct spi_controller *ctlr; struct resource *mem; struct device_node *np = pdev->dev.of_node; - u32 rsd_nsecs; + u32 rsd_nsecs, num_cs; bool slave_mode; slave_mode = of_property_read_bool(np, "spi-slave"); @@ -764,8 +770,9 @@ static int rockchip_spi_probe(struct 
platform_device *pdev) * rk spi0 has two native cs, spi1..5 one cs only * if num-cs is missing in the dts, default to 1 */ - if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect)) - ctlr->num_chipselect = 1; + if (of_property_read_u32(np, "num-cs", &num_cs)) + num_cs = 1; + ctlr->num_chipselect = num_cs; ctlr->use_gpio_descriptors = true; } ctlr->dev.of_node = pdev->dev.of_node; diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index e9de1d958bbd..8f345247a8c3 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -1352,6 +1352,10 @@ static int tegra_spi_probe(struct platform_device *pdev) tspi->phys = r->start; spi_irq = platform_get_irq(pdev, 0); + if (spi_irq < 0) { + ret = spi_irq; + goto exit_free_master; + } tspi->irq = spi_irq; tspi->clk = devm_clk_get(&pdev->dev, "spi"); diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 3226c4e1c7c0..3b44ca455049 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1003,14 +1003,8 @@ static int tegra_slink_probe(struct platform_device *pdev) struct resource *r; int ret, spi_irq; const struct tegra_slink_chip_data *cdata = NULL; - const struct of_device_id *match; - match = of_match_device(tegra_slink_of_match, &pdev->dev); - if (!match) { - dev_err(&pdev->dev, "Error: No device match found\n"); - return -ENODEV; - } - cdata = match->data; + cdata = of_device_get_match_data(&pdev->dev); master = spi_alloc_master(&pdev->dev, sizeof(*tspi)); if (!master) { diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index 2354ca1e3858..7967073c1354 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -1249,6 +1249,8 @@ static int tegra_qspi_probe(struct platform_device *pdev) tqspi->phys = r->start; qspi_irq = platform_get_irq(pdev, 0); + if (qspi_irq < 0) + return qspi_irq; tqspi->irq = qspi_irq; tqspi->clk = devm_clk_get(&pdev->dev, "qspi"); diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index cfa222c9bd5e..78f31b61a2aa 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem, if (op->dummy.nbytes) { tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + memset(tmpbuf, 0xff, op->dummy.nbytes); reinit_completion(&xqspi->data_completion); xqspi->txbuf = tmpbuf; diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 328b6559bb19..2b5afae8ff7f 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -1172,7 +1172,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) goto clk_dis_all; } - dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + if (ret) + goto clk_dis_all; + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS; ctlr->mem_ops = &zynqmp_qspi_mem_ops; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index a42b9e8521ce..d0bbf8f9414d 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -942,10 +942,10 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev, int i, ret; if (vmalloced_buf || kmap_buf) { - desc_len = min_t(int, max_seg_size, PAGE_SIZE); + desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE); sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); } else if (virt_addr_valid(buf)) { - desc_len = min_t(int, max_seg_size, ctlr->max_dma_len); + desc_len = 
min_t(size_t, max_seg_size, ctlr->max_dma_len); sgs = DIV_ROUND_UP(len, desc_len); } else { return -EINVAL; @@ -1072,11 +1072,15 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) if (ctlr->dma_tx) tx_dev = ctlr->dma_tx->device->dev; + else if (ctlr->dma_map_dev) + tx_dev = ctlr->dma_map_dev; else tx_dev = ctlr->dev.parent; if (ctlr->dma_rx) rx_dev = ctlr->dma_rx->device->dev; + else if (ctlr->dma_map_dev) + rx_dev = ctlr->dma_map_dev; else rx_dev = ctlr->dev.parent; diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c index abe9395a0aef..861a154144e6 100644 --- a/drivers/staging/fbtft/fb_st7789v.c +++ b/drivers/staging/fbtft/fb_st7789v.c @@ -144,6 +144,8 @@ static int init_display(struct fbtft_par *par) { int rc; + par->fbtftops.reset(par); + rc = init_tearing_effect_line(par); if (rc) return rc; diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index e390c924ec1c..3c680ed4429c 100644 --- a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -76,14 +76,15 @@ static void tx_complete(void *arg) static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type) { - int ret; + int ret, len; + len = skb->len + ETH_HLEN; ret = netif_rx_ni(skb); if (ret == NET_RX_DROP) { nic->stats.rx_dropped++; } else { nic->stats.rx_packets++; - nic->stats.rx_bytes += skb->len + ETH_HLEN; + nic->stats.rx_bytes += len; } return 0; diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c index fef0055b8990..20183b2ea127 100644 --- a/drivers/staging/iio/adc/ad7280a.c +++ b/drivers/staging/iio/adc/ad7280a.c @@ -107,9 +107,9 @@ static unsigned int ad7280a_devaddr(unsigned int addr) { return ((addr & 0x1) << 4) | - ((addr & 0x2) << 3) | + ((addr & 0x2) << 2) | (addr & 0x4) | - ((addr & 0x8) >> 3) | + ((addr & 0x8) >> 2) | ((addr & 0x10) >> 4); } diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp_acc.c index 9a1751895ab0..28cb271663c4 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_acc.c +++ b/drivers/staging/media/atomisp/pci/atomisp_acc.c @@ -439,6 +439,18 @@ int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd, return 0; } +static void atomisp_acc_unload_some_extensions(struct atomisp_sub_device *asd, + int i, + struct atomisp_acc_fw *acc_fw) +{ + while (--i >= 0) { + if (acc_fw->flags & acc_flag_to_pipe[i].flag) { + atomisp_css_unload_acc_extension(asd, acc_fw->fw, + acc_flag_to_pipe[i].pipe_id); + } + } +} + /* * Appends the loaded acceleration binary extensions to the * current ISP mode. Must be called just before sh_css_start(). 
@@ -479,16 +491,20 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd) acc_fw->fw, acc_flag_to_pipe[i].pipe_id, acc_fw->type); - if (ret) + if (ret) { + atomisp_acc_unload_some_extensions(asd, i, acc_fw); goto error; + } ext_loaded = true; } } ret = atomisp_css_set_acc_parameters(acc_fw); - if (ret < 0) + if (ret < 0) { + atomisp_acc_unload_some_extensions(asd, i, acc_fw); goto error; + } } if (!ext_loaded) @@ -497,6 +513,7 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd) ret = atomisp_css_update_stream(asd); if (ret) { dev_err(isp->dev, "%s: update stream failed.\n", __func__); + atomisp_acc_unload_extensions(asd); goto error; } @@ -504,13 +521,6 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd) return 0; error: - while (--i >= 0) { - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - atomisp_css_unload_acc_extension(asd, acc_fw->fw, - acc_flag_to_pipe[i].pipe_id); - } - } - list_for_each_entry_continue_reverse(acc_fw, &asd->acc.fw, list) { if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT && acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER) diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c index 62dc06e22476..cd0a771454da 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c +++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c @@ -729,6 +729,21 @@ static int axp_regulator_set(struct device *dev, struct gmin_subdev *gs, return 0; } +/* + * Some boards contain a hw-bug where turning eldo2 back on after having turned + * it off causes the CPLM3218 ambient-light-sensor on the image-sensor's I2C bus + * to crash, hanging the bus. Do not turn eldo2 off on these systems. + */ +static const struct dmi_system_id axp_leave_eldo2_on_ids[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"), + DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"), + }, + }, + { } +}; + static int axp_v1p8_on(struct device *dev, struct gmin_subdev *gs) { int ret; @@ -763,6 +778,9 @@ static int axp_v1p8_off(struct device *dev, struct gmin_subdev *gs) if (ret) return ret; + if (dmi_check_system(axp_leave_eldo2_on_ids)) + return 0; + ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v, ELDO_CTRL_REG, gs->eldo2_ctrl_shift, false); return ret; diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c index 6a5ee4607089..c1cda16f2dc0 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c @@ -39,7 +39,7 @@ struct hmm_bo_device bo_device; struct hmm_pool dynamic_pool; struct hmm_pool reserved_pool; -static ia_css_ptr dummy_ptr; +static ia_css_ptr dummy_ptr = mmgr_EXCEPTION; static bool hmm_initialized; struct _hmm_mem_stat hmm_mem_stat; @@ -209,7 +209,7 @@ int hmm_init(void) void hmm_cleanup(void) { - if (!dummy_ptr) + if (dummy_ptr == mmgr_EXCEPTION) return; sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group); @@ -288,7 +288,8 @@ void hmm_free(ia_css_ptr virt) dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt); - WARN_ON(!virt); + if (WARN_ON(virt == mmgr_EXCEPTION)) + return; bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt); diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c index 9cd713c02a45..686d813f5c62 100644 --- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c +++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c @@ -23,7 +23,7 @@ 
static void hantro_h1_set_src_img_ctrl(struct hantro_dev *vpu, reg = H1_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width) | H1_REG_IN_IMG_CTRL_OVRFLR_D4(0) - | H1_REG_IN_IMG_CTRL_OVRFLB_D4(0) + | H1_REG_IN_IMG_CTRL_OVRFLB(0) | H1_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt); vepu_write_relaxed(vpu, reg, H1_REG_IN_IMG_CTRL); } diff --git a/drivers/staging/media/hantro/hantro_h1_regs.h b/drivers/staging/media/hantro/hantro_h1_regs.h index d6e9825bb5c7..30e7e7b920b5 100644 --- a/drivers/staging/media/hantro/hantro_h1_regs.h +++ b/drivers/staging/media/hantro/hantro_h1_regs.h @@ -47,7 +47,7 @@ #define H1_REG_IN_IMG_CTRL 0x03c #define H1_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12) #define H1_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10) -#define H1_REG_IN_IMG_CTRL_OVRFLB_D4(x) ((x) << 6) +#define H1_REG_IN_IMG_CTRL_OVRFLB(x) ((x) << 6) #define H1_REG_IN_IMG_CTRL_FMT(x) ((x) << 2) #define H1_REG_ENC_CTRL0 0x040 #define H1_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26) diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c index 41e33535de55..d35e52374116 100644 --- a/drivers/staging/media/imx/imx7-mipi-csis.c +++ b/drivers/staging/media/imx/imx7-mipi-csis.c @@ -32,7 +32,6 @@ #include <media/v4l2-subdev.h> #define CSIS_DRIVER_NAME "imx7-mipi-csis" -#define CSIS_SUBDEV_NAME CSIS_DRIVER_NAME #define CSIS_PAD_SINK 0 #define CSIS_PAD_SOURCE 1 @@ -311,7 +310,6 @@ struct csi_state { struct reset_control *mrst; struct regulator *mipi_phy_regulator; const struct mipi_csis_info *info; - u8 index; struct v4l2_subdev sd; struct media_pad pads[CSIS_PADS_NUM]; @@ -1303,8 +1301,8 @@ static int mipi_csis_subdev_init(struct csi_state *state) v4l2_subdev_init(sd, &mipi_csis_subdev_ops); sd->owner = THIS_MODULE; - snprintf(sd->name, sizeof(sd->name), "%s.%d", - CSIS_SUBDEV_NAME, state->index); + snprintf(sd->name, sizeof(sd->name), "csis-%s", + dev_name(state->dev)); sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; sd->ctrl_handler = NULL; diff --git a/drivers/staging/media/imx/imx8mq-mipi-csi2.c b/drivers/staging/media/imx/imx8mq-mipi-csi2.c index a6f562009b9a..1d28313dbed7 100644 --- a/drivers/staging/media/imx/imx8mq-mipi-csi2.c +++ b/drivers/staging/media/imx/imx8mq-mipi-csi2.c @@ -398,9 +398,6 @@ static int imx8mq_mipi_csi_s_stream(struct v4l2_subdev *sd, int enable) struct csi_state *state = mipi_sd_to_csi2_state(sd); int ret = 0; - imx8mq_mipi_csi_write(state, CSI2RX_IRQ_MASK, - CSI2RX_IRQ_MASK_ULPS_STATUS_CHANGE); - if (enable) { ret = pm_runtime_resume_and_get(state->dev); if (ret < 0) @@ -696,7 +693,7 @@ err_parse: * Suspend/resume */ -static int imx8mq_mipi_csi_pm_suspend(struct device *dev, bool runtime) +static int imx8mq_mipi_csi_pm_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct csi_state *state = mipi_sd_to_csi2_state(sd); @@ -708,36 +705,21 @@ static int imx8mq_mipi_csi_pm_suspend(struct device *dev, bool runtime) imx8mq_mipi_csi_stop_stream(state); imx8mq_mipi_csi_clk_disable(state); state->state &= ~ST_POWERED; - if (!runtime) - state->state |= ST_SUSPENDED; } mutex_unlock(&state->lock); - ret = icc_set_bw(state->icc_path, 0, 0); - if (ret) - dev_err(dev, "icc_set_bw failed with %d\n", ret); - return ret ? 
-EAGAIN : 0; } -static int imx8mq_mipi_csi_pm_resume(struct device *dev, bool runtime) +static int imx8mq_mipi_csi_pm_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct csi_state *state = mipi_sd_to_csi2_state(sd); int ret = 0; - ret = icc_set_bw(state->icc_path, 0, state->icc_path_bw); - if (ret) { - dev_err(dev, "icc_set_bw failed with %d\n", ret); - return ret; - } - mutex_lock(&state->lock); - if (!runtime && !(state->state & ST_SUSPENDED)) - goto unlock; - if (!(state->state & ST_POWERED)) { state->state |= ST_POWERED; ret = imx8mq_mipi_csi_clk_enable(state); @@ -758,22 +740,60 @@ unlock: static int __maybe_unused imx8mq_mipi_csi_suspend(struct device *dev) { - return imx8mq_mipi_csi_pm_suspend(dev, false); + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct csi_state *state = mipi_sd_to_csi2_state(sd); + int ret; + + ret = imx8mq_mipi_csi_pm_suspend(dev); + if (ret) + return ret; + + state->state |= ST_SUSPENDED; + + return ret; } static int __maybe_unused imx8mq_mipi_csi_resume(struct device *dev) { - return imx8mq_mipi_csi_pm_resume(dev, false); + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct csi_state *state = mipi_sd_to_csi2_state(sd); + + if (!(state->state & ST_SUSPENDED)) + return 0; + + return imx8mq_mipi_csi_pm_resume(dev); } static int __maybe_unused imx8mq_mipi_csi_runtime_suspend(struct device *dev) { - return imx8mq_mipi_csi_pm_suspend(dev, true); + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct csi_state *state = mipi_sd_to_csi2_state(sd); + int ret; + + ret = imx8mq_mipi_csi_pm_suspend(dev); + if (ret) + return ret; + + ret = icc_set_bw(state->icc_path, 0, 0); + if (ret) + dev_err(dev, "icc_set_bw failed with %d\n", ret); + + return ret; } static int __maybe_unused imx8mq_mipi_csi_runtime_resume(struct device *dev) { - return imx8mq_mipi_csi_pm_resume(dev, true); + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct csi_state *state = mipi_sd_to_csi2_state(sd); + int ret; + + ret = icc_set_bw(state->icc_path, 0, state->icc_path_bw); + if (ret) { + dev_err(dev, "icc_set_bw failed with %d\n", ret); + return ret; + } + + return imx8mq_mipi_csi_pm_resume(dev); } static const struct dev_pm_ops imx8mq_mipi_csi_pm_ops = { @@ -921,7 +941,7 @@ static int imx8mq_mipi_csi_probe(struct platform_device *pdev) /* Enable runtime PM. 
*/ pm_runtime_enable(dev); if (!pm_runtime_enabled(dev)) { - ret = imx8mq_mipi_csi_pm_resume(dev, true); + ret = imx8mq_mipi_csi_runtime_resume(dev); if (ret < 0) goto icc; } @@ -934,7 +954,7 @@ static int imx8mq_mipi_csi_probe(struct platform_device *pdev) cleanup: pm_runtime_disable(&pdev->dev); - imx8mq_mipi_csi_pm_suspend(&pdev->dev, true); + imx8mq_mipi_csi_runtime_suspend(&pdev->dev); media_entity_cleanup(&state->sd.entity); v4l2_async_notifier_unregister(&state->notifier); @@ -958,7 +978,7 @@ static int imx8mq_mipi_csi_remove(struct platform_device *pdev) v4l2_async_unregister_subdev(&state->sd); pm_runtime_disable(&pdev->dev); - imx8mq_mipi_csi_pm_suspend(&pdev->dev, true); + imx8mq_mipi_csi_runtime_suspend(&pdev->dev); media_entity_cleanup(&state->sd.entity); mutex_destroy(&state->lock); pm_runtime_set_suspended(&pdev->dev); diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c index db7022707ff8..86ccc8937afc 100644 --- a/drivers/staging/media/meson/vdec/esparser.c +++ b/drivers/staging/media/meson/vdec/esparser.c @@ -328,7 +328,12 @@ esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf) offset = esparser_get_offset(sess); - amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags); + ret = amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags); + if (ret) { + v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); + return ret; + } + dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X flags = %08X\n", vb->timestamp, payload_size, offset, vbuf->flags); diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c index b9125c295d1d..06fd66539797 100644 --- a/drivers/staging/media/meson/vdec/vdec_helpers.c +++ b/drivers/staging/media/meson/vdec/vdec_helpers.c @@ -227,13 +227,16 @@ int amvdec_set_canvases(struct amvdec_session *sess, } EXPORT_SYMBOL_GPL(amvdec_set_canvases); -void amvdec_add_ts(struct amvdec_session *sess, u64 ts, - struct v4l2_timecode tc, u32 offset, u32 vbuf_flags) +int amvdec_add_ts(struct amvdec_session *sess, u64 ts, + struct v4l2_timecode tc, u32 offset, u32 vbuf_flags) { struct amvdec_timestamp *new_ts; unsigned long flags; new_ts = kzalloc(sizeof(*new_ts), GFP_KERNEL); + if (!new_ts) + return -ENOMEM; + new_ts->ts = ts; new_ts->tc = tc; new_ts->offset = offset; @@ -242,6 +245,7 @@ void amvdec_add_ts(struct amvdec_session *sess, u64 ts, spin_lock_irqsave(&sess->ts_spinlock, flags); list_add_tail(&new_ts->list, &sess->timestamps); spin_unlock_irqrestore(&sess->ts_spinlock, flags); + return 0; } EXPORT_SYMBOL_GPL(amvdec_add_ts); diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h index cfaed52ab526..798e5a8a9b3f 100644 --- a/drivers/staging/media/meson/vdec/vdec_helpers.h +++ b/drivers/staging/media/meson/vdec/vdec_helpers.h @@ -55,8 +55,8 @@ void amvdec_dst_buf_done_offset(struct amvdec_session *sess, * @offset: offset in the VIFIFO where the associated packet was written * @flags the vb2_v4l2_buffer flags */ -void amvdec_add_ts(struct amvdec_session *sess, u64 ts, - struct v4l2_timecode tc, u32 offset, u32 flags); +int amvdec_add_ts(struct amvdec_session *sess, u64 ts, + struct v4l2_timecode tc, u32 offset, u32 flags); void amvdec_remove_ts(struct amvdec_session *sess, u64 ts); /** diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c index de7442d4834d..d3e26bfe6c90 100644 --- 
a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c @@ -38,7 +38,7 @@ struct cedrus_h264_sram_ref_pic { #define CEDRUS_H264_FRAME_NUM 18 -#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (16 * SZ_1K) +#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (32 * SZ_1K) #define CEDRUS_MIN_PIC_INFO_BUF_SIZE (130 * SZ_1K) static void cedrus_h264_write_sram(struct cedrus_dev *dev, diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c index ef0311a16d01..754942ecf064 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c @@ -23,7 +23,7 @@ * Subsequent BSP implementations seem to double the neighbor info buffer size * for the H6 SoC, which may be related to 10 bit H265 support. */ -#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (397 * SZ_1K) +#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (794 * SZ_1K) #define CEDRUS_H265_ENTRY_POINTS_BUF_SIZE (4 * SZ_1K) #define CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE 160 diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h index b1ad2a2b914c..50d5a7acfab6 100644 --- a/drivers/staging/media/zoran/zoran.h +++ b/drivers/staging/media/zoran/zoran.h @@ -313,6 +313,6 @@ static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev) #endif -int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq); +int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir); void zoran_queue_exit(struct zoran *zr); int zr_set_buf(struct zoran *zr); diff --git a/drivers/staging/media/zoran/zoran_card.c b/drivers/staging/media/zoran/zoran_card.c index f259585b0689..11d415c0c05d 100644 --- a/drivers/staging/media/zoran/zoran_card.c +++ b/drivers/staging/media/zoran/zoran_card.c @@ -803,6 +803,52 @@ int zoran_check_jpg_settings(struct zoran *zr, return 0; } +static int zoran_init_video_device(struct zoran *zr, struct video_device *video_dev, int dir) +{ + int err; + + /* Now add the template and register the device unit. */ + *video_dev = zoran_template; + video_dev->v4l2_dev = &zr->v4l2_dev; + video_dev->lock = &zr->lock; + video_dev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | dir; + + strscpy(video_dev->name, ZR_DEVNAME(zr), sizeof(video_dev->name)); + /* + * It's not a mem2mem device, but you can both capture and output from one and the same + * device. This should really be split up into two device nodes, but that's a job for + * another day. 
+ */ + video_dev->vfl_dir = VFL_DIR_M2M; + zoran_queue_init(zr, &zr->vq, V4L2_BUF_TYPE_VIDEO_CAPTURE); + + err = video_register_device(video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]); + if (err < 0) + return err; + video_set_drvdata(video_dev, zr); + return 0; +} + +static void zoran_exit_video_devices(struct zoran *zr) +{ + video_unregister_device(zr->video_dev); + kfree(zr->video_dev); +} + +static int zoran_init_video_devices(struct zoran *zr) +{ + int err; + + zr->video_dev = video_device_alloc(); + if (!zr->video_dev) + return -ENOMEM; + + err = zoran_init_video_device(zr, zr->video_dev, V4L2_CAP_VIDEO_CAPTURE); + if (err) + kfree(zr->video_dev); + return err; +} + void zoran_open_init_params(struct zoran *zr) { int i; @@ -874,17 +920,11 @@ static int zr36057_init(struct zoran *zr) zoran_open_init_params(zr); /* allocate memory *before* doing anything to the hardware in case allocation fails */ - zr->video_dev = video_device_alloc(); - if (!zr->video_dev) { - err = -ENOMEM; - goto exit; - } zr->stat_com = dma_alloc_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), &zr->p_sc, GFP_KERNEL); if (!zr->stat_com) { - err = -ENOMEM; - goto exit_video; + return -ENOMEM; } for (j = 0; j < BUZ_NUM_STAT_COM; j++) zr->stat_com[j] = cpu_to_le32(1); /* mark as unavailable to zr36057 */ @@ -897,26 +937,9 @@ static int zr36057_init(struct zoran *zr) goto exit_statcom; } - /* Now add the template and register the device unit. */ - *zr->video_dev = zoran_template; - zr->video_dev->v4l2_dev = &zr->v4l2_dev; - zr->video_dev->lock = &zr->lock; - zr->video_dev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE; - - strscpy(zr->video_dev->name, ZR_DEVNAME(zr), sizeof(zr->video_dev->name)); - /* - * It's not a mem2mem device, but you can both capture and output from one and the same - * device. This should really be split up into two device nodes, but that's a job for - * another day. 
- */ - zr->video_dev->vfl_dir = VFL_DIR_M2M; - - zoran_queue_init(zr, &zr->vq); - - err = video_register_device(zr->video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]); - if (err < 0) + err = zoran_init_video_devices(zr); + if (err) goto exit_statcomb; - video_set_drvdata(zr->video_dev, zr); zoran_init_hardware(zr); if (!pass_through) { @@ -931,9 +954,6 @@ exit_statcomb: dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb); exit_statcom: dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), zr->stat_com, zr->p_sc); -exit_video: - kfree(zr->video_dev); -exit: return err; } @@ -965,7 +985,7 @@ static void zoran_remove(struct pci_dev *pdev) dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb); pci_release_regions(pdev); pci_disable_device(zr->pci_dev); - video_unregister_device(zr->video_dev); + zoran_exit_video_devices(zr); exit_free: v4l2_ctrl_handler_free(&zr->hdl); v4l2_device_unregister(&zr->v4l2_dev); @@ -1069,8 +1089,10 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) - return -ENODEV; - vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32)); + return err; + err = vb2_dma_contig_set_max_seg_size(&pdev->dev, U32_MAX); + if (err) + return err; nr = zoran_num++; if (nr >= BUZ_MAX) { diff --git a/drivers/staging/media/zoran/zoran_device.c b/drivers/staging/media/zoran/zoran_device.c index 5b12a730a229..fb1f0465ca87 100644 --- a/drivers/staging/media/zoran/zoran_device.c +++ b/drivers/staging/media/zoran/zoran_device.c @@ -814,7 +814,7 @@ static void zoran_reap_stat_com(struct zoran *zr) if (zr->jpg_settings.tmp_dcm == 1) i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM; else - i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2 + 1; + i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2; stat_com = le32_to_cpu(zr->stat_com[i]); if ((stat_com & 1) == 0) { @@ -826,6 +826,11 @@ static void zoran_reap_stat_com(struct zoran *zr) size = (stat_com & GENMASK(22, 1)) >> 1; buf = zr->inuse[i]; + if (!buf) { + spin_unlock_irqrestore(&zr->queued_bufs_lock, flags); + pci_err(zr->pci_dev, "No buffer at slot %d\n", i); + return; + } buf->vbuf.vb2_buf.timestamp = ktime_get_ns(); if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) { diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/staging/media/zoran/zoran_driver.c index 46382e43f1bf..84665637ebb7 100644 --- a/drivers/staging/media/zoran/zoran_driver.c +++ b/drivers/staging/media/zoran/zoran_driver.c @@ -255,8 +255,6 @@ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability strscpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card)); strscpy(cap->driver, "zoran", sizeof(cap->driver)); snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", pci_name(zr->pci_dev)); - cap->device_caps = zr->video_dev->device_caps; - cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } @@ -582,6 +580,9 @@ static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std) struct zoran *zr = video_drvdata(file); int res = 0; + if (zr->norm == std) + return 0; + if (zr->running != ZORAN_MAP_MODE_NONE) return -EBUSY; @@ -739,6 +740,7 @@ static int zoran_g_parm(struct file *file, void *priv, struct v4l2_streamparm *p if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; + parm->parm.capture.readbuffers = 9; return 0; } @@ -869,6 +871,10 @@ int zr_set_buf(struct zoran *zr) vbuf = &buf->vbuf; 
buf->vbuf.field = V4L2_FIELD_INTERLACED; + if (BUZ_MAX_HEIGHT < (zr->v4l_settings.height * 2)) + buf->vbuf.field = V4L2_FIELD_INTERLACED; + else + buf->vbuf.field = V4L2_FIELD_TOP; vb2_set_plane_payload(&buf->vbuf.vb2_buf, 0, zr->buffer_size); vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_DONE); zr->inuse[0] = NULL; @@ -928,6 +934,7 @@ static int zr_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) zr->stat_com[j] = cpu_to_le32(1); zr->inuse[j] = NULL; } + zr->vbseq = 0; if (zr->map_mode != ZORAN_MAP_MODE_RAW) { pci_info(zr->pci_dev, "START JPG\n"); @@ -1008,7 +1015,7 @@ static const struct vb2_ops zr_video_qops = { .wait_finish = vb2_ops_wait_finish, }; -int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq) +int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir) { int err; @@ -1016,8 +1023,9 @@ int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq) INIT_LIST_HEAD(&zr->queued_bufs); vq->dev = &zr->pci_dev->dev; - vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - vq->io_modes = VB2_USERPTR | VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE; + vq->type = dir; + + vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE; vq->drv_priv = zr; vq->buf_struct_size = sizeof(struct zr_buffer); vq->ops = &zr_video_qops; diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts index b65d71686814..02fd9be5e173 100644 --- a/drivers/staging/mt7621-dts/gbpc1.dts +++ b/drivers/staging/mt7621-dts/gbpc1.dts @@ -11,7 +11,8 @@ memory@0 { device_type = "memory"; - reg = <0x0 0x1c000000>, <0x20000000 0x4000000>; + reg = <0x00000000 0x1c000000>, + <0x20000000 0x04000000>; }; chosen { @@ -37,24 +38,16 @@ gpio-leds { compatible = "gpio-leds"; - system { - label = "gb-pc1:green:system"; + power { + label = "green:power"; gpios = <&gpio 6 GPIO_ACTIVE_LOW>; + linux,default-trigger = "default-on"; }; - status { - label = "gb-pc1:green:status"; + system { + label = "green:system"; gpios = <&gpio 8 GPIO_ACTIVE_LOW>; - }; - - lan1 { - label = "gb-pc1:green:lan1"; - gpios = <&gpio 24 GPIO_ACTIVE_LOW>; - }; - - lan2 { - label = "gb-pc1:green:lan2"; - gpios = <&gpio 25 GPIO_ACTIVE_LOW>; + linux,default-trigger = "disk-activity"; }; }; }; @@ -94,9 +87,8 @@ partition@50000 { label = "firmware"; - reg = <0x50000 0x1FB0000>; + reg = <0x50000 0x1fb0000>; }; - }; }; @@ -105,9 +97,12 @@ }; &pinctrl { - state_default: pinctrl0 { - default_gpio: gpio { - groups = "wdt", "rgmii2", "uart3"; + pinctrl-names = "default"; + pinctrl-0 = <&state_default>; + + state_default: state-default { + gpio-pinmux { + groups = "rgmii2", "uart3", "wdt"; function = "gpio"; }; }; @@ -116,12 +111,13 @@ &switch0 { ports { port@0 { + status = "okay"; label = "ethblack"; - status = "ok"; }; + port@4 { + status = "okay"; label = "ethblue"; - status = "ok"; }; }; }; diff --git a/drivers/staging/mt7621-dts/gbpc2.dts b/drivers/staging/mt7621-dts/gbpc2.dts index 52760e7351f6..6f6fed071dda 100644 --- a/drivers/staging/mt7621-dts/gbpc2.dts +++ b/drivers/staging/mt7621-dts/gbpc2.dts @@ -1,21 +1,121 @@ /dts-v1/; -#include "gbpc1.dts" +#include "mt7621.dtsi" + +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> / { compatible = "gnubee,gb-pc2", "mediatek,mt7621-soc"; model = "GB-PC2"; + + memory@0 { + device_type = "memory"; + reg = <0x00000000 0x1c000000>, + <0x20000000 0x04000000>; + }; + + chosen { + bootargs = "console=ttyS0,57600"; + }; + + palmbus: palmbus@1e000000 { + i2c@900 { + status = "okay"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + reset { + label = 
"reset"; + gpios = <&gpio 18 GPIO_ACTIVE_HIGH>; + linux,code = <KEY_RESTART>; + }; + }; +}; + +&sdhci { + status = "okay"; +}; + +&spi0 { + status = "okay"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <50000000>; + broken-flash-reset; + + partition@0 { + label = "u-boot"; + reg = <0x0 0x30000>; + read-only; + }; + + partition@30000 { + label = "u-boot-env"; + reg = <0x30000 0x10000>; + read-only; + }; + + factory: partition@40000 { + label = "factory"; + reg = <0x40000 0x10000>; + read-only; + }; + + partition@50000 { + label = "firmware"; + reg = <0x50000 0x1fb0000>; + }; + }; }; -&default_gpio { - groups = "wdt", "uart3"; - function = "gpio"; +&pcie { + status = "okay"; }; -&gmac1 { - status = "ok"; +&pinctrl { + pinctrl-names = "default"; + pinctrl-0 = <&state_default>; + + state_default: state-default { + gpio-pinmux { + groups = "wdt"; + function = "gpio"; + }; + }; }; -&phy_external { - status = "ok"; +ðernet { + gmac1: mac@1 { + status = "okay"; + phy-handle = <ðphy7>; + }; + + mdio-bus { + ethphy7: ethernet-phy@7 { + reg = <7>; + phy-mode = "rgmii-rxid"; + }; + }; +}; + +&switch0 { + ports { + port@0 { + status = "okay"; + label = "ethblack"; + }; + + port@4 { + status = "okay"; + label = "ethblue"; + }; + }; }; diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi index eeabe9c0f4fb..59a9ce282a3b 100644 --- a/drivers/staging/mt7621-dts/mt7621.dtsi +++ b/drivers/staging/mt7621-dts/mt7621.dtsi @@ -36,9 +36,9 @@ regulator-max-microvolt = <3300000>; enable-active-high; regulator-always-on; - }; + }; - mmc_fixed_1v8_io: fixedregulator@1 { + mmc_fixed_1v8_io: fixedregulator@1 { compatible = "regulator-fixed"; regulator-name = "mmc_io"; regulator-min-microvolt = <1800000>; @@ -391,37 +391,32 @@ mediatek,ethsys = <&sysc>; + pinctrl-names = "default"; + pinctrl-0 = <&mdio_pins>, <&rgmii1_pins>, <&rgmii2_pins>; gmac0: mac@0 { compatible = "mediatek,eth-mac"; reg = <0>; phy-mode = "rgmii"; + fixed-link { speed = <1000>; full-duplex; pause; }; }; + gmac1: mac@1 { compatible = "mediatek,eth-mac"; reg = <1>; status = "off"; phy-mode = "rgmii-rxid"; - phy-handle = <&phy_external>; }; + mdio-bus { #address-cells = <1>; #size-cells = <0>; - phy_external: ethernet-phy@5 { - status = "off"; - reg = <5>; - phy-mode = "rgmii-rxid"; - - pinctrl-names = "default"; - pinctrl-0 = <&rgmii2_pins>; - }; - switch0: switch0@0 { compatible = "mediatek,mt7621"; #address-cells = <1>; @@ -439,36 +434,43 @@ #address-cells = <1>; #size-cells = <0>; reg = <0>; + port@0 { status = "off"; reg = <0>; label = "lan0"; }; + port@1 { status = "off"; reg = <1>; label = "lan1"; }; + port@2 { status = "off"; reg = <2>; label = "lan2"; }; + port@3 { status = "off"; reg = <3>; label = "lan3"; }; + port@4 { status = "off"; reg = <4>; label = "lan4"; }; + port@6 { reg = <6>; label = "cpu"; ethernet = <&gmac0>; phy-mode = "trgmii"; + fixed-link { speed = <1000>; full-duplex; diff --git a/drivers/staging/r8188eu/core/rtw_recv.c b/drivers/staging/r8188eu/core/rtw_recv.c index e082edfbaad8..30ca9f1e0363 100644 --- a/drivers/staging/r8188eu/core/rtw_recv.c +++ b/drivers/staging/r8188eu/core/rtw_recv.c @@ -1942,8 +1942,7 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe) struct recv_frame *pending_frame; int cnt = 0; - pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue); - while (pending_frame) { + while ((pending_frame = 
rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) { cnt++; recv_func_posthandle(padapter, pending_frame); } diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c index 14758361960c..9f2b86f9b660 100644 --- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c +++ b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c @@ -572,10 +572,10 @@ static int load_firmware(struct rt_firmware *pFirmware, struct device *device) } memcpy(pFirmware->szFwBuffer, fw->data, fw->size); pFirmware->ulFwLength = fw->size; - release_firmware(fw); - DBG_88E_LEVEL(_drv_info_, "+%s: !bUsedWoWLANFw, FmrmwareLen:%d+\n", __func__, pFirmware->ulFwLength); + dev_dbg(device, "!bUsedWoWLANFw, FmrmwareLen:%d+\n", pFirmware->ulFwLength); Exit: + release_firmware(fw); return rtStatus; } diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c index ad9c237054c4..1a4b4c75c4bf 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c @@ -5915,6 +5915,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) struct sta_info *psta_bmc; struct list_head *xmitframe_plist, *xmitframe_phead, *tmp; struct xmit_frame *pxmitframe = NULL; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct sta_priv *pstapriv = &padapter->stapriv; /* for BC/MC Frames */ @@ -5925,7 +5926,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) { msleep(10);/* 10ms, ATIM(HIQ) Windows */ - spin_lock_bh(&psta_bmc->sleep_q.lock); + /* spin_lock_bh(&psta_bmc->sleep_q.lock); */ + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta_bmc->sleep_q); list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) { @@ -5948,7 +5950,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) rtw_hal_xmitframe_enqueue(padapter, pxmitframe); } - spin_unlock_bh(&psta_bmc->sleep_q.lock); + /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); /* check hi queue and bmc_sleepq */ rtw_chk_hi_queue_cmd(padapter); diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c index 3564e2af5741..5b0a596eefb7 100644 --- a/drivers/staging/rtl8723bs/core/rtw_recv.c +++ b/drivers/staging/rtl8723bs/core/rtw_recv.c @@ -953,8 +953,10 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_ if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) { struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; - spin_lock_bh(&psta->sleep_q.lock); + /* spin_lock_bh(&psta->sleep_q.lock); */ + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); xmitframe_plist = get_next(xmitframe_phead); @@ -985,10 +987,12 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_ update_beacon(padapter, WLAN_EID_TIM, NULL, true); } - spin_unlock_bh(&psta->sleep_q.lock); + /* spin_unlock_bh(&psta->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); } else { - spin_unlock_bh(&psta->sleep_q.lock); + /* spin_unlock_bh(&psta->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); if (pstapriv->tim_bitmap&BIT(psta->aid)) { if (psta->sleepq_len == 0) { diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c index 
3d269842677d..5eae3ccb1ff5 100644 --- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c +++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c @@ -288,48 +288,46 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta) /* list_del_init(&psta->wakeup_list); */ - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); + rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q); psta->sleepq_len = 0; - spin_unlock_bh(&psta->sleep_q.lock); - - spin_lock_bh(&pxmitpriv->lock); /* vo */ - spin_lock_bh(&pstaxmitpriv->vo_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending); list_del_init(&(pstaxmitpriv->vo_q.tx_pending)); phwxmit = pxmitpriv->hwxmits; phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt; pstaxmitpriv->vo_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->vo_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */ /* vi */ - spin_lock_bh(&pstaxmitpriv->vi_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending); list_del_init(&(pstaxmitpriv->vi_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+1; phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt; pstaxmitpriv->vi_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->vi_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */ /* be */ - spin_lock_bh(&pstaxmitpriv->be_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending); list_del_init(&(pstaxmitpriv->be_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+2; phwxmit->accnt -= pstaxmitpriv->be_q.qcnt; pstaxmitpriv->be_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->be_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */ /* bk */ - spin_lock_bh(&pstaxmitpriv->bk_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending); list_del_init(&(pstaxmitpriv->bk_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+3; phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt; pstaxmitpriv->bk_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->bk_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */ spin_unlock_bh(&pxmitpriv->lock); diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c index 6b37b42ec226..79e4d7df1ef5 100644 --- a/drivers/staging/rtl8723bs/core/rtw_xmit.c +++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c @@ -1723,12 +1723,15 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram struct list_head *plist, *phead, *tmp; struct xmit_frame *pxmitframe; + spin_lock_bh(&pframequeue->lock); + phead = get_list_head(pframequeue); list_for_each_safe(plist, tmp, phead) { pxmitframe = list_entry(plist, struct xmit_frame, list); rtw_free_xmitframe(pxmitpriv, pxmitframe); } + spin_unlock_bh(&pframequeue->lock); } s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe) @@ -1783,7 +1786,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) struct sta_info *psta; struct tx_servq *ptxservq; struct pkt_attrib *pattrib = &pxmitframe->attrib; - struct xmit_priv *xmit_priv = &padapter->xmitpriv; struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; signed int res = _SUCCESS; @@ -1801,14 +1803,12 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) ptxservq = 
rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index)); - spin_lock_bh(&xmit_priv->lock); if (list_empty(&ptxservq->tx_pending)) list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue)); list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending)); ptxservq->qcnt++; phwxmits[ac_index].accnt++; - spin_unlock_bh(&xmit_priv->lock); exit: @@ -2191,10 +2191,11 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) struct list_head *xmitframe_plist, *xmitframe_phead, *tmp; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; psta_bmc = rtw_get_bcmc_stainfo(padapter); - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) { @@ -2295,7 +2296,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) _exit: - spin_unlock_bh(&psta->sleep_q.lock); + spin_unlock_bh(&pxmitpriv->lock); if (update_mask) update_beacon(padapter, WLAN_EID_TIM, NULL, true); @@ -2307,8 +2308,9 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst struct list_head *xmitframe_plist, *xmitframe_phead, *tmp; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) { @@ -2361,7 +2363,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst } } - spin_unlock_bh(&psta->sleep_q.lock); + spin_unlock_bh(&pxmitpriv->lock); } void enqueue_pending_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c index 5f5c4719b586..156d6aba18ca 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c @@ -507,7 +507,9 @@ s32 rtl8723bs_hal_xmit( rtw_issue_addbareq_cmd(padapter, pxmitframe); } + spin_lock_bh(&pxmitpriv->lock); err = rtw_xmitframe_enqueue(padapter, pxmitframe); + spin_unlock_bh(&pxmitpriv->lock); if (err != _SUCCESS) { rtw_free_xmitframe(pxmitpriv, pxmitframe); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 967f10b9582a..099359fc0115 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -1033,15 +1033,27 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, DEBUG_TRACE(SERVICE_CALLBACK_LINE); + rcu_read_lock(); service = handle_to_service(handle); - if (WARN_ON(!service)) + if (WARN_ON(!service)) { + rcu_read_unlock(); return VCHIQ_SUCCESS; + } user_service = (struct user_service *)service->base.userdata; instance = user_service->instance; - if (!instance || instance->closing) + if (!instance || instance->closing) { + rcu_read_unlock(); return VCHIQ_SUCCESS; + } + + /* + * As hopping around different synchronization mechanism, + * taking an extra reference results in simpler implementation. 
+ */ + vchiq_service_get(service); + rcu_read_unlock(); vchiq_log_trace(vchiq_arm_log_level, "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx", @@ -1074,6 +1086,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, NULL, user_service, bulk_userdata); if (status != VCHIQ_SUCCESS) { DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); return status; } } @@ -1084,11 +1097,13 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__); DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); return VCHIQ_RETRY; } else if (instance->closing) { vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__); DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); return VCHIQ_ERROR; } DEBUG_TRACE(SERVICE_CALLBACK_LINE); @@ -1117,6 +1132,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, header = NULL; } DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); if (skip_completion) return VCHIQ_SUCCESS; @@ -1173,6 +1189,9 @@ int vchiq_dump_platform_instances(void *dump_context) int len; int i; + if (!state) + return -ENOTCONN; + /* * There is no list of instances, so instead scan all services, * marking those that have been dumped. diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 9429b8a642fb..630ed0dc24c3 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -2421,6 +2421,9 @@ void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header) struct vchiq_service *service = find_service_by_handle(handle); int pos; + if (!service) + return; + while (service->msg_queue_write == service->msg_queue_read + VCHIQ_MAX_SLOTS) { if (wait_for_completion_interruptible(&service->msg_queue_pop)) @@ -2441,6 +2444,9 @@ struct vchiq_header *vchiq_msg_hold(unsigned int handle) struct vchiq_header *header; int pos; + if (!service) + return NULL; + if (service->msg_queue_write == service->msg_queue_read) return NULL; diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c index 4b9fdf99981b..9ff69c5e0ae9 100644 --- a/drivers/staging/wfx/main.c +++ b/drivers/staging/wfx/main.c @@ -309,7 +309,8 @@ struct wfx_dev *wfx_init_common(struct device *dev, wdev->pdata.gpio_wakeup = devm_gpiod_get_optional(dev, "wakeup", GPIOD_OUT_LOW); if (IS_ERR(wdev->pdata.gpio_wakeup)) - return NULL; + goto err; + if (wdev->pdata.gpio_wakeup) gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup"); @@ -328,6 +329,10 @@ struct wfx_dev *wfx_init_common(struct device *dev, return NULL; return wdev; + +err: + ieee80211_free_hw(hw); + return NULL; } int wfx_probe(struct wfx_dev *wdev) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 9f552f48084c..0ca5ec14d3db 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1821,6 +1821,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) mutex_lock(&udev->cmdr_lock); page = xa_load(&udev->data_pages, dpi); if (likely(page)) { + get_page(page); mutex_unlock(&udev->cmdr_lock); return page; } @@ -1877,6 +1878,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) /* For the vmalloc()ed cmd area pages */ addr = (void *)(unsigned long)info->mem[mi].addr + offset; page = 
vmalloc_to_page(addr); + get_page(page); } else { uint32_t dpi; @@ -1887,7 +1889,6 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } - get_page(page); vmf->page = page; return 0; } diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 5363ebebfc35..50c0d839fe75 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -588,6 +588,7 @@ static int optee_remove(struct platform_device *pdev) /* Unregister OP-TEE specific client devices on TEE bus */ optee_unregister_devices(); + teedev_close_context(optee->ctx); /* * Ask OP-TEE to free all cached shared memory objects to decrease * reference counters and also avoid wild pointers in secure world @@ -633,6 +634,7 @@ static int optee_probe(struct platform_device *pdev) struct optee *optee = NULL; void *memremaped_shm = NULL; struct tee_device *teedev; + struct tee_context *ctx; u32 sec_caps; int rc; @@ -719,6 +721,12 @@ static int optee_probe(struct platform_device *pdev) optee_supp_init(&optee->supp); optee->memremaped_shm = memremaped_shm; optee->pool = pool; + ctx = teedev_open(optee->teedev); + if (IS_ERR(ctx)) { + rc = PTR_ERR(ctx); + goto err; + } + optee->ctx = ctx; /* * Ensure that there are no pre-existing shm objects before enabling diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index f6bb4a763ba9..ea09533e30cd 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -70,6 +70,7 @@ struct optee_supp { * struct optee - main service struct * @supp_teedev: supplicant device * @teedev: client device + * @ctx: driver internal TEE context * @invoke_fn: function to issue smc or hvc * @call_queue: queue of threads waiting to call @invoke_fn * @wait_queue: queue of threads from secure world waiting for a @@ -87,6 +88,7 @@ struct optee { struct tee_device *supp_teedev; struct tee_device *teedev; optee_invoke_fn *invoke_fn; + struct tee_context *ctx; struct optee_call_queue call_queue; struct optee_wait_queue wait_queue; struct optee_supp supp; diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c index efbaff7ad7e5..456833d82007 100644 --- a/drivers/tee/optee/rpc.c +++ b/drivers/tee/optee/rpc.c @@ -285,6 +285,7 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) } static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee *optee, struct optee_msg_arg *arg, struct optee_call_ctx *call_ctx) { @@ -314,7 +315,8 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = cmd_alloc_suppl(ctx, sz); break; case OPTEE_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc(optee->ctx, sz, + TEE_SHM_MAPPED | TEE_SHM_PRIV); break; default: arg->ret = TEEC_ERROR_BAD_PARAMETERS; @@ -471,7 +473,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, break; case OPTEE_RPC_CMD_SHM_ALLOC: free_pages_list(call_ctx); - handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx); + handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx); break; case OPTEE_RPC_CMD_SHM_FREE: handle_rpc_func_cmd_shm_free(ctx, arg); @@ -502,7 +504,7 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { case OPTEE_SMC_RPC_FUNC_ALLOC: - shm = tee_shm_alloc(ctx, param->a1, + shm = tee_shm_alloc(optee->ctx, param->a1, TEE_SHM_MAPPED | TEE_SHM_PRIV); if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { reg_pair_from_64(¶m->a1, ¶m->a2, pa); diff 
--git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 85102d12d716..3fc426dad2df 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -43,7 +43,7 @@ static DEFINE_SPINLOCK(driver_lock); static struct class *tee_class; static dev_t tee_devt; -static struct tee_context *teedev_open(struct tee_device *teedev) +struct tee_context *teedev_open(struct tee_device *teedev) { int rc; struct tee_context *ctx; @@ -70,6 +70,7 @@ err: return ERR_PTR(rc); } +EXPORT_SYMBOL_GPL(teedev_open); void teedev_ctx_get(struct tee_context *ctx) { @@ -96,13 +97,14 @@ void teedev_ctx_put(struct tee_context *ctx) kref_put(&ctx->refcount, teedev_ctx_release); } -static void teedev_close_context(struct tee_context *ctx) +void teedev_close_context(struct tee_context *ctx) { struct tee_device *teedev = ctx->teedev; teedev_ctx_put(ctx); tee_device_put(teedev); } +EXPORT_SYMBOL_GPL(teedev_close_context); static int tee_open(struct inode *inode, struct file *filp) { diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 19926beeb3b7..c8cdc614a357 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -53,7 +53,7 @@ struct int3400_thermal_priv { struct art *arts; int trt_count; struct trt *trts; - u8 uuid_bitmap; + u32 uuid_bitmap; int rel_misc_dev_res; int current_uuid_index; char *data_vault; @@ -405,6 +405,10 @@ static void int3400_notify(acpi_handle handle, thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event); thermal_prop[4] = NULL; kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop); + kfree(thermal_prop[0]); + kfree(thermal_prop[1]); + kfree(thermal_prop[2]); + kfree(thermal_prop[3]); } static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, @@ -465,6 +469,11 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv) priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer, obj->package.elements[0].buffer.length, GFP_KERNEL); + if (!priv->data_vault) { + kfree(buffer.pointer); + return; + } + bin_attr_data_vault.private = priv->data_vault; bin_attr_data_vault.size = obj->package.elements[0].buffer.length; kfree(buffer.pointer); diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c index 1234dbe95895..41c8d47805c4 100644 --- a/drivers/thermal/thermal_netlink.c +++ b/drivers/thermal/thermal_netlink.c @@ -418,11 +418,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p) for (i = 0; i < tz->trips; i++) { enum thermal_trip_type type; - int temp, hyst; + int temp, hyst = 0; tz->ops->get_trip_type(tz, i, &type); tz->ops->get_trip_temp(tz, i, &temp); - tz->ops->get_trip_hyst(tz, i, &hyst); + if (tz->ops->get_trip_hyst) + tz->ops->get_trip_hyst(tz, i, &hyst); if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) || nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, type) || diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c index 82a76cac94de..32366caca662 100644 --- a/drivers/tty/hvc/hvc_iucv.c +++ b/drivers/tty/hvc/hvc_iucv.c @@ -1417,7 +1417,9 @@ out_error: */ static int __init hvc_iucv_config(char *val) { - return kstrtoul(val, 10, &hvc_iucv_devices); + if (kstrtoul(val, 10, &hvc_iucv_devices)) + pr_warn("hvc_iucv= invalid parameter value '%s'\n", val); + return 1; } diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index da375851af4e..3b3e169c1f69 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c @@ -711,6 
+711,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) struct mxser_port *info = container_of(port, struct mxser_port, port); unsigned long page; unsigned long flags; + int ret; page = __get_free_page(GFP_KERNEL); if (!page) @@ -720,9 +721,9 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) if (!info->type) { set_bit(TTY_IO_ERROR, &tty->flags); - free_page(page); spin_unlock_irqrestore(&info->slock, flags); - return 0; + ret = 0; + goto err_free_xmit; } info->port.xmit_buf = (unsigned char *) page; @@ -748,8 +749,10 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) if (capable(CAP_SYS_ADMIN)) { set_bit(TTY_IO_ERROR, &tty->flags); return 0; - } else - return -ENODEV; + } + + ret = -ENODEV; + goto err_free_xmit; } /* @@ -794,6 +797,10 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) spin_unlock_irqrestore(&info->slock, flags); return 0; +err_free_xmit: + free_page(page); + info->port.xmit_buf = NULL; + return ret; } /* diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 09a14f7c79f4..8643b143c408 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -435,7 +435,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) modembits |= MDM_RTR; if (dlci->modem_tx & TIOCM_RI) modembits |= MDM_IC; - if (dlci->modem_tx & TIOCM_CD) + if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator) modembits |= MDM_DV; return modembits; } @@ -1009,25 +1009,25 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data, * @tty: virtual tty bound to the DLCI * @dlci: DLCI to affect * @modem: modem bits (full EA) - * @clen: command length + * @slen: number of signal octets * * Used when a modem control message or line state inline in adaption * layer 2 is processed. Sort out the local modem state and throttles */ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci, - u32 modem, int clen) + u32 modem, int slen) { int mlines = 0; u8 brk = 0; int fc; - /* The modem status command can either contain one octet (v.24 signals) - or two octets (v.24 signals + break signals). The length field will - either be 2 or 3 respectively. This is specified in section - 5.4.6.3.7 of the 27.010 mux spec. */ + /* The modem status command can either contain one octet (V.24 signals) + * or two octets (V.24 signals + break signals). This is specified in + * section 5.4.6.3.7 of the 07.10 mux spec. + */ - if (clen == 2) + if (slen == 1) modem = modem & 0x7f; else { brk = modem & 0x7f; @@ -1084,6 +1084,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) unsigned int brk = 0; struct gsm_dlci *dlci; int len = clen; + int slen; const u8 *dp = data; struct tty_struct *tty; @@ -1103,6 +1104,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) return; dlci = gsm->dlci[addr]; + slen = len; while (gsm_read_ea(&modem, *dp++) == 0) { len--; if (len == 0) @@ -1119,7 +1121,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) modem |= (brk & 0x7f); } tty = tty_port_tty_get(&dlci->port); - gsm_process_modem(tty, dlci, modem, clen); + gsm_process_modem(tty, dlci, modem, slen); if (tty) { tty_wakeup(tty); tty_kref_put(tty); @@ -1429,6 +1431,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci) if (dlci->addr != 0) { tty_port_tty_hangup(&dlci->port, false); kfifo_reset(&dlci->fifo); + /* Ensure that gsmtty_open() can return. 
*/ + tty_port_set_initialized(&dlci->port, 0); + wake_up_interruptible(&dlci->port.open_wait); } else dlci->gsm->dead = true; wake_up(&dlci->gsm->event); @@ -1488,7 +1493,7 @@ static void gsm_dlci_t1(struct timer_list *t) dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else { - gsm_dlci_close(dlci); + gsm_dlci_begin_close(dlci); /* prevent half open link */ } break; @@ -1567,6 +1572,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) struct tty_struct *tty; unsigned int modem = 0; int len = clen; + int slen = 0; if (debug & 16) pr_debug("%d bytes for tty\n", len); @@ -1579,12 +1585,14 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) case 2: /* Asynchronous serial with line state in each frame */ while (gsm_read_ea(&modem, *data++) == 0) { len--; + slen++; if (len == 0) return; } + slen++; tty = tty_port_tty_get(port); if (tty) { - gsm_process_modem(tty, dlci, modem, clen); + gsm_process_modem(tty, dlci, modem, slen); tty_kref_put(tty); } fallthrough; @@ -1722,7 +1730,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci) gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); - tty_hangup(tty); + /* We cannot use tty_hangup() because in tty_kref_put() the tty + * driver assumes that the hangup queue is free and reuses it to + * queue release_one_tty() -> NULL pointer panic in + * process_one_work(). + */ + tty_vhangup(tty); tty_port_tty_set(&dlci->port, NULL); tty_kref_put(tty); @@ -3175,9 +3188,9 @@ static void gsmtty_throttle(struct tty_struct *tty) if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) - dlci->modem_tx &= ~TIOCM_DTR; + dlci->modem_tx &= ~TIOCM_RTS; dlci->throttled = true; - /* Send an MSC with DTR cleared */ + /* Send an MSC with RTS cleared */ gsmtty_modem_update(dlci, 0); } @@ -3187,9 +3200,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty) if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) - dlci->modem_tx |= TIOCM_DTR; + dlci->modem_tx |= TIOCM_RTS; dlci->throttled = false; - /* Send an MSC with DTR set */ + /* Send an MSC with RTS set */ gsmtty_modem_update(dlci, 0); } diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 451e02cd0637..de5b45de5040 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1963,7 +1963,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty, return false; canon_head = smp_load_acquire(&ldata->canon_head); - n = min(*nr + 1, canon_head - ldata->read_tail); + n = min(*nr, canon_head - ldata->read_tail); tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); @@ -1985,10 +1985,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty, n += N_TTY_BUF_SIZE; c = n + found; - if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) { - c = min(*nr, c); + if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) n = c; - } n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n", __func__, eol, found, n, c, tail, more); diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c index 2350fb3bb5e4..c2cecc6f47db 100644 --- a/drivers/tty/serial/8250/8250_aspeed_vuart.c +++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c @@ -487,7 +487,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev) port.port.irq = irq_of_parse_and_map(np, 0); port.port.handle_irq = aspeed_vuart_handle_irq; port.port.iotype = UPIO_MEM; - port.port.type = PORT_16550A; + port.port.type = PORT_ASPEED_VUART; port.port.uartclk = clk; port.port.flags = UPF_SHARE_IRQ | 
UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_NO_THRE_TEST; diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index 890fa7ddaa7f..b3c3f7e5851a 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -64,10 +64,19 @@ int serial8250_tx_dma(struct uart_8250_port *p) struct uart_8250_dma *dma = p->dma; struct circ_buf *xmit = &p->port.state->xmit; struct dma_async_tx_descriptor *desc; + struct uart_port *up = &p->port; int ret; - if (dma->tx_running) + if (dma->tx_running) { + if (up->x_char) { + dmaengine_pause(dma->txchan); + uart_xchar_out(up, UART_TX); + dmaengine_resume(dma->txchan); + } return 0; + } else if (up->x_char) { + uart_xchar_out(up, UART_TX); + } if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) { /* We have been called from __dma_tx_complete() */ diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index 673cda3d011d..948d0a1c6ae8 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c @@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev) unsigned long address; int err; -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC) if (!dev->irq && (dev->id.sversion == 0xad)) dev->irq = iosapic_serial_irq(dev); #endif diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c index 848d81e3838c..49ae73f4d3a0 100644 --- a/drivers/tty/serial/8250/8250_lpss.c +++ b/drivers/tty/serial/8250/8250_lpss.c @@ -121,8 +121,7 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) { struct dw_dma_slave *param = &lpss->dma_param; struct pci_dev *pdev = to_pci_dev(port->dev); - unsigned int dma_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); - struct pci_dev *dma_dev = pci_get_slot(pdev->bus, dma_devfn); + struct pci_dev *dma_dev; switch (pdev->device) { case PCI_DEVICE_ID_INTEL_BYT_UART1: @@ -141,6 +140,8 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) return -EINVAL; } + dma_dev = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0)); + param->dma_dev = &dma_dev->dev; param->m_master = 0; param->p_master = 1; @@ -156,6 +157,14 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) return 0; } +static void byt_serial_exit(struct lpss8250 *lpss) +{ + struct dw_dma_slave *param = &lpss->dma_param; + + /* Paired with pci_get_slot() in the byt_serial_setup() above */ + put_device(param->dma_dev); +} + static int ehl_serial_setup(struct lpss8250 *lpss, struct uart_port *port) { struct uart_8250_dma *dma = &lpss->data.dma; @@ -171,6 +180,13 @@ static int ehl_serial_setup(struct lpss8250 *lpss, struct uart_port *port) return 0; } +static void ehl_serial_exit(struct lpss8250 *lpss) +{ + struct uart_8250_port *up = serial8250_get_port(lpss->data.line); + + up->dma = NULL; +} + #ifdef CONFIG_SERIAL_8250_DMA static const struct dw_dma_platform_data qrk_serial_dma_pdata = { .nr_channels = 2, @@ -345,8 +361,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_exit: - if (lpss->board->exit) - lpss->board->exit(lpss); + lpss->board->exit(lpss); pci_free_irq_vectors(pdev); return ret; } @@ -357,8 +372,7 @@ static void lpss8250_remove(struct pci_dev *pdev) serial8250_unregister_port(lpss->data.line); - if (lpss->board->exit) - lpss->board->exit(lpss); + lpss->board->exit(lpss); pci_free_irq_vectors(pdev); } @@ -366,12 +380,14 @@ static const struct 
lpss8250_board byt_board = { .freq = 100000000, .base_baud = 2764800, .setup = byt_serial_setup, + .exit = byt_serial_exit, }; static const struct lpss8250_board ehl_board = { .freq = 200000000, .base_baud = 12500000, .setup = ehl_serial_setup, + .exit = ehl_serial_exit, }; static const struct lpss8250_board qrk_board = { diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c index efa0515139f8..e6c1791609dd 100644 --- a/drivers/tty/serial/8250/8250_mid.c +++ b/drivers/tty/serial/8250/8250_mid.c @@ -73,6 +73,11 @@ static int pnw_setup(struct mid8250 *mid, struct uart_port *p) return 0; } +static void pnw_exit(struct mid8250 *mid) +{ + pci_dev_put(mid->dma_dev); +} + static int tng_handle_irq(struct uart_port *p) { struct mid8250 *mid = p->private_data; @@ -124,6 +129,11 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p) return 0; } +static void tng_exit(struct mid8250 *mid) +{ + pci_dev_put(mid->dma_dev); +} + static int dnv_handle_irq(struct uart_port *p) { struct mid8250 *mid = p->private_data; @@ -330,9 +340,9 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, mid); return 0; + err: - if (mid->board->exit) - mid->board->exit(mid); + mid->board->exit(mid); return ret; } @@ -342,8 +352,7 @@ static void mid8250_remove(struct pci_dev *pdev) serial8250_unregister_port(mid->line); - if (mid->board->exit) - mid->board->exit(mid); + mid->board->exit(mid); } static const struct mid8250_board pnw_board = { @@ -351,6 +360,7 @@ static const struct mid8250_board pnw_board = { .freq = 50000000, .base_baud = 115200, .setup = pnw_setup, + .exit = pnw_exit, }; static const struct mid8250_board tng_board = { @@ -358,6 +368,7 @@ static const struct mid8250_board tng_board = { .freq = 38400000, .base_baud = 1843200, .setup = tng_setup, + .exit = tng_exit, }; static const struct mid8250_board dnv_board = { diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index ec88b706e882..723ec0806799 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -307,6 +307,14 @@ static const struct serial8250_config uart_config[] = { .rxtrig_bytes = {1, 32, 64, 112}, .flags = UART_CAP_FIFO | UART_CAP_SLEEP, }, + [PORT_ASPEED_VUART] = { + .name = "ASPEED VUART", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, + }, }; /* Uart divisor latch read */ @@ -1615,6 +1623,18 @@ static inline void start_tx_rs485(struct uart_port *port) struct uart_8250_port *up = up_to_u8250p(port); struct uart_8250_em485 *em485 = up->em485; + /* + * While serial8250_em485_handle_stop_tx() is a noop if + * em485->active_timer != &em485->stop_tx_timer, it might happen that + * the timer is still armed and triggers only after the current bunch of + * chars is send and em485->active_timer == &em485->stop_tx_timer again. + * So cancel the timer. There is still a theoretical race condition if + * the timer is already running and only comes around to check for + * em485->active_timer when &em485->stop_tx_timer is armed again. 
+ */ + if (em485->active_timer == &em485->stop_tx_timer) + hrtimer_try_to_cancel(&em485->stop_tx_timer); + em485->active_timer = NULL; if (em485->tx_stopped) { @@ -1799,9 +1819,7 @@ void serial8250_tx_chars(struct uart_8250_port *up) int count; if (port->x_char) { - serial_out(up, UART_TX, port->x_char); - port->icount.tx++; - port->x_char = 0; + uart_xchar_out(port, UART_TX); return; } if (uart_tx_stopped(port)) { diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index 49d0c7f2b29b..79b7db8580e0 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -403,16 +403,16 @@ static int kgdboc_option_setup(char *opt) { if (!opt) { pr_err("config string not provided\n"); - return -EINVAL; + return 1; } if (strlen(opt) >= MAX_CONFIG_LEN) { pr_err("config string too long\n"); - return -ENOSPC; + return 1; } strcpy(config, opt); - return 0; + return 1; } __setup("kgdboc=", kgdboc_option_setup); diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c index e2f49863e9c2..319533b3c32a 100644 --- a/drivers/tty/serial/samsung_tty.c +++ b/drivers/tty/serial/samsung_tty.c @@ -922,11 +922,8 @@ static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport) return; } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) { - spin_unlock(&port->lock); + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); - spin_lock(&port->lock); - } if (uart_circ_empty(xmit)) s3c24xx_serial_stop_tx(port); diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index acbb615dd28f..0ab788058fa2 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -734,12 +734,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id) static void sc16is7xx_tx_proc(struct kthread_work *ws) { struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port); + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); if ((port->rs485.flags & SER_RS485_ENABLED) && (port->rs485.delay_rts_before_send > 0)) msleep(port->rs485.delay_rts_before_send); + mutex_lock(&s->efr_lock); sc16is7xx_handle_tx(port); + mutex_unlock(&s->efr_lock); } static void sc16is7xx_reconf_rs485(struct uart_port *port) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index dc6129ddef85..eb15423f935a 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -653,6 +653,20 @@ static void uart_flush_buffer(struct tty_struct *tty) } /* + * This function performs low-level write of high-priority XON/XOFF + * character and accounting for it. + * + * Requires uart_port to implement .serial_out(). 
+ */ +void uart_xchar_out(struct uart_port *uport, int offset) +{ + serial_port_out(uport, offset, uport->x_char); + uport->icount.tx++; + uport->x_char = 0; +} +EXPORT_SYMBOL_GPL(uart_xchar_out); + +/* * This function is used to send a high-priority XON/XOFF character to * the device */ diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 200cd293d14d..810a1b0b6520 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -421,10 +421,22 @@ static void stm32_usart_transmit_chars(struct uart_port *port) struct stm32_port *stm32_port = to_stm32_port(port); const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; struct circ_buf *xmit = &port->state->xmit; + u32 isr; + int ret; if (port->x_char) { if (stm32_port->tx_dma_busy) stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + + /* Check that TDR is empty before filling FIFO */ + ret = + readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, + isr, + (isr & USART_SR_TXE), + 10, 1000); + if (ret) + dev_warn(port->dev, "1 character may be erased\n"); + writel_relaxed(port->x_char, port->membase + ofs->tdr); port->x_char = 0; port->icount.tx++; diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h index a8776df2d4e0..f0ca865cce2a 100644 --- a/drivers/usb/cdns3/cdnsp-debug.h +++ b/drivers/usb/cdns3/cdnsp-debug.h @@ -182,208 +182,211 @@ static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0, int ep_id = TRB_TO_EP_INDEX(field3) - 1; int type = TRB_FIELD_TO_TYPE(field3); unsigned int ep_num; - int ret = 0; + int ret; u32 temp; ep_num = DIV_ROUND_UP(ep_id, 2); switch (type) { case TRB_LINK: - ret += snprintf(str, size, - "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c", - field1, field0, GET_INTR_TARGET(field2), - cdnsp_trb_type_string(type), - field3 & TRB_IOC ? 'I' : 'i', - field3 & TRB_CHAIN ? 'C' : 'c', - field3 & TRB_TC ? 'T' : 't', - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, + "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c", + field1, field0, GET_INTR_TARGET(field2), + cdnsp_trb_type_string(type), + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_TC ? 'T' : 't', + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_TRANSFER: case TRB_COMPLETION: case TRB_PORT_STATUS: case TRB_HC_EVENT: - ret += snprintf(str, size, - "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'" - " len %ld slot %ld flags %c:%c", - ep_num, ep_id % 2 ? "out" : "in", - TRB_TO_EP_INDEX(field3), - cdnsp_trb_type_string(type), field1, field0, - cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)), - EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3), - field3 & EVENT_DATA ? 'E' : 'e', - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, + "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'" + " len %ld slot %ld flags %c:%c", + ep_num, ep_id % 2 ? "out" : "in", + TRB_TO_EP_INDEX(field3), + cdnsp_trb_type_string(type), field1, field0, + cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)), + EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3), + field3 & EVENT_DATA ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_MFINDEX_WRAP: - ret += snprintf(str, size, "%s: flags %c", - cdnsp_trb_type_string(type), - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, "%s: flags %c", + cdnsp_trb_type_string(type), + field3 & TRB_CYCLE ? 
'C' : 'c'); break; case TRB_SETUP: - ret += snprintf(str, size, - "type '%s' bRequestType %02x bRequest %02x " - "wValue %02x%02x wIndex %02x%02x wLength %d " - "length %ld TD size %ld intr %ld Setup ID %ld " - "flags %c:%c:%c", - cdnsp_trb_type_string(type), - field0 & 0xff, - (field0 & 0xff00) >> 8, - (field0 & 0xff000000) >> 24, - (field0 & 0xff0000) >> 16, - (field1 & 0xff00) >> 8, - field1 & 0xff, - (field1 & 0xff000000) >> 16 | - (field1 & 0xff0000) >> 16, - TRB_LEN(field2), GET_TD_SIZE(field2), - GET_INTR_TARGET(field2), - TRB_SETUPID_TO_TYPE(field3), - field3 & TRB_IDT ? 'D' : 'd', - field3 & TRB_IOC ? 'I' : 'i', - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, + "type '%s' bRequestType %02x bRequest %02x " + "wValue %02x%02x wIndex %02x%02x wLength %d " + "length %ld TD size %ld intr %ld Setup ID %ld " + "flags %c:%c:%c", + cdnsp_trb_type_string(type), + field0 & 0xff, + (field0 & 0xff00) >> 8, + (field0 & 0xff000000) >> 24, + (field0 & 0xff0000) >> 16, + (field1 & 0xff00) >> 8, + field1 & 0xff, + (field1 & 0xff000000) >> 16 | + (field1 & 0xff0000) >> 16, + TRB_LEN(field2), GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + TRB_SETUPID_TO_TYPE(field3), + field3 & TRB_IDT ? 'D' : 'd', + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_DATA: - ret += snprintf(str, size, - "type '%s' Buffer %08x%08x length %ld TD size %ld " - "intr %ld flags %c:%c:%c:%c:%c:%c:%c", - cdnsp_trb_type_string(type), - field1, field0, TRB_LEN(field2), - GET_TD_SIZE(field2), - GET_INTR_TARGET(field2), - field3 & TRB_IDT ? 'D' : 'i', - field3 & TRB_IOC ? 'I' : 'i', - field3 & TRB_CHAIN ? 'C' : 'c', - field3 & TRB_NO_SNOOP ? 'S' : 's', - field3 & TRB_ISP ? 'I' : 'i', - field3 & TRB_ENT ? 'E' : 'e', - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, + "type '%s' Buffer %08x%08x length %ld TD size %ld " + "intr %ld flags %c:%c:%c:%c:%c:%c:%c", + cdnsp_trb_type_string(type), + field1, field0, TRB_LEN(field2), + GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + field3 & TRB_IDT ? 'D' : 'i', + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_NO_SNOOP ? 'S' : 's', + field3 & TRB_ISP ? 'I' : 'i', + field3 & TRB_ENT ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_STATUS: - ret += snprintf(str, size, - "Buffer %08x%08x length %ld TD size %ld intr" - "%ld type '%s' flags %c:%c:%c:%c", - field1, field0, TRB_LEN(field2), - GET_TD_SIZE(field2), - GET_INTR_TARGET(field2), - cdnsp_trb_type_string(type), - field3 & TRB_IOC ? 'I' : 'i', - field3 & TRB_CHAIN ? 'C' : 'c', - field3 & TRB_ENT ? 'E' : 'e', - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, + "Buffer %08x%08x length %ld TD size %ld intr" + "%ld type '%s' flags %c:%c:%c:%c", + field1, field0, TRB_LEN(field2), + GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + cdnsp_trb_type_string(type), + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_ENT ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_NORMAL: case TRB_ISOC: case TRB_EVENT_DATA: case TRB_TR_NOOP: - ret += snprintf(str, size, - "type '%s' Buffer %08x%08x length %ld " - "TD size %ld intr %ld " - "flags %c:%c:%c:%c:%c:%c:%c:%c:%c", - cdnsp_trb_type_string(type), - field1, field0, TRB_LEN(field2), - GET_TD_SIZE(field2), - GET_INTR_TARGET(field2), - field3 & TRB_BEI ? 'B' : 'b', - field3 & TRB_IDT ? 'T' : 't', - field3 & TRB_IOC ? 'I' : 'i', - field3 & TRB_CHAIN ? 'C' : 'c', - field3 & TRB_NO_SNOOP ? 'S' : 's', - field3 & TRB_ISP ? 
'I' : 'i', - field3 & TRB_ENT ? 'E' : 'e', - field3 & TRB_CYCLE ? 'C' : 'c', - !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v'); + ret = snprintf(str, size, + "type '%s' Buffer %08x%08x length %ld " + "TD size %ld intr %ld " + "flags %c:%c:%c:%c:%c:%c:%c:%c:%c", + cdnsp_trb_type_string(type), + field1, field0, TRB_LEN(field2), + GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + field3 & TRB_BEI ? 'B' : 'b', + field3 & TRB_IDT ? 'T' : 't', + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_NO_SNOOP ? 'S' : 's', + field3 & TRB_ISP ? 'I' : 'i', + field3 & TRB_ENT ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c', + !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v'); break; case TRB_CMD_NOOP: case TRB_ENABLE_SLOT: - ret += snprintf(str, size, "%s: flags %c", - cdnsp_trb_type_string(type), - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, "%s: flags %c", + cdnsp_trb_type_string(type), + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_DISABLE_SLOT: - ret += snprintf(str, size, "%s: slot %ld flags %c", - cdnsp_trb_type_string(type), - TRB_TO_SLOT_ID(field3), - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, "%s: slot %ld flags %c", + cdnsp_trb_type_string(type), + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_ADDR_DEV: - ret += snprintf(str, size, - "%s: ctx %08x%08x slot %ld flags %c:%c", - cdnsp_trb_type_string(type), field1, field0, - TRB_TO_SLOT_ID(field3), - field3 & TRB_BSR ? 'B' : 'b', - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, + "%s: ctx %08x%08x slot %ld flags %c:%c", + cdnsp_trb_type_string(type), field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_BSR ? 'B' : 'b', + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_CONFIG_EP: - ret += snprintf(str, size, - "%s: ctx %08x%08x slot %ld flags %c:%c", - cdnsp_trb_type_string(type), field1, field0, - TRB_TO_SLOT_ID(field3), - field3 & TRB_DC ? 'D' : 'd', - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, + "%s: ctx %08x%08x slot %ld flags %c:%c", + cdnsp_trb_type_string(type), field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_DC ? 'D' : 'd', + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_EVAL_CONTEXT: - ret += snprintf(str, size, - "%s: ctx %08x%08x slot %ld flags %c", - cdnsp_trb_type_string(type), field1, field0, - TRB_TO_SLOT_ID(field3), - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, + "%s: ctx %08x%08x slot %ld flags %c", + cdnsp_trb_type_string(type), field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_RESET_EP: case TRB_HALT_ENDPOINT: case TRB_FLUSH_ENDPOINT: - ret += snprintf(str, size, - "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c", - cdnsp_trb_type_string(type), - ep_num, ep_id % 2 ? "out" : "in", - TRB_TO_EP_INDEX(field3), field1, field0, - TRB_TO_SLOT_ID(field3), - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, + "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c", + cdnsp_trb_type_string(type), + ep_num, ep_id % 2 ? "out" : "in", + TRB_TO_EP_INDEX(field3), field1, field0, + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_STOP_RING: - ret += snprintf(str, size, - "%s: ep%d%s(%d) slot %ld sp %d flags %c", - cdnsp_trb_type_string(type), - ep_num, ep_id % 2 ? "out" : "in", - TRB_TO_EP_INDEX(field3), - TRB_TO_SLOT_ID(field3), - TRB_TO_SUSPEND_PORT(field3), - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, + "%s: ep%d%s(%d) slot %ld sp %d flags %c", + cdnsp_trb_type_string(type), + ep_num, ep_id % 2 ? 
"out" : "in", + TRB_TO_EP_INDEX(field3), + TRB_TO_SLOT_ID(field3), + TRB_TO_SUSPEND_PORT(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_SET_DEQ: - ret += snprintf(str, size, - "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld flags %c", - cdnsp_trb_type_string(type), - ep_num, ep_id % 2 ? "out" : "in", - TRB_TO_EP_INDEX(field3), field1, field0, - TRB_TO_STREAM_ID(field2), - TRB_TO_SLOT_ID(field3), - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, + "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld flags %c", + cdnsp_trb_type_string(type), + ep_num, ep_id % 2 ? "out" : "in", + TRB_TO_EP_INDEX(field3), field1, field0, + TRB_TO_STREAM_ID(field2), + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_RESET_DEV: - ret += snprintf(str, size, "%s: slot %ld flags %c", - cdnsp_trb_type_string(type), - TRB_TO_SLOT_ID(field3), - field3 & TRB_CYCLE ? 'C' : 'c'); + ret = snprintf(str, size, "%s: slot %ld flags %c", + cdnsp_trb_type_string(type), + TRB_TO_SLOT_ID(field3), + field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_ENDPOINT_NRDY: - temp = TRB_TO_HOST_STREAM(field2); - - ret += snprintf(str, size, - "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c", - cdnsp_trb_type_string(type), - ep_num, ep_id % 2 ? "out" : "in", - TRB_TO_EP_INDEX(field3), temp, - temp == STREAM_PRIME_ACK ? "(PRIME)" : "", - temp == STREAM_REJECTED ? "(REJECTED)" : "", - TRB_TO_DEV_STREAM(field0), - field3 & TRB_STAT ? 'S' : 's', - field3 & TRB_CYCLE ? 'C' : 'c'); + temp = TRB_TO_HOST_STREAM(field2); + + ret = snprintf(str, size, + "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c", + cdnsp_trb_type_string(type), + ep_num, ep_id % 2 ? "out" : "in", + TRB_TO_EP_INDEX(field3), temp, + temp == STREAM_PRIME_ACK ? "(PRIME)" : "", + temp == STREAM_REJECTED ? "(REJECTED)" : "", + TRB_TO_DEV_STREAM(field0), + field3 & TRB_STAT ? 'S' : 's', + field3 & TRB_CYCLE ? 
'C' : 'c'); break; default: - ret += snprintf(str, size, - "type '%s' -> raw %08x %08x %08x %08x", - cdnsp_trb_type_string(type), - field0, field1, field2, field3); + ret = snprintf(str, size, + "type '%s' -> raw %08x %08x %08x %08x", + cdnsp_trb_type_string(type), + field0, field1, field2, field3); } + if (ret >= size) + pr_info("CDNSP: buffer overflowed.\n"); + return str; } diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 73f419adce61..4bb6d304eb4b 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1919,6 +1919,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, struct usbtmc_ctrlrequest request; u8 *buffer = NULL; int rv; + unsigned int is_in, pipe; unsigned long res; res = copy_from_user(&request, arg, sizeof(struct usbtmc_ctrlrequest)); @@ -1928,12 +1929,14 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, if (request.req.wLength > USBTMC_BUFSIZE) return -EMSGSIZE; + is_in = request.req.bRequestType & USB_DIR_IN; + if (request.req.wLength) { buffer = kmalloc(request.req.wLength, GFP_KERNEL); if (!buffer) return -ENOMEM; - if ((request.req.bRequestType & USB_DIR_IN) == 0) { + if (!is_in) { /* Send control data to device */ res = copy_from_user(buffer, request.data, request.req.wLength); @@ -1944,8 +1947,12 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, } } + if (is_in) + pipe = usb_rcvctrlpipe(data->usb_dev, 0); + else + pipe = usb_sndctrlpipe(data->usb_dev, 0); rv = usb_control_msg(data->usb_dev, - usb_rcvctrlpipe(data->usb_dev, 0), + pipe, request.req.bRequest, request.req.bRequestType, request.req.wValue, @@ -1957,7 +1964,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, goto exit; } - if (rv && (request.req.bRequestType & USB_DIR_IN)) { + if (rv && is_in) { /* Read control data from device */ res = copy_to_user(request.data, buffer, rv); if (res) diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index cb9059a8444b..71e62b3081db 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -1417,6 +1417,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg); void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); #define dwc2_is_device_connected(hsotg) (hsotg->connected) +#define dwc2_is_device_enabled(hsotg) (hsotg->enabled) int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg); int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup); int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg); @@ -1453,6 +1454,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode) { return 0; } #define dwc2_is_device_connected(hsotg) (0) +#define dwc2_is_device_enabled(hsotg) (0) static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) { return 0; } static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c index aa6eb76f64dd..36f2c38416e5 100644 --- a/drivers/usb/dwc2/drd.c +++ b/drivers/usb/dwc2/drd.c @@ -109,8 +109,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role) already = dwc2_ovr_avalid(hsotg, true); } else if (role == USB_ROLE_DEVICE) { already = dwc2_ovr_bvalid(hsotg, true); - /* This clear DCTL.SFTDISCON bit */ - dwc2_hsotg_core_connect(hsotg); + if (dwc2_is_device_enabled(hsotg)) { + /* This clear DCTL.SFTDISCON bit */ + dwc2_hsotg_core_connect(hsotg); + } } else { if 
(dwc2_is_device_mode(hsotg)) { if (!dwc2_ovr_bvalid(hsotg, false)) diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index e196673f5c64..efaf0db595f4 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -242,7 +242,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, break; case OMAP_DWC3_ID_FLOAT: - if (omap->vbus_reg) + if (omap->vbus_reg && regulator_is_enabled(omap->vbus_reg)) regulator_disable(omap->vbus_reg); val = dwc3_omap_read_utmi_ctrl(omap); val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG; diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 7ff8fc8f79a9..4d9608cc55f7 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -43,6 +43,7 @@ #define PCI_DEVICE_ID_INTEL_ADLP 0x51ee #define PCI_DEVICE_ID_INTEL_ADLM 0x54ee #define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1 +#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61 #define PCI_DEVICE_ID_INTEL_TGL 0x9a15 #define PCI_DEVICE_ID_AMD_MR 0x163a @@ -85,8 +86,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = { static struct gpiod_lookup_table platform_bytcr_gpios = { .dev_id = "0000:00:16.0", .table = { - GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH), {} }, }; @@ -119,6 +120,13 @@ static const struct property_entry dwc3_pci_intel_properties[] = { {} }; +static const struct property_entry dwc3_pci_intel_byt_properties[] = { + PROPERTY_ENTRY_STRING("dr_mode", "peripheral"), + PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), + PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), + {} +}; + static const struct property_entry dwc3_pci_mrfld_properties[] = { PROPERTY_ENTRY_STRING("dr_mode", "otg"), PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"), @@ -161,6 +169,10 @@ static const struct software_node dwc3_pci_intel_swnode = { .properties = dwc3_pci_intel_properties, }; +static const struct software_node dwc3_pci_intel_byt_swnode = { + .properties = dwc3_pci_intel_byt_properties, +}; + static const struct software_node dwc3_pci_intel_mrfld_swnode = { .properties = dwc3_pci_mrfld_properties, }; @@ -173,7 +185,8 @@ static const struct software_node dwc3_pci_amd_mr_swnode = { .properties = dwc3_pci_mr_properties, }; -static int dwc3_pci_quirks(struct dwc3_pci *dwc) +static int dwc3_pci_quirks(struct dwc3_pci *dwc, + const struct software_node *swnode) { struct pci_dev *pdev = dwc->pci; @@ -230,7 +243,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc) } } - return 0; + return device_add_software_node(&dwc->dwc3->dev, swnode); } #ifdef CONFIG_PM @@ -295,11 +308,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) dwc->dwc3->dev.parent = dev; ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev)); - ret = device_add_software_node(&dwc->dwc3->dev, (void *)id->driver_data); - if (ret < 0) - goto err; - - ret = dwc3_pci_quirks(dwc); + ret = dwc3_pci_quirks(dwc, (void *)id->driver_data); if (ret) goto err; @@ -344,7 +353,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { (kernel_ulong_t) &dwc3_pci_intel_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT), - (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + (kernel_ulong_t) &dwc3_pci_intel_byt_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), (kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, }, @@ -409,6 +418,9 @@ static const struct pci_device_id dwc3_pci_id_table[] 
= { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS), (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS), + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL), (kernel_ulong_t) &dwc3_pci_intel_swnode, }, diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 146cebde33b8..00cf8ebcb338 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -4131,9 +4131,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) unsigned long flags; irqreturn_t ret = IRQ_NONE; + local_bh_disable(); spin_lock_irqsave(&dwc->lock, flags); ret = dwc3_process_event_buf(evt); spin_unlock_irqrestore(&dwc->lock, flags); + local_bh_enable(); return ret; } diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 415994077d15..1a710c129efa 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -1160,6 +1160,8 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) int msf = common->cmnd[1] & 0x02; int start_track = common->cmnd[6]; u8 *buf = (u8 *)bh->buf; + u8 format; + int i, len; if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ start_track > 1) { @@ -1167,18 +1169,62 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) return -EINVAL; } - memset(buf, 0, 20); - buf[1] = (20-2); /* TOC data length */ - buf[2] = 1; /* First track number */ - buf[3] = 1; /* Last track number */ - buf[5] = 0x16; /* Data track, copying allowed */ - buf[6] = 0x01; /* Only track is number 1 */ - store_cdrom_address(&buf[8], msf, 0); + format = common->cmnd[2] & 0xf; + /* + * Check if CDB is old style SFF-8020i + * i.e. format is in 2 MSBs of byte 9 + * Mac OS-X host sends us this. 
+ */ + if (format == 0) + format = (common->cmnd[9] >> 6) & 0x3; + + switch (format) { + case 0: + /* Formatted TOC */ + len = 4 + 2*8; /* 4 byte header + 2 descriptors */ + memset(buf, 0, len); + buf[1] = len - 2; /* TOC Length excludes length field */ + buf[2] = 1; /* First track number */ + buf[3] = 1; /* Last track number */ + buf[5] = 0x16; /* Data track, copying allowed */ + buf[6] = 0x01; /* Only track is number 1 */ + store_cdrom_address(&buf[8], msf, 0); + + buf[13] = 0x16; /* Lead-out track is data */ + buf[14] = 0xAA; /* Lead-out track number */ + store_cdrom_address(&buf[16], msf, curlun->num_sectors); + return len; + + case 2: + /* Raw TOC */ + len = 4 + 3*11; /* 4 byte header + 3 descriptors */ + memset(buf, 0, len); /* Header + A0, A1 & A2 descriptors */ + buf[1] = len - 2; /* TOC Length excludes length field */ + buf[2] = 1; /* First complete session */ + buf[3] = 1; /* Last complete session */ + + buf += 4; + /* fill in A0, A1 and A2 points */ + for (i = 0; i < 3; i++) { + buf[0] = 1; /* Session number */ + buf[1] = 0x16; /* Data track, copying allowed */ + /* 2 - Track number 0 -> TOC */ + buf[3] = 0xA0 + i; /* A0, A1, A2 point */ + /* 4, 5, 6 - Min, sec, frame is zero */ + buf[8] = 1; /* Pmin: last track number */ + buf += 11; /* go to next track descriptor */ + } + buf -= 11; /* go back to A2 descriptor */ - buf[13] = 0x16; /* Lead-out track is data */ - buf[14] = 0xAA; /* Lead-out track number */ - store_cdrom_address(&buf[16], msf, curlun->num_sectors); - return 20; + /* For A2, 7, 8, 9, 10 - zero, Pmin, Psec, Pframe of Lead out */ + store_cdrom_address(&buf[7], msf, curlun->num_sectors); + return len; + + default: + /* Multi-session, PMA, ATIP, CD-TEXT not supported/required */ + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; + return -EINVAL; + } } static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh) @@ -1905,7 +1951,7 @@ static int do_scsi_command(struct fsg_common *common) common->data_size_from_cmnd = get_unaligned_be16(&common->cmnd[7]); reply = check_command(common, 10, DATA_DIR_TO_HOST, - (7<<6) | (1<<1), 1, + (0xf<<6) | (3<<1), 1, "READ TOC"); if (reply == 0) reply = do_read_toc(common, bh); diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index d9ed651f06ac..4150de96b937 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -640,6 +640,7 @@ static int rndis_set_response(struct rndis_params *params, BufLength = le32_to_cpu(buf->InformationBufferLength); BufOffset = le32_to_cpu(buf->InformationBufferOffset); if ((BufLength > RNDIS_MAX_TOTAL_SIZE) || + (BufOffset > RNDIS_MAX_TOTAL_SIZE) || (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE)) return -EINVAL; @@ -922,6 +923,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v) params->resp_avail = resp_avail; params->v = v; INIT_LIST_HEAD(¶ms->resp_queue); + spin_lock_init(¶ms->resp_lock); pr_debug("%s: configNr = %d\n", __func__, i); return params; @@ -1015,12 +1017,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf) { rndis_resp_t *r, *n; + spin_lock(¶ms->resp_lock); list_for_each_entry_safe(r, n, ¶ms->resp_queue, list) { if (r->buf == buf) { list_del(&r->list); kfree(r); } } + spin_unlock(¶ms->resp_lock); } EXPORT_SYMBOL_GPL(rndis_free_response); @@ -1030,14 +1034,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length) if (!length) return NULL; + spin_lock(¶ms->resp_lock); list_for_each_entry_safe(r, n, ¶ms->resp_queue, list) { if (!r->send) { r->send = 1; *length 
= r->length; + spin_unlock(¶ms->resp_lock); return r->buf; } } + spin_unlock(¶ms->resp_lock); return NULL; } EXPORT_SYMBOL_GPL(rndis_get_next_response); @@ -1054,7 +1061,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length) r->length = length; r->send = 0; + spin_lock(¶ms->resp_lock); list_add_tail(&r->list, ¶ms->resp_queue); + spin_unlock(¶ms->resp_lock); return r; } diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h index f6167f7fea82..6206b8b7490f 100644 --- a/drivers/usb/gadget/function/rndis.h +++ b/drivers/usb/gadget/function/rndis.h @@ -174,6 +174,7 @@ typedef struct rndis_params { void (*resp_avail)(void *v); void *v; struct list_head resp_queue; + spinlock_t resp_lock; } rndis_params; /* RNDIS Message parser and other useless functions */ diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 1b223cba4c2c..3279b4767424 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1829,8 +1829,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) spin_lock_irq (&dev->lock); value = -EINVAL; if (dev->buf) { + spin_unlock_irq(&dev->lock); kfree(kbuf); - goto fail; + return value; } dev->buf = kbuf; @@ -1877,8 +1878,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) value = usb_gadget_probe_driver(&gadgetfs_driver); if (value != 0) { - kfree (dev->buf); - dev->buf = NULL; + spin_lock_irq(&dev->lock); + goto fail; } else { /* at this point "good" hardware has for the first time * let the USB the host see us. alternatively, if users @@ -1895,6 +1896,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) return value; fail: + dev->config = NULL; + dev->hs_config = NULL; + dev->dev = NULL; spin_unlock_irq (&dev->lock); pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev); kfree (dev->buf); diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 14fdf918ecfe..61099f2d057d 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1434,7 +1434,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) usb_gadget_udc_stop(udc); udc->driver = NULL; - udc->dev.driver = NULL; udc->gadget->dev.driver = NULL; } @@ -1496,7 +1495,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri driver->function); udc->driver = driver; - udc->dev.driver = &driver->driver; udc->gadget->dev.driver = &driver->driver; usb_gadget_udc_set_speed(udc, driver->max_speed); @@ -1519,7 +1517,6 @@ err1: dev_err(&udc->dev, "failed to start %s: %d\n", udc->driver->function, ret); udc->driver = NULL; - udc->dev.driver = NULL; udc->gadget->dev.driver = NULL; return ret; } diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index 43f1b0d461c1..be76f891b9c5 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -32,9 +32,6 @@ #include <linux/workqueue.h> /* XUSB_DEV registers */ -#define SPARAM 0x000 -#define SPARAM_ERSTMAX_MASK GENMASK(20, 16) -#define SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK) #define DB 0x004 #define DB_TARGET_MASK GENMASK(15, 8) #define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK) @@ -275,8 +272,10 @@ BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff) BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff) BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff) BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff) 
-BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff) +BUILD_EP_CONTEXT_RW(rsvd, rsvd[0], 24, 0x1) BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1) +BUILD_EP_CONTEXT_RW(splitxstate, rsvd[0], 26, 0x1) +BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 27, 0x1f) BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3) BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff) BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f) @@ -1557,6 +1556,9 @@ static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt) ep_reload(xudc, ep->index); ep_ctx_write_state(ep->context, EP_STATE_RUNNING); + ep_ctx_write_rsvd(ep->context, 0); + ep_ctx_write_partial_td(ep->context, 0); + ep_ctx_write_splitxstate(ep->context, 0); ep_ctx_write_seq_num(ep->context, 0); ep_reload(xudc, ep->index); @@ -2812,7 +2814,10 @@ static void tegra_xudc_reset(struct tegra_xudc *xudc) xudc->setup_seq_num = 0; xudc->queued_setup_packet = false; - ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num); + ep_ctx_write_rsvd(ep0->context, 0); + ep_ctx_write_partial_td(ep0->context, 0); + ep_ctx_write_splitxstate(ep0->context, 0); + ep_ctx_write_seq_num(ep0->context, 0); deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]); @@ -3295,11 +3300,6 @@ static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc) unsigned int i; u32 val; - val = xudc_readl(xudc, SPARAM); - val &= ~(SPARAM_ERSTMAX_MASK); - val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS); - xudc_writel(xudc, val, SPARAM); - for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) { memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE * sizeof(*xudc->event_ring[i])); diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index fb4ffedd6f0d..9cf43731bcd1 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -1612,6 +1612,8 @@ static void xudc_getstatus(struct xusb_udc *udc) break; case USB_RECIP_ENDPOINT: epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; + if (epnum >= XUSB_MAX_ENDPOINTS) + goto stall; target_ep = &udc->ep[epnum]; epcfgreg = udc->read_fn(udc->addr + target_ep->offset); halt = epcfgreg & XUSB_EP_CFG_STALL_MASK; @@ -1679,6 +1681,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc) case USB_RECIP_ENDPOINT: if (!udc->setup.wValue) { endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; + if (endpoint >= XUSB_MAX_ENDPOINTS) { + xudc_ep0_stall(udc); + return; + } target_ep = &udc->ep[endpoint]; outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK; outinbit = outinbit >> 7; diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index e87cf3a00fa4..638f03b89739 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -21,6 +21,9 @@ static const char hcd_name[] = "ehci-pci"; /* defined here to avoid adding to pci_ids.h for single instance use */ #define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70 +#define PCI_VENDOR_ID_ASPEED 0x1a03 +#define PCI_DEVICE_ID_ASPEED_EHCI 0x2603 + /*-------------------------------------------------------------------------*/ #define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC 0x0939 static inline bool is_intel_quark_x1000(struct pci_dev *pdev) @@ -222,6 +225,12 @@ static int ehci_pci_setup(struct usb_hcd *hcd) ehci->has_synopsys_hc_bug = 1; } break; + case PCI_VENDOR_ID_ASPEED: + if (pdev->device == PCI_DEVICE_ID_ASPEED_EHCI) { + ehci_info(ehci, "applying Aspeed HC workaround\n"); + ehci->is_aspeed = 1; + } + break; } /* optional debug port, normally in the first BAR */ diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 
df3522dab31b..1e7dc130c39a 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -762,7 +762,7 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci) } pm_runtime_allow(xhci_to_hcd(xhci)->self.controller); xhci->test_mode = 0; - return xhci_reset(xhci); + return xhci_reset(xhci, XHCI_RESET_SHORT_USEC); } void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port, @@ -1088,6 +1088,9 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status, if (link_state == XDEV_U2) *status |= USB_PORT_STAT_L1; if (link_state == XDEV_U0) { + if (bus_state->resume_done[portnum]) + usb_hcd_end_port_resume(&port->rhub->hcd->self, + portnum); bus_state->resume_done[portnum] = 0; clear_bit(portnum, &bus_state->resuming_ports); if (bus_state->suspended_ports & (1 << portnum)) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 0e312066c5c6..b398d3fdabf6 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2583,7 +2583,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) fail: xhci_halt(xhci); - xhci_reset(xhci); + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); xhci_mem_cleanup(xhci); return -ENOMEM; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index f5b1bcc875de..2c1cc9480887 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -65,7 +65,7 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) * handshake done). There are two failure modes: "usec" have passed (major * hardware flakeout), or the register reads as all-ones (hardware removed). */ -int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec) +int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us) { u32 result; int ret; @@ -73,7 +73,7 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec) ret = readl_poll_timeout_atomic(ptr, result, (result & mask) == done || result == U32_MAX, - 1, usec); + 1, timeout_us); if (result == U32_MAX) /* card removed */ return -ENODEV; @@ -162,7 +162,7 @@ int xhci_start(struct xhci_hcd *xhci) * Transactions will be terminated immediately, and operational registers * will be set to their defaults. */ -int xhci_reset(struct xhci_hcd *xhci) +int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) { u32 command; u32 state; @@ -195,8 +195,7 @@ int xhci_reset(struct xhci_hcd *xhci) if (xhci->quirks & XHCI_INTEL_HOST) udelay(1000); - ret = xhci_handshake(&xhci->op_regs->command, - CMD_RESET, 0, 10 * 1000 * 1000); + ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us); if (ret) return ret; @@ -209,8 +208,7 @@ int xhci_reset(struct xhci_hcd *xhci) * xHCI cannot write to any doorbells or operational registers other * than status until the "Controller Not Ready" flag is cleared. 
*/ - ret = xhci_handshake(&xhci->op_regs->status, - STS_CNR, 0, 10 * 1000 * 1000); + ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us); xhci->usb2_rhub.bus_state.port_c_suspend = 0; xhci->usb2_rhub.bus_state.suspended_ports = 0; @@ -731,7 +729,7 @@ static void xhci_stop(struct usb_hcd *hcd) xhci->xhc_state |= XHCI_STATE_HALTED; xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; xhci_halt(xhci); - xhci_reset(xhci); + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); spin_unlock_irq(&xhci->lock); xhci_cleanup_msix(xhci); @@ -784,7 +782,7 @@ void xhci_shutdown(struct usb_hcd *hcd) xhci_halt(xhci); /* Workaround for spurious wakeups at shutdown with HSW */ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) - xhci_reset(xhci); + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); spin_unlock_irq(&xhci->lock); xhci_cleanup_msix(xhci); @@ -1091,6 +1089,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) int retval = 0; bool comp_timer_running = false; bool pending_portevent = false; + bool reinit_xhc = false; if (!hcd->state) return 0; @@ -1107,10 +1106,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); spin_lock_irq(&xhci->lock); - if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend) - hibernated = true; - if (!hibernated) { + if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) + reinit_xhc = true; + + if (!reinit_xhc) { /* * Some controllers might lose power during suspend, so wait * for controller not ready bit to clear, just as in xHC init. @@ -1143,12 +1143,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; } - temp = readl(&xhci->op_regs->status); } - /* If restore operation fails, re-initialize the HC during resume */ - if ((temp & STS_SRE) || hibernated) { + temp = readl(&xhci->op_regs->status); + + /* re-initialize the HC on Restore Error, or Host Controller Error */ + if (temp & (STS_SRE | STS_HCE)) { + reinit_xhc = true; + xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); + } + if (reinit_xhc) { if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !(xhci_all_ports_seen_u0(xhci))) { del_timer_sync(&xhci->comp_mode_recovery_timer); @@ -1163,7 +1168,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) xhci_dbg(xhci, "Stop HCD\n"); xhci_halt(xhci); xhci_zero_64b_regs(xhci); - retval = xhci_reset(xhci); + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); spin_unlock_irq(&xhci->lock); if (retval) return retval; @@ -1604,9 +1609,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag struct urb_priv *urb_priv; int num_tds; - if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, - true, true, __func__) <= 0) + if (!urb) return -EINVAL; + ret = xhci_check_args(hcd, urb->dev, urb->ep, + true, true, __func__); + if (ret <= 0) + return ret ? ret : -EINVAL; slot_id = urb->dev->slot_id; ep_index = xhci_get_endpoint_index(&urb->ep->desc); @@ -3323,7 +3331,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, return -EINVAL; ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); if (ret <= 0) - return -EINVAL; + return ret ? 
ret : -EINVAL; if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" " descriptor for ep 0x%x does not support streams\n", @@ -5308,7 +5316,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) xhci_dbg(xhci, "Resetting HCD\n"); /* Reset the internal HC memory state and registers. */ - retval = xhci_reset(xhci); + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); if (retval) return retval; xhci_dbg(xhci, "Reset complete\n"); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 5a75fe563123..bc0789229527 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -229,6 +229,9 @@ struct xhci_op_regs { #define CMD_ETE (1 << 14) /* bits 15:31 are reserved (and should be preserved on writes). */ +#define XHCI_RESET_LONG_USEC (10 * 1000 * 1000) +#define XHCI_RESET_SHORT_USEC (250 * 1000) + /* IMAN - Interrupt Management Register */ #define IMAN_IE (1 << 1) #define IMAN_IP (1 << 0) @@ -2083,11 +2086,11 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci, /* xHCI host controller glue */ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); -int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec); +int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us); void xhci_quiesce(struct xhci_hcd *xhci); int xhci_halt(struct xhci_hcd *xhci); int xhci_start(struct xhci_hcd *xhci); -int xhci_reset(struct xhci_hcd *xhci); +int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us); int xhci_run(struct usb_hcd *hcd); int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); void xhci_shutdown(struct usb_hcd *hcd); @@ -2467,6 +2470,8 @@ static inline const char *xhci_decode_ctrl_ctx(char *str, unsigned int bit; int ret = 0; + str[0] = '\0'; + if (drop) { ret = sprintf(str, "Drop:"); for_each_set_bit(bit, &drop, 32) @@ -2624,8 +2629,11 @@ static inline const char *xhci_decode_usbsts(char *str, u32 usbsts) { int ret = 0; + ret = sprintf(str, " 0x%08x", usbsts); + if (usbsts == ~(u32)0) - return " 0xffffffff"; + return str; + if (usbsts & STS_HALT) ret += sprintf(str + ret, " HCHalted"); if (usbsts & STS_FATAL) diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index de5c01257060..ef8d1c73c754 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -66,6 +66,7 @@ config USB_SERIAL_SIMPLE - Libtransistor USB console - a number of Motorola phones - Motorola Tetra devices + - Nokia mobile phones - Novatel Wireless GPS receivers - Siemens USB/MPI adapter. - ViVOtech ViVOpay USB device. 
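Editor's note: the xhci hunks above replace the fixed 10-second handshake timeout with a caller-supplied value, defining XHCI_RESET_LONG_USEC (10 s) for full initialization and resume and XHCI_RESET_SHORT_USEC (250 ms) for resets of an already-halted controller. The following is a minimal sketch of that polling pattern, mirroring the xhci_handshake() body shown in the diff; it is an illustration, not the kernel source.

/*
 * Illustration only, not kernel source: the register-handshake pattern
 * used by xhci_handshake() above, with the timeout supplied by the
 * caller (XHCI_RESET_LONG_USEC for cold init/resume,
 * XHCI_RESET_SHORT_USEC for an already-halted controller).
 */
#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/limits.h>
#include <linux/types.h>

static int handshake_sketch(void __iomem *ptr, u32 mask, u32 done,
			    u64 timeout_us)
{
	u32 result;
	int ret;

	/* Poll every 1 us until the masked bits match or the timeout hits. */
	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, timeout_us);
	if (result == U32_MAX)		/* register reads all-ones: device gone */
		return -ENODEV;

	return ret;			/* 0 on success, -ETIMEDOUT on timeout */
}

As the hunks above show, xhci_stop(), xhci_shutdown(), xhci_mem_init() and xhci_exit_test_mode() pass the short timeout, while xhci_resume() and xhci_gen_setup() pass the long one.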
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 4b65e6904499..b5a1864e9cfd 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -81,7 +81,6 @@ #define CH341_QUIRK_SIMULATE_BREAK BIT(1) static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x1a86, 0x5512) }, { USB_DEVICE(0x1a86, 0x5523) }, { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 962e9943fc20..e7755d9cfc61 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5821E 0x81d7 #define DELL_PRODUCT_5821E_ESIM 0x81e0 +#define DELL_PRODUCT_5829E_ESIM 0x81e4 +#define DELL_PRODUCT_5829E 0x81e6 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E), + .driver_info = RSVD(0) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM), + .driver_info = RSVD(0) | RSVD(6) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */ + .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */ + .driver_info = NCTRL(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index e2ef761ed39c..88b284d61681 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -436,6 +436,7 @@ static int pl2303_detect_type(struct usb_serial *serial) case 0x105: case 0x305: case 0x405: + case 0x605: /* * Assume it's an HXN-type if the device doesn't * support the old read request value. 
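Editor's note: the option.c additions above (Dell 5829E / 5829E eSIM, Telit LE910R1) attach RSVD() and NCTRL() masks to driver_info so individual interfaces of a composite modem get special handling. Below is a minimal sketch of the driver_info-as-interface-bitmask idea under assumed names; RSVD_SKETCH() and probe_sketch() are hypothetical and are not the option.c implementation.

/*
 * Illustration only, not the option.c source: the ".driver_info =
 * RSVD(0) | RSVD(6)" entries above use driver_info as a per-interface
 * bitmask so selected interfaces are left unclaimed. RSVD_SKETCH() and
 * probe_sketch() are hypothetical names used for this sketch.
 */
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/usb.h>

#define RSVD_SKETCH(ifnum)	BIT(ifnum)	/* assumed packing: one bit per interface */

static int probe_sketch(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	unsigned long reserved = id->driver_info;
	u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;

	/*
	 * Decline reserved interfaces so another driver (for example the
	 * modem's network function) can bind them instead.
	 */
	if (reserved & RSVD_SKETCH(ifnum))
		return -ENODEV;

	return 0;	/* continue with normal serial-port setup */
}

With an id-table entry carrying RSVD_SKETCH(0) | RSVD_SKETCH(6), interfaces 0 and 6 would be left for another driver while the rest bind as serial ports; NCTRL() in the real table is another per-interface marker of the same general shape.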
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index bd23a7cb1be2..4c6747889a19 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -91,6 +91,11 @@ DEVICE(moto_modem, MOTO_IDS); { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); +/* Nokia mobile phone driver */ +#define NOKIA_IDS() \ + { USB_DEVICE(0x0421, 0x069a) } /* Nokia 130 (RM-1035) */ +DEVICE(nokia, NOKIA_IDS); + /* Novatel Wireless GPS driver */ #define NOVATEL_IDS() \ { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ @@ -123,6 +128,7 @@ static struct usb_serial_driver * const serial_drivers[] = { &vivopay_device, &moto_modem_device, &motorola_tetra_device, + &nokia_device, &novatel_gps_device, &hp4x_device, &suunto_device, @@ -140,6 +146,7 @@ static const struct usb_device_id id_table[] = { VIVOPAY_IDS(), MOTO_IDS(), MOTOROLA_TETRA_IDS(), + NOKIA_IDS(), NOVATEL_IDS(), HP4X_IDS(), SUUNTO_IDS(), diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index 5f7d678502be..6012603f3630 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c @@ -237,36 +237,33 @@ static struct us_unusual_dev ene_ub6250_unusual_dev_list[] = { #define memstick_logaddr(logadr1, logadr0) ((((u16)(logadr1)) << 8) | (logadr0)) -struct SD_STATUS { - u8 Insert:1; - u8 Ready:1; - u8 MediaChange:1; - u8 IsMMC:1; - u8 HiCapacity:1; - u8 HiSpeed:1; - u8 WtP:1; - u8 Reserved:1; -}; - -struct MS_STATUS { - u8 Insert:1; - u8 Ready:1; - u8 MediaChange:1; - u8 IsMSPro:1; - u8 IsMSPHG:1; - u8 Reserved1:1; - u8 WtP:1; - u8 Reserved2:1; -}; - -struct SM_STATUS { - u8 Insert:1; - u8 Ready:1; - u8 MediaChange:1; - u8 Reserved:3; - u8 WtP:1; - u8 IsMS:1; -}; +/* SD_STATUS bits */ +#define SD_Insert BIT(0) +#define SD_Ready BIT(1) +#define SD_MediaChange BIT(2) +#define SD_IsMMC BIT(3) +#define SD_HiCapacity BIT(4) +#define SD_HiSpeed BIT(5) +#define SD_WtP BIT(6) + /* Bit 7 reserved */ + +/* MS_STATUS bits */ +#define MS_Insert BIT(0) +#define MS_Ready BIT(1) +#define MS_MediaChange BIT(2) +#define MS_IsMSPro BIT(3) +#define MS_IsMSPHG BIT(4) + /* Bit 5 reserved */ +#define MS_WtP BIT(6) + /* Bit 7 reserved */ + +/* SM_STATUS bits */ +#define SM_Insert BIT(0) +#define SM_Ready BIT(1) +#define SM_MediaChange BIT(2) + /* Bits 3-5 reserved */ +#define SM_WtP BIT(6) +#define SM_IsMS BIT(7) struct ms_bootblock_cis { u8 bCistplDEVICE[6]; /* 0 */ @@ -437,9 +434,9 @@ struct ene_ub6250_info { u8 *bbuf; /* for 6250 code */ - struct SD_STATUS SD_Status; - struct MS_STATUS MS_Status; - struct SM_STATUS SM_Status; + u8 SD_Status; + u8 MS_Status; + u8 SM_Status; /* ----- SD Control Data ---------------- */ /*SD_REGISTER SD_Regs; */ @@ -602,7 +599,7 @@ static int sd_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; - if (info->SD_Status.Insert && info->SD_Status.Ready) + if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready)) return USB_STOR_TRANSPORT_GOOD; else { ene_sd_init(us); @@ -622,7 +619,7 @@ static int sd_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb) 0x0b, 0x00, 0x80, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 }; - if (info->SD_Status.WtP) + if (info->SD_Status & SD_WtP) usb_stor_set_xfer_buf(mediaWP, 12, srb); else usb_stor_set_xfer_buf(mediaNoWP, 12, srb); @@ -641,9 +638,9 @@ static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb) struct ene_ub6250_info 
*info = (struct ene_ub6250_info *) us->extra; usb_stor_dbg(us, "sd_scsi_read_capacity\n"); - if (info->SD_Status.HiCapacity) { + if (info->SD_Status & SD_HiCapacity) { bl_len = 0x200; - if (info->SD_Status.IsMMC) + if (info->SD_Status & SD_IsMMC) bl_num = info->HC_C_SIZE-1; else bl_num = (info->HC_C_SIZE + 1) * 1024 - 1; @@ -693,7 +690,7 @@ static int sd_scsi_read(struct us_data *us, struct scsi_cmnd *srb) return USB_STOR_TRANSPORT_ERROR; } - if (info->SD_Status.HiCapacity) + if (info->SD_Status & SD_HiCapacity) bnByte = bn; /* set up the command wrapper */ @@ -733,7 +730,7 @@ static int sd_scsi_write(struct us_data *us, struct scsi_cmnd *srb) return USB_STOR_TRANSPORT_ERROR; } - if (info->SD_Status.HiCapacity) + if (info->SD_Status & SD_HiCapacity) bnByte = bn; /* set up the command wrapper */ @@ -1456,7 +1453,7 @@ static int ms_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb) struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); /* pr_info("MS_SCSI_Test_Unit_Ready\n"); */ - if (info->MS_Status.Insert && info->MS_Status.Ready) { + if ((info->MS_Status & MS_Insert) && (info->MS_Status & MS_Ready)) { return USB_STOR_TRANSPORT_GOOD; } else { ene_ms_init(us); @@ -1476,7 +1473,7 @@ static int ms_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb) 0x0b, 0x00, 0x80, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 }; - if (info->MS_Status.WtP) + if (info->MS_Status & MS_WtP) usb_stor_set_xfer_buf(mediaWP, 12, srb); else usb_stor_set_xfer_buf(mediaNoWP, 12, srb); @@ -1495,7 +1492,7 @@ static int ms_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb) usb_stor_dbg(us, "ms_scsi_read_capacity\n"); bl_len = 0x200; - if (info->MS_Status.IsMSPro) + if (info->MS_Status & MS_IsMSPro) bl_num = info->MSP_TotalBlock - 1; else bl_num = info->MS_Lib.NumberOfLogBlock * info->MS_Lib.blockSize * 2 - 1; @@ -1650,7 +1647,7 @@ static int ms_scsi_read(struct us_data *us, struct scsi_cmnd *srb) if (bn > info->bl_num) return USB_STOR_TRANSPORT_ERROR; - if (info->MS_Status.IsMSPro) { + if (info->MS_Status & MS_IsMSPro) { result = ene_load_bincode(us, MSP_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Load MPS RW pattern Fail !!\n"); @@ -1751,7 +1748,7 @@ static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb) if (bn > info->bl_num) return USB_STOR_TRANSPORT_ERROR; - if (info->MS_Status.IsMSPro) { + if (info->MS_Status & MS_IsMSPro) { result = ene_load_bincode(us, MSP_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) { pr_info("Load MSP RW pattern Fail !!\n"); @@ -1859,12 +1856,12 @@ static int ene_get_card_status(struct us_data *us, u8 *buf) tmpreg = (u16) reg4b; reg4b = *(u32 *)(&buf[0x14]); - if (info->SD_Status.HiCapacity && !info->SD_Status.IsMMC) + if ((info->SD_Status & SD_HiCapacity) && !(info->SD_Status & SD_IsMMC)) info->HC_C_SIZE = (reg4b >> 8) & 0x3fffff; info->SD_C_SIZE = ((tmpreg & 0x03) << 10) | (u16)(reg4b >> 22); info->SD_C_SIZE_MULT = (u8)(reg4b >> 7) & 0x07; - if (info->SD_Status.HiCapacity && info->SD_Status.IsMMC) + if ((info->SD_Status & SD_HiCapacity) && (info->SD_Status & SD_IsMMC)) info->HC_C_SIZE = *(u32 *)(&buf[0x100]); if (info->SD_READ_BL_LEN > SD_BLOCK_LEN) { @@ -2076,6 +2073,7 @@ static int ene_ms_init(struct us_data *us) u16 MSP_BlockSize, MSP_UserAreaBlocks; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; u8 *bbuf = info->bbuf; + unsigned int s; printk(KERN_INFO "transport --- ENE_MSInit\n"); @@ -2100,15 +2098,16 @@ static int ene_ms_init(struct us_data *us) return 
USB_STOR_TRANSPORT_ERROR; } /* the same part to test ENE */ - info->MS_Status = *(struct MS_STATUS *) bbuf; - - if (info->MS_Status.Insert && info->MS_Status.Ready) { - printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert); - printk(KERN_INFO "Ready = %x\n", info->MS_Status.Ready); - printk(KERN_INFO "IsMSPro = %x\n", info->MS_Status.IsMSPro); - printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG); - printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP); - if (info->MS_Status.IsMSPro) { + info->MS_Status = bbuf[0]; + + s = info->MS_Status; + if ((s & MS_Insert) && (s & MS_Ready)) { + printk(KERN_INFO "Insert = %x\n", !!(s & MS_Insert)); + printk(KERN_INFO "Ready = %x\n", !!(s & MS_Ready)); + printk(KERN_INFO "IsMSPro = %x\n", !!(s & MS_IsMSPro)); + printk(KERN_INFO "IsMSPHG = %x\n", !!(s & MS_IsMSPHG)); + printk(KERN_INFO "WtP= %x\n", !!(s & MS_WtP)); + if (s & MS_IsMSPro) { MSP_BlockSize = (bbuf[6] << 8) | bbuf[7]; MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11]; info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks; @@ -2169,17 +2168,17 @@ static int ene_sd_init(struct us_data *us) return USB_STOR_TRANSPORT_ERROR; } - info->SD_Status = *(struct SD_STATUS *) bbuf; - if (info->SD_Status.Insert && info->SD_Status.Ready) { - struct SD_STATUS *s = &info->SD_Status; + info->SD_Status = bbuf[0]; + if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready)) { + unsigned int s = info->SD_Status; ene_get_card_status(us, bbuf); - usb_stor_dbg(us, "Insert = %x\n", s->Insert); - usb_stor_dbg(us, "Ready = %x\n", s->Ready); - usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC); - usb_stor_dbg(us, "HiCapacity = %x\n", s->HiCapacity); - usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed); - usb_stor_dbg(us, "WtP = %x\n", s->WtP); + usb_stor_dbg(us, "Insert = %x\n", !!(s & SD_Insert)); + usb_stor_dbg(us, "Ready = %x\n", !!(s & SD_Ready)); + usb_stor_dbg(us, "IsMMC = %x\n", !!(s & SD_IsMMC)); + usb_stor_dbg(us, "HiCapacity = %x\n", !!(s & SD_HiCapacity)); + usb_stor_dbg(us, "HiSpeed = %x\n", !!(s & SD_HiSpeed)); + usb_stor_dbg(us, "WtP = %x\n", !!(s & SD_WtP)); } else { usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]); return USB_STOR_TRANSPORT_ERROR; @@ -2201,14 +2200,14 @@ static int ene_init(struct us_data *us) misc_reg03 = bbuf[0]; if (misc_reg03 & 0x01) { - if (!info->SD_Status.Ready) { + if (!(info->SD_Status & SD_Ready)) { result = ene_sd_init(us); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; } } if (misc_reg03 & 0x02) { - if (!info->MS_Status.Ready) { + if (!(info->MS_Status & MS_Ready)) { result = ene_ms_init(us); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; @@ -2307,14 +2306,14 @@ static int ene_transport(struct scsi_cmnd *srb, struct us_data *us) /*US_DEBUG(usb_stor_show_command(us, srb)); */ scsi_set_resid(srb, 0); - if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) + if (unlikely(!(info->SD_Status & SD_Ready) || (info->MS_Status & MS_Ready))) result = ene_init(us); if (result == USB_STOR_XFER_GOOD) { result = USB_STOR_TRANSPORT_ERROR; - if (info->SD_Status.Ready) + if (info->SD_Status & SD_Ready) result = sd_scsi_irp(us, srb); - if (info->MS_Status.Ready) + if (info->MS_Status & MS_Ready) result = ms_scsi_irp(us, srb); } return result; @@ -2378,7 +2377,6 @@ static int ene_ub6250_probe(struct usb_interface *intf, static int ene_ub6250_resume(struct usb_interface *iface) { - u8 tmp = 0; struct us_data *us = usb_get_intfdata(iface); struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); @@ -2390,17 +2388,16 @@ 
static int ene_ub6250_resume(struct usb_interface *iface) mutex_unlock(&us->dev_mutex); info->Power_IsResum = true; - /*info->SD_Status.Ready = 0; */ - info->SD_Status = *(struct SD_STATUS *)&tmp; - info->MS_Status = *(struct MS_STATUS *)&tmp; - info->SM_Status = *(struct SM_STATUS *)&tmp; + /* info->SD_Status &= ~SD_Ready; */ + info->SD_Status = 0; + info->MS_Status = 0; + info->SM_Status = 0; return 0; } static int ene_ub6250_reset_resume(struct usb_interface *iface) { - u8 tmp = 0; struct us_data *us = usb_get_intfdata(iface); struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); @@ -2412,10 +2409,10 @@ static int ene_ub6250_reset_resume(struct usb_interface *iface) * the device */ info->Power_IsResum = true; - /*info->SD_Status.Ready = 0; */ - info->SD_Status = *(struct SD_STATUS *)&tmp; - info->MS_Status = *(struct MS_STATUS *)&tmp; - info->SM_Status = *(struct SM_STATUS *)&tmp; + /* info->SD_Status &= ~SD_Ready; */ + info->SD_Status = 0; + info->MS_Status = 0; + info->SM_Status = 0; return 0; } diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index 3789698d9d3c..0c423916d7bf 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -365,7 +365,7 @@ static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len) buf = kmalloc(len, GFP_NOIO); if (buf == NULL) - return USB_STOR_TRANSPORT_ERROR; + return -ENOMEM; usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len); diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 97f50f301f13..23a8b9b0b1fe 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -246,6 +246,10 @@ static int tps6598x_connect(struct tps6598x *tps, u32 status) typec_set_pwr_opmode(tps->port, mode); typec_set_pwr_role(tps->port, TPS_STATUS_TO_TYPEC_PORTROLE(status)); typec_set_vconn_role(tps->port, TPS_STATUS_TO_TYPEC_VCONN(status)); + if (TPS_STATUS_TO_UPSIDE_DOWN(status)) + typec_set_orientation(tps->port, TYPEC_ORIENTATION_REVERSE); + else + typec_set_orientation(tps->port, TYPEC_ORIENTATION_NORMAL); tps6598x_set_data_role(tps, TPS_STATUS_TO_TYPEC_DATAROLE(status), true); tps->partner = typec_register_partner(tps->port, &desc); @@ -268,6 +272,7 @@ static void tps6598x_disconnect(struct tps6598x *tps, u32 status) typec_set_pwr_opmode(tps->port, TYPEC_PWR_MODE_USB); typec_set_pwr_role(tps->port, TPS_STATUS_TO_TYPEC_PORTROLE(status)); typec_set_vconn_role(tps->port, TPS_STATUS_TO_TYPEC_VCONN(status)); + typec_set_orientation(tps->port, TYPEC_ORIENTATION_NONE); tps6598x_set_data_role(tps, TPS_STATUS_TO_TYPEC_DATAROLE(status), false); power_supply_changed(tps->psy); @@ -618,12 +623,12 @@ static int tps6598x_probe(struct i2c_client *client) ret = tps6598x_read32(tps, TPS_REG_STATUS, &status); if (ret < 0) - return ret; + goto err_clear_mask; trace_tps6598x_status(status); ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf); if (ret < 0) - return ret; + goto err_clear_mask; /* * This fwnode has a "compatible" property, but is never populated as a @@ -712,7 +717,8 @@ err_role_put: usb_role_switch_put(tps->role_sw); err_fwnode_put: fwnode_handle_put(fwnode); - +err_clear_mask: + tps6598x_write64(tps, TPS_REG_INT_MASK1, 0); return ret; } diff --git a/drivers/usb/typec/tipd/tps6598x.h b/drivers/usb/typec/tipd/tps6598x.h index 003a577be216..1f59b9fa3fad 100644 --- a/drivers/usb/typec/tipd/tps6598x.h +++ b/drivers/usb/typec/tipd/tps6598x.h @@ -17,6 +17,7 @@ /* TPS_REG_STATUS bits */ #define TPS_STATUS_PLUG_PRESENT BIT(0) #define 
TPS_STATUS_PLUG_UPSIDE_DOWN BIT(4) +#define TPS_STATUS_TO_UPSIDE_DOWN(s) (!!((s) & TPS_STATUS_PLUG_UPSIDE_DOWN)) #define TPS_STATUS_PORTROLE BIT(5) #define TPS_STATUS_TO_TYPEC_PORTROLE(s) (!!((s) & TPS_STATUS_PORTROLE)) #define TPS_STATUS_DATAROLE BIT(6) diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h index 01a848adf590..81dc3d88d3dd 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -63,7 +63,7 @@ struct mlx5_control_vq { unsigned short head; }; -struct mlx5_ctrl_wq_ent { +struct mlx5_vdpa_wq_ent { struct work_struct work; struct mlx5_vdpa_dev *mvdev; }; diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 1afbda216df5..e4258f40dcd7 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -161,6 +161,9 @@ struct mlx5_vdpa_net { bool setup; u16 mtu; u32 cur_num_vqs; + struct notifier_block nb; + struct vdpa_callback config_cb; + struct mlx5_vdpa_wq_ent cvq_ent; }; static void free_resources(struct mlx5_vdpa_net *ndev); @@ -1529,11 +1532,27 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd) switch (cmd) { case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET: + /* This mq feature check aligns with pre-existing userspace + * implementation. + * + * Without it, an untrusted driver could fake a multiqueue config + * request down to a non-mq device that may cause kernel to + * panic due to uninitialized resources for extra vqs. Even with + * a well behaving guest driver, it is not expected to allow + * changing the number of vqs on a non-mq device. + */ + if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) + break; + read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq)); if (read != sizeof(mq)) break; newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs); + if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || + newqps > mlx5_vdpa_max_qps(mvdev->max_vqs)) + break; + if (ndev->cur_num_vqs == 2 * newqps) { status = VIRTIO_NET_OK; break; @@ -1557,22 +1576,22 @@ static void mlx5_cvq_kick_handler(struct work_struct *work) { virtio_net_ctrl_ack status = VIRTIO_NET_ERR; struct virtio_net_ctrl_hdr ctrl; - struct mlx5_ctrl_wq_ent *wqent; + struct mlx5_vdpa_wq_ent *wqent; struct mlx5_vdpa_dev *mvdev; struct mlx5_control_vq *cvq; struct mlx5_vdpa_net *ndev; size_t read, write; int err; - wqent = container_of(work, struct mlx5_ctrl_wq_ent, work); + wqent = container_of(work, struct mlx5_vdpa_wq_ent, work); mvdev = wqent->mvdev; ndev = to_mlx5_vdpa_ndev(mvdev); cvq = &mvdev->cvq; if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) - goto out; + return; if (!cvq->ready) - goto out; + return; while (true) { err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head, @@ -1606,9 +1625,10 @@ static void mlx5_cvq_kick_handler(struct work_struct *work) if (vringh_need_notify_iotlb(&cvq->vring)) vringh_notify(&cvq->vring); + + queue_work(mvdev->wq, &wqent->work); + break; } -out: - kfree(wqent); } static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx) @@ -1616,7 +1636,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx) struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); struct mlx5_vdpa_virtqueue *mvq; - struct mlx5_ctrl_wq_ent *wqent; if (!is_index_valid(mvdev, idx)) return; @@ -1625,13 +1644,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx) if (!mvdev->cvq.ready) return; - wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC); - 
if (!wqent) - return; - - wqent->mvdev = mvdev; - INIT_WORK(&wqent->work, mlx5_cvq_kick_handler); - queue_work(mvdev->wq, &wqent->work); + queue_work(mvdev->wq, &ndev->cvq_ent.work); return; } @@ -1852,16 +1865,31 @@ static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev) ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ); ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR); ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MQ); + ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_STATUS); print_features(mvdev, ndev->mvdev.mlx_features, false); return ndev->mvdev.mlx_features; } -static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features) +static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features) { + /* Minimum features to expect */ if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM))) return -EOPNOTSUPP; + /* Double check features combination sent down by the driver. + * Fail invalid features due to absence of the depended feature. + * + * Per VIRTIO v1.1 specification, section 5.1.3.1 Feature bit + * requirements: "VIRTIO_NET_F_MQ Requires VIRTIO_NET_F_CTRL_VQ". + * By failing the invalid features sent down by untrusted drivers, + * we're assured the assumption made upon is_index_valid() and + * is_ctrl_vq_idx() will not be compromised. + */ + if ((features & (BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) == + BIT_ULL(VIRTIO_NET_F_MQ)) + return -EINVAL; + return 0; } @@ -1937,7 +1965,7 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features) print_features(mvdev, features, true); - err = verify_min_features(mvdev, features); + err = verify_driver_features(mvdev, features); if (err) return err; @@ -1950,8 +1978,10 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features) static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb) { - /* not implemented */ - mlx5_vdpa_warn(to_mvdev(vdev), "set config callback not supported\n"); + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + + ndev->config_cb = *cb; } #define MLX5_VDPA_MAX_VQ_ENTRIES 256 @@ -2403,6 +2433,82 @@ struct mlx5_vdpa_mgmtdev { struct mlx5_vdpa_net *ndev; }; +static u8 query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) +{ + u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {}; + int err; + + MLX5_SET(query_vport_state_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_STATE); + MLX5_SET(query_vport_state_in, in, op_mod, opmod); + MLX5_SET(query_vport_state_in, in, vport_number, vport); + if (vport) + MLX5_SET(query_vport_state_in, in, other_vport, 1); + + err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out); + if (err) + return 0; + + return MLX5_GET(query_vport_state_out, out, state); +} + +static bool get_link_state(struct mlx5_vdpa_dev *mvdev) +{ + if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) == + VPORT_STATE_UP) + return true; + + return false; +} + +static void update_carrier(struct work_struct *work) +{ + struct mlx5_vdpa_wq_ent *wqent; + struct mlx5_vdpa_dev *mvdev; + struct mlx5_vdpa_net *ndev; + + wqent = container_of(work, struct mlx5_vdpa_wq_ent, work); + mvdev = wqent->mvdev; + ndev = to_mlx5_vdpa_ndev(mvdev); + if (get_link_state(mvdev)) + ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); + else + ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); + + if (ndev->config_cb.callback) + 
ndev->config_cb.callback(ndev->config_cb.private); + + kfree(wqent); +} + +static int event_handler(struct notifier_block *nb, unsigned long event, void *param) +{ + struct mlx5_vdpa_net *ndev = container_of(nb, struct mlx5_vdpa_net, nb); + struct mlx5_eqe *eqe = param; + int ret = NOTIFY_DONE; + struct mlx5_vdpa_wq_ent *wqent; + + if (event == MLX5_EVENT_TYPE_PORT_CHANGE) { + switch (eqe->sub_type) { + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: + wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC); + if (!wqent) + return NOTIFY_DONE; + + wqent->mvdev = &ndev->mvdev; + INIT_WORK(&wqent->work, update_carrier); + queue_work(ndev->mvdev.wq, &wqent->work); + ret = NOTIFY_OK; + break; + default: + return NOTIFY_DONE; + } + return ret; + } + return ret; +} + static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name) { struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev); @@ -2447,6 +2553,11 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name) if (err) goto err_mtu; + if (get_link_state(mvdev)) + ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); + else + ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); + if (!is_zero_ether_addr(config->mac)) { pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev)); err = mlx5_mpfs_add_mac(pfmdev, config->mac); @@ -2472,12 +2583,16 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name) if (err) goto err_mr; - mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_ctrl_wq"); + ndev->cvq_ent.mvdev = mvdev; + INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler); + mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq"); if (!mvdev->wq) { err = -ENOMEM; goto err_res2; } + ndev->nb.notifier_call = event_handler; + mlx5_notifier_register(mdev, &ndev->nb); ndev->cur_num_vqs = 2 * mlx5_vdpa_max_qps(max_vqs); mvdev->vdev.mdev = &mgtdev->mgtdev; err = _vdpa_register_device(&mvdev->vdev, ndev->cur_num_vqs + 1); @@ -2508,7 +2623,9 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device * { struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev); struct mlx5_vdpa_dev *mvdev = to_mvdev(dev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + mlx5_notifier_unregister(mvdev->mdev, &ndev->nb); destroy_workqueue(mvdev->wq); _vdpa_unregister_device(dev); mgtdev->ndev = NULL; diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c index 1daae2608860..0678c2514197 100644 --- a/drivers/vdpa/vdpa_user/iova_domain.c +++ b/drivers/vdpa/vdpa_user/iova_domain.c @@ -302,7 +302,7 @@ vduse_domain_alloc_iova(struct iova_domain *iovad, iova_len = roundup_pow_of_two(iova_len); iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true); - return iova_pfn << shift; + return (dma_addr_t)iova_pfn << shift; } static void vduse_domain_free_iova(struct iova_domain *iovad, diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index 5bcd00246d2e..dead832b4571 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -513,8 +513,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev) { struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev); - vdpa_unregister_device(&vp_vdpa->vdpa); vp_modern_remove(&vp_vdpa->mdev); + vdpa_unregister_device(&vp_vdpa->vdpa); } static struct pci_driver vp_vdpa_driver = { diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c 
index a03b5a99c2da..f3916e6b16b9 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -36,6 +36,10 @@ static bool nointxmask; static bool disable_vga; static bool disable_idle_d3; +/* List of PF's that vfio_pci_core_sriov_configure() has been called on */ +static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex); +static LIST_HEAD(vfio_pci_sriov_pfs); + static inline bool vfio_vga_disabled(void) { #ifdef CONFIG_VFIO_PCI_VGA @@ -228,6 +232,19 @@ int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t stat if (!ret) { /* D3 might be unsupported via quirk, skip unless in D3 */ if (needs_save && pdev->current_state >= PCI_D3hot) { + /* + * The current PCI state will be saved locally in + * 'pm_save' during the D3hot transition. When the + * device state is changed to D0 again with the current + * function, then pci_store_saved_state() will restore + * the state and will free the memory pointed by + * 'pm_save'. There are few cases where the PCI power + * state can be changed to D0 without the involvement + * of the driver. For these cases, free the earlier + * allocated memory first before overwriting 'pm_save' + * to prevent the memory leak. + */ + kfree(vdev->pm_save); vdev->pm_save = pci_store_saved_state(pdev); } else if (needs_restore) { pci_load_and_free_saved_state(pdev, &vdev->pm_save); @@ -322,6 +339,17 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev) /* For needs_reset */ lockdep_assert_held(&vdev->vdev.dev_set->lock); + /* + * This function can be invoked while the power state is non-D0. + * This function calls __pci_reset_function_locked() which internally + * can use pci_pm_reset() for the function reset. pci_pm_reset() will + * fail if the power state is non-D0. Also, for the devices which + * have NoSoftRst-, the reset function can cause the PCI config space + * reset without restoring the original state (saved locally in + * 'vdev->pm_save'). 
+ */ + vfio_pci_set_power_state(vdev, PCI_D0); + /* Stop the device from further DMA */ pci_clear_master(pdev); @@ -410,47 +438,17 @@ out: } EXPORT_SYMBOL_GPL(vfio_pci_core_disable); -static struct vfio_pci_core_device *get_pf_vdev(struct vfio_pci_core_device *vdev) -{ - struct pci_dev *physfn = pci_physfn(vdev->pdev); - struct vfio_device *pf_dev; - - if (!vdev->pdev->is_virtfn) - return NULL; - - pf_dev = vfio_device_get_from_dev(&physfn->dev); - if (!pf_dev) - return NULL; - - if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) { - vfio_device_put(pf_dev); - return NULL; - } - - return container_of(pf_dev, struct vfio_pci_core_device, vdev); -} - -static void vfio_pci_vf_token_user_add(struct vfio_pci_core_device *vdev, int val) -{ - struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev); - - if (!pf_vdev) - return; - - mutex_lock(&pf_vdev->vf_token->lock); - pf_vdev->vf_token->users += val; - WARN_ON(pf_vdev->vf_token->users < 0); - mutex_unlock(&pf_vdev->vf_token->lock); - - vfio_device_put(&pf_vdev->vdev); -} - void vfio_pci_core_close_device(struct vfio_device *core_vdev) { struct vfio_pci_core_device *vdev = container_of(core_vdev, struct vfio_pci_core_device, vdev); - vfio_pci_vf_token_user_add(vdev, -1); + if (vdev->sriov_pf_core_dev) { + mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock); + WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users); + vdev->sriov_pf_core_dev->vf_token->users--; + mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock); + } vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_core_disable(vdev); @@ -471,7 +469,12 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev) { vfio_pci_probe_mmaps(vdev); vfio_spapr_pci_eeh_open(vdev->pdev); - vfio_pci_vf_token_user_add(vdev, 1); + + if (vdev->sriov_pf_core_dev) { + mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock); + vdev->sriov_pf_core_dev->vf_token->users++; + mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock); + } } EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable); @@ -921,6 +924,19 @@ long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd, return -EINVAL; vfio_pci_zap_and_down_write_memory_lock(vdev); + + /* + * This function can be invoked while the power state is non-D0. + * If pci_try_reset_function() has been called while the power + * state is non-D0, then pci_try_reset_function() will + * internally set the power state to D0 without vfio driver + * involvement. For the devices which have NoSoftRst-, the + * reset function can cause the PCI config space reset without + * restoring the original state (saved locally in + * 'vdev->pm_save'). + */ + vfio_pci_set_power_state(vdev, PCI_D0); + ret = pci_try_reset_function(vdev->pdev); up_write(&vdev->memory_lock); @@ -1566,11 +1582,8 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev, * * If the VF token is provided but unused, an error is generated. 
*/ - if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token) - return 0; /* No VF token provided or required */ - if (vdev->pdev->is_virtfn) { - struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev); + struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev; bool match; if (!pf_vdev) { @@ -1583,7 +1596,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev, } if (!vf_token) { - vfio_device_put(&pf_vdev->vdev); pci_info_ratelimited(vdev->pdev, "VF token required to access device\n"); return -EACCES; @@ -1593,8 +1605,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev, match = uuid_equal(uuid, &pf_vdev->vf_token->uuid); mutex_unlock(&pf_vdev->vf_token->lock); - vfio_device_put(&pf_vdev->vdev); - if (!match) { pci_info_ratelimited(vdev->pdev, "Incorrect VF token provided for device\n"); @@ -1715,8 +1725,30 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb, static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev) { struct pci_dev *pdev = vdev->pdev; + struct vfio_pci_core_device *cur; + struct pci_dev *physfn; int ret; + if (pdev->is_virtfn) { + /* + * If this VF was created by our vfio_pci_core_sriov_configure() + * then we can find the PF vfio_pci_core_device now, and due to + * the locking in pci_disable_sriov() it cannot change until + * this VF device driver is removed. + */ + physfn = pci_physfn(vdev->pdev); + mutex_lock(&vfio_pci_sriov_pfs_mutex); + list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) { + if (cur->pdev == physfn) { + vdev->sriov_pf_core_dev = cur; + break; + } + } + mutex_unlock(&vfio_pci_sriov_pfs_mutex); + return 0; + } + + /* Not a SRIOV PF */ if (!pdev->is_physfn) return 0; @@ -1788,6 +1820,7 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev, INIT_LIST_HEAD(&vdev->ioeventfds_list); mutex_init(&vdev->vma_lock); INIT_LIST_HEAD(&vdev->vma_list); + INIT_LIST_HEAD(&vdev->sriov_pfs_item); init_rwsem(&vdev->memory_lock); } EXPORT_SYMBOL_GPL(vfio_pci_core_init_device); @@ -1886,7 +1919,7 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev) { struct pci_dev *pdev = vdev->pdev; - pci_disable_sriov(pdev); + vfio_pci_core_sriov_configure(pdev, 0); vfio_unregister_group_dev(&vdev->vdev); @@ -1926,21 +1959,49 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev, int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn) { + struct vfio_pci_core_device *vdev; struct vfio_device *device; int ret = 0; + device_lock_assert(&pdev->dev); + device = vfio_device_get_from_dev(&pdev->dev); if (!device) return -ENODEV; - if (nr_virtfn == 0) - pci_disable_sriov(pdev); - else + vdev = container_of(device, struct vfio_pci_core_device, vdev); + + if (nr_virtfn) { + mutex_lock(&vfio_pci_sriov_pfs_mutex); + /* + * The thread that adds the vdev to the list is the only thread + * that gets to call pci_enable_sriov() and we will only allow + * it to be called once without going through + * pci_disable_sriov() + */ + if (!list_empty(&vdev->sriov_pfs_item)) { + ret = -EINVAL; + goto out_unlock; + } + list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs); + mutex_unlock(&vfio_pci_sriov_pfs_mutex); ret = pci_enable_sriov(pdev, nr_virtfn); + if (ret) + goto out_del; + ret = nr_virtfn; + goto out_put; + } - vfio_device_put(device); + pci_disable_sriov(pdev); - return ret < 0 ? 
ret : nr_virtfn; +out_del: + mutex_lock(&vfio_pci_sriov_pfs_mutex); + list_del_init(&vdev->sriov_pfs_item); +out_unlock: + mutex_unlock(&vfio_pci_sriov_pfs_mutex); +out_put: + vfio_device_put(device); + return ret; } EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure); @@ -2064,6 +2125,18 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set, } cur_mem = NULL; + /* + * The pci_reset_bus() will reset all the devices in the bus. + * The power state can be non-D0 for some of the devices in the bus. + * For these devices, the pci_reset_bus() will internally set + * the power state to D0 without vfio driver involvement. + * For the devices which have NoSoftRst-, the reset function can + * cause the PCI config space reset without restoring the original + * state (saved locally in 'vdev->pm_save'). + */ + list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) + vfio_pci_set_power_state(cur, PCI_D0); + ret = pci_reset_bus(pdev); err_undo: @@ -2117,6 +2190,18 @@ static bool vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set) if (!pdev) return false; + /* + * The pci_reset_bus() will reset all the devices in the bus. + * The power state can be non-D0 for some of the devices in the bus. + * For these devices, the pci_reset_bus() will internally set + * the power state to D0 without vfio driver involvement. + * For the devices which have NoSoftRst-, the reset function can + * cause the PCI config space reset without restoring the original + * state (saved locally in 'vdev->pm_save'). + */ + list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) + vfio_pci_set_power_state(cur, PCI_D0); + ret = pci_reset_bus(pdev); if (ret) return false; diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 57d3b2cbbd8e..82ac1569deb0 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -288,6 +288,7 @@ out: return done; } +#ifdef CONFIG_VFIO_PCI_VGA ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, size_t count, loff_t *ppos, bool iswrite) { @@ -355,6 +356,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, return done; } +#endif static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd, bool test_mem) diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c index 670d56c879e5..5829cf2d0552 100644 --- a/drivers/vhost/iotlb.c +++ b/drivers/vhost/iotlb.c @@ -57,6 +57,21 @@ int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb, if (last < start) return -EFAULT; + /* If the range being mapped is [0, ULONG_MAX], split it into two entries + * otherwise its size would overflow u64. 
+ */ + if (start == 0 && last == ULONG_MAX) { + u64 mid = last / 2; + int err = vhost_iotlb_add_range_ctx(iotlb, start, mid, addr, + perm, opaque); + + if (err) + return err; + + addr += mid + 1; + start = mid + 1; + } + if (iotlb->limit && iotlb->nmaps == iotlb->limit && iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) { diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 28ef323882fb..792ab5f23647 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -473,6 +473,7 @@ static void vhost_tx_batch(struct vhost_net *net, goto signal_used; msghdr->msg_control = &ctl; + msghdr->msg_controllen = sizeof(ctl); err = sock->ops->sendmsg(sock, msghdr, 0); if (unlikely(err < 0)) { vq_err(&nvq->vq, "Fail to batch sending packets\n"); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 59edb5a1ffe2..6942472cffb0 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1170,6 +1170,13 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, goto done; } + if ((msg.type == VHOST_IOTLB_UPDATE || + msg.type == VHOST_IOTLB_INVALIDATE) && + msg.size == 0) { + ret = -EINVAL; + goto done; + } + if (dev->msg_handler) ret = dev->msg_handler(dev, &msg); else diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 4e3b95af7ee4..dcb1585819a1 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -633,16 +633,18 @@ err: return ret; } -static int vhost_vsock_stop(struct vhost_vsock *vsock) +static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner) { size_t i; - int ret; + int ret = 0; mutex_lock(&vsock->dev.mutex); - ret = vhost_dev_check_owner(&vsock->dev); - if (ret) - goto err; + if (check_owner) { + ret = vhost_dev_check_owner(&vsock->dev); + if (ret) + goto err; + } for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { struct vhost_virtqueue *vq = &vsock->vqs[i]; @@ -755,9 +757,15 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file) /* Iterating over all connections for all CIDs to find orphans is * inefficient. Room for improvement here. */ - vsock_for_each_connected_socket(vhost_vsock_reset_orphans); + vsock_for_each_connected_socket(&vhost_transport.transport, + vhost_vsock_reset_orphans); - vhost_vsock_stop(vsock); + /* Don't check the owner, because we are in the release path, so we + * need to stop the vsock device in any case. + * vhost_vsock_stop() can not fail in this case, so we don't need to + * check the return code. 
+ */ + vhost_vsock_stop(vsock, false); vhost_vsock_flush(vsock); vhost_dev_stop(&vsock->dev); @@ -872,7 +880,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl, if (start) return vhost_vsock_start(vsock); else - return vhost_vsock_stop(vsock); + return vhost_vsock_stop(vsock, true); case VHOST_GET_FEATURES: features = VHOST_VSOCK_FEATURES; if (copy_to_user(argp, &features, sizeof(features))) diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c index e3812a8ff55a..29e650ecfceb 100644 --- a/drivers/video/fbdev/atafb.c +++ b/drivers/video/fbdev/atafb.c @@ -1683,9 +1683,9 @@ static int falcon_setcolreg(unsigned int regno, unsigned int red, ((blue & 0xfc00) >> 8)); if (regno < 16) { shifter_tt.color_reg[regno] = - (((red & 0xe000) >> 13) | ((red & 0x1000) >> 12) << 8) | - (((green & 0xe000) >> 13) | ((green & 0x1000) >> 12) << 4) | - ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12); + ((((red & 0xe000) >> 13) | ((red & 0x1000) >> 12)) << 8) | + ((((green & 0xe000) >> 13) | ((green & 0x1000) >> 12)) << 4) | + ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12); ((u32 *)info->pseudo_palette)[regno] = ((red & 0xf800) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11)); @@ -1971,9 +1971,9 @@ static int stste_setcolreg(unsigned int regno, unsigned int red, green >>= 12; if (ATARIHW_PRESENT(EXTD_SHIFTER)) shifter_tt.color_reg[regno] = - (((red & 0xe) >> 1) | ((red & 1) << 3) << 8) | - (((green & 0xe) >> 1) | ((green & 1) << 3) << 4) | - ((blue & 0xe) >> 1) | ((blue & 1) << 3); + ((((red & 0xe) >> 1) | ((red & 1) << 3)) << 8) | + ((((green & 0xe) >> 1) | ((green & 1) << 3)) << 4) | + ((blue & 0xe) >> 1) | ((blue & 1) << 3); else shifter_tt.color_reg[regno] = ((red & 0xe) << 7) | diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 355b6120dc4f..1fc8de4ecbeb 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -1062,15 +1062,16 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev) INIT_LIST_HEAD(&info->modelist); - if (pdev->dev.of_node) { - ret = atmel_lcdfb_of_init(sinfo); - if (ret) - goto free_info; - } else { + if (!pdev->dev.of_node) { dev_err(dev, "cannot get default configuration\n"); goto free_info; } + ret = atmel_lcdfb_of_init(sinfo); + if (ret) + goto free_info; + + ret = -ENODEV; if (!sinfo->config) goto free_info; diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c index 93802abbbc72..3d47c347b897 100644 --- a/drivers/video/fbdev/cirrusfb.c +++ b/drivers/video/fbdev/cirrusfb.c @@ -469,7 +469,7 @@ static int cirrusfb_check_mclk(struct fb_info *info, long freq) return 0; } -static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var, +static int cirrusfb_check_pixclock(struct fb_var_screeninfo *var, struct fb_info *info) { long freq; @@ -478,9 +478,7 @@ static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var, unsigned maxclockidx = var->bits_per_pixel >> 3; /* convert from ps to kHz */ - freq = PICOS2KHZ(var->pixclock); - - dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq); + freq = PICOS2KHZ(var->pixclock ? 
: 1); maxclock = cirrusfb_board_info[cinfo->btype].maxclock[maxclockidx]; cinfo->multiplexing = 0; @@ -488,11 +486,13 @@ static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var, /* If the frequency is greater than we can support, we might be able * to use multiplexing for the video mode */ if (freq > maxclock) { - dev_err(info->device, - "Frequency greater than maxclock (%ld kHz)\n", - maxclock); - return -EINVAL; + var->pixclock = KHZ2PICOS(maxclock); + + while ((freq = PICOS2KHZ(var->pixclock)) > maxclock) + var->pixclock++; } + dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq); + /* * Additional constraint: 8bpp uses DAC clock doubling to allow maximum * pixel clock diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c index 509311471d51..bd59e7b11ed5 100644 --- a/drivers/video/fbdev/controlfb.c +++ b/drivers/video/fbdev/controlfb.c @@ -67,7 +67,9 @@ #define out_8(addr, val) (void)(val) #define in_le32(addr) 0 #define out_le32(addr, val) (void)(val) +#ifndef pgprot_cached_wthru #define pgprot_cached_wthru(prot) (prot) +#endif #else static void invalid_vram_cache(void __force *addr) { diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c index 55d2bd0ce5c0..64843464c661 100644 --- a/drivers/video/fbdev/core/fbcvt.c +++ b/drivers/video/fbdev/core/fbcvt.c @@ -214,9 +214,11 @@ static u32 fb_cvt_aspect_ratio(struct fb_cvt_data *cvt) static void fb_cvt_print_name(struct fb_cvt_data *cvt) { u32 pixcount, pixcount_mod; - int cnt = 255, offset = 0, read = 0; - u8 *buf = kzalloc(256, GFP_KERNEL); + int size = 256; + int off = 0; + u8 *buf; + buf = kzalloc(size, GFP_KERNEL); if (!buf) return; @@ -224,43 +226,30 @@ static void fb_cvt_print_name(struct fb_cvt_data *cvt) pixcount_mod = (cvt->xres * (cvt->yres/cvt->interlace)) % 1000000; pixcount_mod /= 1000; - read = snprintf(buf+offset, cnt, "fbcvt: %dx%d@%d: CVT Name - ", - cvt->xres, cvt->yres, cvt->refresh); - offset += read; - cnt -= read; + off += scnprintf(buf + off, size - off, "fbcvt: %dx%d@%d: CVT Name - ", + cvt->xres, cvt->yres, cvt->refresh); - if (cvt->status) - snprintf(buf+offset, cnt, "Not a CVT standard - %d.%03d Mega " - "Pixel Image\n", pixcount, pixcount_mod); - else { - if (pixcount) { - read = snprintf(buf+offset, cnt, "%d", pixcount); - cnt -= read; - offset += read; - } + if (cvt->status) { + off += scnprintf(buf + off, size - off, + "Not a CVT standard - %d.%03d Mega Pixel Image\n", + pixcount, pixcount_mod); + } else { + if (pixcount) + off += scnprintf(buf + off, size - off, "%d", pixcount); - read = snprintf(buf+offset, cnt, ".%03dM", pixcount_mod); - cnt -= read; - offset += read; + off += scnprintf(buf + off, size - off, ".%03dM", pixcount_mod); if (cvt->aspect_ratio == 0) - read = snprintf(buf+offset, cnt, "3"); + off += scnprintf(buf + off, size - off, "3"); else if (cvt->aspect_ratio == 3) - read = snprintf(buf+offset, cnt, "4"); + off += scnprintf(buf + off, size - off, "4"); else if (cvt->aspect_ratio == 1 || cvt->aspect_ratio == 4) - read = snprintf(buf+offset, cnt, "9"); + off += scnprintf(buf + off, size - off, "9"); else if (cvt->aspect_ratio == 2) - read = snprintf(buf+offset, cnt, "A"); - else - read = 0; - cnt -= read; - offset += read; - - if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) { - read = snprintf(buf+offset, cnt, "-R"); - cnt -= read; - offset += read; - } + off += scnprintf(buf + off, size - off, "A"); + + if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) + off += scnprintf(buf + off, size - off, "-R"); } printk(KERN_INFO "%s\n", 
buf); diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 7bd5e2a4a9da..0371ad233fdf 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -25,6 +25,7 @@ #include <linux/init.h> #include <linux/linux_logo.h> #include <linux/proc_fs.h> +#include <linux/platform_device.h> #include <linux/seq_file.h> #include <linux/console.h> #include <linux/kmod.h> @@ -1557,18 +1558,43 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a, /* check all firmware fbs and kick off if the base addr overlaps */ for_each_registered_fb(i) { struct apertures_struct *gen_aper; + struct device *device; if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE)) continue; gen_aper = registered_fb[i]->apertures; + device = registered_fb[i]->device; if (fb_do_apertures_overlap(gen_aper, a) || (primary && gen_aper && gen_aper->count && gen_aper->ranges[0].base == VGA_FB_PHYS)) { printk(KERN_INFO "fb%d: switching to %s from %s\n", i, name, registered_fb[i]->fix.id); - do_unregister_framebuffer(registered_fb[i]); + + /* + * If we kick-out a firmware driver, we also want to remove + * the underlying platform device, such as simple-framebuffer, + * VESA, EFI, etc. A native driver will then be able to + * allocate the memory range. + * + * If it's not a platform device, at least print a warning. A + * fix would add code to remove the device from the system. + */ + if (!device) { + /* TODO: Represent each OF framebuffer as its own + * device in the device hierarchy. For now, offb + * doesn't have such a device, so unregister the + * framebuffer as before without warning. + */ + do_unregister_framebuffer(registered_fb[i]); + } else if (dev_is_platform(device)) { + registered_fb[i]->forced_out = true; + platform_device_unregister(to_platform_device(device)); + } else { + pr_warn("fb%d: cannot remove device\n", i); + do_unregister_framebuffer(registered_fb[i]); + } } } } @@ -1895,9 +1921,13 @@ EXPORT_SYMBOL(register_framebuffer); void unregister_framebuffer(struct fb_info *fb_info) { - mutex_lock(&registration_lock); + bool forced_out = fb_info->forced_out; + + if (!forced_out) + mutex_lock(&registration_lock); do_unregister_framebuffer(fb_info); - mutex_unlock(&registration_lock); + if (!forced_out) + mutex_unlock(&registration_lock); } EXPORT_SYMBOL(unregister_framebuffer); diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index 5c82611e93d9..236521b19daf 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -1377,7 +1377,7 @@ static struct video_board vbG200 = { .lowlevel = &matrox_G100 }; static struct video_board vbG200eW = { - .maxvram = 0x800000, + .maxvram = 0x100000, .maxdisplayable = 0x800000, .accelID = FB_ACCEL_MATROX_MGAG200, .lowlevel = &matrox_G100 diff --git a/drivers/video/fbdev/nvidia/nv_i2c.c b/drivers/video/fbdev/nvidia/nv_i2c.c index d7994a173245..0b48965a6420 100644 --- a/drivers/video/fbdev/nvidia/nv_i2c.c +++ b/drivers/video/fbdev/nvidia/nv_i2c.c @@ -86,7 +86,7 @@ static int nvidia_setup_i2c_bus(struct nvidia_i2c_chan *chan, const char *name, { int rc; - strcpy(chan->adapter.name, name); + strscpy(chan->adapter.name, name, sizeof(chan->adapter.name)); chan->adapter.owner = THIS_MODULE; chan->adapter.class = i2c_class; chan->adapter.algo_data = &chan->algo; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c index 2fa436475b40..c8ad3ef42bd3 100644 ---
a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c @@ -246,6 +246,7 @@ static int dvic_probe_of(struct platform_device *pdev) adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); if (adapter_node) { adapter = of_get_i2c_adapter_by_node(adapter_node); + of_node_put(adapter_node); if (adapter == NULL) { dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); omap_dss_put_device(ddata->in); diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index 4b0793abdd84..a2c7c5cb1523 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -409,7 +409,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%d\n", errors); + return sysfs_emit(buf, "%d\n", errors); } static ssize_t dsicm_hw_revision_show(struct device *dev, @@ -439,7 +439,7 @@ static ssize_t dsicm_hw_revision_show(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3); + return sysfs_emit(buf, "%02x.%02x.%02x\n", id1, id2, id3); } static ssize_t dsicm_store_ulps(struct device *dev, @@ -487,7 +487,7 @@ static ssize_t dsicm_show_ulps(struct device *dev, t = ddata->ulps_enabled; mutex_unlock(&ddata->lock); - return snprintf(buf, PAGE_SIZE, "%u\n", t); + return sysfs_emit(buf, "%u\n", t); } static ssize_t dsicm_store_ulps_timeout(struct device *dev, @@ -532,7 +532,7 @@ static ssize_t dsicm_show_ulps_timeout(struct device *dev, t = ddata->ulps_timeout; mutex_unlock(&ddata->lock); - return snprintf(buf, PAGE_SIZE, "%u\n", t); + return sysfs_emit(buf, "%u\n", t); } static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL); diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c index 8d8b5ff7d43c..3696eb09b69b 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c @@ -476,7 +476,7 @@ static ssize_t show_cabc_available_modes(struct device *dev, int i; if (!ddata->has_cabc) - return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]); + return sysfs_emit(buf, "%s\n", cabc_modes[0]); for (i = 0, len = 0; len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++) diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c index afac1d9445aa..57b7d1f49096 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c @@ -169,7 +169,7 @@ static ssize_t tpo_td043_vmirror_show(struct device *dev, { struct panel_drv_data *ddata = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror); + return sysfs_emit(buf, "%d\n", ddata->vmirror); } static ssize_t tpo_td043_vmirror_store(struct device *dev, @@ -199,7 +199,7 @@ static ssize_t tpo_td043_mode_show(struct device *dev, { struct panel_drv_data *ddata = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode); + return sysfs_emit(buf, "%d\n", ddata->mode); } static ssize_t tpo_td043_mode_store(struct device *dev, diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c index 0dbc6bf8268a..092a1caa1208 100644 --- a/drivers/video/fbdev/sm712fb.c +++ 
b/drivers/video/fbdev/sm712fb.c @@ -1047,7 +1047,7 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, if (count + p > total_size) count = total_size - p; - buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL); + buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -1059,25 +1059,14 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, while (count) { c = (count > PAGE_SIZE) ? PAGE_SIZE : count; dst = buffer; - for (i = c >> 2; i--;) { - *dst = fb_readl(src++); - *dst = big_swap(*dst); + for (i = (c + 3) >> 2; i--;) { + u32 val; + + val = fb_readl(src); + *dst = big_swap(val); + src++; dst++; } - if (c & 3) { - u8 *dst8 = (u8 *)dst; - u8 __iomem *src8 = (u8 __iomem *)src; - - for (i = c & 3; i--;) { - if (i & 1) { - *dst8++ = fb_readb(++src8); - } else { - *dst8++ = fb_readb(--src8); - src8 += 2; - } - } - src = (u32 __iomem *)src8; - } if (copy_to_user(buf, buffer, c)) { err = -EFAULT; @@ -1130,7 +1119,7 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf, count = total_size - p; } - buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL); + buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -1148,24 +1137,11 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf, break; } - for (i = c >> 2; i--;) { - fb_writel(big_swap(*src), dst++); + for (i = (c + 3) >> 2; i--;) { + fb_writel(big_swap(*src), dst); + dst++; src++; } - if (c & 3) { - u8 *src8 = (u8 *)src; - u8 __iomem *dst8 = (u8 __iomem *)dst; - - for (i = c & 3; i--;) { - if (i & 1) { - fb_writeb(*src8++, ++dst8); - } else { - fb_writeb(*src8++, --dst8); - dst8 += 2; - } - } - dst = (u32 __iomem *)dst8; - } *ppos += c; buf += c; diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index bfac3ee4a642..28768c272b73 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c @@ -1656,6 +1656,7 @@ static int ufx_usb_probe(struct usb_interface *interface, info->par = dev; info->pseudo_palette = dev->pseudo_palette; info->fbops = &ufx_ops; + INIT_LIST_HEAD(&info->modelist); retval = fb_alloc_cmap(&info->cmap, 256, 0); if (retval < 0) { @@ -1666,8 +1667,6 @@ static int ufx_usb_probe(struct usb_interface *interface, INIT_DELAYED_WORK(&dev->free_framebuffer_work, ufx_free_framebuffer_work); - INIT_LIST_HEAD(&info->modelist); - retval = ufx_reg_read(dev, 0x3000, &id_rev); check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval); dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev); diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index b9cdd02c1000..90f48b71fd8f 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -1426,7 +1426,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev, struct device_attribute *a, char *buf) { struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", + return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->bytes_rendered)); } @@ -1434,7 +1434,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev, struct device_attribute *a, char *buf) { struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", + return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->bytes_identical)); } @@ -1442,7 +1442,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev, struct device_attribute *a, char 
*buf) { struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", + return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->bytes_sent)); } @@ -1450,7 +1450,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev, struct device_attribute *a, char *buf) { struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", + return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->cpu_kcycles_used)); } diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c index d96ab28f8ce4..4e641a780726 100644 --- a/drivers/video/fbdev/w100fb.c +++ b/drivers/video/fbdev/w100fb.c @@ -770,12 +770,18 @@ out: fb_dealloc_cmap(&info->cmap); kfree(info->pseudo_palette); } - if (remapped_fbuf != NULL) + if (remapped_fbuf != NULL) { iounmap(remapped_fbuf); - if (remapped_regs != NULL) + remapped_fbuf = NULL; + } + if (remapped_regs != NULL) { iounmap(remapped_regs); - if (remapped_base != NULL) + remapped_regs = NULL; + } + if (remapped_base != NULL) { iounmap(remapped_base); + remapped_base = NULL; + } if (info) framebuffer_release(info); return err; @@ -795,8 +801,11 @@ static int w100fb_remove(struct platform_device *pdev) fb_dealloc_cmap(&info->cmap); iounmap(remapped_base); + remapped_base = NULL; iounmap(remapped_regs); + remapped_regs = NULL; iounmap(remapped_fbuf); + remapped_fbuf = NULL; framebuffer_release(info); diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c index 130e12b8652a..af889cee6680 100644 --- a/drivers/virt/acrn/hsm.c +++ b/drivers/virt/acrn/hsm.c @@ -134,8 +134,10 @@ static long acrn_dev_ioctl(struct file *filp, unsigned int cmd, if (IS_ERR(vm_param)) return PTR_ERR(vm_param); - if ((vm_param->reserved0 | vm_param->reserved1) != 0) + if ((vm_param->reserved0 | vm_param->reserved1) != 0) { + kfree(vm_param); return -EINVAL; + } vm = acrn_vm_create(vm, vm_param); if (!vm) { @@ -180,21 +182,29 @@ static long acrn_dev_ioctl(struct file *filp, unsigned int cmd, return PTR_ERR(cpu_regs); for (i = 0; i < ARRAY_SIZE(cpu_regs->reserved); i++) - if (cpu_regs->reserved[i]) + if (cpu_regs->reserved[i]) { + kfree(cpu_regs); return -EINVAL; + } for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_32); i++) - if (cpu_regs->vcpu_regs.reserved_32[i]) + if (cpu_regs->vcpu_regs.reserved_32[i]) { + kfree(cpu_regs); return -EINVAL; + } for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_64); i++) - if (cpu_regs->vcpu_regs.reserved_64[i]) + if (cpu_regs->vcpu_regs.reserved_64[i]) { + kfree(cpu_regs); return -EINVAL; + } for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.gdt.reserved); i++) if (cpu_regs->vcpu_regs.gdt.reserved[i] | - cpu_regs->vcpu_regs.idt.reserved[i]) + cpu_regs->vcpu_regs.idt.reserved[i]) { + kfree(cpu_regs); return -EINVAL; + } ret = hcall_set_vcpu_regs(vm->vmid, virt_to_phys(cpu_regs)); if (ret < 0) diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c index c4f2e15c8a2b..3b1b1e7a844b 100644 --- a/drivers/virt/acrn/mm.c +++ b/drivers/virt/acrn/mm.c @@ -162,10 +162,34 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap) void *remap_vaddr; int ret, pinned; u64 user_vm_pa; + unsigned long pfn; + struct vm_area_struct *vma; if (!vm || !memmap) return -EINVAL; + mmap_read_lock(current->mm); + vma = vma_lookup(current->mm, memmap->vma_base); + if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) { + if ((memmap->vma_base + memmap->len) > vma->vm_end) { + mmap_read_unlock(current->mm); + return -EINVAL; + } + 
+ ret = follow_pfn(vma, memmap->vma_base, &pfn); + mmap_read_unlock(current->mm); + if (ret < 0) { + dev_dbg(acrn_dev.this_device, + "Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base); + return ret; + } + + return acrn_mm_region_add(vm, memmap->user_vm_pa, + PFN_PHYS(pfn), memmap->len, + ACRN_MEM_TYPE_WB, memmap->attr); + } + mmap_read_unlock(current->mm); + /* Get the page number of the map region */ nr_pages = memmap->len >> PAGE_SHIFT; pages = vzalloc(nr_pages * sizeof(struct page *)); diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 236081afe9a2..c2b733ef95b0 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -166,14 +166,13 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status) } EXPORT_SYMBOL_GPL(virtio_add_status); -int virtio_finalize_features(struct virtio_device *dev) +/* Do some validation, then set FEATURES_OK */ +static int virtio_features_ok(struct virtio_device *dev) { - int ret = dev->config->finalize_features(dev); unsigned status; + int ret; might_sleep(); - if (ret) - return ret; ret = arch_has_restricted_virtio_memory_access(); if (ret) { @@ -202,7 +201,6 @@ int virtio_finalize_features(struct virtio_device *dev) } return 0; } -EXPORT_SYMBOL_GPL(virtio_finalize_features); static int virtio_dev_probe(struct device *_d) { @@ -239,17 +237,6 @@ static int virtio_dev_probe(struct device *_d) driver_features_legacy = driver_features; } - /* - * Some devices detect legacy solely via F_VERSION_1. Write - * F_VERSION_1 to force LE config space accesses before FEATURES_OK for - * these when needed. - */ - if (drv->validate && !virtio_legacy_is_little_endian() - && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) { - dev->features = BIT_ULL(VIRTIO_F_VERSION_1); - dev->config->finalize_features(dev); - } - if (device_features & (1ULL << VIRTIO_F_VERSION_1)) dev->features = driver_features & device_features; else @@ -260,13 +247,26 @@ static int virtio_dev_probe(struct device *_d) if (device_features & (1ULL << i)) __virtio_set_bit(dev, i); + err = dev->config->finalize_features(dev); + if (err) + goto err; + if (drv->validate) { + u64 features = dev->features; + err = drv->validate(dev); if (err) goto err; + + /* Did validation change any features? Then write them again. */ + if (features != dev->features) { + err = dev->config->finalize_features(dev); + if (err) + goto err; + } } - err = virtio_finalize_features(dev); + err = virtio_features_ok(dev); if (err) goto err; @@ -490,7 +490,11 @@ int virtio_device_restore(struct virtio_device *dev) /* We have a driver! 
*/ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); - ret = virtio_finalize_features(dev); + ret = dev->config->finalize_features(dev); + if (ret) + goto err; + + ret = virtio_features_ok(dev); if (ret) goto err; diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index ca70c5f03206..9cbeeb4923ec 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c @@ -2090,16 +2090,20 @@ static ssize_t w1_seq_show(struct device *device, if (sl->reg_num.id == reg_num->id) seq = i; + if (w1_reset_bus(sl->master)) + goto error; + + /* Put the device into chain DONE state */ + w1_write_8(sl->master, W1_MATCH_ROM); + w1_write_block(sl->master, (u8 *)&rn, 8); w1_write_8(sl->master, W1_42_CHAIN); w1_write_8(sl->master, W1_42_CHAIN_DONE); w1_write_8(sl->master, W1_42_CHAIN_DONE_INV); - w1_read_block(sl->master, &ack, sizeof(ack)); /* check for acknowledgment */ ack = w1_read_8(sl->master); if (ack != W1_42_SUCCESS_CONFIRM_BYTE) goto error; - } /* Exit from CHAIN state */ diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c index 359302f71f7e..ae7f9357bb87 100644 --- a/drivers/watchdog/rti_wdt.c +++ b/drivers/watchdog/rti_wdt.c @@ -229,6 +229,7 @@ static int rti_wdt_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(dev); if (ret) { pm_runtime_put_noidle(dev); + pm_runtime_disable(&pdev->dev); return dev_err_probe(dev, ret, "runtime pm failed\n"); } diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index 3fa40c723e8e..edb0acd0b832 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c @@ -169,20 +169,14 @@ undo: __del_gref(gref); } - /* It's possible for the target domain to map the just-allocated grant - * references by blindly guessing their IDs; if this is done, then - * __del_gref will leave them in the queue_gref list. They need to be - * added to the global list so that we can free them when they are no - * longer referenced. - */ - if (unlikely(!list_empty(&queue_gref))) - list_splice_tail(&queue_gref, &gref_list); mutex_unlock(&gref_mutex); return rc; } static void __del_gref(struct gntalloc_gref *gref) { + unsigned long addr; + if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { uint8_t *tmp = kmap(gref->page); tmp[gref->notify.pgoff] = 0; @@ -196,21 +190,16 @@ static void __del_gref(struct gntalloc_gref *gref) gref->notify.flags = 0; if (gref->gref_id) { - if (gnttab_query_foreign_access(gref->gref_id)) - return; - - if (!gnttab_end_foreign_access_ref(gref->gref_id, 0)) - return; - - gnttab_free_grant_reference(gref->gref_id); + if (gref->page) { + addr = (unsigned long)page_to_virt(gref->page); + gnttab_end_foreign_access(gref->gref_id, 0, addr); + } else + gnttab_free_grant_reference(gref->gref_id); } gref_size--; list_del(&gref->next_gref); - if (gref->page) - __free_page(gref->page); - kfree(gref); } diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 3729bea0c989..5c83d41766c8 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -134,12 +134,9 @@ struct gnttab_ops { */ unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref); /* - * Query the status of a grant entry. Ref parameter is reference of - * queried grant entry, return value is the status of queried entry. - * Detailed status(writing/reading) can be gotten from the return value - * by bit operations. + * Read the frame number related to a given grant reference. 
*/ - int (*query_foreign_access)(grant_ref_t ref); + unsigned long (*read_frame)(grant_ref_t ref); }; struct unmap_refs_callback_data { @@ -284,22 +281,6 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); -static int gnttab_query_foreign_access_v1(grant_ref_t ref) -{ - return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing); -} - -static int gnttab_query_foreign_access_v2(grant_ref_t ref) -{ - return grstatus[ref] & (GTF_reading|GTF_writing); -} - -int gnttab_query_foreign_access(grant_ref_t ref) -{ - return gnttab_interface->query_foreign_access(ref); -} -EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); - static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly) { u16 flags, nflags; @@ -353,6 +334,16 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); +static unsigned long gnttab_read_frame_v1(grant_ref_t ref) +{ + return gnttab_shared.v1[ref].frame; +} + +static unsigned long gnttab_read_frame_v2(grant_ref_t ref) +{ + return gnttab_shared.v2[ref].full_page.frame; +} + struct deferred_entry { struct list_head list; grant_ref_t ref; @@ -382,12 +373,9 @@ static void gnttab_handle_deferred(struct timer_list *unused) spin_unlock_irqrestore(&gnttab_list_lock, flags); if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) { put_free_entry(entry->ref); - if (entry->page) { - pr_debug("freeing g.e. %#x (pfn %#lx)\n", - entry->ref, page_to_pfn(entry->page)); - put_page(entry->page); - } else - pr_info("freeing g.e. %#x\n", entry->ref); + pr_debug("freeing g.e. %#x (pfn %#lx)\n", + entry->ref, page_to_pfn(entry->page)); + put_page(entry->page); kfree(entry); entry = NULL; } else { @@ -412,9 +400,18 @@ static void gnttab_handle_deferred(struct timer_list *unused) static void gnttab_add_deferred(grant_ref_t ref, bool readonly, struct page *page) { - struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + struct deferred_entry *entry; + gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; const char *what = KERN_WARNING "leaking"; + entry = kmalloc(sizeof(*entry), gfp); + if (!page) { + unsigned long gfn = gnttab_interface->read_frame(ref); + + page = pfn_to_page(gfn_to_pfn(gfn)); + get_page(page); + } + if (entry) { unsigned long flags; @@ -435,11 +432,21 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly, what, ref, page ? 
page_to_pfn(page) : -1); } +int gnttab_try_end_foreign_access(grant_ref_t ref) +{ + int ret = _gnttab_end_foreign_access_ref(ref, 0); + + if (ret) + put_free_entry(ref); + + return ret; +} +EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access); + void gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page) { - if (gnttab_end_foreign_access_ref(ref, readonly)) { - put_free_entry(ref); + if (gnttab_try_end_foreign_access(ref)) { if (page != 0) put_page(virt_to_page(page)); } else @@ -1417,7 +1424,7 @@ static const struct gnttab_ops gnttab_v1_ops = { .update_entry = gnttab_update_entry_v1, .end_foreign_access_ref = gnttab_end_foreign_access_ref_v1, .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1, - .query_foreign_access = gnttab_query_foreign_access_v1, + .read_frame = gnttab_read_frame_v1, }; static const struct gnttab_ops gnttab_v2_ops = { @@ -1429,7 +1436,7 @@ static const struct gnttab_ops gnttab_v2_ops = { .update_entry = gnttab_update_entry_v2, .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2, .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2, - .query_foreign_access = gnttab_query_foreign_access_v2, + .read_frame = gnttab_read_frame_v2, }; static bool gnttab_need_v2(void) diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 7984645b5956..bbe337dc296e 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -337,8 +337,8 @@ static void free_active_ring(struct sock_mapping *map) if (!map->active.ring) return; - free_pages((unsigned long)map->active.data.in, - map->active.ring->ring_order); + free_pages_exact(map->active.data.in, + PAGE_SIZE << map->active.ring->ring_order); free_page((unsigned long)map->active.ring); } @@ -352,8 +352,8 @@ static int alloc_active_ring(struct sock_mapping *map) goto out; map->active.ring->ring_order = PVCALLS_RING_ORDER; - bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - PVCALLS_RING_ORDER); + bytes = alloc_pages_exact(PAGE_SIZE << PVCALLS_RING_ORDER, + GFP_KERNEL | __GFP_ZERO); if (!bytes) goto out; diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index e56a5faac395..cbdff8979980 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -380,7 +380,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, */ trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); - map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs); + map = swiotlb_tbl_map_single(dev, phys, size, size, 0, dir, attrs); if (map == (phys_addr_t)DMA_MAPPING_ERROR) return DMA_MAPPING_ERROR; diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index e8bed1cb76ba..df6890681231 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -379,7 +379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, unsigned int nr_pages, grant_ref_t *grefs) { int err; - int i, j; + unsigned int i; + grant_ref_t gref_head; + + err = gnttab_alloc_grant_references(nr_pages, &gref_head); + if (err) { + xenbus_dev_fatal(dev, err, "granting access to ring page"); + return err; + } for (i = 0; i < nr_pages; i++) { unsigned long gfn; @@ -389,23 +396,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, else gfn = virt_to_gfn(vaddr); - err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0); - if (err < 0) { - xenbus_dev_fatal(dev, err, - "granting access to ring page"); - goto fail; - } - grefs[i] = err; + grefs[i] = 
gnttab_claim_grant_reference(&gref_head); + gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id, + gfn, 0); vaddr = vaddr + XEN_PAGE_SIZE; } return 0; - -fail: - for (j = 0; j < i; j++) - gnttab_end_foreign_access_ref(grefs[j], 0); - return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring);
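The last hunk above reworks xenbus_grant_ring() so that every grant reference comes from a single batch reserved up front with gnttab_alloc_grant_references(), instead of granting pages one at a time and unwinding with gnttab_end_foreign_access_ref() on failure. Below is a minimal caller-side sketch of that pattern for a contiguous lowmem ring buffer; the demo_* names are invented for illustration and are not part of the patch, and error handling is reduced to the essentials.

/*
 * Sketch only: the batched grant pattern used by the reworked
 * xenbus_grant_ring() above.  The gnttab_* calls are the existing Xen
 * grant-table API; everything prefixed demo_ is hypothetical.
 */
#include <xen/grant_table.h>
#include <xen/page.h>

static int demo_grant_ring(domid_t otherend_id, void *vaddr,
			   unsigned int nr_pages, grant_ref_t *grefs)
{
	grant_ref_t gref_head;
	unsigned int i;
	int err;

	/* Reserve all references first so the loop below cannot fail. */
	err = gnttab_alloc_grant_references(nr_pages, &gref_head);
	if (err)
		return err;

	for (i = 0; i < nr_pages; i++) {
		/* Cannot run dry: exactly nr_pages references were reserved. */
		grefs[i] = gnttab_claim_grant_reference(&gref_head);
		gnttab_grant_foreign_access_ref(grefs[i], otherend_id,
						virt_to_gfn(vaddr), 0);
		vaddr = vaddr + XEN_PAGE_SIZE;
	}

	return 0;
}

static void demo_ungrant_ring(unsigned int nr_pages, grant_ref_t *grefs)
{
	unsigned int i;

	/* Revoke and free each reference; pass 0 so the page is not freed. */
	for (i = 0; i < nr_pages; i++)
		gnttab_end_foreign_access(grefs[i], 0, 0UL);
}

Reserving the whole batch before touching any page means a failure can only happen before anything has been exposed to the other domain, which is why the removed fail: unwind path in xenbus_grant_ring() is no longer needed.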