author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2023-10-16 18:36:12 +0300 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2023-10-16 18:36:12 +0300 |
commit | d0d27ef87e1ca974ed93ed4f7d3c123cbd392ba6 (patch) | |
tree | f7c3e3259579973c896c634e2bdabed31a171395 /drivers | |
parent | 1034cc423f1b4a7a9a56d310ca980fcd2753e11d (diff) | |
parent | 58720809f52779dc0f08e53e54b014209d13eebb (diff) | |
download | linux-d0d27ef87e1ca974ed93ed4f7d3c123cbd392ba6.tar.xz | |
Merge 6.6-rc6 into usb-next
We need the USB and Thunderbolt fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
281 files changed, 2442 insertions, 1298 deletions
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index c711db8a9c33..0f5218e361df 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -12,6 +12,7 @@ #define pr_fmt(fmt) "ACPI: " fmt #include <linux/acpi.h> +#include <linux/cpu.h> #include <linux/device.h> #include <linux/dmi.h> #include <linux/kernel.h> diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 660834a49c1f..c95d0edb0be9 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1915,6 +1915,17 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = { }, { /* + * HP Pavilion Gaming Laptop 15-dk1xxx + * https://github.com/systemd/systemd/issues/28942 + */ + .callback = ec_honor_dsdt_gpe, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"), + }, + }, + { + /* * Samsung hardware * https://bugzilla.kernel.org/show_bug.cgi?id=44161 */ diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index f0e6738ae3c9..f96bf32cd368 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -855,7 +855,7 @@ static size_t sizeof_idt(struct acpi_nfit_interleave *idt) { if (idt->header.length < sizeof(*idt)) return 0; - return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); + return sizeof(*idt) + sizeof(u32) * idt->line_count; } static bool add_idt(struct acpi_nfit_desc *acpi_desc, diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index dc615ef6550a..3a34a8c425fe 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -1217,8 +1217,7 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr) strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN); state->exit_latency = lpi->wake_latency; state->target_residency = lpi->min_residency; - if (lpi->arch_flags) - state->flags |= CPUIDLE_FLAG_TIMER_STOP; + state->flags |= arch_get_idle_state_flags(lpi->arch_flags); if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH) state->flags |= CPUIDLE_FLAG_RCU_IDLE; state->enter = acpi_idle_lpi_enter; diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 32cfa3f4efd3..297a88587031 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -440,6 +440,13 @@ static const struct dmi_system_id asus_laptop[] = { }, }, { + .ident = "Asus ExpertBook B1402CBA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"), + }, + }, + { .ident = "Asus ExpertBook B1502CBA", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), @@ -500,16 +507,23 @@ static const struct dmi_system_id maingear_laptop[] = { static const struct dmi_system_id pcspecialist_laptop[] = { { - .ident = "PCSpecialist Elimina Pro 16 M", - /* - * Some models have product-name "Elimina Pro 16 M", - * others "GM6BGEQ". Match on board-name to match both. 
- */ + /* TongFang GM6BGEQ / PCSpecialist Elimina Pro 16 M, RTX 3050 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"), DMI_MATCH(DMI_BOARD_NAME, "GM6BGEQ"), }, }, + { + /* TongFang GM6BG5Q, RTX 4050 */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM6BG5Q"), + }, + }, + { + /* TongFang GM6BG0Q / PCSpecialist Elimina Pro 16 M, RTX 4060 */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM6BG0Q"), + }, + }, { } }; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 367afac5f1bf..92128aae2d06 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -4812,6 +4812,8 @@ static void binder_release_work(struct binder_proc *proc, "undelivered TRANSACTION_ERROR: %u\n", e->cmd); } break; + case BINDER_WORK_TRANSACTION_PENDING: + case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: case BINDER_WORK_TRANSACTION_COMPLETE: { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered TRANSACTION_COMPLETE\n"); diff --git a/drivers/ata/pata_parport/fit3.c b/drivers/ata/pata_parport/fit3.c index bad7aa920cdc..d2b81cf2e16d 100644 --- a/drivers/ata/pata_parport/fit3.c +++ b/drivers/ata/pata_parport/fit3.c @@ -9,11 +9,6 @@ * * The TD-2000 and certain older devices use a different protocol. * Try the fit2 protocol module with them. - * - * NB: The FIT adapters do not appear to support the control - * registers. So, we map ALT_STATUS to STATUS and NO-OP writes - * to the device control register - this means that IDE reset - * will not work on these devices. */ #include <linux/module.h> @@ -37,8 +32,7 @@ static void fit3_write_regr(struct pi_adapter *pi, int cont, int regr, int val) { - if (cont == 1) - return; + regr += cont << 3; switch (pi->mode) { case 0: @@ -59,11 +53,7 @@ static int fit3_read_regr(struct pi_adapter *pi, int cont, int regr) { int a, b; - if (cont) { - if (regr != 6) - return 0xff; - regr = 7; - } + regr += cont << 3; switch (pi->mode) { case 0: diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c index 1af64d435d3c..a7adfdcb5e27 100644 --- a/drivers/ata/pata_parport/pata_parport.c +++ b/drivers/ata/pata_parport/pata_parport.c @@ -51,6 +51,13 @@ static void pata_parport_dev_select(struct ata_port *ap, unsigned int device) ata_sff_pause(ap); } +static void pata_parport_set_devctl(struct ata_port *ap, u8 ctl) +{ + struct pi_adapter *pi = ap->host->private_data; + + pi->proto->write_regr(pi, 1, 6, ctl); +} + static bool pata_parport_devchk(struct ata_port *ap, unsigned int device) { struct pi_adapter *pi = ap->host->private_data; @@ -64,7 +71,7 @@ static bool pata_parport_devchk(struct ata_port *ap, unsigned int device) pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0xaa); pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0x55); - pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 055); + pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0x55); pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0xaa); nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT); @@ -73,6 +80,72 @@ static bool pata_parport_devchk(struct ata_port *ap, unsigned int device) return (nsect == 0x55) && (lbal == 0xaa); } +static int pata_parport_wait_after_reset(struct ata_link *link, + unsigned int devmask, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + struct pi_adapter *pi = ap->host->private_data; + unsigned int dev0 = devmask & (1 << 0); + unsigned int dev1 = devmask & (1 << 1); + int rc, ret = 0; + + ata_msleep(ap, ATA_WAIT_AFTER_RESET); + + /* always check readiness of the master device */ + rc = ata_sff_wait_ready(link, deadline); + if (rc) { + /* + * some 
adapters return bogus values if master device is not + * present, so don't abort now if a slave device is present + */ + if (!dev1) + return rc; + ret = -ENODEV; + } + + /* + * if device 1 was found in ata_devchk, wait for register + * access briefly, then wait for BSY to clear. + */ + if (dev1) { + int i; + + pata_parport_dev_select(ap, 1); + + /* + * Wait for register access. Some ATAPI devices fail + * to set nsect/lbal after reset, so don't waste too + * much time on it. We're gonna wait for !BSY anyway. + */ + for (i = 0; i < 2; i++) { + u8 nsect, lbal; + + nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT); + lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL); + if (nsect == 1 && lbal == 1) + break; + /* give drive a breather */ + ata_msleep(ap, 50); + } + + rc = ata_sff_wait_ready(link, deadline); + if (rc) { + if (rc != -ENODEV) + return rc; + ret = rc; + } + } + + pata_parport_dev_select(ap, 0); + if (dev1) + pata_parport_dev_select(ap, 1); + if (dev0) + pata_parport_dev_select(ap, 0); + + return ret; +} + static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask, unsigned long deadline) { @@ -87,7 +160,7 @@ static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask, ap->last_ctl = ap->ctl; /* wait the port to become ready */ - return ata_sff_wait_after_reset(&ap->link, devmask, deadline); + return pata_parport_wait_after_reset(&ap->link, devmask, deadline); } static int pata_parport_softreset(struct ata_link *link, unsigned int *classes, @@ -252,6 +325,7 @@ static struct ata_port_operations pata_parport_port_ops = { .hardreset = NULL, .sff_dev_select = pata_parport_dev_select, + .sff_set_devctl = pata_parport_set_devctl, .sff_check_status = pata_parport_check_status, .sff_check_altstatus = pata_parport_check_altstatus, .sff_tf_load = pata_parport_tf_load, diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index db716ffd083e..3db88bbcae0f 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, if (!rbnode) return -ENOMEM; regcache_rbtree_set_register(map, rbnode, - reg - rbnode->base_reg, value); + (reg - rbnode->base_reg) / map->reg_stride, + value); regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode); rbtree_ctx->cached_rbnode = rbnode; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index df1cd0f718b8..800f131222fc 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1436,8 +1436,9 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd) static void nbd_clear_sock_ioctl(struct nbd_device *nbd) { - blk_mark_disk_dead(nbd->disk); nbd_clear_sock(nbd); + disk_force_media_change(nbd->disk); + nbd_bdev_reset(nbd); if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, &nbd->config->runtime_flags)) nbd_config_put(nbd); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 82597ab4f747..499f4809fcdf 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -4419,6 +4419,7 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; + hdev->shutdown = btusb_shutdown_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; hdev->cmd_timeout = btusb_qca_cmd_timeout; set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c index 80acdf62794a..afc94d0062b1 100644 --- 
a/drivers/counter/counter-chrdev.c +++ b/drivers/counter/counter-chrdev.c @@ -247,8 +247,8 @@ static int counter_get_ext(const struct counter_comp *const ext, if (*id == component_id) return 0; - if (ext->type == COUNTER_COMP_ARRAY) { - element = ext->priv; + if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) { + element = ext[*ext_idx].priv; if (component_id - *id < element->length) return 0; diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c index 975e431d1590..b3e615cbd2ca 100644 --- a/drivers/counter/microchip-tcb-capture.c +++ b/drivers/counter/microchip-tcb-capture.c @@ -97,7 +97,7 @@ static int mchp_tc_count_function_write(struct counter_device *counter, priv->qdec_mode = 0; /* Set highest rate based on whether soc has gclk or not */ bmr &= ~(ATMEL_TC_QDEN | ATMEL_TC_POSEN); - if (priv->tc_cfg->has_gclk) + if (!priv->tc_cfg->has_gclk) cmr |= ATMEL_TC_TIMER_CLOCK2; else cmr |= ATMEL_TC_TIMER_CLOCK1; diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c index c625bb2b5d56..628af51c81af 100644 --- a/drivers/dma-buf/dma-fence-unwrap.c +++ b/drivers/dma-buf/dma-fence-unwrap.c @@ -76,16 +76,11 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) { if (!dma_fence_is_signaled(tmp)) { ++count; - } else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, - &tmp->flags)) { - if (ktime_after(tmp->timestamp, timestamp)) - timestamp = tmp->timestamp; } else { - /* - * Use the current time if the fence is - * currently signaling. - */ - timestamp = ktime_get(); + ktime_t t = dma_fence_timestamp(tmp); + + if (ktime_after(t, timestamp)) + timestamp = t; } } } diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index af57799c86ce..2e9a316c596a 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -268,13 +268,10 @@ static int sync_fill_fence_info(struct dma_fence *fence, sizeof(info->driver_name)); info->status = dma_fence_get_status(fence); - while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && - !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) - cpu_relax(); info->timestamp_ns = - test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? - ktime_to_ns(fence->timestamp) : - ktime_set(0, 0); + dma_fence_is_signaled(fence) ? + ktime_to_ns(dma_fence_timestamp(fence)) : + ktime_set(0, 0); return info->status; } diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index a0f5741abcc4..6a3abe5b1790 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -92,8 +92,14 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan) edma_writel_chreg(fsl_chan, val, ch_sbr); - if (flags & FSL_EDMA_DRV_HAS_CHMUX) - edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux); + if (flags & FSL_EDMA_DRV_HAS_CHMUX) { + /* + * ch_mux: With the exception of 0, attempts to write a value + * already in use will be forced to 0. 
+ */ + if (!edma_readl_chreg(fsl_chan, ch_mux)) + edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux); + } val = edma_readl_chreg(fsl_chan, ch_csr); val |= EDMA_V3_CH_CSR_ERQ; @@ -448,12 +454,25 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga); + csr = le16_to_cpu(tcd->csr); + if (fsl_chan->is_sw) { - csr = le16_to_cpu(tcd->csr); csr |= EDMA_TCD_CSR_START; tcd->csr = cpu_to_le16(csr); } + /* + * Must clear CHn_CSR[DONE] bit before enable TCDn_CSR[ESG] at EDMAv3 + * eDMAv4 have not such requirement. + * Change MLINK need clear CHn_CSR[DONE] for both eDMAv3 and eDMAv4. + */ + if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) && + (csr & EDMA_TCD_CSR_E_SG)) || + ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) && + (csr & EDMA_TCD_CSR_E_LINK))) + edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr); + + edma_write_tcdreg(fsl_chan, tcd->csr, csr); } diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 3cc0cc8fc2d0..40d50cc3d75a 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -183,11 +183,23 @@ struct fsl_edma_desc { #define FSL_EDMA_DRV_BUS_8BYTE BIT(10) #define FSL_EDMA_DRV_DEV_TO_DEV BIT(11) #define FSL_EDMA_DRV_ALIGN_64BYTE BIT(12) +/* Need clean CHn_CSR DONE before enable TCD's ESG */ +#define FSL_EDMA_DRV_CLEAR_DONE_E_SG BIT(13) +/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */ +#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14) #define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \ FSL_EDMA_DRV_BUS_8BYTE | \ FSL_EDMA_DRV_DEV_TO_DEV | \ - FSL_EDMA_DRV_ALIGN_64BYTE) + FSL_EDMA_DRV_ALIGN_64BYTE | \ + FSL_EDMA_DRV_CLEAR_DONE_E_SG | \ + FSL_EDMA_DRV_CLEAR_DONE_E_LINK) + +#define FSL_EDMA_DRV_EDMA4 (FSL_EDMA_DRV_SPLIT_REG | \ + FSL_EDMA_DRV_BUS_8BYTE | \ + FSL_EDMA_DRV_DEV_TO_DEV | \ + FSL_EDMA_DRV_ALIGN_64BYTE | \ + FSL_EDMA_DRV_CLEAR_DONE_E_LINK) struct fsl_edma_drvdata { u32 dmamuxs; /* only used before v3 */ diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index 63d48d046f04..8c4ed7012e23 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -154,18 +154,20 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec, fsl_chan = to_fsl_edma_chan(chan); i = fsl_chan - fsl_edma->chans; - chan = dma_get_slave_channel(chan); - chan->device->privatecnt++; fsl_chan->priority = dma_spec->args[1]; fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX; fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE; fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO; if (!b_chmux && i == dma_spec->args[0]) { + chan = dma_get_slave_channel(chan); + chan->device->privatecnt++; mutex_unlock(&fsl_edma->fsl_edma_mutex); return chan; } else if (b_chmux && !fsl_chan->srcid) { /* if controller support channel mux, choose a free channel */ + chan = dma_get_slave_channel(chan); + chan->device->privatecnt++; fsl_chan->srcid = dma_spec->args[0]; mutex_unlock(&fsl_edma->fsl_edma_mutex); return chan; @@ -355,7 +357,7 @@ static struct fsl_edma_drvdata imx93_data3 = { }; static struct fsl_edma_drvdata imx93_data4 = { - .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3, + .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4, .chreg_space_sz = 0x8000, .chreg_off = 0x10000, .setup_irq = fsl_edma3_irq_init, diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 22d6f4e455b7..8f754f922217 100644 
--- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -477,6 +477,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, union idxd_command_reg cmd; DECLARE_COMPLETION_ONSTACK(done); u32 stat; + unsigned long flags; if (idxd_device_is_halted(idxd)) { dev_warn(&idxd->pdev->dev, "Device is HALTED!\n"); @@ -490,7 +491,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, cmd.operand = operand; cmd.int_req = 1; - spin_lock(&idxd->cmd_lock); + spin_lock_irqsave(&idxd->cmd_lock, flags); wait_event_lock_irq(idxd->cmd_waitq, !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags), idxd->cmd_lock); @@ -507,7 +508,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, * After command submitted, release lock and go to sleep until * the command completes via interrupt. */ - spin_unlock(&idxd->cmd_lock); + spin_unlock_irqrestore(&idxd->cmd_lock, flags); wait_for_completion(&done); stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); spin_lock(&idxd->cmd_lock); diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index c51dc017b48a..06d12ac39144 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -450,9 +450,8 @@ static int mtk_uart_apdma_device_pause(struct dma_chan *chan) mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); - synchronize_irq(c->irq); - spin_unlock_irqrestore(&c->vc.lock, flags); + synchronize_irq(c->irq); return 0; } diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 89e82508c133..002833fb1fa0 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3668,6 +3668,7 @@ static int __init d40_probe(struct platform_device *pdev) regulator_disable(base->lcpa_regulator); regulator_put(base->lcpa_regulator); } + pm_runtime_disable(base->dev); report_failure: d40_err(dev, "probe failed\n"); diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 5c36811aa134..0b30151fb45c 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -1113,8 +1113,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg( chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL; /* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */ - if (chan->trig_mdma && sg_len > 1) + if (chan->trig_mdma && sg_len > 1) { chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM; + chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT; + } for_each_sg(sgl, sg, sg_len, i) { ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, @@ -1387,11 +1389,12 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, residue = stm32_dma_get_remaining_bytes(chan); - if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) { + if ((chan->desc->cyclic || chan->trig_mdma) && !stm32_dma_is_current_sg(chan)) { n_sg++; if (n_sg == chan->desc->num_sgs) n_sg = 0; - residue = sg_req->len; + if (!chan->trig_mdma) + residue = sg_req->len; } /* @@ -1401,7 +1404,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, * residue = remaining bytes from NDTR + remaining * periods/sg to be transferred */ - if (!chan->desc->cyclic || n_sg != 0) + if ((!chan->desc->cyclic && !chan->trig_mdma) || n_sg != 0) for (i = n_sg; i < desc->num_sgs; i++) residue += desc->sg_req[i].len; diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 0de234022c6d..bae08b3f55c7 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -777,8 
+777,6 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan, /* Enable interrupts */ ccr &= ~STM32_MDMA_CCR_IRQ_MASK; ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE; - if (sg_len > 1) - ccr |= STM32_MDMA_CCR_BTIE; desc->ccr = ccr; return 0; @@ -1236,6 +1234,10 @@ static int stm32_mdma_resume(struct dma_chan *c) unsigned long flags; u32 status, reg; + /* Transfer can be terminated */ + if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN)) + return -EPERM; + hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc; spin_lock_irqsave(&chan->vchan.lock, flags); @@ -1316,21 +1318,35 @@ static int stm32_mdma_slave_config(struct dma_chan *c, static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan, struct stm32_mdma_desc *desc, - u32 curr_hwdesc) + u32 curr_hwdesc, + struct dma_tx_state *state) { struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); struct stm32_mdma_hwdesc *hwdesc; - u32 cbndtr, residue, modulo, burst_size; + u32 cisr, clar, cbndtr, residue, modulo, burst_size; int i; + cisr = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id)); + residue = 0; - for (i = curr_hwdesc + 1; i < desc->count; i++) { + /* Get the next hw descriptor to process from current transfer */ + clar = stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id)); + for (i = desc->count - 1; i >= 0; i--) { hwdesc = desc->node[i].hwdesc; + + if (hwdesc->clar == clar) + break;/* Current transfer found, stop cumulating */ + + /* Cumulate residue of unprocessed hw descriptors */ residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr); } cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)); residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK; + state->in_flight_bytes = 0; + if (chan->chan_config.m2m_hw && (cisr & STM32_MDMA_CISR_CRQA)) + state->in_flight_bytes = cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK; + if (!chan->mem_burst) return residue; @@ -1360,11 +1376,10 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c, vdesc = vchan_find_desc(&chan->vchan, cookie); if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) - residue = stm32_mdma_desc_residue(chan, chan->desc, - chan->curr_hwdesc); + residue = stm32_mdma_desc_residue(chan, chan->desc, chan->curr_hwdesc, state); else if (vdesc) - residue = stm32_mdma_desc_residue(chan, - to_stm32_mdma_desc(vdesc), 0); + residue = stm32_mdma_desc_residue(chan, to_stm32_mdma_desc(vdesc), 0, state); + dma_set_residue(state, residue); spin_unlock_irqrestore(&chan->vchan.lock, flags); diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c index 789193ed0386..c278d5facf7d 100644 --- a/drivers/dma/ti/k3-udma-glue.c +++ b/drivers/dma/ti/k3-udma-glue.c @@ -558,6 +558,9 @@ int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn) tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq); } + if (!tx_chn->virq) + return -ENXIO; + return tx_chn->virq; } EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq); diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index da33bbbdacb9..58f107194fda 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -973,7 +973,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset, else if (param == PIN_CONFIG_BIAS_DISABLE || param == PIN_CONFIG_BIAS_PULL_DOWN || param == PIN_CONFIG_DRIVE_STRENGTH) - return pinctrl_gpio_set_config(offset, config); + return pinctrl_gpio_set_config(chip->base + offset, config); else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN || param == PIN_CONFIG_DRIVE_OPEN_SOURCE) /* Return 
-ENOTSUPP to trigger emulation, as per datasheet */ diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 7e9f7a32d3ee..cae9661862fe 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -237,6 +237,7 @@ static bool pxa_gpio_has_pinctrl(void) switch (gpio_type) { case PXA3XX_GPIO: case MMP2_GPIO: + case MMP_GPIO: return false; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 30c4f5cca02c..2b8356699f23 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2093,7 +2093,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) adev->flags |= AMD_IS_PX; if (!(adev->flags & AMD_IS_APU)) { - parent = pci_upstream_bridge(adev->pdev); + parent = pcie_find_root_port(adev->pdev); adev->has_pr3 = parent ? pci_pr3_present(parent) : false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c index da4be0bbb446..8eee5d783a92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c @@ -142,6 +142,10 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev) int r; int size; + /* SI HW does not have doorbells, skip allocation */ + if (adev->doorbell.num_kernel_doorbells == 0) + return 0; + /* Reserve first num_kernel_doorbells (page-aligned) for kernel ops */ size = ALIGN(adev->doorbell.num_kernel_doorbells * sizeof(u32), PAGE_SIZE); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 9c66d98af6d8..7cd0dfaeee20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -170,6 +170,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) csum += pia[size - 1]; if (csum) { DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum); + kfree(pia); return -EIO; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index f3ee83cdf97e..d28e21baef16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -252,7 +252,7 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct amdgpu_res_cursor cursor; - if (bo->tbo.resource->mem_type != TTM_PL_VRAM) + if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM) return false; amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index c435f7632e8e..5ee87965a078 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -157,7 +157,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct int32_t N; int32_t j; - if (!pipe_ctx->stream) + if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) continue; /* Virtual encoders don't have this function */ if (!stream_enc->funcs->get_fifo_cal_average_level) @@ -188,7 +188,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct int32_t N; int32_t j; - if (!pipe_ctx->stream) + if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) continue; /* Virtual encoders don't have this function */ if 
(!stream_enc->funcs->get_fifo_cal_average_level) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 984b52923534..e9345f6554db 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -355,7 +355,7 @@ static void dcn32_update_clocks_update_dentist( int32_t N; int32_t j; - if (!pipe_ctx->stream) + if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) continue; /* Virtual encoders don't have this function */ if (!stream_enc->funcs->get_fifo_cal_average_level) @@ -401,7 +401,7 @@ static void dcn32_update_clocks_update_dentist( int32_t N; int32_t j; - if (!pipe_ctx->stream) + if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) continue; /* Virtual encoders don't have this function */ if (!stream_enc->funcs->get_fifo_cal_average_level) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 3a9077b60029..d08e60dff46d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1262,6 +1262,9 @@ static void disable_vbios_mode_if_required( if (stream == NULL) continue; + if (stream->apply_seamless_boot_optimization) + continue; + // only looking for first odm pipe if (pipe->prev_odm_pipe) continue; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 41147da54458..8bb2da13826f 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2040,6 +2040,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): *states = ATTR_STATE_SUPPORTED; break; default: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 4bb289f9b4b8..da2860da6018 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2082,36 +2082,41 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context return ret; } +#define MAX(a, b) ((a) > (b) ? (a) : (b)) + static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap) { struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; - u32 smu_pcie_arg; + uint8_t *table_member1, *table_member2; + uint32_t min_gen_speed, max_gen_speed; + uint32_t min_lane_width, max_lane_width; + uint32_t smu_pcie_arg; int ret, i; - /* PCIE gen speed and lane width override */ - if (!amdgpu_device_pcie_dynamic_switching_supported()) { - if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap) - pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1]; + GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); + GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); - if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap) - pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1]; + min_gen_speed = MAX(0, table_member1[0]); + max_gen_speed = MIN(pcie_gen_cap, table_member1[1]); + min_gen_speed = min_gen_speed > max_gen_speed ? + max_gen_speed : min_gen_speed; + min_lane_width = MAX(1, table_member2[0]); + max_lane_width = MIN(pcie_width_cap, table_member2[1]); + min_lane_width = min_lane_width > max_lane_width ? 
+ max_lane_width : min_lane_width; - /* Force all levels to use the same settings */ - for (i = 0; i < NUM_LINK_LEVELS; i++) { - pcie_table->pcie_gen[i] = pcie_gen_cap; - pcie_table->pcie_lane[i] = pcie_width_cap; - } + if (!amdgpu_device_pcie_dynamic_switching_supported()) { + pcie_table->pcie_gen[0] = max_gen_speed; + pcie_table->pcie_lane[0] = max_lane_width; } else { - for (i = 0; i < NUM_LINK_LEVELS; i++) { - if (pcie_table->pcie_gen[i] > pcie_gen_cap) - pcie_table->pcie_gen[i] = pcie_gen_cap; - if (pcie_table->pcie_lane[i] > pcie_width_cap) - pcie_table->pcie_lane[i] = pcie_width_cap; - } + pcie_table->pcie_gen[0] = min_gen_speed; + pcie_table->pcie_lane[0] = min_lane_width; } + pcie_table->pcie_gen[1] = max_gen_speed; + pcie_table->pcie_lane[1] = max_lane_width; for (i = 0; i < NUM_LINK_LEVELS; i++) { smu_pcie_arg = (i << 16 | diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 292e38eb6218..60794fcde1d5 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -290,7 +290,8 @@ static int update_connector_routing(struct drm_atomic_state *state, struct drm_connector *connector, struct drm_connector_state *old_connector_state, - struct drm_connector_state *new_connector_state) + struct drm_connector_state *new_connector_state, + bool added_by_user) { const struct drm_connector_helper_funcs *funcs; struct drm_encoder *new_encoder; @@ -339,9 +340,13 @@ update_connector_routing(struct drm_atomic_state *state, * there's a chance the connector may have been destroyed during the * process, but it's better to ignore that then cause * drm_atomic_helper_resume() to fail. + * + * Last, we want to ignore connector registration when the connector + * was not pulled in the atomic state by user-space (ie, was pulled + * in by the driver, e.g. when updating a DP-MST stream). 
*/ if (!state->duplicated && drm_connector_is_unregistered(connector) && - crtc_state->active) { + added_by_user && crtc_state->active) { drm_dbg_atomic(connector->dev, "[CONNECTOR:%d:%s] is not registered\n", connector->base.id, connector->name); @@ -620,7 +625,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_connector *connector; struct drm_connector_state *old_connector_state, *new_connector_state; int i, ret; - unsigned int connectors_mask = 0; + unsigned int connectors_mask = 0, user_connectors_mask = 0; + + for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) + user_connectors_mask |= BIT(i); for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { bool has_connectors = @@ -685,7 +693,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, */ ret = update_connector_routing(state, connector, old_connector_state, - new_connector_state); + new_connector_state, + BIT(i) & user_connectors_mask); if (ret) return ret; if (old_connector_state->crtc) { diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 6129b89bb366..44a948b80ee1 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -540,7 +540,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj) struct page **pages; struct folio *folio; struct folio_batch fbatch; - int i, j, npages; + long i, j, npages; if (WARN_ON(!obj->filp)) return ERR_PTR(-EINVAL); @@ -564,11 +564,13 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj) i = 0; while (i < npages) { + long nr; folio = shmem_read_folio_gfp(mapping, i, mapping_gfp_mask(mapping)); if (IS_ERR(folio)) goto fail; - for (j = 0; j < folio_nr_pages(folio); j++, i++) + nr = min(npages - i, folio_nr_pages(folio)); + for (j = 0; j < nr; j++, i++) pages[i] = folio_file_page(folio, i); /* Make sure shmem keeps __GFP_DMA32 allocated pages in the diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 0cb646cb04ee..d5c15292ae93 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -38,6 +38,14 @@ static const struct drm_dmi_panel_orientation_data gpd_micropc = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data gpd_onemix2s = { + .width = 1200, + .height = 1920, + .bios_dates = (const char * const []){ "05/21/2018", "10/26/2018", + "03/04/2019", NULL }, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct drm_dmi_panel_orientation_data gpd_pocket = { .width = 1200, .height = 1920, @@ -401,6 +409,14 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* One Mix 2S (generic strings, also match on bios date) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + }, + .driver_data = (void *)&gpd_onemix2s, }, {} }; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 6b6d22c19411..0ba955611dfb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -198,7 +198,7 @@ static void flush_tlb_invalidate(struct drm_i915_gem_object *obj) for_each_gt(gt, i915, id) { if 
(!obj->mm.tlb[id]) - return; + continue; intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]); obj->mm.tlb[id] = 0; diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index a4ff55aa5e55..7ad36198aab2 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -271,8 +271,17 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70)) bit_group_0 |= PIPE_CONTROL_CCS_FLUSH; + /* + * L3 fabric flush is needed for AUX CCS invalidation + * which happens as part of pipe-control so we can + * ignore PIPE_CONTROL_FLUSH_L3. Also PIPE_CONTROL_FLUSH_L3 + * deals with Protected Memory which is not needed for + * AUX CCS invalidation and lead to unwanted side effects. + */ + if (mode & EMIT_FLUSH) + bit_group_1 |= PIPE_CONTROL_FLUSH_L3; + bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH; - bit_group_1 |= PIPE_CONTROL_FLUSH_L3; bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; /* Wa_1409600907:tgl,adl-p */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1f65bb33dd21..a8551ce322de 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1199,6 +1199,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv) goto err_unlock; } + /* + * Register engines early to ensure the engine list is in its final + * rb-tree form, lowering the amount of code that has to deal with + * the intermediate llist state. + */ + intel_engines_driver_register(dev_priv); + return 0; /* @@ -1246,8 +1253,6 @@ err_unlock: void i915_gem_driver_register(struct drm_i915_private *i915) { i915_gem_driver_register__shrinker(i915); - - intel_engines_driver_register(i915); } void i915_gem_driver_unregister(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index c2aaaded07ed..0be195f9149c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -119,6 +119,7 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog, struct dpu_sw_pipe_cfg *pipe_cfg) { int src_width, src_height, dst_height, fps; + u64 plane_pixel_rate, plane_bit_rate; u64 plane_prefill_bw; u64 plane_bw; u32 hw_latency_lines; @@ -136,13 +137,12 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog, scale_factor = src_height > dst_height ? mult_frac(src_height, 1, dst_height) : 1; - plane_bw = - src_width * mode->vtotal * fps * fmt->bpp * - scale_factor; + plane_pixel_rate = src_width * mode->vtotal * fps; + plane_bit_rate = plane_pixel_rate * fmt->bpp; - plane_prefill_bw = - src_width * hw_latency_lines * fps * fmt->bpp * - scale_factor * mode->vtotal; + plane_bw = plane_bit_rate * scale_factor; + + plane_prefill_bw = plane_bw * hw_latency_lines; if ((vbp+vpw) > hw_latency_lines) do_div(plane_prefill_bw, (vbp+vpw)); @@ -733,9 +733,11 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu, static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe, struct dpu_sw_pipe_cfg *pipe_cfg, - const struct dpu_format *fmt) + const struct dpu_format *fmt, + const struct drm_display_mode *mode) { uint32_t min_src_size; + struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 
2 : 1; @@ -774,6 +776,12 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, return -EINVAL; } + /* max clk check */ + if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) { + DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n"); + return -E2BIG; + } + return 0; } @@ -899,12 +907,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; } - ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt); + ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode); if (ret) return ret; if (r_pipe->sspp) { - ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt); + ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt, + &crtc_state->adjusted_mode); if (ret) return ret; } diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index a7a5c7e0ab92..77a8d9366ed7 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1774,13 +1774,6 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) return rc; while (--link_train_max_retries) { - rc = dp_ctrl_reinitialize_mainlink(ctrl); - if (rc) { - DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", - rc); - break; - } - training_step = DP_TRAINING_NONE; rc = dp_ctrl_setup_main_link(ctrl, &training_step); if (rc == 0) { @@ -1832,6 +1825,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) /* stop link training before start re training */ dp_ctrl_clear_training_pattern(ctrl); } + + rc = dp_ctrl_reinitialize_mainlink(ctrl); + if (rc) { + DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc); + break; + } } if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index 42427129acea..6375daaeb98e 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -1090,7 +1090,7 @@ int dp_link_process_request(struct dp_link *dp_link) } else if (dp_link_read_psr_error_status(link)) { DRM_ERROR("PSR IRQ_HPD received\n"); } else if (dp_link_psr_capability_changed(link)) { - drm_dbg_dp(link->drm_dev, "PSR Capability changed"); + drm_dbg_dp(link->drm_dev, "PSR Capability changed\n"); } else { ret = dp_link_process_link_status_update(link); if (!ret) { @@ -1107,7 +1107,7 @@ int dp_link_process_request(struct dp_link *dp_link) } } - drm_dbg_dp(link->drm_dev, "sink request=%#x", + drm_dbg_dp(link->drm_dev, "sink request=%#x\n", dp_link->sink_request); return ret; } diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 5d9ec27c89d3..3d6fb708dc22 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1082,9 +1082,21 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host) static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host) { + u32 data; + if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) return; + data = dsi_read(msm_host, REG_DSI_STATUS0); + + /* if video mode engine is not busy, its because + * either timing engine was not turned on or the + * DSI controller has finished transmitting the video + * data already, so no need to wait in those cases + */ + if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY)) + return; + if (msm_host->power_on && msm_host->enabled) { dsi_wait4video_done(msm_host); /* delay 4 ms to skip BLLP */ @@ -1894,10 +1906,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) } msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); - if 
(msm_host->irq < 0) { - ret = msm_host->irq; - dev_err(&pdev->dev, "failed to get irq: %d\n", ret); - return ret; + if (!msm_host->irq) { + dev_err(&pdev->dev, "failed to get irq\n"); + return -EINVAL; } /* do not autoenable, will be enabled later */ diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c index 2e87dd6cb17b..348c66b14683 100644 --- a/drivers/gpu/drm/msm/msm_mdss.c +++ b/drivers/gpu/drm/msm/msm_mdss.c @@ -511,7 +511,7 @@ static int mdss_remove(struct platform_device *pdev) static const struct msm_mdss_data msm8998_data = { .ubwc_enc_version = UBWC_1_0, .ubwc_dec_version = UBWC_1_0, - .highest_bank_bit = 1, + .highest_bank_bit = 2, }; static const struct msm_mdss_data qcm2290_data = { diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 30afbec9e3b1..2edd7bb13fae 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -31,6 +31,7 @@ #include "nouveau_drv.h" #include "nouveau_dma.h" +#include "nouveau_exec.h" #include "nouveau_gem.h" #include "nouveau_chan.h" #include "nouveau_abi16.h" @@ -183,6 +184,20 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16) cli->abi16 = NULL; } +static inline int +getparam_dma_ib_max(struct nvif_device *device) +{ + const struct nvif_mclass dmas[] = { + { NV03_CHANNEL_DMA, 0 }, + { NV10_CHANNEL_DMA, 0 }, + { NV17_CHANNEL_DMA, 0 }, + { NV40_CHANNEL_DMA, 0 }, + {} + }; + + return nvif_mclass(&device->object, dmas) < 0 ? NV50_DMA_IB_MAX : 0; +} + int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) { @@ -247,6 +262,12 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) case NOUVEAU_GETPARAM_GRAPH_UNITS: getparam->value = nvkm_gr_units(gr); break; + case NOUVEAU_GETPARAM_EXEC_PUSH_MAX: { + int ib_max = getparam_dma_ib_max(device); + + getparam->value = nouveau_exec_push_max_from_ib_max(ib_max); + break; + } default: NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param); return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index bb3d6e5c122f..7c97b2886807 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -257,10 +257,7 @@ static int nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool priv, u64 runm, struct nouveau_channel **pchan) { - static const struct { - s32 oclass; - int version; - } hosts[] = { + const struct nvif_mclass hosts[] = { { AMPERE_CHANNEL_GPFIFO_B, 0 }, { AMPERE_CHANNEL_GPFIFO_A, 0 }, { TURING_CHANNEL_GPFIFO_A, 0 }, @@ -443,9 +440,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) } /* initialise dma tracking parameters */ - switch (chan->user.oclass & 0x00ff) { - case 0x006b: - case 0x006e: + switch (chan->user.oclass) { + case NV03_CHANNEL_DMA: + case NV10_CHANNEL_DMA: + case NV17_CHANNEL_DMA: + case NV40_CHANNEL_DMA: chan->user_put = 0x40; chan->user_get = 0x44; chan->dma.max = (0x10000 / 4) - 2; @@ -455,7 +454,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) chan->user_get = 0x44; chan->user_get_hi = 0x60; chan->dma.ib_base = 0x10000 / 4; - chan->dma.ib_max = (0x02000 / 8) - 1; + chan->dma.ib_max = NV50_DMA_IB_MAX; chan->dma.ib_put = 0; chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put; chan->dma.max = chan->dma.ib_base; diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index 1744d95b233e..c52cda82353e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ 
b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -49,6 +49,9 @@ void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length, /* Maximum push buffer size. */ #define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff +/* Maximum IBs per ring. */ +#define NV50_DMA_IB_MAX ((0x02000 / 8) - 1) + /* Object handles - for stuff that's doesn't use handle == oclass. */ enum { NvDmaFB = 0x80000002, diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index 5dda94e1318c..c1837ba95fb5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -379,7 +379,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev, struct nouveau_channel *chan = NULL; struct nouveau_exec_job_args args = {}; struct drm_nouveau_exec *req = data; - int ret = 0; + int push_max, ret = 0; if (unlikely(!abi16)) return -ENOMEM; @@ -404,9 +404,10 @@ nouveau_exec_ioctl_exec(struct drm_device *dev, if (!chan->dma.ib_max) return nouveau_abi16_put(abi16, -ENOSYS); - if (unlikely(req->push_count > NOUVEAU_GEM_MAX_PUSH)) { + push_max = nouveau_exec_push_max_from_ib_max(chan->dma.ib_max); + if (unlikely(req->push_count > push_max)) { NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n", - req->push_count, NOUVEAU_GEM_MAX_PUSH); + req->push_count, push_max); return nouveau_abi16_put(abi16, -EINVAL); } diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h index 778cacd90f65..5488d337bcc0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.h +++ b/drivers/gpu/drm/nouveau/nouveau_exec.h @@ -51,4 +51,14 @@ int nouveau_exec_job_init(struct nouveau_exec_job **job, int nouveau_exec_ioctl_exec(struct drm_device *dev, void *data, struct drm_file *file_priv); +static inline unsigned int +nouveau_exec_push_max_from_ib_max(int ib_max) +{ + /* Limit the number of IBs per job to half the size of the ring in order + * to avoid the ring running dry between submissions and preserve one + * more slot for the job's HW fence. + */ + return ib_max > 1 ? 
ib_max / 2 - 1 : 0; +} + #endif diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index 5ac926281d2c..c9087f474cbc 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -1342,9 +1342,7 @@ static const struct panel_init_cmd starry_himax83102_j02_init_cmd[] = { _INIT_DCS_CMD(0xB1, 0x01, 0xBF, 0x11), _INIT_DCS_CMD(0xCB, 0x86), _INIT_DCS_CMD(0xD2, 0x3C, 0xFA), - _INIT_DCS_CMD(0xE9, 0xC5), - _INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01), - _INIT_DCS_CMD(0xE9, 0x3F), + _INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01), _INIT_DCS_CMD(0xE7, 0x02, 0x00, 0x28, 0x01, 0x7E, 0x0F, 0x7E, 0x10, 0xA0, 0x00, 0x00, 0x20, 0x40, 0x50, 0x40), _INIT_DCS_CMD(0xBD, 0x02), _INIT_DCS_CMD(0xD8, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0), diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 506371c42745..5a3a622fc672 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -929,7 +929,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) if (next) { next->s_fence->scheduled.timestamp = - job->s_fence->finished.timestamp; + dma_fence_timestamp(&job->s_fence->finished); /* start TO timer for next job */ drm_sched_start_timeout(sched); } diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index c1dfbfcaa000..bccb33b900f3 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -118,7 +118,7 @@ void drm_kunit_helper_free_device(struct kunit *test, struct device *dev) kunit_release_action(test, kunit_action_platform_driver_unregister, - pdev); + &fake_platform_driver); } EXPORT_SYMBOL_GPL(drm_kunit_helper_free_device); diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index ff86ba1ae1b8..8ea120eb8674 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -745,7 +745,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res)); if (ret) { - drm_err(dev, "could not acquire memory range %pr: %d\n", &res, ret); + drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index c43853597776..2bfac3aad7b7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -34,6 +34,8 @@ static void vmw_bo_release(struct vmw_bo *vbo) { + WARN_ON(vbo->tbo.base.funcs && + kref_read(&vbo->tbo.base.refcount) != 0); vmw_bo_unmap(vbo); drm_gem_object_release(&vbo->tbo.base); } @@ -497,7 +499,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp, if (!(flags & drm_vmw_synccpu_allow_cs)) { atomic_dec(&vmw_bo->cpu_writers); } - vmw_user_bo_unref(vmw_bo); + vmw_user_bo_unref(&vmw_bo); } return ret; @@ -539,7 +541,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, return ret; ret = vmw_user_bo_synccpu_grab(vbo, arg->flags); - vmw_user_bo_unref(vbo); + vmw_user_bo_unref(&vbo); if (unlikely(ret != 0)) { if (ret == -ERESTARTSYS || ret == -EBUSY) return -EBUSY; @@ -612,7 +614,6 @@ int vmw_user_bo_lookup(struct drm_file *filp, } *out = to_vmw_bo(gobj); - ttm_bo_get(&(*out)->tbo); return 0; } diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h index 1d433fceed3d..0d496dc9c6af 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h @@ -195,12 +195,19 @@ static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf) return buf; } -static inline void vmw_user_bo_unref(struct vmw_bo *vbo) +static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo) { - if (vbo) { - ttm_bo_put(&vbo->tbo); - drm_gem_object_put(&vbo->tbo.base); - } + drm_gem_object_get(&vbo->tbo.base); + return vbo; +} + +static inline void vmw_user_bo_unref(struct vmw_bo **buf) +{ + struct vmw_bo *tmp_buf = *buf; + + *buf = NULL; + if (tmp_buf) + drm_gem_object_put(&tmp_buf->tbo.base); } static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index c0b24d1cacbf..a7c07692262b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) * for the new COTable. Initially pin the buffer object to make sure * we can use tryreserve without failure. */ - ret = vmw_bo_create(dev_priv, &bo_params, &buf); + ret = vmw_gem_object_create(dev_priv, &bo_params, &buf); if (ret) { DRM_ERROR("Failed initializing new cotable MOB.\n"); goto out_done; @@ -502,7 +502,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) vmw_resource_mob_attach(res); /* Let go of the old mob. */ - vmw_bo_unreference(&old_buf); + vmw_user_bo_unref(&old_buf); res->id = vcotbl->type; ret = dma_resv_reserve_fences(bo->base.resv, 1); @@ -521,7 +521,7 @@ out_map_new: out_wait: ttm_bo_unpin(bo); ttm_bo_unreserve(bo); - vmw_bo_unreference(&buf); + vmw_user_bo_unref(&buf); out_done: MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 58bfdf203eca..3cd5090dedfc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -853,6 +853,10 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res) /** * GEM related functionality - vmwgfx_gem.c */ +struct vmw_bo_params; +int vmw_gem_object_create(struct vmw_private *vmw, + struct vmw_bo_params *params, + struct vmw_bo **p_vbo); extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t size, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 98e0723ca6f5..36987ef3fc30 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1151,7 +1151,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, SVGAMobId *id, struct vmw_bo **vmw_bo_p) { - struct vmw_bo *vmw_bo; + struct vmw_bo *vmw_bo, *tmp_bo; uint32_t handle = *id; struct vmw_relocation *reloc; int ret; @@ -1164,7 +1164,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, } vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB); ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); - vmw_user_bo_unref(vmw_bo); + tmp_bo = vmw_bo; + vmw_user_bo_unref(&tmp_bo); if (unlikely(ret != 0)) return ret; @@ -1206,7 +1207,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, SVGAGuestPtr *ptr, struct vmw_bo **vmw_bo_p) { - struct vmw_bo *vmw_bo; + struct vmw_bo *vmw_bo, *tmp_bo; uint32_t handle = ptr->gmrId; struct vmw_relocation 
*reloc; int ret; @@ -1220,7 +1221,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); - vmw_user_bo_unref(vmw_bo); + tmp_bo = vmw_bo; + vmw_user_bo_unref(&tmp_bo); if (unlikely(ret != 0)) return ret; @@ -1619,7 +1621,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState); SVGA3dTextureState *last_state = (SVGA3dTextureState *) - ((unsigned long) header + header->size + sizeof(header)); + ((unsigned long) header + header->size + sizeof(*header)); SVGA3dTextureState *cur_state = (SVGA3dTextureState *) ((unsigned long) header + sizeof(*cmd)); struct vmw_resource *ctx; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c index c0da89e16e6f..8b1eb0061610 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c @@ -111,6 +111,20 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = { .vm_ops = &vmw_vm_ops, }; +int vmw_gem_object_create(struct vmw_private *vmw, + struct vmw_bo_params *params, + struct vmw_bo **p_vbo) +{ + int ret = vmw_bo_create(vmw, params, p_vbo); + + if (ret != 0) + goto out_no_bo; + + (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs; +out_no_bo: + return ret; +} + int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t size, @@ -126,12 +140,10 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, .pin = false }; - ret = vmw_bo_create(dev_priv, ¶ms, p_vbo); + ret = vmw_gem_object_create(dev_priv, ¶ms, p_vbo); if (ret != 0) goto out_no_bo; - (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs; - ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle); out_no_bo: return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 1489ad73c103..818b7f109f53 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1471,8 +1471,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev, /* Reserve and switch the backing mob. 
*/ mutex_lock(&res->dev_priv->cmdbuf_mutex); (void) vmw_resource_reserve(res, false, true); - vmw_bo_unreference(&res->guest_memory_bo); - res->guest_memory_bo = vmw_bo_reference(bo_mob); + vmw_user_bo_unref(&res->guest_memory_bo); + res->guest_memory_bo = vmw_user_bo_ref(bo_mob); res->guest_memory_offset = 0; vmw_resource_unreserve(res, false, false, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); @@ -1666,7 +1666,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, err_out: /* vmw_user_lookup_handle takes one ref so does new_fb */ if (bo) - vmw_user_bo_unref(bo); + vmw_user_bo_unref(&bo); if (surface) vmw_surface_unreference(&surface); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index fb85f244c3d0..c45b4724e414 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, ret = vmw_overlay_update_stream(dev_priv, buf, arg, true); - vmw_user_bo_unref(buf); + vmw_user_bo_unref(&buf); out_unlock: mutex_unlock(&overlay->mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 71eeabf001c8..ca300c7427d2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -141,7 +141,7 @@ static void vmw_resource_release(struct kref *kref) if (res->coherent) vmw_bo_dirty_release(res->guest_memory_bo); ttm_bo_unreserve(bo); - vmw_bo_unreference(&res->guest_memory_bo); + vmw_user_bo_unref(&res->guest_memory_bo); } if (likely(res->hw_destroy != NULL)) { @@ -338,7 +338,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, return 0; } - ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo); + ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo); if (unlikely(ret != 0)) goto out_no_bo; @@ -457,11 +457,11 @@ void vmw_resource_unreserve(struct vmw_resource *res, vmw_resource_mob_detach(res); if (res->coherent) vmw_bo_dirty_release(res->guest_memory_bo); - vmw_bo_unreference(&res->guest_memory_bo); + vmw_user_bo_unref(&res->guest_memory_bo); } if (new_guest_memory_bo) { - res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo); + res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo); /* * The validation code should already have added a @@ -551,7 +551,7 @@ out_no_reserve: ttm_bo_put(val_buf->bo); val_buf->bo = NULL; if (guest_memory_dirty) - vmw_bo_unreference(&res->guest_memory_bo); + vmw_user_bo_unref(&res->guest_memory_bo); return ret; } @@ -727,7 +727,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr, goto out_no_validate; else if (!res->func->needs_guest_memory && res->guest_memory_bo) { WARN_ON_ONCE(vmw_resource_mob_attached(res)); - vmw_bo_unreference(&res->guest_memory_bo); + vmw_user_bo_unref(&res->guest_memory_bo); } return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 1e81ff2422cf..a01ca3226d0a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -180,7 +180,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, res->guest_memory_size = size; if (byte_code) { - res->guest_memory_bo = vmw_bo_reference(byte_code); + res->guest_memory_bo = vmw_user_bo_ref(byte_code); res->guest_memory_offset = offset; } shader->size = size; @@ -809,7 +809,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, shader_type, num_input_sig, 
num_output_sig, tfile, shader_handle); out_bad_arg: - vmw_user_bo_unref(buffer); + vmw_user_bo_unref(&buffer); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 5db403ee8261..3829be282ff0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -686,9 +686,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) container_of(base, struct vmw_user_surface, prime.base); struct vmw_resource *res = &user_srf->srf.res; - if (res->guest_memory_bo) - drm_gem_object_put(&res->guest_memory_bo->tbo.base); - *p_base = NULL; vmw_resource_unreference(&res); } @@ -855,23 +852,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, * expect a backup buffer to be present. */ if (dev_priv->has_mob && req->shareable) { - uint32_t backup_handle; - - ret = vmw_gem_object_create_with_handle(dev_priv, - file_priv, - res->guest_memory_size, - &backup_handle, - &res->guest_memory_bo); + struct vmw_bo_params params = { + .domain = VMW_BO_DOMAIN_SYS, + .busy_domain = VMW_BO_DOMAIN_SYS, + .bo_type = ttm_bo_type_device, + .size = res->guest_memory_size, + .pin = false + }; + + ret = vmw_gem_object_create(dev_priv, + ¶ms, + &res->guest_memory_bo); if (unlikely(ret != 0)) { vmw_resource_unreference(&res); goto out_unlock; } - vmw_bo_reference(res->guest_memory_bo); - /* - * We don't expose the handle to the userspace and surface - * already holds a gem reference - */ - drm_gem_handle_delete(file_priv, backup_handle); } tmp = vmw_resource_reference(&srf->res); @@ -1512,7 +1507,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, if (ret == 0) { if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) { VMW_DEBUG_USER("Surface backup buffer too small.\n"); - vmw_bo_unreference(&res->guest_memory_bo); + vmw_user_bo_unref(&res->guest_memory_bo); ret = -EINVAL; goto out_unlock; } else { @@ -1526,8 +1521,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, res->guest_memory_size, &backup_handle, &res->guest_memory_bo); - if (ret == 0) - vmw_bo_reference(res->guest_memory_bo); } if (unlikely(ret != 0)) { diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 0cea301cc9a9..790aa908e2a7 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -799,6 +799,8 @@ config HID_NVIDIA_SHIELD tristate "NVIDIA SHIELD devices" depends on USB_HID depends on BT_HIDP + depends on LEDS_CLASS + select POWER_SUPPLY help Support for NVIDIA SHIELD accessories. 
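In the vmwgfx hunks above, vmw_user_bo_unref() now takes a double pointer so the caller's copy is cleared at the same moment the GEM reference is dropped, which is why call sites such as vmw_translate_mob_ptr() pass &tmp_bo. Below is a minimal user-space sketch of that calling convention; obj, obj_put() and the refcount field are hypothetical stand-ins, not the driver's real types.

#include <stdlib.h>

struct obj {
        int refcount;
};

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0)
                free(o);
}

/* Clear the caller's pointer before dropping the reference, mirroring
 * the vmw_user_bo_unref(struct vmw_bo **buf) convention above. */
static void obj_unref(struct obj **pobj)
{
        struct obj *tmp = *pobj;

        *pobj = NULL;
        if (tmp)
                obj_put(tmp);
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        o->refcount = 1;
        obj_unref(&o);          /* frees the object and leaves o == NULL */
        return o == NULL ? 0 : 1;
}

Clearing the caller's pointer inside the helper is what keeps a stale pointer from being put a second time, which is the bug class these conversions guard against.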
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c index 403506b9697e..b346d68a06f5 100644 --- a/drivers/hid/hid-holtek-kbd.c +++ b/drivers/hid/hid-holtek-kbd.c @@ -130,6 +130,10 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, return -ENODEV; boot_hid = usb_get_intfdata(boot_interface); + if (list_empty(&boot_hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } boot_hid_input = list_first_entry(&boot_hid->inputs, struct hid_input, list); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 7e499992a793..e4d2dfd5d253 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -425,6 +425,7 @@ #define I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100 0x29F5 #define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1 0x2BED #define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2 0x2BEE +#define I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG 0x2D02 #define USB_VENDOR_ID_ELECOM 0x056e #define USB_DEVICE_ID_ELECOM_BM084 0x0061 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 0235cc1690a1..c8b20d44b147 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -409,6 +409,8 @@ static const struct hid_device_id hid_battery_quirks[] = { HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2), HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG), + HID_BATTERY_QUIRK_IGNORE }, {} }; diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 05f5b5f588a2..a209d51bd247 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -4515,7 +4515,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) goto hid_hw_init_fail; } - hidpp_connect_event(hidpp); + schedule_work(&hidpp->work); + flush_work(&hidpp->work); if (will_restart) { /* Reset the HID node state */ @@ -4677,6 +4678,8 @@ static const struct hid_device_id hidpp_devices[] = { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) }, { /* MX Master mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012) }, + { /* M720 Triathlon mouse over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb015) }, { /* MX Ergo trackball over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) }, diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 521b2ffb4244..8db4ae05febc 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2146,6 +2146,10 @@ static const struct hid_device_id mt_devices[] = { /* Synaptics devices */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_SYNAPTICS, 0xcd7e) }, + + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_SYNAPTICS, 0xce08) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index 250f5d2f888a..10468f727e5b 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -2088,7 +2088,9 @@ static int joycon_read_info(struct joycon_ctlr *ctlr) struct joycon_input_report *report; req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO; + mutex_lock(&ctlr->output_mutex); ret = joycon_send_subcmd(ctlr, &req, 0, HZ); + mutex_unlock(&ctlr->output_mutex); if (ret) { hid_err(ctlr->hdev, "Failed to get joycon info; 
ret=%d\n", ret); return ret; @@ -2117,6 +2119,85 @@ static int joycon_read_info(struct joycon_ctlr *ctlr) return 0; } +static int joycon_init(struct hid_device *hdev) +{ + struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); + int ret = 0; + + mutex_lock(&ctlr->output_mutex); + /* if handshake command fails, assume ble pro controller */ + if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) && + !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) { + hid_dbg(hdev, "detected USB controller\n"); + /* set baudrate for improved latency */ + ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ); + if (ret) { + hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret); + goto out_unlock; + } + /* handshake */ + ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ); + if (ret) { + hid_err(hdev, "Failed handshake; ret=%d\n", ret); + goto out_unlock; + } + /* + * Set no timeout (to keep controller in USB mode). + * This doesn't send a response, so ignore the timeout. + */ + joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10); + } else if (jc_type_is_chrggrip(ctlr)) { + hid_err(hdev, "Failed charging grip handshake\n"); + ret = -ETIMEDOUT; + goto out_unlock; + } + + /* get controller calibration data, and parse it */ + ret = joycon_request_calibration(ctlr); + if (ret) { + /* + * We can function with default calibration, but it may be + * inaccurate. Provide a warning, and continue on. + */ + hid_warn(hdev, "Analog stick positions may be inaccurate\n"); + } + + /* get IMU calibration data, and parse it */ + ret = joycon_request_imu_calibration(ctlr); + if (ret) { + /* + * We can function with default calibration, but it may be + * inaccurate. Provide a warning, and continue on. + */ + hid_warn(hdev, "Unable to read IMU calibration data\n"); + } + + /* Set the reporting mode to 0x30, which is the full report mode */ + ret = joycon_set_report_mode(ctlr); + if (ret) { + hid_err(hdev, "Failed to set report mode; ret=%d\n", ret); + goto out_unlock; + } + + /* Enable rumble */ + ret = joycon_enable_rumble(ctlr); + if (ret) { + hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret); + goto out_unlock; + } + + /* Enable the IMU */ + ret = joycon_enable_imu(ctlr); + if (ret) { + hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret); + goto out_unlock; + } + +out_unlock: + mutex_unlock(&ctlr->output_mutex); + return ret; +} + /* Common handler for parsing inputs */ static int joycon_ctlr_read_handler(struct joycon_ctlr *ctlr, u8 *data, int size) @@ -2248,85 +2329,19 @@ static int nintendo_hid_probe(struct hid_device *hdev, hid_device_io_start(hdev); - /* Initialize the controller */ - mutex_lock(&ctlr->output_mutex); - /* if handshake command fails, assume ble pro controller */ - if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) && - !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) { - hid_dbg(hdev, "detected USB controller\n"); - /* set baudrate for improved latency */ - ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ); - if (ret) { - hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret); - goto err_mutex; - } - /* handshake */ - ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ); - if (ret) { - hid_err(hdev, "Failed handshake; ret=%d\n", ret); - goto err_mutex; - } - /* - * Set no timeout (to keep controller in USB mode). - * This doesn't send a response, so ignore the timeout. 
- */ - joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10); - } else if (jc_type_is_chrggrip(ctlr)) { - hid_err(hdev, "Failed charging grip handshake\n"); - ret = -ETIMEDOUT; - goto err_mutex; - } - - /* get controller calibration data, and parse it */ - ret = joycon_request_calibration(ctlr); + ret = joycon_init(hdev); if (ret) { - /* - * We can function with default calibration, but it may be - * inaccurate. Provide a warning, and continue on. - */ - hid_warn(hdev, "Analog stick positions may be inaccurate\n"); - } - - /* get IMU calibration data, and parse it */ - ret = joycon_request_imu_calibration(ctlr); - if (ret) { - /* - * We can function with default calibration, but it may be - * inaccurate. Provide a warning, and continue on. - */ - hid_warn(hdev, "Unable to read IMU calibration data\n"); - } - - /* Set the reporting mode to 0x30, which is the full report mode */ - ret = joycon_set_report_mode(ctlr); - if (ret) { - hid_err(hdev, "Failed to set report mode; ret=%d\n", ret); - goto err_mutex; - } - - /* Enable rumble */ - ret = joycon_enable_rumble(ctlr); - if (ret) { - hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret); - goto err_mutex; - } - - /* Enable the IMU */ - ret = joycon_enable_imu(ctlr); - if (ret) { - hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret); - goto err_mutex; + hid_err(hdev, "Failed to initialize controller; ret=%d\n", ret); + goto err_close; } ret = joycon_read_info(ctlr); if (ret) { hid_err(hdev, "Failed to retrieve controller info; ret=%d\n", ret); - goto err_mutex; + goto err_close; } - mutex_unlock(&ctlr->output_mutex); - /* Initialize the leds */ ret = joycon_leds_create(ctlr); if (ret) { @@ -2352,8 +2367,6 @@ static int nintendo_hid_probe(struct hid_device *hdev, hid_dbg(hdev, "probe - success\n"); return 0; -err_mutex: - mutex_unlock(&ctlr->output_mutex); err_close: hid_hw_close(hdev); err_stop: @@ -2383,6 +2396,20 @@ static void nintendo_hid_remove(struct hid_device *hdev) hid_hw_stop(hdev); } +#ifdef CONFIG_PM + +static int nintendo_hid_resume(struct hid_device *hdev) +{ + int ret = joycon_init(hdev); + + if (ret) + hid_err(hdev, "Failed to restore controller after resume"); + + return ret; +} + +#endif + static const struct hid_device_id nintendo_hid_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_PROCON) }, @@ -2404,6 +2431,10 @@ static struct hid_driver nintendo_hid_driver = { .probe = nintendo_hid_probe, .remove = nintendo_hid_remove, .raw_event = nintendo_hid_event, + +#ifdef CONFIG_PM + .resume = nintendo_hid_resume, +#endif }; module_hid_driver(nintendo_hid_driver); diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c index 9a3576dbf421..c463e54decbc 100644 --- a/drivers/hid/hid-nvidia-shield.c +++ b/drivers/hid/hid-nvidia-shield.c @@ -801,7 +801,7 @@ static inline int thunderstrike_led_create(struct thunderstrike *ts) led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL, "thunderstrike%d:blue:led", ts->id); led->max_brightness = 1; - led->flags = LED_CORE_SUSPENDRESUME; + led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN; led->brightness_get = &thunderstrike_led_get_brightness; led->brightness_set = &thunderstrike_led_set_brightness; @@ -1058,7 +1058,7 @@ static int shield_probe(struct hid_device *hdev, const struct hid_device_id *id) ret = hid_hw_start(hdev, HID_CONNECT_HIDINPUT); if (ret) { hid_err(hdev, "Failed to start HID device\n"); - goto err_haptics; + goto err_ts_create; } ret = hid_hw_open(hdev); @@ -1073,9 +1073,12 @@ static int 
shield_probe(struct hid_device *hdev, const struct hid_device_id *id) err_stop: hid_hw_stop(hdev); -err_haptics: +err_ts_create: + power_supply_unregister(ts->base.battery_dev.psy); if (ts->haptics_dev) input_unregister_device(ts->haptics_dev); + led_classdev_unregister(&ts->led_dev); + ida_free(&thunderstrike_ida, ts->id); return ret; } diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index dd942061fd77..ebc0aa4e4345 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -2155,6 +2155,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) return ret; err: + usb_free_urb(sc->ghl_urb); + hid_hw_stop(hdev); return ret; } diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c index 43d2cf7153d7..b3edadf42d6d 100644 --- a/drivers/hid/hid-steelseries.c +++ b/drivers/hid/hid-steelseries.c @@ -390,7 +390,7 @@ static int steelseries_headset_arctis_1_fetch_battery(struct hid_device *hdev) ret = hid_hw_raw_request(hdev, arctis_1_battery_request[0], write_buf, sizeof(arctis_1_battery_request), HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); - if (ret < sizeof(arctis_1_battery_request)) { + if (ret < (int)sizeof(arctis_1_battery_request)) { hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret); ret = -ENODATA; } diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 9601c0605fd9..2735cd585af0 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -998,45 +998,29 @@ static int i2c_hid_core_resume(struct i2c_hid *ihid) return hid_driver_reset_resume(hid); } -/** - * __do_i2c_hid_core_initial_power_up() - First time power up of the i2c-hid device. - * @ihid: The ihid object created during probe. - * - * This function is called at probe time. - * - * The initial power on is where we do some basic validation that the device - * exists, where we fetch the HID descriptor, and where we create the actual - * HID devices. - * - * Return: 0 or error code. +/* + * Check that the device exists and parse the HID descriptor. 
*/ -static int __do_i2c_hid_core_initial_power_up(struct i2c_hid *ihid) +static int __i2c_hid_core_probe(struct i2c_hid *ihid) { struct i2c_client *client = ihid->client; struct hid_device *hid = ihid->hid; int ret; - ret = i2c_hid_core_power_up(ihid); - if (ret) - return ret; - /* Make sure there is something at this address */ ret = i2c_smbus_read_byte(client); if (ret < 0) { i2c_hid_dbg(ihid, "nothing at this address: %d\n", ret); - ret = -ENXIO; - goto err; + return -ENXIO; } ret = i2c_hid_fetch_hid_descriptor(ihid); if (ret < 0) { dev_err(&client->dev, "Failed to fetch the HID Descriptor\n"); - goto err; + return ret; } - enable_irq(client->irq); - hid->version = le16_to_cpu(ihid->hdesc.bcdVersion); hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID); hid->product = le16_to_cpu(ihid->hdesc.wProductID); @@ -1050,17 +1034,49 @@ static int __do_i2c_hid_core_initial_power_up(struct i2c_hid *ihid) ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product); + return 0; +} + +static int i2c_hid_core_register_hid(struct i2c_hid *ihid) +{ + struct i2c_client *client = ihid->client; + struct hid_device *hid = ihid->hid; + int ret; + + enable_irq(client->irq); + ret = hid_add_device(hid); if (ret) { if (ret != -ENODEV) hid_err(client, "can't add hid device: %d\n", ret); - goto err; + disable_irq(client->irq); + return ret; } return 0; +} + +static int i2c_hid_core_probe_panel_follower(struct i2c_hid *ihid) +{ + int ret; + + ret = i2c_hid_core_power_up(ihid); + if (ret) + return ret; -err: + ret = __i2c_hid_core_probe(ihid); + if (ret) + goto err_power_down; + + ret = i2c_hid_core_register_hid(ihid); + if (ret) + goto err_power_down; + + return 0; + +err_power_down: i2c_hid_core_power_down(ihid); + return ret; } @@ -1077,7 +1093,7 @@ static void ihid_core_panel_prepare_work(struct work_struct *work) * steps. */ if (!hid->version) - ret = __do_i2c_hid_core_initial_power_up(ihid); + ret = i2c_hid_core_probe_panel_follower(ihid); else ret = i2c_hid_core_resume(ihid); @@ -1136,7 +1152,6 @@ static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid) struct device *dev = &ihid->client->dev; int ret; - ihid->is_panel_follower = true; ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_funcs; /* @@ -1156,30 +1171,6 @@ static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid) return 0; } -static int i2c_hid_core_initial_power_up(struct i2c_hid *ihid) -{ - /* - * If we're a panel follower, we'll register and do our initial power - * up when the panel turns on; otherwise we do it right away. - */ - if (drm_is_panel_follower(&ihid->client->dev)) - return i2c_hid_core_register_panel_follower(ihid); - else - return __do_i2c_hid_core_initial_power_up(ihid); -} - -static void i2c_hid_core_final_power_down(struct i2c_hid *ihid) -{ - /* - * If we're a follower, the act of unfollowing will cause us to be - * powered down. Otherwise we need to manually do it. 
- */ - if (ihid->is_panel_follower) - drm_panel_remove_follower(&ihid->panel_follower); - else - i2c_hid_core_suspend(ihid, true); -} - int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops, u16 hid_descriptor_address, u32 quirks) { @@ -1211,6 +1202,7 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops, ihid->ops = ops; ihid->client = client; ihid->wHIDDescRegister = cpu_to_le16(hid_descriptor_address); + ihid->is_panel_follower = drm_is_panel_follower(&client->dev); init_waitqueue_head(&ihid->wait); mutex_init(&ihid->reset_lock); @@ -1224,14 +1216,10 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops, return ret; device_enable_async_suspend(&client->dev); - ret = i2c_hid_init_irq(client); - if (ret < 0) - goto err_buffers_allocated; - hid = hid_allocate_device(); if (IS_ERR(hid)) { ret = PTR_ERR(hid); - goto err_irq; + goto err_free_buffers; } ihid->hid = hid; @@ -1242,19 +1230,42 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops, hid->bus = BUS_I2C; hid->initial_quirks = quirks; - ret = i2c_hid_core_initial_power_up(ihid); + /* Power on and probe unless device is a panel follower. */ + if (!ihid->is_panel_follower) { + ret = i2c_hid_core_power_up(ihid); + if (ret < 0) + goto err_destroy_device; + + ret = __i2c_hid_core_probe(ihid); + if (ret < 0) + goto err_power_down; + } + + ret = i2c_hid_init_irq(client); + if (ret < 0) + goto err_power_down; + + /* + * If we're a panel follower, we'll register when the panel turns on; + * otherwise we do it right away. + */ + if (ihid->is_panel_follower) + ret = i2c_hid_core_register_panel_follower(ihid); + else + ret = i2c_hid_core_register_hid(ihid); if (ret) - goto err_mem_free; + goto err_free_irq; return 0; -err_mem_free: - hid_destroy_device(hid); - -err_irq: +err_free_irq: free_irq(client->irq, ihid); - -err_buffers_allocated: +err_power_down: + if (!ihid->is_panel_follower) + i2c_hid_core_power_down(ihid); +err_destroy_device: + hid_destroy_device(hid); +err_free_buffers: i2c_hid_free_buffers(ihid); return ret; @@ -1266,7 +1277,14 @@ void i2c_hid_core_remove(struct i2c_client *client) struct i2c_hid *ihid = i2c_get_clientdata(client); struct hid_device *hid; - i2c_hid_core_final_power_down(ihid); + /* + * If we're a follower, the act of unfollowing will cause us to be + * powered down. Otherwise we need to manually do it. + */ + if (ihid->is_panel_follower) + drm_panel_remove_follower(&ihid->panel_follower); + else + i2c_hid_core_suspend(ihid, true); hid = ihid->hid; hid_destroy_device(hid); diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 55cb25038e63..710fda5f19e1 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -133,6 +133,14 @@ static int enable_gpe(struct device *dev) } wakeup = &adev->wakeup; + /* + * Call acpi_disable_gpe(), so that reference count + * gpe_event_info->runtime_count doesn't overflow. + * When gpe_event_info->runtime_count = 0, the call + * to acpi_disable_gpe() simply return. 
+ */ + acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number); + acpi_sts = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number); if (ACPI_FAILURE(acpi_sts)) { dev_err(dev, "enable ose_gpe failed\n"); diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 66dc5f97a009..8311e1028ddb 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -610,7 +610,8 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata, flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size, &flat_buf->daddr, - DMA_FROM_DEVICE, GFP_KERNEL); + DMA_FROM_DEVICE, + GFP_KERNEL | __GFP_NOWARN); if (!flat_buf->vaddr) { kfree(flat_buf); return -ENOMEM; @@ -1174,16 +1175,6 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev) } /* - * In sysFS mode we can have multiple writers per sink. Since this - * sink is already enabled no memory is needed and the HW need not be - * touched, even if the buffer size has changed. - */ - if (drvdata->mode == CS_MODE_SYSFS) { - atomic_inc(&csdev->refcnt); - goto out; - } - - /* * If we don't have a buffer or it doesn't match the requested size, * use the buffer allocated above. Otherwise reuse the existing buffer. */ @@ -1204,7 +1195,7 @@ out: static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) { - int ret; + int ret = 0; unsigned long flags; struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct etr_buf *sysfs_buf = tmc_etr_get_sysfs_buffer(csdev); @@ -1213,12 +1204,24 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) return PTR_ERR(sysfs_buf); spin_lock_irqsave(&drvdata->spinlock, flags); + + /* + * In sysFS mode we can have multiple writers per sink. Since this + * sink is already enabled no memory is needed and the HW need not be + * touched, even if the buffer size has changed. 
+ */ + if (drvdata->mode == CS_MODE_SYSFS) { + atomic_inc(&csdev->refcnt); + goto out; + } + ret = tmc_etr_enable_hw(drvdata, sysfs_buf); if (!ret) { drvdata->mode = CS_MODE_SYSFS; atomic_inc(&csdev->refcnt); } +out: spin_unlock_irqrestore(&drvdata->spinlock, flags); if (!ret) diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index 69d1103b9508..b64fd365f83f 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -177,6 +177,7 @@ struct ad7192_chip_info { struct ad7192_state { const struct ad7192_chip_info *chip_info; struct regulator *avdd; + struct regulator *vref; struct clk *mclk; u16 int_vref_mv; u32 fclk; @@ -1008,10 +1009,30 @@ static int ad7192_probe(struct spi_device *spi) if (ret) return dev_err_probe(&spi->dev, ret, "Failed to enable specified DVdd supply\n"); - ret = regulator_get_voltage(st->avdd); - if (ret < 0) { - dev_err(&spi->dev, "Device tree error, reference voltage undefined\n"); - return ret; + st->vref = devm_regulator_get_optional(&spi->dev, "vref"); + if (IS_ERR(st->vref)) { + if (PTR_ERR(st->vref) != -ENODEV) + return PTR_ERR(st->vref); + + ret = regulator_get_voltage(st->avdd); + if (ret < 0) + return dev_err_probe(&spi->dev, ret, + "Device tree error, AVdd voltage undefined\n"); + } else { + ret = regulator_enable(st->vref); + if (ret) { + dev_err(&spi->dev, "Failed to enable specified Vref supply\n"); + return ret; + } + + ret = devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->vref); + if (ret) + return ret; + + ret = regulator_get_voltage(st->vref); + if (ret < 0) + return dev_err_probe(&spi->dev, ret, + "Device tree error, Vref voltage undefined\n"); } st->int_vref_mv = ret / 1000; diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c index f5a0fc9e64c5..fff6e5a2d956 100644 --- a/drivers/iio/adc/imx8qxp-adc.c +++ b/drivers/iio/adc/imx8qxp-adc.c @@ -38,8 +38,8 @@ #define IMX8QXP_ADR_ADC_FCTRL 0x30 #define IMX8QXP_ADR_ADC_SWTRIG 0x34 #define IMX8QXP_ADR_ADC_TCTRL(tid) (0xc0 + (tid) * 4) -#define IMX8QXP_ADR_ADC_CMDH(cid) (0x100 + (cid) * 8) -#define IMX8QXP_ADR_ADC_CMDL(cid) (0x104 + (cid) * 8) +#define IMX8QXP_ADR_ADC_CMDL(cid) (0x100 + (cid) * 8) +#define IMX8QXP_ADR_ADC_CMDH(cid) (0x104 + (cid) * 8) #define IMX8QXP_ADR_ADC_RESFIFO 0x300 #define IMX8QXP_ADR_ADC_TST 0xffc diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig index 877f9124803c..397544f23b85 100644 --- a/drivers/iio/addac/Kconfig +++ b/drivers/iio/addac/Kconfig @@ -24,6 +24,8 @@ config AD74413R depends on GPIOLIB && SPI select REGMAP_SPI select CRC8 + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say yes here to build support for Analog Devices AD74412R/AD74413R quad-channel software configurable input/output solution. diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index b72d39fc2434..6bfe5d6847e7 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -190,8 +190,11 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev, /* * Ignore samples if the buffer is not set: it is needed if the ODR is * set but the buffer is not enabled yet. + * + * Note: iio_device_claim_buffer_mode() returns -EBUSY if the buffer + * is not enabled. 
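The coresight TMC-ETR change above moves the sysFS-mode shortcut into tmc_enable_etr_sink_sysfs(), under drvdata->spinlock, so the mode check, the csdev->refcnt increment and the hardware enable are all handled in one critical section. A rough user-space sketch of that check-under-lock shape, using a pthread mutex as a stand-in for the spinlock and hypothetical names throughout:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcnt;
static bool enabled;

static int sink_enable(void)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (enabled) {          /* already on: just take another reference */
                refcnt++;
                goto out;
        }
        enabled = true;         /* stand-in for tmc_etr_enable_hw() */
        refcnt++;
out:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        sink_enable();
        sink_enable();
        return refcnt == 2 ? 0 : 1;
}

Keeping the "already enabled" test and the refcount increment in the same critical section as the enable step means a second writer always observes a consistent mode/refcount pair.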
*/ - if (!iio_buffer_enabled(indio_dev)) + if (iio_device_claim_buffer_mode(indio_dev) < 0) return 0; out = (s16 *)st->samples; @@ -210,6 +213,7 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev, iio_push_to_buffers_with_timestamp(indio_dev, st->samples, timestamp + delta); + iio_device_release_buffer_mode(indio_dev); return 0; } EXPORT_SYMBOL_GPL(cros_ec_sensors_push_data); diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c index d5ea1a1be122..a492e8f2fc0f 100644 --- a/drivers/iio/dac/ad3552r.c +++ b/drivers/iio/dac/ad3552r.c @@ -140,8 +140,8 @@ enum ad3552r_ch_vref_select { }; enum ad3542r_id { - AD3542R_ID = 0x4008, - AD3552R_ID = 0x4009, + AD3542R_ID = 0x4009, + AD3552R_ID = 0x4008, }; enum ad3552r_ch_output_range { diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c index 6355c1f28423..92923074f930 100644 --- a/drivers/iio/frequency/admv1013.c +++ b/drivers/iio/frequency/admv1013.c @@ -351,9 +351,9 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st) if (vcm < 0) return vcm; - if (vcm < 1800000) + if (vcm <= 1800000) mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100; - else if (vcm > 1800000 && vcm < 2600000) + else if (vcm > 1800000 && vcm <= 2600000) mixer_vgate = (2375 * vcm / 1000000 + 125) / 100; else return -EINVAL; diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig index fa79b1ac4f85..83e53acfbe88 100644 --- a/drivers/iio/imu/bno055/Kconfig +++ b/drivers/iio/imu/bno055/Kconfig @@ -2,6 +2,8 @@ config BOSCH_BNO055 tristate + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER config BOSCH_BNO055_SERIAL tristate "Bosch BNO055 attached via UART" diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index 3a52b09c2823..fdf763a04b0b 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -1513,7 +1513,6 @@ static int vcnl4040_write_event_config(struct iio_dev *indio_dev, out: mutex_unlock(&data->vcnl4000_lock); - data->chip_spec->set_power_state(data, data->ps_int || data->als_int); return ret; } diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 6089f3f9d8f4..a2ef1373a274 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -2179,7 +2179,7 @@ int bmp280_common_probe(struct device *dev, * however as it happens, the BMP085 shares the chip ID of BMP180 * so we look for an IRQ if we have that. */ - if (irq > 0 || (chip_id == BMP180_CHIP_ID)) { + if (irq > 0 && (chip_id == BMP180_CHIP_ID)) { ret = bmp085_fetch_eoc_irq(dev, name, irq, data); if (ret) return ret; diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c index b10dbf5cf494..1ff091b2f764 100644 --- a/drivers/iio/pressure/dps310.c +++ b/drivers/iio/pressure/dps310.c @@ -57,8 +57,8 @@ #define DPS310_RESET_MAGIC 0x09 #define DPS310_COEF_BASE 0x10 -/* Make sure sleep time is <= 20ms for usleep_range */ -#define DPS310_POLL_SLEEP_US(t) min(20000, (t) / 8) +/* Make sure sleep time is <= 30ms for usleep_range */ +#define DPS310_POLL_SLEEP_US(t) min(30000, (t) / 8) /* Silently handle error in rate value here */ #define DPS310_POLL_TIMEOUT_US(rc) ((rc) <= 0 ? 
1000000 : 1000000 / (rc)) @@ -402,8 +402,8 @@ static int dps310_reset_wait(struct dps310_data *data) if (rc) return rc; - /* Wait for device chip access: 2.5ms in specification */ - usleep_range(2500, 12000); + /* Wait for device chip access: 15ms in specification */ + usleep_range(15000, 55000); return 0; } diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c index 627497e61a63..2fc706f9d8ae 100644 --- a/drivers/iio/pressure/ms5611_core.c +++ b/drivers/iio/pressure/ms5611_core.c @@ -76,7 +76,7 @@ static bool ms5611_prom_is_valid(u16 *prom, size_t len) crc = (crc >> 12) & 0x000F; - return crc_orig != 0x0000 && crc == crc_orig; + return crc == crc_orig; } static int ms5611_read_prom(struct iio_dev *indio_dev) diff --git a/drivers/iio/proximity/irsd200.c b/drivers/iio/proximity/irsd200.c index 5bd791b46d98..bdff91f6b1a3 100644 --- a/drivers/iio/proximity/irsd200.c +++ b/drivers/iio/proximity/irsd200.c @@ -759,14 +759,14 @@ static irqreturn_t irsd200_trigger_handler(int irq, void *pollf) { struct iio_dev *indio_dev = ((struct iio_poll_func *)pollf)->indio_dev; struct irsd200_data *data = iio_priv(indio_dev); - s16 buf = 0; + s64 buf[2] = {}; int ret; - ret = irsd200_read_data(data, &buf); + ret = irsd200_read_data(data, (s16 *)buf); if (ret) goto end; - iio_push_to_buffers_with_timestamp(indio_dev, &buf, + iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns(indio_dev)); end: diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index c343edf2f664..1e2cd7c8716e 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -4968,7 +4968,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, int err = 0; struct sockaddr *addr = (struct sockaddr *)&mc->addr; struct net_device *ndev = NULL; - struct ib_sa_multicast ib; + struct ib_sa_multicast ib = {}; enum ib_gid_type gid_type; bool send_only; diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c index 7b68b3ea979f..f2fb2d8a6597 100644 --- a/drivers/infiniband/core/cma_configfs.c +++ b/drivers/infiniband/core/cma_configfs.c @@ -217,7 +217,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group, return -ENOMEM; for (i = 0; i < ports_num; i++) { - char port_str[10]; + char port_str[11]; ports[i].port_num = i + 1; snprintf(port_str, sizeof(port_str), "%u", i + 1); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index d5d3e4f0de77..6d1dbc978759 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -2529,6 +2529,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { }, [RDMA_NLDEV_CMD_SYS_SET] = { .doit = nldev_set_sys_set_doit, + .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_STAT_SET] = { .doit = nldev_stat_set_doit, diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index bf800f8cb3e4..495d5a5d0373 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -546,7 +546,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr, if (hdr->in_words * 4 != count) return -EINVAL; - if (count < method_elm->req_size + sizeof(hdr)) { + if (count < method_elm->req_size + sizeof(*hdr)) { /* * rdma-core v18 and v19 have a bug where they send DESTROY_CQ * with a 16 byte write instead of 24. 
Old kernels didn't diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 0848c2c2ffcf..faa88d12ee86 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -910,6 +910,10 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) list_del(&qp->list); mutex_unlock(&rdev->qp_lock); atomic_dec(&rdev->stats.res.qp_count); + if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC) + atomic_dec(&rdev->stats.res.rc_qp_count); + else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD) + atomic_dec(&rdev->stats.res.ud_qp_count); ib_umem_release(qp->rumem); ib_umem_release(qp->sumem); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index c8c4017fe405..e47b4ca64d33 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -665,7 +665,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, blocked = cookie & RCFW_CMD_IS_BLOCKING; cookie &= RCFW_MAX_COOKIE_VALUE; crsqe = &rcfw->crsqe_tbl[cookie]; - crsqe->is_in_used = false; if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags), @@ -681,8 +680,14 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, atomic_dec(&rcfw->timeout_send); if (crsqe->is_waiter_alive) { - if (crsqe->resp) + if (crsqe->resp) { memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); + /* Insert write memory barrier to ensure that + * response data is copied before clearing the + * flags + */ + smp_wmb(); + } if (!blocked) wait_cmds++; } @@ -694,6 +699,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, if (!is_waiter_alive) crsqe->resp = NULL; + crsqe->is_in_used = false; + hwq->cons += req_size; /* This is a case to handle below scenario - diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index ced615b5ea09..040ba2224f9f 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1965,6 +1965,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) int win; skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + req = __skb_put_zero(skb, sizeof(*req)); req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c index dcccb6015232..c317947563fb 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.c +++ b/drivers/infiniband/hw/erdma/erdma_verbs.c @@ -133,8 +133,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp) static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr) { struct erdma_pd *pd = to_epd(mr->ibmr.pd); + u32 mtt_level = ERDMA_MR_MTT_0LEVEL; struct erdma_cmdq_reg_mr_req req; - u32 mtt_level; erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR); @@ -147,10 +147,9 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr) req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist); mtt_level = mr->mem.mtt->level; } - } else { + } else if (mr->type != ERDMA_MR_TYPE_DMA) { memcpy(req.phy_addr, mr->mem.mtt->buf, MTT_SIZE(mr->mem.page_cnt)); - mtt_level = ERDMA_MR_MTT_0LEVEL; } req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) | @@ -655,7 +654,7 @@ static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev, mtt = kzalloc(sizeof(*mtt), GFP_KERNEL); if (!mtt) - return NULL; + 
return ERR_PTR(-ENOMEM); mtt->size = ALIGN(size, PAGE_SIZE); mtt->buf = vzalloc(mtt->size); diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index 24ee79aa2122..88f534cf690e 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, static int add_port_entries(struct mlx4_ib_dev *device, int port_num) { int i; - char buff[11]; + char buff[12]; struct mlx4_ib_iov_port *port = NULL; int ret = 0 ; struct ib_port_attr attr; diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c index 1e419e080b53..520034acf73a 100644 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@ -2470,8 +2470,8 @@ destroy_res: mlx5_steering_anchor_destroy_res(ft_prio); put_flow_table: put_flow_table(dev, ft_prio, true); - mutex_unlock(&dev->flow_db->lock); free_obj: + mutex_unlock(&dev->flow_db->lock); kfree(obj); return err; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index aed5cdea50e6..555629b798b9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2084,7 +2084,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) case MLX5_IB_MMAP_DEVICE_MEM: return "Device Memory"; default: - return NULL; + return "Unknown"; } } diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 3e345ef380f1..8a3762d9ff58 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -301,7 +301,8 @@ static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs) static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc) { - set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd); + set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0, + ent->dev->umrc.pd); MLX5_SET(mkc, mkc, free, 1); MLX5_SET(mkc, mkc, umr_en, 1); MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); @@ -1024,19 +1025,26 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) if (!dev->cache.wq) return; - cancel_delayed_work_sync(&dev->cache.remove_ent_dwork); mutex_lock(&dev->cache.rb_lock); for (node = rb_first(root); node; node = rb_next(node)) { ent = rb_entry(node, struct mlx5_cache_ent, node); xa_lock_irq(&ent->mkeys); ent->disabled = true; xa_unlock_irq(&ent->mkeys); - cancel_delayed_work_sync(&ent->dwork); } + mutex_unlock(&dev->cache.rb_lock); + + /* + * After all entries are disabled and will not reschedule on WQ, + * flush it and all async commands. + */ + flush_workqueue(dev->cache.wq); mlx5_mkey_cache_debugfs_cleanup(dev); mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); + /* At this point all entries are disabled and have no concurrent work. */ + mutex_lock(&dev->cache.rb_lock); node = rb_first(root); while (node) { ent = rb_entry(node, struct mlx5_cache_ent, node); diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index a2605178f4ed..43e776073f49 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -976,6 +976,7 @@ static void siw_accept_newconn(struct siw_cep *cep) siw_cep_put(cep); new_cep->listen_cep = NULL; if (rv) { + siw_cancel_mpatimer(new_cep); siw_cep_set_free(new_cep); goto error; } @@ -1100,9 +1101,12 @@ static void siw_cm_work_handler(struct work_struct *w) /* * Socket close before MPA request received. 
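In the erdma hunk above, erdma_create_scatter_mtt() now returns ERR_PTR(-ENOMEM) instead of NULL, so a caller that tests the result with IS_ERR() gets the real error code back. A small user-space sketch of that encoding follows, with simplified stand-ins for the <linux/err.h> helpers (the kernel's MAX_ERRNO-based definitions are the authoritative ones):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical allocator following the same convention as
 * erdma_create_scatter_mtt() after the change above. */
static void *create_buf(size_t size)
{
        void *p = malloc(size);

        if (!p)
                return ERR_PTR(-ENOMEM);
        return p;
}

int main(void)
{
        void *p = create_buf(64);

        if (IS_ERR(p)) {
                printf("allocation failed: %ld\n", PTR_ERR(p));
                return 1;
        }
        free(p);
        return 0;
}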
*/ - siw_dbg_cep(cep, "no mpareq: drop listener\n"); - siw_cep_put(cep->listen_cep); - cep->listen_cep = NULL; + if (cep->listen_cep) { + siw_dbg_cep(cep, + "no mpareq: drop listener\n"); + siw_cep_put(cep->listen_cep); + cep->listen_cep = NULL; + } } } release_cep = 1; @@ -1227,7 +1231,11 @@ static void siw_cm_llp_data_ready(struct sock *sk) if (!cep) goto out; - siw_dbg_cep(cep, "state: %d\n", cep->state); + siw_dbg_cep(cep, "cep state: %d, socket state %d\n", + cep->state, sk->sk_state); + + if (sk->sk_state != TCP_ESTABLISHED) + goto out; switch (cep->state) { case SIW_EPSTATE_RDMA_MODE: diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 1574218764e0..2916e77f589b 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -2784,7 +2784,6 @@ static int srp_abort(struct scsi_cmnd *scmnd) u32 tag; u16 ch_idx; struct srp_rdma_ch *ch; - int ret; shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); @@ -2798,19 +2797,14 @@ static int srp_abort(struct scsi_cmnd *scmnd) shost_printk(KERN_ERR, target->scsi_host, "Sending SRP abort for tag %#x\n", tag); if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, - SRP_TSK_ABORT_TASK, NULL) == 0) - ret = SUCCESS; - else if (target->rport->state == SRP_RPORT_LOST) - ret = FAST_IO_FAIL; - else - ret = FAILED; - if (ret == SUCCESS) { + SRP_TSK_ABORT_TASK, NULL) == 0) { srp_free_req(ch, req, scmnd, 0); - scmnd->result = DID_ABORT << 16; - scsi_done(scmnd); + return SUCCESS; } + if (target->rport->state == SRP_RPORT_LOST) + return FAST_IO_FAIL; - return ret; + return FAILED; } static int srp_reset_device(struct scsi_cmnd *scmnd) diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index ede380551e55..f5c21565bb3c 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -130,6 +130,7 @@ static const struct xpad_device { { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 }, { 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 }, { 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 }, + { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. 
Controller", 0, XTYPE_XBOX }, @@ -272,6 +273,7 @@ static const struct xpad_device { { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, + { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 }, { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, @@ -459,6 +461,7 @@ static const struct usb_device_id xpad_table[] = { { USB_INTERFACE_INFO('X', 'B', 0) }, /* Xbox USB-IF not-approved class */ XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 controller */ XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */ + XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */ @@ -477,6 +480,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries controllers */ XPAD_XBOXONE_VENDOR(0x10f5), /* Turtle Beach Controllers */ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ + XPAD_XBOX360_VENDOR(0x11ff), /* PXN V900 */ XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */ XPAD_XBOX360_VENDOR(0x12ab), /* Xbox 360 dance pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane Xbox 360 controllers */ diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c index c1c733a9cb89..db2ba89adaef 100644 --- a/drivers/input/misc/powermate.c +++ b/drivers/input/misc/powermate.c @@ -425,6 +425,7 @@ static void powermate_disconnect(struct usb_interface *intf) pm->requires_update = 0; usb_kill_urb(pm->irq); input_unregister_device(pm->input); + usb_kill_urb(pm->config); usb_free_urb(pm->irq); usb_free_urb(pm->config); powermate_free_buffers(interface_to_usbdev(intf), pm); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 2118b2075f43..4e38229404b4 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -2114,6 +2114,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse, psmouse->protocol_handler = elantech_process_byte; psmouse->disconnect = elantech_disconnect; psmouse->reconnect = elantech_reconnect; + psmouse->fast_reconnect = NULL; psmouse->pktsize = info->hw_version > 1 ? 
6 : 4; return 0; diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c index 7b13de979908..2a2459b1b4f2 100644 --- a/drivers/input/mouse/psmouse-smbus.c +++ b/drivers/input/mouse/psmouse-smbus.c @@ -5,7 +5,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/libps2.h> @@ -119,18 +118,13 @@ static psmouse_ret_t psmouse_smbus_process_byte(struct psmouse *psmouse) return PSMOUSE_FULL_PACKET; } -static void psmouse_activate_smbus_mode(struct psmouse_smbus_dev *smbdev) -{ - if (smbdev->need_deactivate) { - psmouse_deactivate(smbdev->psmouse); - /* Give the device time to switch into SMBus mode */ - msleep(30); - } -} - static int psmouse_smbus_reconnect(struct psmouse *psmouse) { - psmouse_activate_smbus_mode(psmouse->private); + struct psmouse_smbus_dev *smbdev = psmouse->private; + + if (smbdev->need_deactivate) + psmouse_deactivate(psmouse); + return 0; } @@ -263,7 +257,8 @@ int psmouse_smbus_init(struct psmouse *psmouse, } } - psmouse_activate_smbus_mode(smbdev); + if (need_deactivate) + psmouse_deactivate(psmouse); psmouse->private = smbdev; psmouse->protocol_handler = psmouse_smbus_process_byte; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index ada299ec5bba..22d16d80efb9 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -1623,6 +1623,7 @@ static int synaptics_init_ps2(struct psmouse *psmouse, psmouse->set_rate = synaptics_set_rate; psmouse->disconnect = synaptics_disconnect; psmouse->reconnect = synaptics_reconnect; + psmouse->fast_reconnect = NULL; psmouse->cleanup = synaptics_reset; /* Synaptics can usually stay in sync without extra help */ psmouse->resync_time = 0; @@ -1752,6 +1753,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse, psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) && !SYN_CAP_EXT_BUTTONS_STICK(info->ext_cap_10); const struct rmi_device_platform_data pdata = { + .reset_delay_ms = 30, .sensor_pdata = { .sensor_type = rmi_sensor_touchpad, .axis_align.flip_y = true, diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c index 7059a2762aeb..b0b099b5528a 100644 --- a/drivers/input/rmi4/rmi_smbus.c +++ b/drivers/input/rmi4/rmi_smbus.c @@ -235,12 +235,29 @@ static void rmi_smb_clear_state(struct rmi_smb_xport *rmi_smb) static int rmi_smb_enable_smbus_mode(struct rmi_smb_xport *rmi_smb) { - int retval; + struct i2c_client *client = rmi_smb->client; + int smbus_version; + + /* + * psmouse driver resets the controller, we only need to wait + * to give the firmware chance to fully reinitialize. + */ + if (rmi_smb->xport.pdata.reset_delay_ms) + msleep(rmi_smb->xport.pdata.reset_delay_ms); /* we need to get the smbus version to activate the touchpad */ - retval = rmi_smb_get_version(rmi_smb); - if (retval < 0) - return retval; + smbus_version = rmi_smb_get_version(rmi_smb); + if (smbus_version < 0) + return smbus_version; + + rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d", + smbus_version); + + if (smbus_version != 2 && smbus_version != 3) { + dev_err(&client->dev, "Unrecognized SMB version %d\n", + smbus_version); + return -ENODEV; + } return 0; } @@ -253,11 +270,10 @@ static int rmi_smb_reset(struct rmi_transport_dev *xport, u16 reset_addr) rmi_smb_clear_state(rmi_smb); /* - * we do not call the actual reset command, it has to be handled in - * PS/2 or there will be races between PS/2 and SMBus. 
- * PS/2 should ensure that a psmouse_reset is called before - * intializing the device and after it has been removed to be in a known - * state. + * We do not call the actual reset command, it has to be handled in + * PS/2 or there will be races between PS/2 and SMBus. PS/2 should + * ensure that a psmouse_reset is called before initializing the + * device and after it has been removed to be in a known state. */ return rmi_smb_enable_smbus_mode(rmi_smb); } @@ -272,7 +288,6 @@ static int rmi_smb_probe(struct i2c_client *client) { struct rmi_device_platform_data *pdata = dev_get_platdata(&client->dev); struct rmi_smb_xport *rmi_smb; - int smbus_version; int error; if (!pdata) { @@ -311,18 +326,9 @@ static int rmi_smb_probe(struct i2c_client *client) rmi_smb->xport.proto_name = "smb"; rmi_smb->xport.ops = &rmi_smb_ops; - smbus_version = rmi_smb_get_version(rmi_smb); - if (smbus_version < 0) - return smbus_version; - - rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d", - smbus_version); - - if (smbus_version != 2 && smbus_version != 3) { - dev_err(&client->dev, "Unrecognized SMB version %d\n", - smbus_version); - return -ENODEV; - } + error = rmi_smb_enable_smbus_mode(rmi_smb); + if (error) + return error; i2c_set_clientdata(client, rmi_smb); diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index 1724d6cb8649..9c39553d30fa 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -619,6 +619,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* Fujitsu Lifebook E5411 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU CLIENT COMPUTING LIMITED"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E5411"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOAUX) + }, + { /* Gigabyte M912 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index da9954d6df44..af32fbe57b63 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -900,6 +900,25 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts) dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n"); ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO; gpio_mapping = acpi_goodix_int_last_gpios; + } else if (ts->gpio_count == 1 && ts->gpio_int_idx == 0) { + /* + * On newer devices there is only 1 GpioInt resource and _PS0 + * does the whole reset sequence for us. + */ + acpi_device_fix_up_power(ACPI_COMPANION(dev)); + + /* + * Before the _PS0 call the int GPIO may have been in output + * mode and the call should have put the int GPIO in input mode, + * but the GPIO subsys cached state may still think it is + * in output mode, causing gpiochip_lock_as_irq() failure. + * + * Add a mapping for the int GPIO to make the + * gpiod_int = gpiod_get(..., GPIOD_IN) call succeed, + * which will explicitly set the direction to input. 
+ */ + ts->irq_pin_access_method = IRQ_PIN_ACCESS_NONE; + gpio_mapping = acpi_goodix_int_first_gpios; } else { dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n", ts->gpio_count, ts->gpio_int_idx); diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index 2082081402d3..0b8927508427 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -671,8 +671,7 @@ static int apple_dart_attach_dev(struct iommu_domain *domain, return ret; switch (domain->type) { - case IOMMU_DOMAIN_DMA: - case IOMMU_DOMAIN_UNMANAGED: + default: ret = apple_dart_domain_add_streams(dart_domain, cfg); if (ret) return ret; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 4d83edc2be99..8a16cd3ef487 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -186,6 +186,15 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd) } } +/* + * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this + * is used as a threshold to replace per-page TLBI commands to issue in the + * command queue with an address-space TLBI command, when SMMU w/o a range + * invalidation feature handles too many per-page TLBI commands, which will + * otherwise result in a soft lockup. + */ +#define CMDQ_MAX_TLBI_OPS (1 << (PAGE_SHIFT - 3)) + static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, @@ -201,8 +210,13 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn, * range. So do a simple translation here by calculating size correctly. */ size = end - start; - if (size == ULONG_MAX) - size = 0; + if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) { + if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE) + size = 0; + } else { + if (size == ULONG_MAX) + size = 0; + } if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) { if (!size) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index e82bf1c449a3..bd0a596f9863 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -1895,18 +1895,23 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd, /* Get the leaf page size */ tg = __ffs(smmu_domain->domain.pgsize_bitmap); + num_pages = size >> tg; + /* Convert page size of 12,14,16 (log2) to 1,2,3 */ cmd->tlbi.tg = (tg - 10) / 2; /* - * Determine what level the granule is at. For non-leaf, io-pgtable - * assumes .tlb_flush_walk can invalidate multiple levels at once, - * so ignore the nominal last-level granule and leave TTL=0. + * Determine what level the granule is at. For non-leaf, both + * io-pgtable and SVA pass a nominal last-level granule because + * they don't know what level(s) actually apply, so ignore that + * and leave TTL=0. However for various errata reasons we still + * want to use a range command, so avoid the SVA corner case + * where both scale and num could be 0 as well. 
*/ if (cmd->tlbi.leaf) cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)); - - num_pages = size >> tg; + else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1) + num_pages++; } cmds.num = 0; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 5db283c17e0d..3685ba90ec88 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2998,13 +2998,6 @@ static int iommu_suspend(void) struct intel_iommu *iommu = NULL; unsigned long flag; - for_each_active_iommu(iommu, drhd) { - iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32), - GFP_KERNEL); - if (!iommu->iommu_state) - goto nomem; - } - iommu_flush_all(); for_each_active_iommu(iommu, drhd) { @@ -3024,12 +3017,6 @@ static int iommu_suspend(void) raw_spin_unlock_irqrestore(&iommu->register_lock, flag); } return 0; - -nomem: - for_each_active_iommu(iommu, drhd) - kfree(iommu->iommu_state); - - return -ENOMEM; } static void iommu_resume(void) @@ -3061,9 +3048,6 @@ static void iommu_resume(void) raw_spin_unlock_irqrestore(&iommu->register_lock, flag); } - - for_each_active_iommu(iommu, drhd) - kfree(iommu->iommu_state); } static struct syscore_ops iommu_syscore_ops = { diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index c18fb699c87a..7dac94f62b4e 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -681,7 +681,7 @@ struct intel_iommu { struct iopf_queue *iopf_queue; unsigned char iopfq_name[16]; struct q_inval *qi; /* Queued invalidation info */ - u32 *iommu_state; /* Store iommu states between suspend and resume.*/ + u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/ #ifdef CONFIG_IRQ_REMAP struct ir_table *ir_table; /* Interrupt remapping info */ diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 640275873a27..fab6c347ce57 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -262,7 +262,7 @@ struct mtk_iommu_data { struct device *smicomm_dev; struct mtk_iommu_bank_data *bank; - struct mtk_iommu_domain *share_dom; /* For 2 HWs share pgtable */ + struct mtk_iommu_domain *share_dom; struct regmap *pericfg; struct mutex mutex; /* Protect m4u_group/m4u_dom above */ @@ -643,8 +643,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, struct mtk_iommu_domain *share_dom = data->share_dom; const struct mtk_iommu_iova_region *region; - /* Always use share domain in sharing pgtable case */ - if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) { + /* Share pgtable when 2 MM IOMMU share the pgtable or one IOMMU use multiple iova ranges */ + if (share_dom) { dom->iop = share_dom->iop; dom->cfg = share_dom->cfg; dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap; @@ -677,8 +677,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, /* Update our support page sizes bitmap */ dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap; - if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) - data->share_dom = dom; + data->share_dom = dom; update_iova_region: /* Update the iova region for this domain */ diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h index 3db4592cda1c..f407cce9ecaa 100644 --- a/drivers/irqchip/irq-gic-common.h +++ b/drivers/irqchip/irq-gic-common.h @@ -29,4 +29,8 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, void gic_enable_of_quirks(const struct device_node *np, const struct gic_quirk *quirks, void *data); +#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) 
+#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) +#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2) + #endif /* _IRQ_GIC_COMMON_H */ diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index e0c2b10d154d..75a2dd550625 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -44,10 +44,6 @@ #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) #define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3) -#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) -#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) -#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2) - #define RD_LOCAL_LPI_ENABLED BIT(0) #define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1) #define RD_LOCAL_MEMRESERVE_DONE BIT(2) @@ -4754,6 +4750,14 @@ static bool __maybe_unused its_enable_rk3588001(void *data) return true; } +static bool its_set_non_coherent(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + static const struct gic_quirk its_quirks[] = { #ifdef CONFIG_CAVIUM_ERRATUM_22375 { @@ -4809,6 +4813,11 @@ static const struct gic_quirk its_quirks[] = { }, #endif { + .desc = "ITS: non-coherent attribute", + .property = "dma-noncoherent", + .init = its_set_non_coherent, + }, + { } }; @@ -4817,6 +4826,10 @@ static void its_enable_quirks(struct its_node *its) u32 iidr = readl_relaxed(its->base + GITS_IIDR); gic_enable_quirks(iidr, its_quirks, its); + + if (is_of_node(its->fwnode_handle)) + gic_enable_of_quirks(to_of_node(its->fwnode_handle), + its_quirks, its); } static int its_save_disable(void) @@ -4952,7 +4965,7 @@ out_unmap: return NULL; } -static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) +static int its_init_domain(struct its_node *its) { struct irq_domain *inner_domain; struct msi_domain_info *info; @@ -4966,7 +4979,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) inner_domain = irq_domain_create_hierarchy(its_parent, its->msi_domain_flags, 0, - handle, &its_domain_ops, + its->fwnode_handle, &its_domain_ops, info); if (!inner_domain) { kfree(info); @@ -5017,8 +5030,7 @@ static int its_init_vpe_domain(void) return 0; } -static int __init its_compute_its_list_map(struct resource *res, - void __iomem *its_base) +static int __init its_compute_its_list_map(struct its_node *its) { int its_number; u32 ctlr; @@ -5032,15 +5044,15 @@ static int __init its_compute_its_list_map(struct resource *res, its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); if (its_number >= GICv4_ITS_LIST_MAX) { pr_err("ITS@%pa: No ITSList entry available!\n", - &res->start); + &its->phys_base); return -EINVAL; } - ctlr = readl_relaxed(its_base + GITS_CTLR); + ctlr = readl_relaxed(its->base + GITS_CTLR); ctlr &= ~GITS_CTLR_ITS_NUMBER; ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; - writel_relaxed(ctlr, its_base + GITS_CTLR); - ctlr = readl_relaxed(its_base + GITS_CTLR); + writel_relaxed(ctlr, its->base + GITS_CTLR); + ctlr = readl_relaxed(its->base + GITS_CTLR); if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { its_number = ctlr & GITS_CTLR_ITS_NUMBER; its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; @@ -5048,75 +5060,50 @@ static int __init its_compute_its_list_map(struct resource *res, if (test_and_set_bit(its_number, &its_list_map)) { pr_err("ITS@%pa: Duplicate ITSList entry %d\n", - &res->start, its_number); + &its->phys_base, its_number); return -EINVAL; } return its_number; } -static int __init its_probe_one(struct resource *res, - 
struct fwnode_handle *handle, int numa_node) +static int __init its_probe_one(struct its_node *its) { - struct its_node *its; - void __iomem *its_base; - u64 baser, tmp, typer; + u64 baser, tmp; struct page *page; u32 ctlr; int err; - its_base = its_map_one(res, &err); - if (!its_base) - return err; - - pr_info("ITS %pR\n", res); - - its = kzalloc(sizeof(*its), GFP_KERNEL); - if (!its) { - err = -ENOMEM; - goto out_unmap; - } - - raw_spin_lock_init(&its->lock); - mutex_init(&its->dev_alloc_lock); - INIT_LIST_HEAD(&its->entry); - INIT_LIST_HEAD(&its->its_device_list); - typer = gic_read_typer(its_base + GITS_TYPER); - its->typer = typer; - its->base = its_base; - its->phys_base = res->start; if (is_v4(its)) { - if (!(typer & GITS_TYPER_VMOVP)) { - err = its_compute_its_list_map(res, its_base); + if (!(its->typer & GITS_TYPER_VMOVP)) { + err = its_compute_its_list_map(its); if (err < 0) - goto out_free_its; + goto out; its->list_nr = err; pr_info("ITS@%pa: Using ITS number %d\n", - &res->start, err); + &its->phys_base, err); } else { - pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); + pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base); } if (is_v4_1(its)) { - u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer); + u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); - its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K); + its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K); if (!its->sgir_base) { err = -ENOMEM; - goto out_free_its; + goto out; } - its->mpidr = readl_relaxed(its_base + GITS_MPIDR); + its->mpidr = readl_relaxed(its->base + GITS_MPIDR); pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", - &res->start, its->mpidr, svpet); + &its->phys_base, its->mpidr, svpet); } } - its->numa_node = numa_node; - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, get_order(ITS_CMD_QUEUE_SZ)); if (!page) { @@ -5125,12 +5112,9 @@ static int __init its_probe_one(struct resource *res, } its->cmd_base = (void *)page_address(page); its->cmd_write = its->cmd_base; - its->fwnode_handle = handle; its->get_msi_base = its_irq_get_msi_base; its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI; - its_enable_quirks(its); - err = its_alloc_tables(its); if (err) goto out_free_cmd; @@ -5174,7 +5158,7 @@ static int __init its_probe_one(struct resource *res, ctlr |= GITS_CTLR_ImDe; writel_relaxed(ctlr, its->base + GITS_CTLR); - err = its_init_domain(handle, its); + err = its_init_domain(its); if (err) goto out_free_tables; @@ -5191,11 +5175,8 @@ out_free_cmd: out_unmap_sgir: if (its->sgir_base) iounmap(its->sgir_base); -out_free_its: - kfree(its); -out_unmap: - iounmap(its_base); - pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); +out: + pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err); return err; } @@ -5356,10 +5337,53 @@ static const struct of_device_id its_device_id[] = { {}, }; +static struct its_node __init *its_node_init(struct resource *res, + struct fwnode_handle *handle, int numa_node) +{ + void __iomem *its_base; + struct its_node *its; + int err; + + its_base = its_map_one(res, &err); + if (!its_base) + return NULL; + + pr_info("ITS %pR\n", res); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) + goto out_unmap; + + raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + + its->typer = gic_read_typer(its_base + GITS_TYPER); + its->base = its_base; + its->phys_base = res->start; + + its->numa_node = numa_node; + its->fwnode_handle = handle; + + return 
its; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static void its_node_destroy(struct its_node *its) +{ + iounmap(its->base); + kfree(its); +} + static int __init its_of_probe(struct device_node *node) { struct device_node *np; struct resource res; + int err; /* * Make sure *all* the ITS are reset before we probe any, as @@ -5369,8 +5393,6 @@ static int __init its_of_probe(struct device_node *node) */ for (np = of_find_matching_node(node, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { - int err; - if (!of_device_is_available(np) || !of_property_read_bool(np, "msi-controller") || of_address_to_resource(np, 0, &res)) @@ -5383,6 +5405,8 @@ static int __init its_of_probe(struct device_node *node) for (np = of_find_matching_node(node, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { + struct its_node *its; + if (!of_device_is_available(np)) continue; if (!of_property_read_bool(np, "msi-controller")) { @@ -5396,7 +5420,17 @@ static int __init its_of_probe(struct device_node *node) continue; } - its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); + + its = its_node_init(&res, &np->fwnode, of_node_to_nid(np)); + if (!its) + return -ENOMEM; + + its_enable_quirks(its); + err = its_probe_one(its); + if (err) { + its_node_destroy(its); + return err; + } } return 0; } @@ -5508,6 +5542,7 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, { struct acpi_madt_generic_translator *its_entry; struct fwnode_handle *dom_handle; + struct its_node *its; struct resource res; int err; @@ -5532,11 +5567,18 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, goto dom_err; } - err = its_probe_one(&res, dom_handle, - acpi_get_its_numa_node(its_entry->translation_id)); + its = its_node_init(&res, dom_handle, + acpi_get_its_numa_node(its_entry->translation_id)); + if (!its) { + err = -ENOMEM; + goto node_err; + } + + err = its_probe_one(its); if (!err) return 0; +node_err: iort_deregister_domain_token(its_entry->translation_id); dom_err: irq_domain_free_fwnode(dom_handle); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index eedfa8e9f077..f59ac9586b7b 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1857,6 +1857,14 @@ static bool gic_enable_quirk_arm64_2941627(void *data) return true; } +static bool rd_set_non_coherent(void *data) +{ + struct gic_chip_data *d = data; + + d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + static const struct gic_quirk gic_quirks[] = { { .desc = "GICv3: Qualcomm MSM8996 broken firmware", @@ -1924,6 +1932,11 @@ static const struct gic_quirk gic_quirks[] = { .init = gic_enable_quirk_arm64_2941627, }, { + .desc = "GICv3: non-coherent attribute", + .property = "dma-noncoherent", + .init = rd_set_non_coherent, + }, + { } }; diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c index 4bbfa2b0a4df..96f4e322ed6b 100644 --- a/drivers/irqchip/irq-renesas-rzg2l.c +++ b/drivers/irqchip/irq-renesas-rzg2l.c @@ -118,7 +118,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d) raw_spin_lock(&priv->lock); reg = readl_relaxed(priv->base + TSSR(tssr_index)); - reg &= ~(TSSEL_MASK << tssr_offset); + reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset)); writel_relaxed(reg, priv->base + TSSR(tssr_index)); raw_spin_unlock(&priv->lock); } @@ -130,8 +130,8 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d) unsigned int hw_irq = irqd_to_hwirq(d); if (hw_irq >= IRQC_TINT_START && 
hw_irq < IRQC_NUM_IRQ) { + unsigned long tint = (uintptr_t)irq_data_get_irq_chip_data(d); struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); - unsigned long tint = (uintptr_t)d->chip_data; u32 offset = hw_irq - IRQC_TINT_START; u32 tssr_offset = TSSR_OFFSET(offset); u8 tssr_index = TSSR_INDEX(offset); diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c index 4adeee1bc391..e8d01b14ccdd 100644 --- a/drivers/irqchip/irq-riscv-intc.c +++ b/drivers/irqchip/irq-riscv-intc.c @@ -155,8 +155,16 @@ static int __init riscv_intc_init(struct device_node *node, * for each INTC DT node. We only need to do INTC initialization * for the INTC DT node belonging to boot CPU (or boot HART). */ - if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) + if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) { + /* + * The INTC nodes of each CPU are suppliers for downstream + * interrupt controllers (such as PLIC, IMSIC and APLIC + * direct-mode) so we should mark an INTC node as initialized + * if we are not creating IRQ domain for it. + */ + fwnode_dev_initialized(of_fwnode_handle(node), true); return 0; + } return riscv_intc_init_common(of_node_to_fwnode(node)); } diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index d8ba5fba7450..971240e2e31b 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -460,6 +460,7 @@ static const struct irq_domain_ops irq_exti_domain_ops = { .map = irq_map_generic_chip, .alloc = stm32_exti_alloc, .free = stm32_exti_free, + .xlate = irq_domain_xlate_twocell, }; static void stm32_irq_ack(struct irq_data *d) diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c index a32c0d28d038..74b2f124116e 100644 --- a/drivers/irqchip/qcom-pdc.c +++ b/drivers/irqchip/qcom-pdc.c @@ -22,9 +22,20 @@ #define PDC_MAX_GPIO_IRQS 256 +/* Valid only on HW version < 3.2 */ #define IRQ_ENABLE_BANK 0x10 #define IRQ_i_CFG 0x110 +/* Valid only on HW version >= 3.2 */ +#define IRQ_i_CFG_IRQ_ENABLE 3 + +#define IRQ_i_CFG_TYPE_MASK GENMASK(2, 0) + +#define PDC_VERSION_REG 0x1000 + +/* Notable PDC versions */ +#define PDC_VERSION_3_2 0x30200 + struct pdc_pin_region { u32 pin_base; u32 parent_base; @@ -37,6 +48,7 @@ static DEFINE_RAW_SPINLOCK(pdc_lock); static void __iomem *pdc_base; static struct pdc_pin_region *pdc_region; static int pdc_region_cnt; +static unsigned int pdc_version; static void pdc_reg_write(int reg, u32 i, u32 val) { @@ -48,20 +60,32 @@ static u32 pdc_reg_read(int reg, u32 i) return readl_relaxed(pdc_base + reg + i * sizeof(u32)); } -static void pdc_enable_intr(struct irq_data *d, bool on) +static void __pdc_enable_intr(int pin_out, bool on) { - int pin_out = d->hwirq; unsigned long enable; - unsigned long flags; - u32 index, mask; - index = pin_out / 32; - mask = pin_out % 32; + if (pdc_version < PDC_VERSION_3_2) { + u32 index, mask; + + index = pin_out / 32; + mask = pin_out % 32; + + enable = pdc_reg_read(IRQ_ENABLE_BANK, index); + __assign_bit(mask, &enable, on); + pdc_reg_write(IRQ_ENABLE_BANK, index, enable); + } else { + enable = pdc_reg_read(IRQ_i_CFG, pin_out); + __assign_bit(IRQ_i_CFG_IRQ_ENABLE, &enable, on); + pdc_reg_write(IRQ_i_CFG, pin_out, enable); + } +} + +static void pdc_enable_intr(struct irq_data *d, bool on) +{ + unsigned long flags; raw_spin_lock_irqsave(&pdc_lock, flags); - enable = pdc_reg_read(IRQ_ENABLE_BANK, index); - __assign_bit(mask, &enable, on); - pdc_reg_write(IRQ_ENABLE_BANK, index, enable); + __pdc_enable_intr(d->hwirq, on); 
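/*
 * Annotation on the qcom-pdc hunk above (not part of the upstream patch):
 * __pdc_enable_intr() hides a register-layout change. On PDC hardware
 * older than v3.2 the enable bit for pin N lives in the banked
 * IRQ_ENABLE_BANK registers at offset 0x10 + (N / 32) * 4, bit N % 32.
 * From v3.2 onwards it is bit 3 (IRQ_i_CFG_IRQ_ENABLE) of the pin's own
 * IRQ_i_CFG register at offset 0x110 + N * 4. For example, pin 37 maps
 * to offset 0x14, bit 5 on old hardware, and to offset 0x1a4, bit 3 on
 * v3.2 and later. The version is read once from PDC_VERSION_REG (0x1000)
 * in qcom_pdc_init() below.
 */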
raw_spin_unlock_irqrestore(&pdc_lock, flags); } @@ -142,6 +166,7 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type) } old_pdc_type = pdc_reg_read(IRQ_i_CFG, d->hwirq); + pdc_type |= (old_pdc_type & ~IRQ_i_CFG_TYPE_MASK); pdc_reg_write(IRQ_i_CFG, d->hwirq, pdc_type); ret = irq_chip_set_type_parent(d, type); @@ -246,7 +271,6 @@ static const struct irq_domain_ops qcom_pdc_ops = { static int pdc_setup_pin_mapping(struct device_node *np) { int ret, n, i; - u32 irq_index, reg_index, val; n = of_property_count_elems_of_size(np, "qcom,pdc-ranges", sizeof(u32)); if (n <= 0 || n % 3) @@ -276,29 +300,38 @@ static int pdc_setup_pin_mapping(struct device_node *np) if (ret) return ret; - for (i = 0; i < pdc_region[n].cnt; i++) { - reg_index = (i + pdc_region[n].pin_base) >> 5; - irq_index = (i + pdc_region[n].pin_base) & 0x1f; - val = pdc_reg_read(IRQ_ENABLE_BANK, reg_index); - val &= ~BIT(irq_index); - pdc_reg_write(IRQ_ENABLE_BANK, reg_index, val); - } + for (i = 0; i < pdc_region[n].cnt; i++) + __pdc_enable_intr(i + pdc_region[n].pin_base, 0); } return 0; } +#define QCOM_PDC_SIZE 0x30000 + static int qcom_pdc_init(struct device_node *node, struct device_node *parent) { struct irq_domain *parent_domain, *pdc_domain; + resource_size_t res_size; + struct resource res; int ret; - pdc_base = of_iomap(node, 0); + /* compat with old sm8150 DT which had very small region for PDC */ + if (of_address_to_resource(node, 0, &res)) + return -EINVAL; + + res_size = max_t(resource_size_t, resource_size(&res), QCOM_PDC_SIZE); + if (res_size > resource_size(&res)) + pr_warn("%pOF: invalid reg size, please fix DT\n", node); + + pdc_base = ioremap(res.start, res_size); if (!pdc_base) { pr_err("%pOF: unable to map PDC registers\n", node); return -ENXIO; } + pdc_version = pdc_reg_read(PDC_VERSION_REG, 0); + parent_domain = irq_find_host(parent); if (!parent_domain) { pr_err("%pOF: unable to find PDC's parent domain\n", node); diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 04f9ea675f2c..214ed81eb0e9 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -479,10 +479,6 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data, led_parse_fwnode_props(dev, fwnode, &props); - /* We want to label LEDs that can produce full range of colors - * as RGB, not multicolor */ - BUG_ON(props.color == LED_COLOR_ID_MULTI); - if (props.label) { /* * If init_data.devicename is NULL, then it indicates that diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c index 978fdfc19a06..0cac5bead84f 100644 --- a/drivers/mcb/mcb-core.c +++ b/drivers/mcb/mcb-core.c @@ -387,17 +387,13 @@ EXPORT_SYMBOL_NS_GPL(mcb_free_dev, MCB); static int __mcb_bus_add_devices(struct device *dev, void *data) { - struct mcb_device *mdev = to_mcb_device(dev); int retval; - if (mdev->is_added) - return 0; - retval = device_attach(dev); - if (retval < 0) + if (retval < 0) { dev_err(dev, "Error adding device (%d)\n", retval); - - mdev->is_added = true; + return retval; + } return 0; } diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c index 2aef990f379f..656b6b71c768 100644 --- a/drivers/mcb/mcb-parse.c +++ b/drivers/mcb/mcb-parse.c @@ -99,8 +99,6 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, mdev->mem.end = mdev->mem.start + size - 1; mdev->mem.flags = IORESOURCE_MEM; - mdev->is_added = false; - ret = mcb_device_register(bus, mdev); if (ret < 0) goto err; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index f2662c21a6df..5315fd261c23 100644 --- 
a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -753,7 +753,8 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, int err; u8 *buf; - reqsize = ALIGN(crypto_skcipher_reqsize(tfm), __alignof__(__le64)); + reqsize = sizeof(*req) + crypto_skcipher_reqsize(tfm); + reqsize = ALIGN(reqsize, __alignof__(__le64)); req = kmalloc(reqsize + cc->iv_size, GFP_NOIO); if (!req) diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index ad8e670a2f9b..b487f7acc860 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -748,17 +748,16 @@ err: /* * Cleanup zoned device information. */ -static void dmz_put_zoned_device(struct dm_target *ti) +static void dmz_put_zoned_devices(struct dm_target *ti) { struct dmz_target *dmz = ti->private; int i; - for (i = 0; i < dmz->nr_ddevs; i++) { - if (dmz->ddev[i]) { + for (i = 0; i < dmz->nr_ddevs; i++) + if (dmz->ddev[i]) dm_put_device(ti, dmz->ddev[i]); - dmz->ddev[i] = NULL; - } - } + + kfree(dmz->ddev); } static int dmz_fixup_devices(struct dm_target *ti) @@ -948,7 +947,7 @@ err_bio: err_meta: dmz_dtr_metadata(dmz->metadata); err_dev: - dmz_put_zoned_device(ti); + dmz_put_zoned_devices(ti); err: kfree(dmz->dev); kfree(dmz); @@ -978,7 +977,7 @@ static void dmz_dtr(struct dm_target *ti) bioset_exit(&dmz->bio_set); - dmz_put_zoned_device(ti); + dmz_put_zoned_devices(ti); mutex_destroy(&dmz->chunk_lock); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4cb9c608ee19..284cd71bcc68 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -854,6 +854,13 @@ struct stripe_head *raid5_get_active_stripe(struct r5conf *conf, set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state); r5l_wake_reclaim(conf->log, 0); + + /* release batch_last before wait to avoid risk of deadlock */ + if (ctx && ctx->batch_last) { + raid5_release_stripe(ctx->batch_last); + ctx->batch_last = NULL; + } + wait_event_lock_irq(conf->wait_for_stripe, is_inactive_blocked(conf, hash), *(conf->hash_locks + hash)); diff --git a/drivers/media/i2c/ov8858.c b/drivers/media/i2c/ov8858.c index 3af6125a2eee..4d9fd76e2f60 100644 --- a/drivers/media/i2c/ov8858.c +++ b/drivers/media/i2c/ov8858.c @@ -1850,9 +1850,9 @@ static int ov8858_parse_of(struct ov8858 *ov8858) } ret = v4l2_fwnode_endpoint_parse(endpoint, &vep); + fwnode_handle_put(endpoint); if (ret) { dev_err(dev, "Failed to parse endpoint: %d\n", ret); - fwnode_handle_put(endpoint); return ret; } @@ -1864,12 +1864,9 @@ static int ov8858_parse_of(struct ov8858 *ov8858) default: dev_err(dev, "Unsupported number of data lanes %u\n", ov8858->num_lanes); - fwnode_handle_put(endpoint); return -EINVAL; } - ov8858->subdev.fwnode = endpoint; - return 0; } @@ -1913,7 +1910,7 @@ static int ov8858_probe(struct i2c_client *client) ret = ov8858_init_ctrls(ov8858); if (ret) - goto err_put_fwnode; + return ret; sd = &ov8858->subdev; sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; @@ -1964,8 +1961,6 @@ err_clean_entity: media_entity_cleanup(&sd->entity); err_free_handler: v4l2_ctrl_handler_free(&ov8858->ctrl_handler); -err_put_fwnode: - fwnode_handle_put(ov8858->subdev.fwnode); return ret; } @@ -1978,7 +1973,6 @@ static void ov8858_remove(struct i2c_client *client) v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(&ov8858->ctrl_handler); - fwnode_handle_put(ov8858->subdev.fwnode); pm_runtime_disable(&client->dev); if (!pm_runtime_status_suspended(&client->dev)) diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig index 
e113902fa806..ee4684159d3d 100644 --- a/drivers/media/pci/intel/Kconfig +++ b/drivers/media/pci/intel/Kconfig @@ -1,11 +1,19 @@ # SPDX-License-Identifier: GPL-2.0-only + +source "drivers/media/pci/intel/ipu3/Kconfig" +source "drivers/media/pci/intel/ivsc/Kconfig" + config IPU_BRIDGE - tristate + tristate "Intel IPU Bridge" depends on I2C && ACPI help - This is a helper module for the IPU bridge, which can be - used by ipu3 and other drivers. In order to handle module - dependencies, this is selected by each driver that needs it. + The IPU bridge is a helper library for Intel IPU drivers to + function on systems shipped with Windows. -source "drivers/media/pci/intel/ipu3/Kconfig" -source "drivers/media/pci/intel/ivsc/Kconfig" + Currently used by the ipu3-cio2 and atomisp drivers. + + Supported systems include: + + - Microsoft Surface models (except Surface Pro 3) + - The Lenovo Miix line (for example the 510, 520, 710 and 720) + - Dell 7285 diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index 1bde8b6e0b11..e38198e259c0 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -107,8 +107,10 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1) /* camera sensor depends on IVSC in DSDT if exist */ for_each_acpi_consumer_dev(ivsc_adev, consumer) - if (consumer->handle == handle) + if (consumer->handle == handle) { + acpi_dev_put(consumer); return ivsc_adev; + } } return NULL; diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig index 0951545eab21..c0a250daa927 100644 --- a/drivers/media/pci/intel/ipu3/Kconfig +++ b/drivers/media/pci/intel/ipu3/Kconfig @@ -2,13 +2,13 @@ config VIDEO_IPU3_CIO2 tristate "Intel ipu3-cio2 driver" depends on VIDEO_DEV && PCI + depends on IPU_BRIDGE || !IPU_BRIDGE depends on ACPI || COMPILE_TEST depends on X86 select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API select V4L2_FWNODE select VIDEOBUF2_DMA_SG - select IPU_BRIDGE if CIO2_BRIDGE help This is the Intel IPU3 CIO2 CSI-2 receiver unit, found in Intel @@ -18,22 +18,3 @@ config VIDEO_IPU3_CIO2 Say Y or M here if you have a Skylake/Kaby Lake SoC with MIPI CSI-2 connected camera. The module will be called ipu3-cio2. - -config CIO2_BRIDGE - bool "IPU3 CIO2 Sensors Bridge" - depends on VIDEO_IPU3_CIO2 && ACPI - depends on I2C - help - This extension provides an API for the ipu3-cio2 driver to create - connections to cameras that are hidden in the SSDB buffer in ACPI. - It can be used to enable support for cameras in detachable / hybrid - devices that ship with Windows. - - Say Y here if your device is a detachable / hybrid laptop that comes - with Windows installed by the OEM, for example: - - - Microsoft Surface models (except Surface Pro 3) - - The Lenovo Miix line (for example the 510, 520, 710 and 720) - - Dell 7285 - - If in doubt, say N here. diff --git a/drivers/media/pci/intel/ivsc/Kconfig b/drivers/media/pci/intel/ivsc/Kconfig index 212753450576..a8cb981544f7 100644 --- a/drivers/media/pci/intel/ivsc/Kconfig +++ b/drivers/media/pci/intel/ivsc/Kconfig @@ -6,7 +6,7 @@ config INTEL_VSC depends on INTEL_MEI && ACPI && VIDEO_DEV select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API - select V4L2_ASYNC + select V4L2_FWNODE help This adds support for Intel Visual Sensing Controller (IVSC). 
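The ipu-bridge hunk above fixes an ACPI reference leak: for_each_acpi_consumer_dev() holds a reference on the current consumer and only drops it when the loop advances, so returning from inside the loop has to drop it explicitly. Below is a minimal sketch of the resulting lookup pattern; the function and parameter names are illustrative, only the iterator and acpi_dev_put() come from the hunk itself, and in the driver this loop sits inside a for_each_acpi_dev_match() walk over the known IVSC ACPI IDs.

#include <linux/acpi.h>

/* Return the IVSC device that has @sensor_handle as a consumer, or NULL. */
static struct acpi_device *find_ivsc_for_sensor(struct acpi_device *ivsc_adev,
						acpi_handle sensor_handle)
{
	struct acpi_device *consumer;

	/* Walk the ACPI devices that depend on @ivsc_adev (_DEP in the DSDT). */
	for_each_acpi_consumer_dev(ivsc_adev, consumer) {
		if (consumer->handle == sensor_handle) {
			/* Early return: drop the reference the iterator still holds. */
			acpi_dev_put(consumer);
			return ivsc_adev;
		}
	}

	return NULL;
}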
diff --git a/drivers/media/platform/intel/pxa_camera.c b/drivers/media/platform/intel/pxa_camera.c index 6e6caf50e11e..59b89e421dc2 100644 --- a/drivers/media/platform/intel/pxa_camera.c +++ b/drivers/media/platform/intel/pxa_camera.c @@ -2398,7 +2398,7 @@ static int pxa_camera_probe(struct platform_device *pdev) PXA_CAM_DRV_NAME, pcdev); if (err) { dev_err(&pdev->dev, "Camera interrupt register failed\n"); - goto exit_v4l2_device_unregister; + goto exit_deactivate; } pcdev->notifier.ops = &pxa_camera_sensor_ops; diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c index d299cc2962a5..ae6290d28f8e 100644 --- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c +++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c @@ -138,7 +138,8 @@ int vpu_enc_init(struct venc_vpu_inst *vpu) vpu->ctx->vpu_inst = vpu; status = mtk_vcodec_fw_ipi_register(vpu->ctx->dev->fw_handler, vpu->id, - vpu_enc_ipi_handler, "venc", NULL); + vpu_enc_ipi_handler, "venc", + vpu->ctx->dev); if (status) { mtk_venc_err(vpu->ctx, "vpu_ipi_register fail %d", status); diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c index 4285770fde18..996684a73038 100644 --- a/drivers/media/platform/xilinx/xilinx-vipp.c +++ b/drivers/media/platform/xilinx/xilinx-vipp.c @@ -55,11 +55,18 @@ xvip_graph_find_entity(struct xvip_composite_device *xdev, { struct xvip_graph_entity *entity; struct v4l2_async_connection *asd; - - list_for_each_entry(asd, &xdev->notifier.done_list, asc_entry) { - entity = to_xvip_entity(asd); - if (entity->asd.match.fwnode == fwnode) - return entity; + struct list_head *lists[] = { + &xdev->notifier.done_list, + &xdev->notifier.waiting_list + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(lists); i++) { + list_for_each_entry(asd, lists[i], asc_entry) { + entity = to_xvip_entity(asd); + if (entity->asd.match.fwnode == fwnode) + return entity; + } } return NULL; diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index b92348ad61f6..31752c06d1f0 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -502,6 +502,13 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg, V4L2_SUBDEV_CLIENT_CAP_STREAMS; int rval; + /* + * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS. + * Remove this when the API is no longer experimental. + */ + if (!v4l2_subdev_enable_streams_api) + streams_subdev = false; + switch (cmd) { case VIDIOC_SUBDEV_QUERYCAP: { struct v4l2_subdev_capability *cap = arg; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index aea95745c73f..90ce58fd629e 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -241,6 +241,7 @@ config MFD_CS42L43 tristate select MFD_CORE select REGMAP + select REGMAP_IRQ config MFD_CS42L43_I2C tristate "Cirrus Logic CS42L43 (I2C)" diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 8b91a55ec0d2..8ee51e49fced 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -894,6 +894,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, return -EINVAL; } + /* UBI cannot work on flashes with zero erasesize. 
*/ + if (!mtd->erasesize) { + pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n", + mtd->index); + return -EINVAL; + } + if (ubi_num == UBI_DEV_NUM_AUTO) { /* Search for an empty slot in the @ubi_devices array */ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 649453a3c858..f8cde9f9f554 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -190,7 +190,7 @@ config CAN_SLCAN config CAN_SUN4I tristate "Allwinner A10 CAN controller" - depends on MACH_SUN4I || MACH_SUN7I || RISCV || COMPILE_TEST + depends on MACH_SUN4I || MACH_SUN7I || (RISCV && ARCH_SUNXI) || COMPILE_TEST help Say Y here if you want to use CAN controller found on Allwinner A10/A20/D1 SoCs. diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index add39e922b89..d15f85a40c1e 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -348,7 +348,7 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = { static struct flexcan_devtype_data fsl_imx93_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | - FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_AUTO_STOP_MODE | + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR | FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, @@ -544,11 +544,6 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) { regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 1 << priv->stm.req_bit, 1 << priv->stm.req_bit); - } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) { - /* For the auto stop mode, software do nothing, hardware will cover - * all the operation automatically after system go into low power mode. - */ - return 0; } return flexcan_low_power_enter_ack(priv); @@ -574,12 +569,6 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv) reg_mcr &= ~FLEXCAN_MCR_SLF_WAK; priv->write(reg_mcr, &regs->mcr); - /* For the auto stop mode, hardware will exist stop mode - * automatically after system go out of low power mode. - */ - if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) - return 0; - return flexcan_low_power_exit_ack(priv); } @@ -1994,13 +1983,18 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev) ret = flexcan_setup_stop_mode_scfw(pdev); else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) ret = flexcan_setup_stop_mode_gpr(pdev); - else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) - ret = 0; else /* return 0 directly if doesn't support stop mode feature */ return 0; - if (ret) + /* If ret is -EINVAL, this means SoC claim to support stop mode, but + * dts file lack the stop mode property definition. For this case, + * directly return 0, this will skip the wakeup capable setting and + * will not block the driver probe.
+ */ + if (ret == -EINVAL) + return 0; + else if (ret) return ret; device_set_wakeup_capable(&pdev->dev, true); @@ -2320,16 +2314,8 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device) if (netif_running(dev)) { int err; - if (device_may_wakeup(device)) { + if (device_may_wakeup(device)) flexcan_enable_wakeup_irq(priv, true); - /* For auto stop mode, need to keep the clock on before - * system go into low power mode. After system go into - * low power mode, hardware will config the flexcan into - * stop mode, and gate off the clock automatically. - */ - if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) - return 0; - } err = pm_runtime_force_suspend(device); if (err) @@ -2347,15 +2333,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) if (netif_running(dev)) { int err; - /* For the wakeup in auto stop mode, no need to gate on the - * clock here, hardware will do this automatically. - */ - if (!(device_may_wakeup(device) && - priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)) { - err = pm_runtime_force_resume(device); - if (err) - return err; - } + err = pm_runtime_force_resume(device); + if (err) + return err; if (device_may_wakeup(device)) flexcan_enable_wakeup_irq(priv, false); diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h index 91402977780b..025c3417031f 100644 --- a/drivers/net/can/flexcan/flexcan.h +++ b/drivers/net/can/flexcan/flexcan.h @@ -68,8 +68,6 @@ #define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15) /* Device supports RX via FIFO */ #define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16) -/* auto enter stop mode to support wakeup */ -#define FLEXCAN_QUIRK_AUTO_STOP_MODE BIT(17) struct flexcan_devtype_data { u32 quirks; /* quirks needed for different IP cores */ diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c index 8a4143809d33..ae8c42f5debd 100644 --- a/drivers/net/can/m_can/tcan4x5x-core.c +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -125,7 +125,7 @@ static const struct tcan4x5x_version_info tcan4x5x_versions[] = { }, [TCAN4553] = { .name = "4553", - .id2_register = 0x32353534, + .id2_register = 0x33353534, }, /* generic version with no id2_register at the end */ [TCAN4X5X] = { diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 0ada0e160e93..743c2eb62b87 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -392,7 +392,13 @@ static irqreturn_t sja1000_reset_interrupt(int irq, void *dev_id) struct net_device *dev = (struct net_device *)dev_id; netdev_dbg(dev, "performing a soft reset upon overrun\n"); - sja1000_start(dev); + + netif_tx_lock(dev); + + can_free_echo_skb(dev, 0, NULL); + sja1000_set_mode(dev, CAN_MODE_START); + + netif_tx_unlock(dev); return IRQ_HANDLED; } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 52a99d8bada0..ab434a77b059 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2958,14 +2958,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) * from the wrong location resulting in the switch booting * to wrong mode and inoperable. 
*/ - mv88e6xxx_g1_wait_eeprom_done(chip); + if (chip->info->ops->get_eeprom) + mv88e6xxx_g2_eeprom_wait(chip); gpiod_set_value_cansleep(gpiod, 1); usleep_range(10000, 20000); gpiod_set_value_cansleep(gpiod, 0); usleep_range(10000, 20000); - mv88e6xxx_g1_wait_eeprom_done(chip); + if (chip->info->ops->get_eeprom) + mv88e6xxx_g2_eeprom_wait(chip); } } diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 2fa55a643591..174c773b38c2 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1); } -void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip) -{ - const unsigned long timeout = jiffies + 1 * HZ; - u16 val; - int err; - - /* Wait up to 1 second for the switch to finish reading the - * EEPROM. - */ - while (time_before(jiffies, timeout)) { - err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val); - if (err) { - dev_err(chip->dev, "Error reading status"); - return; - } - - /* If the switch is still resetting, it may not - * respond on the bus, and so MDIO read returns - * 0xffff. Differentiate between that, and waiting for - * the EEPROM to be done by bit 0 being set. - */ - if (val != 0xffff && - val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)) - return; - - usleep_range(1000, 2000); - } - - dev_err(chip->dev, "Timeout waiting for EEPROM done"); -} - /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1 * Offset 0x02: Switch MAC Address Register Bytes 2 & 3 * Offset 0x03: Switch MAC Address Register Bytes 4 & 5 diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index c99ddd117fe6..1095261f5b49 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -282,7 +282,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip); -void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 937a01f2ba75..b2b5f6ba438f 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -340,7 +340,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) * Offset 0x15: EEPROM Addr (for 8-bit data access) */ -static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) +int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) { int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY); int err; diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 7e091965582b..d9434f7cae53 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -365,6 +365,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target, int port); +int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip); extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops; diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index de1dc22cf683..4ce68e655a63 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c 
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -505,8 +505,8 @@ qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len, void *val_buf, size_t val_len) { int i, count = val_len / sizeof(u32), ret; - u32 reg = *(u32 *)reg_buf & U16_MAX; struct qca8k_priv *priv = ctx; + u32 reg = *(u16 *)reg_buf; if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val_buf, val_len)) @@ -527,8 +527,8 @@ qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len, const void *val_buf, size_t val_len) { int i, count = val_len / sizeof(u32), ret; - u32 reg = *(u32 *)reg_buf & U16_MAX; struct qca8k_priv *priv = ctx; + u32 reg = *(u16 *)reg_buf; u32 *val = (u32 *)val_buf; if (priv->mgmt_master && @@ -666,6 +666,15 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, goto err_read_skb; } + /* It seems that accessing the switch's internal PHYs via management + * packets still uses the MDIO bus within the switch internally, and + * these accesses can conflict with external MDIO accesses to other + * devices on the MDIO bus. + * We therefore need to lock the MDIO bus onto which the switch is + * connected. + */ + mutex_lock(&priv->bus->mdio_lock); + /* Actually start the request: * 1. Send mdio master packet * 2. Busy Wait for mdio master command @@ -678,6 +687,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, mgmt_master = priv->mgmt_master; if (!mgmt_master) { mutex_unlock(&mgmt_eth_data->mutex); + mutex_unlock(&priv->bus->mdio_lock); ret = -EINVAL; goto err_mgmt_master; } @@ -765,6 +775,7 @@ exit: QCA8K_ETHERNET_TIMEOUT); mutex_unlock(&mgmt_eth_data->mutex); + mutex_unlock(&priv->bus->mdio_lock); return ret; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 832a2ae01950..a8d79ee350f8 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1303,24 +1303,23 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb, * the user space for finding a flow. During this process, OVS computes * checksum on the first packet when CHECKSUM_PARTIAL flag is set. * - * So, re-compute TCP pseudo header checksum when configured for - * trunk mode. + * So, re-compute TCP pseudo header checksum. */ + if (iph_proto == IPPROTO_TCP) { struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen); + if (tcph->check == 0x0000) { /* Recompute TCP pseudo header checksum */ - if (adapter->is_active_trunk) { - tcphdrlen = skb->len - iphlen; - if (skb_proto == ETH_P_IP) - tcph->check = - ~csum_tcpudp_magic(iph->saddr, - iph->daddr, tcphdrlen, iph_proto, 0); - else if (skb_proto == ETH_P_IPV6) - tcph->check = - ~csum_ipv6_magic(&iph6->saddr, - &iph6->daddr, tcphdrlen, iph_proto, 0); - } + tcphdrlen = skb->len - iphlen; + if (skb_proto == ETH_P_IP) + tcph->check = + ~csum_tcpudp_magic(iph->saddr, + iph->daddr, tcphdrlen, iph_proto, 0); + else if (skb_proto == ETH_P_IPV6) + tcph->check = + ~csum_ipv6_magic(&iph6->saddr, + &iph6->daddr, tcphdrlen, iph_proto, 0); /* Setup SKB fields for checksum offload */ skb_partial_csum_set(skb, iphlen, offsetof(struct tcphdr, check)); diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 4f39863b5537..7b1256992dcf 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -2093,3 +2093,35 @@ lag_rebuild_out: } mutex_unlock(&pf->lag_mutex); } + +/** + * ice_lag_is_switchdev_running + * @pf: pointer to PF structure + * + * Check if switchdev is running on any of the interfaces connected to lag. 
+ */ +bool ice_lag_is_switchdev_running(struct ice_pf *pf) +{ + struct ice_lag *lag = pf->lag; + struct net_device *tmp_nd; + + if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag) + return false; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { + struct ice_netdev_priv *priv = netdev_priv(tmp_nd); + + if (!netif_is_ice(tmp_nd) || !priv || !priv->vsi || + !priv->vsi->back) + continue; + + if (ice_is_switchdev_running(priv->vsi->back)) { + rcu_read_unlock(); + return true; + } + } + rcu_read_unlock(); + + return false; +} diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index 18075b82485a..facb6c894b6d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -62,4 +62,5 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf); int ice_init_lag(struct ice_pf *pf); void ice_deinit_lag(struct ice_pf *pf); void ice_lag_rebuild(struct ice_pf *pf); +bool ice_lag_is_switchdev_running(struct ice_pf *pf); #endif /* _ICE_LAG_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 201570cd2e0b..7bf9b7069754 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -3575,6 +3575,12 @@ int ice_set_dflt_vsi(struct ice_vsi *vsi) dev = ice_pf_to_dev(vsi->back); + if (ice_lag_is_switchdev_running(vsi->back)) { + dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n", + vsi->vsi_num); + return 0; + } + /* the VSI passed in is already the default VSI */ if (ice_is_vsi_dflt_vsi(vsi)) { dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n", diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index b03426ac932b..db97353efd06 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -2617,12 +2617,14 @@ static int ice_vc_query_rxdid(struct ice_vf *vf) goto err; } - /* Read flexiflag registers to determine whether the - * corresponding RXDID is configured and supported or not. - * Since Legacy 16byte descriptor format is not supported, - * start from Legacy 32byte descriptor. + /* RXDIDs supported by DDP package can be read from the register + * to get the supported RXDID bitmap. But the legacy 32byte RXDID + * is not listed in DDP package, add it in the bitmap manually. + * Legacy 16byte descriptor is not supported. 
*/ - for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { + rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1); + + for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0)); if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 29cc60988071..ea88ac04ab9a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -28,6 +28,9 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, struct vf_macvlans *mv_list; int num_vf_macvlans, i; + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + num_vf_macvlans = hw->mac.num_rar_entries - (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); if (!num_vf_macvlans) @@ -36,8 +39,6 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), GFP_KERNEL); if (mv_list) { - /* Initialize list of VF macvlans */ - INIT_LIST_HEAD(&adapter->vf_mvs.l); for (i = 0; i < num_vf_macvlans; i++) { mv_list[i].vf = -1; mv_list[i].free = true; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index 59b138214af2..6cc7a78968fc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -1357,10 +1357,12 @@ static int cn10k_mdo_upd_txsa(struct macsec_context *ctx) if (netif_running(secy->netdev)) { /* Keys cannot be changed after creation */ - err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, - sw_tx_sa->next_pn); - if (err) - return err; + if (ctx->sa.update_pn) { + err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, + sw_tx_sa->next_pn); + if (err) + return err; + } err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, sw_tx_sa->active); @@ -1529,6 +1531,9 @@ static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx) if (err) return err; + if (!ctx->sa.update_pn) + return 0; + err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, rx_sa->next_pn); if (err) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 997fedac3a98..818ce76185b2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -1403,6 +1403,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, return 0; } + pp_params.order = get_order(buf_size); pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP; pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); pp_params.nid = NUMA_NO_NODE; diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index ddec1627f1a7..8d0bacf4e49c 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2195,7 +2195,7 @@ struct rx_ring_info { struct sk_buff *skb; dma_addr_t data_addr; DEFINE_DMA_UNMAP_LEN(data_size); - dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; + dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1]; }; enum flow_control { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 3cffd1bd3067..20afe79f380a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -3171,8 +3171,8 @@ static irqreturn_t 
mtk_handle_irq_rx(int irq, void *_eth) eth->rx_events++; if (likely(napi_schedule_prep(&eth->rx_napi))) { - __napi_schedule(&eth->rx_napi); mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + __napi_schedule(&eth->rx_napi); } return IRQ_HANDLED; @@ -3184,8 +3184,8 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) eth->tx_events++; if (likely(napi_schedule_prep(&eth->tx_napi))) { - __napi_schedule(&eth->tx_napi); mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); + __napi_schedule(&eth->tx_napi); } return IRQ_HANDLED; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c index c9c1db971652..d4ebd8743114 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -580,7 +580,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) goto out; } - if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) { + if (ctx->sa.update_pn) { netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n", assoc_num); err = -EINVAL; @@ -973,7 +973,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx) goto out; } - if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) { + if (ctx->sa.update_pn) { netdev_err(ctx->netdev, "MACsec offload update RX sa %d PN isn't supported\n", assoc_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a2ae791538ed..acb40770cf0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3952,13 +3952,14 @@ static int set_feature_rx_fcs(struct net_device *netdev, bool enable) struct mlx5e_channels *chs = &priv->channels; struct mlx5e_params new_params; int err; + bool rx_ts_over_crc = !enable; mutex_lock(&priv->state_lock); new_params = chs->params; new_params.scatter_fcs_en = enable; err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap, - &new_params.scatter_fcs_en, true); + &rx_ts_over_crc, true); mutex_unlock(&priv->state_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index bb8eeb86edf7..52c2fe3644d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -310,8 +310,8 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = { .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload, }; -static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, - bool learning_en) +static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, + bool learning_en) { char tnpc_pl[MLXSW_REG_TNPC_LEN]; diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index 329e374b9539..43ba71e82260 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -46,6 +46,7 @@ config LAN743X tristate "LAN743x support" depends on PCI depends on PTP_1588_CLOCK_OPTIONAL + select PHYLIB select FIXED_PHY select CRC16 select CRC32 diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 4a16ebff3d1d..48ea4aeeea5d 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -91,63 +91,137 @@ static unsigned int mana_checksum_info(struct sk_buff *skb) return 0; } +static void mana_add_sge(struct
mana_tx_package *tp, struct mana_skb_head *ash, + int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey) +{ + ash->dma_handle[sg_i] = da; + ash->size[sg_i] = sge_len; + + tp->wqe_req.sgl[sg_i].address = da; + tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey; + tp->wqe_req.sgl[sg_i].size = sge_len; +} + static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc, - struct mana_tx_package *tp) + struct mana_tx_package *tp, int gso_hs) { struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; + int hsg = 1; /* num of SGEs of linear part */ struct gdma_dev *gd = apc->ac->gdma_dev; + int skb_hlen = skb_headlen(skb); + int sge0_len, sge1_len = 0; struct gdma_context *gc; struct device *dev; skb_frag_t *frag; dma_addr_t da; + int sg_i; int i; gc = gd->gdma_context; dev = gc->dev; - da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (gso_hs && gso_hs < skb_hlen) { + sge0_len = gso_hs; + sge1_len = skb_hlen - gso_hs; + } else { + sge0_len = skb_hlen; + } + + da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE); if (dma_mapping_error(dev, da)) return -ENOMEM; - ash->dma_handle[0] = da; - ash->size[0] = skb_headlen(skb); + mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey); - tp->wqe_req.sgl[0].address = ash->dma_handle[0]; - tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey; - tp->wqe_req.sgl[0].size = ash->size[0]; + if (sge1_len) { + sg_i = 1; + da = dma_map_single(dev, skb->data + sge0_len, sge1_len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, da)) + goto frag_err; + + mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey); + hsg = 2; + } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + sg_i = hsg + i; + frag = &skb_shinfo(skb)->frags[i]; da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); - if (dma_mapping_error(dev, da)) goto frag_err; - ash->dma_handle[i + 1] = da; - ash->size[i + 1] = skb_frag_size(frag); - - tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1]; - tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey; - tp->wqe_req.sgl[i + 1].size = ash->size[i + 1]; + mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag), + gd->gpa_mkey); } return 0; frag_err: - for (i = i - 1; i >= 0; i--) - dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1], + for (i = sg_i - 1; i >= hsg; i--) + dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], DMA_TO_DEVICE); - dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); + for (i = hsg - 1; i >= 0; i--) + dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], + DMA_TO_DEVICE); return -ENOMEM; } +/* Handle the case when GSO SKB linear length is too large. + * MANA NIC requires GSO packets to put only the packet header to SGE0. + * So, we need 2 SGEs for the skb linear part which contains more than the + * header. + * Return a positive value for the number of SGEs, or a negative value + * for an error. 
+ */ +static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb, + int gso_hs) +{ + int num_sge = 1 + skb_shinfo(skb)->nr_frags; + int skb_hlen = skb_headlen(skb); + + if (gso_hs < skb_hlen) { + num_sge++; + } else if (gso_hs > skb_hlen) { + if (net_ratelimit()) + netdev_err(ndev, + "TX nonlinear head: hs:%d, skb_hlen:%d\n", + gso_hs, skb_hlen); + + return -EINVAL; + } + + return num_sge; +} + +/* Get the GSO packet's header size */ +static int mana_get_gso_hs(struct sk_buff *skb) +{ + int gso_hs; + + if (skb->encapsulation) { + gso_hs = skb_inner_tcp_all_headers(skb); + } else { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + gso_hs = skb_transport_offset(skb) + + sizeof(struct udphdr); + } else { + gso_hs = skb_tcp_all_headers(skb); + } + } + + return gso_hs; +} + netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) { enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT; struct mana_port_context *apc = netdev_priv(ndev); + int gso_hs = 0; /* zero for non-GSO pkts */ u16 txq_idx = skb_get_queue_mapping(skb); struct gdma_dev *gd = apc->ac->gdma_dev; bool ipv4 = false, ipv6 = false; @@ -159,7 +233,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct mana_txq *txq; struct mana_cq *cq; int err, len; - u16 ihs; if (unlikely(!apc->port_is_up)) goto tx_drop; @@ -209,19 +282,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) pkg.wqe_req.client_data_unit = 0; pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; - WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES); - - if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { - pkg.wqe_req.sgl = pkg.sgl_array; - } else { - pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge, - sizeof(struct gdma_sge), - GFP_ATOMIC); - if (!pkg.sgl_ptr) - goto tx_drop_count; - - pkg.wqe_req.sgl = pkg.sgl_ptr; - } if (skb->protocol == htons(ETH_P_IP)) ipv4 = true; @@ -229,6 +289,26 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) ipv6 = true; if (skb_is_gso(skb)) { + int num_sge; + + gso_hs = mana_get_gso_hs(skb); + + num_sge = mana_fix_skb_head(ndev, skb, gso_hs); + if (num_sge > 0) + pkg.wqe_req.num_sge = num_sge; + else + goto tx_drop_count; + + u64_stats_update_begin(&tx_stats->syncp); + if (skb->encapsulation) { + tx_stats->tso_inner_packets++; + tx_stats->tso_inner_bytes += skb->len - gso_hs; + } else { + tx_stats->tso_packets++; + tx_stats->tso_bytes += skb->len - gso_hs; + } + u64_stats_update_end(&tx_stats->syncp); + pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; @@ -252,28 +332,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } - - if (skb->encapsulation) { - ihs = skb_inner_tcp_all_headers(skb); - u64_stats_update_begin(&tx_stats->syncp); - tx_stats->tso_inner_packets++; - tx_stats->tso_inner_bytes += skb->len - ihs; - u64_stats_update_end(&tx_stats->syncp); - } else { - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { - ihs = skb_transport_offset(skb) + sizeof(struct udphdr); - } else { - ihs = skb_tcp_all_headers(skb); - if (ipv6_has_hopopt_jumbo(skb)) - ihs -= sizeof(struct hop_jumbo_hdr); - } - - u64_stats_update_begin(&tx_stats->syncp); - tx_stats->tso_packets++; - tx_stats->tso_bytes += skb->len - ihs; - u64_stats_update_end(&tx_stats->syncp); - } - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { csum_type = mana_checksum_info(skb); @@ -296,11 +354,25 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct 
net_device *ndev) } else { /* Can't do offload of this type of checksum */ if (skb_checksum_help(skb)) - goto free_sgl_ptr; + goto tx_drop_count; } } - if (mana_map_skb(skb, apc, &pkg)) { + WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES); + + if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { + pkg.wqe_req.sgl = pkg.sgl_array; + } else { + pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge, + sizeof(struct gdma_sge), + GFP_ATOMIC); + if (!pkg.sgl_ptr) + goto tx_drop_count; + + pkg.wqe_req.sgl = pkg.sgl_ptr; + } + + if (mana_map_skb(skb, apc, &pkg, gso_hs)) { u64_stats_update_begin(&tx_stats->syncp); tx_stats->mana_map_err++; u64_stats_update_end(&tx_stats->syncp); @@ -1258,11 +1330,16 @@ static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; struct device *dev = gc->dev; - int i; + int hsg, i; - dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); + /* Number of SGEs of linear part */ + hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1; - for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) + for (i = 0; i < hsg; i++) + dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], + DMA_TO_DEVICE); + + for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++) dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], DMA_TO_DEVICE); } @@ -1317,19 +1394,23 @@ static void mana_poll_tx_cq(struct mana_cq *cq) case CQE_TX_VPORT_IDX_OUT_OF_RANGE: case CQE_TX_VPORT_DISABLED: case CQE_TX_VLAN_TAGGING_VIOLATION: - WARN_ONCE(1, "TX: CQE error %d: ignored.\n", - cqe_oob->cqe_hdr.cqe_type); + if (net_ratelimit()) + netdev_err(ndev, "TX: CQE error %d\n", + cqe_oob->cqe_hdr.cqe_type); + apc->eth_stats.tx_cqe_err++; break; default: - /* If the CQE type is unexpected, log an error, assert, - * and go through the error path. + /* If the CQE type is unknown, log an error, + * and still free the SKB, update tail, etc. 
*/ - WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n", - cqe_oob->cqe_hdr.cqe_type); + if (net_ratelimit()) + netdev_err(ndev, "TX: unknown CQE type %d\n", + cqe_oob->cqe_hdr.cqe_type); + apc->eth_stats.tx_cqe_unknown_type++; - return; + break; } if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index f21cf1f40f98..153533cd8f08 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -210,6 +210,7 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb); struct nfp_flower_cmsg_merge_hint *msg; struct nfp_fl_payload *sub_flows[2]; + struct nfp_flower_priv *priv; int err, i, flow_cnt; msg = nfp_flower_cmsg_get_data(skb); @@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) return; } - rtnl_lock(); + priv = app->priv; + mutex_lock(&priv->nfp_fl_lock); for (i = 0; i < flow_cnt; i++) { u32 ctx = be32_to_cpu(msg->flow[i].host_ctx); sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx); if (!sub_flows[i]) { nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n"); - goto err_rtnl_unlock; + goto err_mutex_unlock; } } @@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) if (err == -ENOMEM) nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n"); -err_rtnl_unlock: - rtnl_unlock(); +err_mutex_unlock: + mutex_unlock(&priv->nfp_fl_lock); } static void diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 2643c4b3ff1f..2967bab72505 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -2131,8 +2131,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl struct nfp_fl_ct_flow_entry *ct_entry; struct netlink_ext_ack *extack = NULL; - ASSERT_RTNL(); - extack = flow->common.extack; switch (flow->command) { case FLOW_CLS_REPLACE: @@ -2178,9 +2176,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb switch (type) { case TC_SETUP_CLSFLOWER: - rtnl_lock(); + while (!mutex_trylock(&zt->priv->nfp_fl_lock)) { + if (!zt->nft) /* avoid deadlock */ + return err; + msleep(20); + } err = nfp_fl_ct_offload_nft_flow(zt, flow); - rtnl_unlock(); + mutex_unlock(&zt->priv->nfp_fl_lock); break; default: return -EOPNOTSUPP; @@ -2208,6 +2210,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) struct nfp_fl_ct_flow_entry *ct_entry; struct nfp_fl_ct_zone_entry *zt; struct rhashtable *m_table; + struct nf_flowtable *nft; if (!ct_map_ent) return -ENOENT; @@ -2226,8 +2229,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) if (ct_map_ent->cookie > 0) kfree(ct_map_ent); - if (!zt->pre_ct_count) { - zt->nft = NULL; + if (!zt->pre_ct_count && zt->nft) { + nft = zt->nft; + zt->nft = NULL; /* avoid deadlock */ + nf_flow_table_offload_del_cb(nft, + nfp_fl_ct_handle_nft_flow, + zt); nfp_fl_ct_clean_nft_entries(zt); } break; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 40372545148e..2b7c947ff4f2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -297,6 +297,7 @@ struct nfp_fl_internal_ports { * @predt_list: List 
to keep track of decap pretun flows * @neigh_table: Table to keep track of neighbor entries * @predt_lock: Lock to serialise predt/neigh table updates + * @nfp_fl_lock: Lock to protect the flow offload operation */ struct nfp_flower_priv { struct nfp_app *app; @@ -339,6 +340,7 @@ struct nfp_flower_priv { struct list_head predt_list; struct rhashtable neigh_table; spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */ + struct mutex nfp_fl_lock; /* Protect the flow operation */ }; /** diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 0f06ef6e24bf..80e4675582bf 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, if (err) goto err_free_stats_ctx_table; + mutex_init(&priv->nfp_fl_lock); + err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params); if (err) goto err_free_merge_table; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index c153f0575b92..0aceef9fe582 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, u64 parent_ctx = 0; int err; - ASSERT_RTNL(); - if (sub_flow1 == sub_flow2 || nfp_flower_is_merge_flow(sub_flow1) || nfp_flower_is_merge_flow(sub_flow2)) @@ -1727,19 +1725,30 @@ static int nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, struct flow_cls_offload *flower) { + struct nfp_flower_priv *priv = app->priv; + int ret; + if (!eth_proto_is_802_3(flower->common.protocol)) return -EOPNOTSUPP; + mutex_lock(&priv->nfp_fl_lock); switch (flower->command) { case FLOW_CLS_REPLACE: - return nfp_flower_add_offload(app, netdev, flower); + ret = nfp_flower_add_offload(app, netdev, flower); + break; case FLOW_CLS_DESTROY: - return nfp_flower_del_offload(app, netdev, flower); + ret = nfp_flower_del_offload(app, netdev, flower); + break; case FLOW_CLS_STATS: - return nfp_flower_get_stats(app, netdev, flower); + ret = nfp_flower_get_stats(app, netdev, flower); + break; default: - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + break; } + mutex_unlock(&priv->nfp_fl_lock); + + return ret; } static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, @@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, repr_priv = repr->app_priv; repr_priv->block_shared = f->block_shared; f->driver_block_list = &nfp_block_cb_list; + f->unlocked_driver_cb = true; switch (f->command) { case FLOW_BLOCK_BIND: @@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str nfp_flower_internal_port_can_offload(app, netdev))) return -EOPNOTSUPP; + f->unlocked_driver_cb = true; + switch (f->command) { case FLOW_BLOCK_BIND: cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index 99052a925d9e..e7180b4793c7 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev, { struct netlink_ext_ack *extack = flow->common.extack; struct nfp_flower_priv *fl_priv = 
app->priv; + int ret; if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload"); return -EOPNOTSUPP; } + mutex_lock(&fl_priv->nfp_fl_lock); switch (flow->command) { case TC_CLSMATCHALL_REPLACE: - return nfp_flower_install_rate_limiter(app, netdev, flow, - extack); + ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack); + break; case TC_CLSMATCHALL_DESTROY: - return nfp_flower_remove_rate_limiter(app, netdev, flow, - extack); + ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack); + break; case TC_CLSMATCHALL_STATS: - return nfp_flower_stats_rate_limiter(app, netdev, flow, - extack); + ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack); + break; default: - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + break; } + mutex_unlock(&fl_priv->nfp_fl_lock); + + return ret; } /* Offload tc action, currently only for tc police */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 0bfc375161ed..a174c6fc626a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -110,9 +110,9 @@ struct qed_ll2_info { enum core_tx_dest tx_dest; u8 tx_stats_en; bool main_func_queue; + struct qed_ll2_cbs cbs; struct qed_ll2_rx_queue rx_queue; struct qed_ll2_tx_queue tx_queue; - struct qed_ll2_cbs cbs; }; extern const struct qed_ll2_ops qed_ll2_ops_pass; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 7df9f9f8e134..0ef0b88b7145 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2167,6 +2167,8 @@ static int ravb_close(struct net_device *ndev) of_phy_deregister_fixed_link(np); } + cancel_work_sync(&priv->work); + if (info->multi_irqs) { free_irq(priv->tx_irqs[RAVB_NC], ndev); free_irq(priv->rx_irqs[RAVB_NC], ndev); @@ -2891,8 +2893,6 @@ static int ravb_remove(struct platform_device *pdev) clk_disable_unprepare(priv->gptp_clk); clk_disable_unprepare(priv->refclk); - dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, - priv->desc_bat_dma); /* Set reset mode */ ravb_write(ndev, CCC_OPC_RESET, CCC); unregister_netdev(ndev); @@ -2900,6 +2900,8 @@ static int ravb_remove(struct platform_device *pdev) netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); ravb_mdio_release(priv); + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, + priv->desc_bat_dma); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); reset_control_assert(priv->rstc); diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index ea9186178091..0fc0b6bea753 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -4,6 +4,7 @@ * Copyright (C) 2022 Renesas Electronics Corporation */ +#include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/etherdevice.h> @@ -1049,7 +1050,7 @@ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac) static void rswitch_etha_enable_mii(struct rswitch_etha *etha) { rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK, - MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06)); + MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06)); rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45); } @@ -1253,7 +1254,7 @@ static void rswitch_adjust_link(struct net_device *ndev) phy_print_status(phydev); if 
(phydev->link) phy_power_on(rdev->serdes); - else + else if (rdev->serdes->power_count) phy_power_off(rdev->serdes); rdev->etha->link = phydev->link; @@ -1693,6 +1694,12 @@ static void rswitch_etha_init(struct rswitch_private *priv, int index) etha->index = index; etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE; etha->coma_addr = priv->addr; + + /* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2) - 1. + * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply + * both the numerator and the denominator by 10. + */ + etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1; } static int rswitch_device_alloc(struct rswitch_private *priv, int index) @@ -1900,6 +1907,10 @@ static int renesas_eth_sw_probe(struct platform_device *pdev) return -ENOMEM; spin_lock_init(&priv->lock); + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + attr = soc_device_match(rswitch_soc_no_speed_change); if (attr) priv->etha_no_runtime_change = true; @@ -1953,15 +1964,17 @@ static void rswitch_deinit(struct rswitch_private *priv) rswitch_gwca_hw_deinit(priv); rcar_gen4_ptp_unregister(priv->ptp_priv); - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + rswitch_for_each_enabled_port(priv, i) { struct rswitch_device *rdev = priv->rdev[i]; - phy_exit(priv->rdev[i]->serdes); - rswitch_ether_port_deinit_one(rdev); unregister_netdev(rdev->ndev); - rswitch_device_free(priv, i); + rswitch_ether_port_deinit_one(rdev); + phy_exit(priv->rdev[i]->serdes); } + for (i = 0; i < RSWITCH_NUM_PORTS; i++) + rswitch_device_free(priv, i); + rswitch_gwca_ts_queue_free(priv); rswitch_gwca_linkfix_free(priv); diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index f0c16a37ea55..04f49a7a5843 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -915,6 +915,7 @@ struct rswitch_etha { bool external_phy; struct mii_bus *mii; phy_interface_t phy_interface; + u32 psmcs; u8 mac_addr[MAX_ADDR_LEN]; int link; int speed; @@ -1012,6 +1013,7 @@ struct rswitch_private { struct rswitch_mfwd mfwd; spinlock_t lock; /* lock interrupt registers' control */ + struct clk *clk; bool etha_no_runtime_change; bool gwca_halt; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 26ea8c687881..a0e276783e65 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -104,6 +104,7 @@ struct stm32_ops { int (*parse_data)(struct stm32_dwmac *dwmac, struct device *dev); u32 syscfg_eth_mask; + bool clk_rx_enable_in_suspend; }; static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) @@ -121,7 +122,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) if (ret) return ret; - if (!dwmac->dev->power.is_suspended) { + if (!dwmac->ops->clk_rx_enable_in_suspend || + !dwmac->dev->power.is_suspended) { ret = clk_prepare_enable(dwmac->clk_rx); if (ret) { clk_disable_unprepare(dwmac->clk_tx); @@ -513,7 +515,8 @@ static struct stm32_ops stm32mp1_dwmac_data = { .suspend = stm32mp1_suspend, .resume = stm32mp1_resume, .parse_data = stm32mp1_parse_data, - .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK + .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK, + .clk_rx_enable_in_suspend = true }; static const struct of_device_id stm32_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 
83c567a89a46..ed1a5a31a491 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -6002,33 +6002,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) return IRQ_HANDLED; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/* Polling receive - used by NETCONSOLE and other diagnostic tools - * to allow network I/O with interrupts disabled. - */ -static void stmmac_poll_controller(struct net_device *dev) -{ - struct stmmac_priv *priv = netdev_priv(dev); - int i; - - /* If adapter is down, do nothing */ - if (test_bit(STMMAC_DOWN, &priv->state)) - return; - - if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) { - for (i = 0; i < priv->plat->rx_queues_to_use; i++) - stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]); - - for (i = 0; i < priv->plat->tx_queues_to_use; i++) - stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]); - } else { - disable_irq(dev->irq); - stmmac_interrupt(dev->irq, dev); - enable_irq(dev->irq); - } -} -#endif - /** * stmmac_ioctl - Entry point for the Ioctl * @dev: Device pointer. @@ -6989,9 +6962,6 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_get_stats64 = stmmac_get_stats64, .ndo_setup_tc = stmmac_setup_tc, .ndo_select_queue = stmmac_select_queue, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = stmmac_poll_controller, -#endif .ndo_set_mac_address = stmmac_set_mac_address, .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 0f28795e581c..2f0678f15fb7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -901,7 +901,7 @@ static int __maybe_unused stmmac_pltfr_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); int ret; - ret = stmmac_pltfr_init(pdev, priv->plat->bsp_priv); + ret = stmmac_pltfr_init(pdev, priv->plat); if (ret) return ret; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index bea6fc0f324c..24120605502f 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1747,9 +1747,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common) } tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); - if (tx_chn->irq <= 0) { + if (tx_chn->irq < 0) { dev_err(dev, "Failed to get tx dma irq %d\n", tx_chn->irq); + ret = tx_chn->irq; goto err; } diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 410612f43cbd..4914d0ef58e9 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -316,12 +316,12 @@ static int prueth_init_tx_chns(struct prueth_emac *emac) goto fail; } - tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); - if (tx_chn->irq <= 0) { - ret = -EINVAL; + ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); + if (ret < 0) { netdev_err(ndev, "failed to get tx irq\n"); goto fail; } + tx_chn->irq = ret; snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d", dev_name(dev), tx_chn->id); diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index aebb19f1b3a4..4ec0dab38872 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2740,7 +2740,6 @@ static int ca8210_register_ext_clock(struct spi_device *spi) struct device_node *np 
= spi->dev.of_node; struct ca8210_priv *priv = spi_get_drvdata(spi); struct ca8210_platform_data *pdata = spi->dev.platform_data; - int ret = 0; if (!np) return -EFAULT; @@ -2757,18 +2756,8 @@ static int ca8210_register_ext_clock(struct spi_device *spi) dev_crit(&spi->dev, "Failed to register external clk\n"); return PTR_ERR(priv->clk); } - ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); - if (ret) { - clk_unregister(priv->clk); - dev_crit( - &spi->dev, - "Failed to register external clock as clock provider\n" - ); - } else { - dev_info(&spi->dev, "External clock set as clock provider\n"); - } - return ret; + return of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); } /** @@ -2780,8 +2769,8 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi) { struct ca8210_priv *priv = spi_get_drvdata(spi); - if (!priv->clk) - return + if (IS_ERR_OR_NULL(priv->clk)) + return; of_clk_del_provider(spi->dev.of_node); clk_unregister(priv->clk); diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index b7e151439c48..c5cd4551c67c 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2383,6 +2383,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) ctx.sa.assoc_num = assoc_num; ctx.sa.tx_sa = tx_sa; + ctx.sa.update_pn = !!prev_pn.full64; ctx.secy = secy; ret = macsec_offload(ops->mdo_upd_txsa, &ctx); @@ -2476,6 +2477,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) ctx.sa.assoc_num = assoc_num; ctx.sa.rx_sa = rx_sa; + ctx.sa.update_pn = !!prev_pn.full64; ctx.secy = secy; ret = macsec_offload(ops->mdo_upd_rxsa, &ctx); diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c index 018253a573b8..4f39ba63a9a9 100644 --- a/drivers/net/phy/mscc/mscc_macsec.c +++ b/drivers/net/phy/mscc/mscc_macsec.c @@ -849,6 +849,9 @@ static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx) struct macsec_flow *flow; int ret; + if (ctx->sa.update_pn) + return -EINVAL; + flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR); if (IS_ERR(flow)) return PTR_ERR(flow); @@ -900,6 +903,9 @@ static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx) struct macsec_flow *flow; int ret; + if (ctx->sa.update_pn) + return -EINVAL; + flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR); if (IS_ERR(flow)) return PTR_ERR(flow); diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 48d7d278631e..99ec1d4a972d 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -222,13 +222,18 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc) struct usbnet *dev = netdev_priv(netdev); __le16 res; + int err; if (phy_id) { netdev_dbg(dev->net, "Only internal phy supported\n"); return 0; } - dm_read_shared_word(dev, 1, loc, &res); + err = dm_read_shared_word(dev, 1, loc, &res); + if (err < 0) { + netdev_err(dev->net, "MDIO read error: %d\n", err); + return err; + } netdev_dbg(dev->net, "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 5d6454fedb3f..78ad2da3ee29 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) { + if (unlikely(ret < 4)) { + ret = ret < 0 ? 
ret : -ENODATA; + netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", index, ret); return ret; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index fe7f314d65c9..d67f742fbd4c 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -607,16 +607,16 @@ static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len) --dma->ref; - if (dma->ref) { - if (dma->need_sync && len) { - offset = buf - (head + sizeof(*dma)); + if (dma->need_sync && len) { + offset = buf - (head + sizeof(*dma)); - virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, offset, - len, DMA_FROM_DEVICE); - } + virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, + offset, len, + DMA_FROM_DEVICE); + } + if (dma->ref) return; - } virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 47c2ad7a3e42..fd50bb313b92 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -34,6 +34,8 @@ #define TDM_PPPOHT_SLIC_MAXIN #define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S) +static int uhdlc_close(struct net_device *dev); + static struct ucc_tdm_info utdm_primary_info = { .uf_info = { .tsa = 0, @@ -708,6 +710,7 @@ static int uhdlc_open(struct net_device *dev) hdlc_device *hdlc = dev_to_hdlc(dev); struct ucc_hdlc_private *priv = hdlc->priv; struct ucc_tdm *utdm = priv->utdm; + int rc = 0; if (priv->hdlc_busy != 1) { if (request_irq(priv->ut_info->uf_info.irq, @@ -731,10 +734,13 @@ static int uhdlc_open(struct net_device *dev) napi_enable(&priv->napi); netdev_reset_queue(dev); netif_start_queue(dev); - hdlc_open(dev); + + rc = hdlc_open(dev); + if (rc) + uhdlc_close(dev); } - return 0; + return rc; } static void uhdlc_memclean(struct ucc_hdlc_private *priv) @@ -824,6 +830,8 @@ static int uhdlc_close(struct net_device *dev) netdev_reset_queue(dev); priv->hdlc_busy = 0; + hdlc_close(dev); + return 0; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index bece26741d3a..611d1a6aabb9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -442,7 +442,12 @@ struct brcmf_scan_params_v2_le { * fixed parameter portion is assumed, otherwise * ssid in the fixed portion is ignored */ - __le16 channel_list[1]; /* list of chanspecs */ + union { + __le16 padding; /* Reserve space for at least 1 entry for abort + * which uses an on stack brcmf_scan_params_v2_le + */ + DECLARE_FLEX_ARRAY(__le16, channel_list); /* chanspecs */ + }; }; struct brcmf_scan_results { @@ -702,7 +707,7 @@ struct brcmf_sta_info_le { struct brcmf_chanspec_list { __le32 count; /* # of entries */ - __le32 element[1]; /* variable length uint32 list */ + __le32 element[]; /* variable length uint32 list */ }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index f5e08988dc7b..06d6f7f66430 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -310,9 +310,9 @@ struct iwl_fw_ini_fifo_hdr { struct iwl_fw_ini_error_dump_range { __le32 range_data_size; union { - __le32 internal_base_addr; - __le64 dram_base_addr; - __le32 page_num; + __le32 internal_base_addr __packed; + __le64 dram_base_addr __packed; + __le32 page_num __packed; 
struct iwl_fw_ini_fifo_hdr fifo_hdr; struct iwl_cmd_header fw_pkt_hdr; }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 1f5db65a088d..1d5ee4330f29 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -802,7 +802,7 @@ out: mvm->nvm_data->bands[0].n_channels = 1; mvm->nvm_data->bands[0].n_bitrates = 1; mvm->nvm_data->bands[0].bitrates = - (void *)((u8 *)mvm->nvm_data->channels + 1); + (void *)(mvm->nvm_data->channels + 1); mvm->nvm_data->bands[0].bitrates->hw_value = 10; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 8b6c641772ee..b719843e9457 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -731,73 +731,78 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, mvmvif->associated = vif->cfg.assoc; - if (!(changes & BSS_CHANGED_ASSOC)) - return; - - if (vif->cfg.assoc) { - /* clear statistics to get clean beacon counter */ - iwl_mvm_request_statistics(mvm, true); - iwl_mvm_sf_update(mvm, vif, false); - iwl_mvm_power_vif_assoc(mvm, vif); - - for_each_mvm_vif_valid_link(mvmvif, i) { - memset(&mvmvif->link[i]->beacon_stats, 0, - sizeof(mvmvif->link[i]->beacon_stats)); + if (changes & BSS_CHANGED_ASSOC) { + if (vif->cfg.assoc) { + /* clear statistics to get clean beacon counter */ + iwl_mvm_request_statistics(mvm, true); + iwl_mvm_sf_update(mvm, vif, false); + iwl_mvm_power_vif_assoc(mvm, vif); + + for_each_mvm_vif_valid_link(mvmvif, i) { + memset(&mvmvif->link[i]->beacon_stats, 0, + sizeof(mvmvif->link[i]->beacon_stats)); + + if (vif->p2p) { + iwl_mvm_update_smps(mvm, vif, + IWL_MVM_SMPS_REQ_PROT, + IEEE80211_SMPS_DYNAMIC, i); + } + + rcu_read_lock(); + link_conf = rcu_dereference(vif->link_conf[i]); + if (link_conf && !link_conf->dtim_period) + protect = true; + rcu_read_unlock(); + } - if (vif->p2p) { - iwl_mvm_update_smps(mvm, vif, - IWL_MVM_SMPS_REQ_PROT, - IEEE80211_SMPS_DYNAMIC, i); + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + protect) { + /* If we're not restarting and still haven't + * heard a beacon (dtim period unknown) then + * make sure we still have enough minimum time + * remaining in the time event, since the auth + * might actually have taken quite a while + * (especially for SAE) and so the remaining + * time could be small without us having heard + * a beacon yet. + */ + iwl_mvm_protect_assoc(mvm, vif, 0); } - rcu_read_lock(); - link_conf = rcu_dereference(vif->link_conf[i]); - if (link_conf && !link_conf->dtim_period) - protect = true; - rcu_read_unlock(); - } + iwl_mvm_sf_update(mvm, vif, false); + + /* FIXME: need to decide about misbehaving AP handling */ + iwl_mvm_power_vif_assoc(mvm, vif); + } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) { + iwl_mvm_mei_host_disassociated(mvm); - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && - protect) { - /* If we're not restarting and still haven't - * heard a beacon (dtim period unknown) then - * make sure we still have enough minimum time - * remaining in the time event, since the auth - * might actually have taken quite a while - * (especially for SAE) and so the remaining - * time could be small without us having heard - * a beacon yet. + /* If update fails - SF might be running in associated + * mode while disassociated - which is forbidden. 
*/ - iwl_mvm_protect_assoc(mvm, vif, 0); + ret = iwl_mvm_sf_update(mvm, vif, false); + WARN_ONCE(ret && + !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status), + "Failed to update SF upon disassociation\n"); + + /* If we get an assert during the connection (after the + * station has been added, but before the vif is set + * to associated), mac80211 will re-add the station and + * then configure the vif. Since the vif is not + * associated, we would remove the station here and + * this would fail the recovery. + */ + iwl_mvm_mld_vif_delete_all_stas(mvm, vif); } - iwl_mvm_sf_update(mvm, vif, false); - - /* FIXME: need to decide about misbehaving AP handling */ - iwl_mvm_power_vif_assoc(mvm, vif); - } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) { - iwl_mvm_mei_host_disassociated(mvm); - - /* If update fails - SF might be running in associated - * mode while disassociated - which is forbidden. - */ - ret = iwl_mvm_sf_update(mvm, vif, false); - WARN_ONCE(ret && - !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, - &mvm->status), - "Failed to update SF upon disassociation\n"); - - /* If we get an assert during the connection (after the - * station has been added, but before the vif is set - * to associated), mac80211 will re-add the station and - * then configure the vif. Since the vif is not - * associated, we would remove the station here and - * this would fail the recovery. - */ - iwl_mvm_mld_vif_delete_all_stas(mvm, vif); + iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes); } - iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes); + if (changes & BSS_CHANGED_PS) { + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + IWL_ERR(mvm, "failed to update power mode\n"); + } } static void diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index c1d9ce753468..3cbe2c0b8d6b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -2342,7 +2342,7 @@ iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm, if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2) gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; - if (version < 12) { + if (version < 16) { gp->scan_start_mac_or_link_id = scan_vif->id; } else { struct iwl_mvm_vif_link_info *link_info; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 36d70d589aed..898dca393643 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1612,6 +1612,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); memset(&info->status, 0, sizeof(info->status)); + info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED); /* inform mac80211 about what happened with the frame */ switch (status & TX_STATUS_MSK) { @@ -1964,6 +1965,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, */ if (!is_flush) info->flags |= IEEE80211_TX_STAT_ACK; + else + info->flags &= ~IEEE80211_TX_STAT_ACK; } /* diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 391793a16adc..10690e82358b 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -918,9 +918,17 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:", 
event_buf, len); - while (tlv_buf_left >= sizeof(*tlv_rxba)) { + while (tlv_buf_left > sizeof(*tlv_rxba)) { tlv_type = le16_to_cpu(tlv_rxba->header.type); tlv_len = le16_to_cpu(tlv_rxba->header.len); + if (size_add(sizeof(tlv_rxba->header), tlv_len) > tlv_buf_left) { + mwifiex_dbg(priv->adapter, WARN, + "TLV size (%zu) overflows event_buf buf_left=%d\n", + size_add(sizeof(tlv_rxba->header), tlv_len), + tlv_buf_left); + return; + } + if (tlv_type != TLV_TYPE_RXBA_SYNC) { mwifiex_dbg(priv->adapter, ERROR, "Wrong TLV id=0x%x\n", tlv_type); @@ -929,6 +937,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num); tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len); + if (size_add(sizeof(*tlv_rxba), tlv_bitmap_len) > tlv_buf_left) { + mwifiex_dbg(priv->adapter, WARN, + "TLV size (%zu) overflows event_buf buf_left=%d\n", + size_add(sizeof(*tlv_rxba), tlv_bitmap_len), + tlv_buf_left); + return; + } + mwifiex_dbg(priv->adapter, INFO, "%pM tid=%d seq_num=%d bitmap_len=%d\n", tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num, @@ -965,8 +981,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, } } - tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len); - tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba); + tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len); + tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len; tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp; } } diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index f2168fac95ed..8e6db904e5b2 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -779,7 +779,7 @@ struct mwifiex_ie_types_rxba_sync { u8 reserved; __le16 seq_num; __le16 bitmap_len; - u8 bitmap[1]; + u8 bitmap[]; } __packed; struct chan_band_param_set { diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c index 65420ad67416..257737137cd7 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c @@ -86,7 +86,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length); rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off; - if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) { + if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) + + rx_pkt_off > skb->len) { mwifiex_dbg(priv->adapter, ERROR, "wrong rx packet offset: len=%d, rx_pkt_off=%d\n", skb->len, rx_pkt_off); @@ -95,12 +96,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, return -1; } - if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, - sizeof(bridge_tunnel_header))) || - (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, - sizeof(rfc1042_header)) && - ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP && - ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) { + if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len && + ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, + sizeof(bridge_tunnel_header))) || + (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, + sizeof(rfc1042_header)) && + ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP && + ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) { /* * Replace the 803 header and rfc1042 header (llc/snap) with an * EthernetII header, keep the src/dst and snap_type diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 05d9ab3ce819..dc8f4e157eb2 100644 --- 
a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -93,13 +93,13 @@ __mt76_get_rxwi(struct mt76_dev *dev) { struct mt76_txwi_cache *t = NULL; - spin_lock(&dev->wed_lock); + spin_lock_bh(&dev->wed_lock); if (!list_empty(&dev->rxwi_cache)) { t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache, list); list_del(&t->list); } - spin_unlock(&dev->wed_lock); + spin_unlock_bh(&dev->wed_lock); return t; } @@ -145,9 +145,9 @@ mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) if (!t) return; - spin_lock(&dev->wed_lock); + spin_lock_bh(&dev->wed_lock); list_add(&t->list, &dev->rxwi_cache); - spin_unlock(&dev->wed_lock); + spin_unlock_bh(&dev->wed_lock); } EXPORT_SYMBOL_GPL(mt76_put_rxwi); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c index 0acabba2d1a5..5d402cf2951c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c @@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev, s8 *lna_2g, s8 *lna_5g, struct ieee80211_channel *chan) { - u16 val; u8 lna; - val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); - if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G) - *lna_2g = 0; - if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G) - memset(lna_5g, 0, sizeof(s8) * 3); - if (chan->band == NL80211_BAND_2GHZ) lna = *lna_2g; else if (chan->hw_value <= 64) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c index d5809408d1d3..8c01855885ce 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c @@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev) struct ieee80211_channel *chan = dev->mphy.chandef.chan; int channel = chan->hw_value; s8 lna_5g[3], lna_2g; - u8 lna; + bool use_lna; + u8 lna = 0; u16 val; if (chan->band == NL80211_BAND_2GHZ) @@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev) dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16; dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24; - lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan); + val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); + if (chan->band == NL80211_BAND_2GHZ) + use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G); + else + use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G); + + if (use_lna) + lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan); + dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8); } EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h index 3642a2c7f80c..2434e2480cbe 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h @@ -46,6 +46,7 @@ struct rtw8723du_efuse { u8 vender_id[2]; /* 0x100 */ u8 product_id[2]; /* 0x102 */ u8 usb_option; /* 0x104 */ + u8 res5[2]; /* 0x105 */ u8 mac_addr[ETH_ALEN]; /* 0x107 */ }; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f3f2c07423a6..fc3bb63b9ac3 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -41,8 +41,6 @@ #include <asm/xen/hypercall.h> #include <xen/balloon.h> -#define XENVIF_QUEUE_LENGTH 32 - /* Number of bytes allowed on the internal guest Rx queue. 
*/ #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE) @@ -530,8 +528,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, dev->features = dev->hw_features | NETIF_F_RXCSUM; dev->ethtool_ops = &xenvif_ethtool_ops; - dev->tx_queue_len = XENVIF_QUEUE_LENGTH; - dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 0a3483e247a8..f63250c650ca 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -890,13 +890,13 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action, { struct of_changeset_entry *ce; + if (WARN_ON(action >= ARRAY_SIZE(action_names))) + return -EINVAL; + ce = kzalloc(sizeof(*ce), GFP_KERNEL); if (!ce) return -ENOMEM; - if (WARN_ON(action >= ARRAY_SIZE(action_names))) - return -EINVAL; - /* get a reference to the node */ ce->action = action; ce->np = of_node_get(np); diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index dfb6fb962fc7..a9a292d6d59b 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -45,8 +45,8 @@ struct target { /** * struct fragment - info about fragment nodes in overlay expanded device tree - * @target: target of the overlay operation * @overlay: pointer to the __overlay__ node + * @target: target of the overlay operation */ struct fragment { struct device_node *overlay; diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index e2f29404c84e..64420ecc24d1 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -43,7 +43,6 @@ #define PARF_PHY_REFCLK 0x4c #define PARF_CONFIG_BITS 0x50 #define PARF_DBI_BASE_ADDR 0x168 -#define PARF_SLV_ADDR_SPACE_SIZE_2_3_3 0x16c /* Register offset specific to IP ver 2.3.3 */ #define PARF_MHI_CLOCK_RESET_CTRL 0x174 #define PARF_AXI_MSTR_WR_ADDR_HALT 0x178 #define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8 @@ -797,8 +796,7 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u32 val; - writel(SLV_ADDR_SPACE_SZ, - pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_2_3_3); + writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE); val = readl(pcie->parf + PARF_PHY_CTRL); val &= ~PHY_TEST_PWR_DOWN; diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 2af64bcb7da3..51e3dd0ea5ab 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -657,30 +657,33 @@ void of_pci_make_dev_node(struct pci_dev *pdev) cset = kmalloc(sizeof(*cset), GFP_KERNEL); if (!cset) - goto failed; + goto out_free_name; of_changeset_init(cset); np = of_changeset_create_node(cset, ppnode, name); if (!np) - goto failed; - np->data = cset; + goto out_destroy_cset; ret = of_pci_add_properties(pdev, cset, np); if (ret) - goto failed; + goto out_free_node; ret = of_changeset_apply(cset); if (ret) - goto failed; + goto out_free_node; + np->data = cset; pdev->dev.of_node = np; kfree(name); return; -failed: - if (np) - of_node_put(np); +out_free_node: + of_node_put(np); +out_destroy_cset: + of_changeset_destroy(cset); + kfree(cset); +out_free_name: kfree(name); } #endif diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c index 710ec35ba4a1..c2c7334152bc 100644 --- a/drivers/pci/of_property.c +++ b/drivers/pci/of_property.c @@ -186,8 +186,8 @@ static int of_pci_prop_interrupts(struct pci_dev *pdev, static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs, struct device_node *np) { + u32 i, addr_sz[OF_PCI_MAX_INT_PIN] = { 0 }, map_sz = 0; 
struct of_phandle_args out_irq[OF_PCI_MAX_INT_PIN]; - u32 i, addr_sz[OF_PCI_MAX_INT_PIN], map_sz = 0; __be32 laddr[OF_PCI_ADDRESS_CELLS] = { 0 }; u32 int_map_mask[] = { 0xffff00, 0, 0, 7 }; struct device_node *pnode; @@ -213,33 +213,44 @@ static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs, out_irq[i].args[0] = pin; ret = of_irq_parse_raw(laddr, &out_irq[i]); if (ret) { - pci_err(pdev, "parse irq %d failed, ret %d", pin, ret); + out_irq[i].np = NULL; + pci_dbg(pdev, "parse irq %d failed, ret %d", pin, ret); continue; } - ret = of_property_read_u32(out_irq[i].np, "#address-cells", - &addr_sz[i]); - if (ret) - addr_sz[i] = 0; + of_property_read_u32(out_irq[i].np, "#address-cells", + &addr_sz[i]); } list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { for (pin = 1; pin <= OF_PCI_MAX_INT_PIN; pin++) { i = pci_swizzle_interrupt_pin(child, pin) - 1; + if (!out_irq[i].np) + continue; map_sz += 5 + addr_sz[i] + out_irq[i].args_count; } } + /* + * Parsing interrupt failed for all pins. In this case, it does not + * need to generate interrupt-map property. + */ + if (!map_sz) + return 0; + int_map = kcalloc(map_sz, sizeof(u32), GFP_KERNEL); mapp = int_map; list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { for (pin = 1; pin <= OF_PCI_MAX_INT_PIN; pin++) { + i = pci_swizzle_interrupt_pin(child, pin) - 1; + if (!out_irq[i].np) + continue; + *mapp = (child->bus->number << 16) | (child->devfn << 8); mapp += OF_PCI_ADDRESS_CELLS; *mapp = pin; mapp++; - i = pci_swizzle_interrupt_pin(child, pin) - 1; *mapp = out_irq[i].np->phandle; mapp++; if (addr_sz[i]) { diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index a79c110c7e51..51ec9e7e784f 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -572,7 +572,19 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev) static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev) { - pci_bridge_wait_for_secondary_bus(pci_dev, "resume"); + int ret; + + ret = pci_bridge_wait_for_secondary_bus(pci_dev, "resume"); + if (ret) { + /* + * The downstream link failed to come up, so mark the + * devices below as disconnected to make sure we don't + * attempt to resume them. 
+ */ + pci_walk_bus(pci_dev->subordinate, pci_dev_set_disconnected, + NULL); + return; + } /* * When powering on a bridge from D3cold, the whole hierarchy may be diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 913dc04b3a40..6b50bc551984 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -1972,7 +1972,7 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id) u64 delta; int i; - for (i = 0; i < CMN_DTM_NUM_COUNTERS; i++) { + for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) { if (status & (1U << i)) { ret = IRQ_HANDLED; if (WARN_ON(!dtc->counters[i])) diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c index 1f9a35f724f5..0dda70e1ef90 100644 --- a/drivers/perf/riscv_pmu.c +++ b/drivers/perf/riscv_pmu.c @@ -23,7 +23,8 @@ static bool riscv_perf_user_access(struct perf_event *event) return ((event->attr.type == PERF_TYPE_HARDWARE) || (event->attr.type == PERF_TYPE_HW_CACHE) || (event->attr.type == PERF_TYPE_RAW)) && - !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT); + !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) && + (event->hw.idx != -1); } void arch_perf_update_userpage(struct perf_event *event, diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 9a51053b1f99..96c7f670c8f0 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -510,16 +510,18 @@ static void pmu_sbi_set_scounteren(void *arg) { struct perf_event *event = (struct perf_event *)arg; - csr_write(CSR_SCOUNTEREN, - csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event))); + if (event->hw.idx != -1) + csr_write(CSR_SCOUNTEREN, + csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event))); } static void pmu_sbi_reset_scounteren(void *arg) { struct perf_event *event = (struct perf_event *)arg; - csr_write(CSR_SCOUNTEREN, - csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event))); + if (event->hw.idx != -1) + csr_write(CSR_SCOUNTEREN, + csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event))); } static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival) @@ -541,7 +543,8 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival) if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) - pmu_sbi_set_scounteren((void *)event); + on_each_cpu_mask(mm_cpumask(event->owner->mm), + pmu_sbi_set_scounteren, (void *)event, 1); } static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag) @@ -551,7 +554,8 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag) if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) - pmu_sbi_reset_scounteren((void *)event); + on_each_cpu_mask(mm_cpumask(event->owner->mm), + pmu_sbi_reset_scounteren, (void *)event, 1); ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0); if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) && diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c index 4f036c77284e..e2187767ce00 100644 --- a/drivers/phy/freescale/phy-fsl-lynx-28g.c +++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c @@ -127,6 +127,10 @@ struct lynx_28g_lane { struct lynx_28g_priv { void __iomem *base; struct device *dev; + /* Serialize concurrent access to registers shared between lanes, + * like PCCn + */ + spinlock_t pcc_lock; struct lynx_28g_pll pll[LYNX_28G_NUM_PLL]; struct lynx_28g_lane lane[LYNX_28G_NUM_LANE]; @@ -397,6 +401,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode 
mode, int submode) if (powered_up) lynx_28g_power_off(phy); + spin_lock(&priv->pcc_lock); + switch (submode) { case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_1000BASEX: @@ -413,6 +419,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode) lane->interface = submode; out: + spin_unlock(&priv->pcc_lock); + /* Power up the lane if necessary */ if (powered_up) lynx_28g_power_on(phy); @@ -508,11 +516,12 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work) for (i = 0; i < LYNX_28G_NUM_LANE; i++) { lane = &priv->lane[i]; - if (!lane->init) - continue; + mutex_lock(&lane->phy->mutex); - if (!lane->powered_up) + if (!lane->init || !lane->powered_up) { + mutex_unlock(&lane->phy->mutex); continue; + } rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL); if (!(rrstctl & LYNX_28G_LNaRRSTCTL_CDR_LOCK)) { @@ -521,6 +530,8 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work) rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL); } while (!(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE)); } + + mutex_unlock(&lane->phy->mutex); } queue_delayed_work(system_power_efficient_wq, &priv->cdr_check, msecs_to_jiffies(1000)); @@ -593,6 +604,7 @@ static int lynx_28g_probe(struct platform_device *pdev) dev_set_drvdata(dev, priv); + spin_lock_init(&priv->pcc_lock); INIT_DELAYED_WORK(&priv->cdr_check, lynx_28g_cdr_lock_check); queue_delayed_work(system_power_efficient_wq, &priv->cdr_check, @@ -604,6 +616,14 @@ static int lynx_28g_probe(struct platform_device *pdev) return PTR_ERR_OR_ZERO(provider); } +static void lynx_28g_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct lynx_28g_priv *priv = dev_get_drvdata(dev); + + cancel_delayed_work_sync(&priv->cdr_check); +} + static const struct of_device_id lynx_28g_of_match_table[] = { { .compatible = "fsl,lynx-28g" }, { }, @@ -612,6 +632,7 @@ MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table); static struct platform_driver lynx_28g_driver = { .probe = lynx_28g_probe, + .remove_new = lynx_28g_remove, .driver = { .name = "lynx-28g", .of_match_table = lynx_28g_of_match_table, diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index e9dc9638120a..e2f7519bef04 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1022,17 +1022,20 @@ static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev, static struct pinctrl *find_pinctrl(struct device *dev) { - struct pinctrl *p; + struct pinctrl *entry, *p = NULL; mutex_lock(&pinctrl_list_mutex); - list_for_each_entry(p, &pinctrl_list, node) - if (p->dev == dev) { - mutex_unlock(&pinctrl_list_mutex); - return p; + + list_for_each_entry(entry, &pinctrl_list, node) { + if (entry->dev == dev) { + p = entry; + kref_get(&p->users); + break; } + } mutex_unlock(&pinctrl_list_mutex); - return NULL; + return p; } static void pinctrl_free(struct pinctrl *p, bool inlist); @@ -1140,7 +1143,6 @@ struct pinctrl *pinctrl_get(struct device *dev) p = find_pinctrl(dev); if (p) { dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n"); - kref_get(&p->users); return p; } diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c index 2d1c1652cfd9..8a9961ac8712 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c +++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c @@ -1062,13 +1062,13 @@ static int wpcm450_gpio_register(struct platform_device *pdev, if (ret < 0) return ret; - gpio = &pctrl->gpio_bank[reg]; - gpio->pctrl = pctrl; - if (reg >= WPCM450_NUM_BANKS) return dev_err_probe(dev, -EINVAL, "GPIO index %d 
out of range!\n", reg); + gpio = &pctrl->gpio_bank[reg]; + gpio->pctrl = pctrl; + bank = &wpcm450_banks[reg]; gpio->bank = bank; diff --git a/drivers/pinctrl/pinctrl-lantiq.h b/drivers/pinctrl/pinctrl-lantiq.h index efb25fc34f14..1ac49ae638de 100644 --- a/drivers/pinctrl/pinctrl-lantiq.h +++ b/drivers/pinctrl/pinctrl-lantiq.h @@ -198,5 +198,4 @@ enum ltq_pin { extern int ltq_pinctrl_register(struct platform_device *pdev, struct ltq_pinmux_info *info); -extern int ltq_pinctrl_unregister(struct platform_device *pdev); #endif /* __PINCTRL_LANTIQ_H */ diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig index 77730dc548ed..c8d519ca53eb 100644 --- a/drivers/pinctrl/renesas/Kconfig +++ b/drivers/pinctrl/renesas/Kconfig @@ -235,6 +235,7 @@ config PINCTRL_RZN1 depends on OF depends on ARCH_RZN1 || COMPILE_TEST select GENERIC_PINCONF + select PINMUX help This selects pinctrl driver for Renesas RZ/N1 devices. diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c index 4bfe3aa57f8a..cf42e204cbf0 100644 --- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c +++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c @@ -31,6 +31,8 @@ #define JH7110_AON_NGPIO 4 #define JH7110_AON_GC_BASE 64 +#define JH7110_AON_REGS_NUM 37 + /* registers */ #define JH7110_AON_DOEN 0x0 #define JH7110_AON_DOUT 0x4 @@ -145,6 +147,7 @@ static const struct jh7110_pinctrl_soc_info jh7110_aon_pinctrl_info = { .gpi_mask = GENMASK(3, 0), .gpioin_reg_base = JH7110_AON_GPIOIN, .irq_reg = &jh7110_aon_irq_reg, + .nsaved_regs = JH7110_AON_REGS_NUM, .jh7110_set_one_pin_mux = jh7110_aon_set_one_pin_mux, .jh7110_get_padcfg_base = jh7110_aon_get_padcfg_base, .jh7110_gpio_irq_handler = jh7110_aon_irq_handler, @@ -165,6 +168,7 @@ static struct platform_driver jh7110_aon_pinctrl_driver = { .driver = { .name = "starfive-jh7110-aon-pinctrl", .of_match_table = jh7110_aon_pinctrl_of_match, + .pm = pm_sleep_ptr(&jh7110_pinctrl_pm_ops), }, }; module_platform_driver(jh7110_aon_pinctrl_driver); diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c index 20c85db1cd3a..03c2ad808d61 100644 --- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c +++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c @@ -31,6 +31,8 @@ #define JH7110_SYS_NGPIO 64 #define JH7110_SYS_GC_BASE 0 +#define JH7110_SYS_REGS_NUM 174 + /* registers */ #define JH7110_SYS_DOEN 0x000 #define JH7110_SYS_DOUT 0x040 @@ -417,6 +419,7 @@ static const struct jh7110_pinctrl_soc_info jh7110_sys_pinctrl_info = { .gpi_mask = GENMASK(6, 0), .gpioin_reg_base = JH7110_SYS_GPIOIN, .irq_reg = &jh7110_sys_irq_reg, + .nsaved_regs = JH7110_SYS_REGS_NUM, .jh7110_set_one_pin_mux = jh7110_sys_set_one_pin_mux, .jh7110_get_padcfg_base = jh7110_sys_get_padcfg_base, .jh7110_gpio_irq_handler = jh7110_sys_irq_handler, @@ -437,6 +440,7 @@ static struct platform_driver jh7110_sys_pinctrl_driver = { .driver = { .name = "starfive-jh7110-sys-pinctrl", .of_match_table = jh7110_sys_pinctrl_of_match, + .pm = pm_sleep_ptr(&jh7110_pinctrl_pm_ops), }, }; module_platform_driver(jh7110_sys_pinctrl_driver); diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c index b9081805c8f6..640f827a9b2c 100644 --- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c +++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c @@ -872,6 +872,13 @@ int jh7110_pinctrl_probe(struct platform_device 
*pdev) if (!sfp) return -ENOMEM; +#if IS_ENABLED(CONFIG_PM_SLEEP) + sfp->saved_regs = devm_kcalloc(dev, info->nsaved_regs, + sizeof(*sfp->saved_regs), GFP_KERNEL); + if (!sfp->saved_regs) + return -ENOMEM; +#endif + sfp->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(sfp->base)) return PTR_ERR(sfp->base); @@ -967,14 +974,45 @@ int jh7110_pinctrl_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "could not register gpiochip\n"); - irq_domain_set_pm_device(sfp->gc.irq.domain, dev); - dev_info(dev, "StarFive GPIO chip registered %d GPIOs\n", sfp->gc.ngpio); return pinctrl_enable(sfp->pctl); } EXPORT_SYMBOL_GPL(jh7110_pinctrl_probe); +static int jh7110_pinctrl_suspend(struct device *dev) +{ + struct jh7110_pinctrl *sfp = dev_get_drvdata(dev); + unsigned long flags; + unsigned int i; + + raw_spin_lock_irqsave(&sfp->lock, flags); + for (i = 0 ; i < sfp->info->nsaved_regs ; i++) + sfp->saved_regs[i] = readl_relaxed(sfp->base + 4 * i); + + raw_spin_unlock_irqrestore(&sfp->lock, flags); + return 0; +} + +static int jh7110_pinctrl_resume(struct device *dev) +{ + struct jh7110_pinctrl *sfp = dev_get_drvdata(dev); + unsigned long flags; + unsigned int i; + + raw_spin_lock_irqsave(&sfp->lock, flags); + for (i = 0 ; i < sfp->info->nsaved_regs ; i++) + writel_relaxed(sfp->saved_regs[i], sfp->base + 4 * i); + + raw_spin_unlock_irqrestore(&sfp->lock, flags); + return 0; +} + +const struct dev_pm_ops jh7110_pinctrl_pm_ops = { + LATE_SYSTEM_SLEEP_PM_OPS(jh7110_pinctrl_suspend, jh7110_pinctrl_resume) +}; +EXPORT_SYMBOL_GPL(jh7110_pinctrl_pm_ops); + MODULE_DESCRIPTION("Pinctrl driver for the StarFive JH7110 SoC"); MODULE_AUTHOR("Emil Renner Berthing <kernel@esmil.dk>"); MODULE_AUTHOR("Jianlong Huang <jianlong.huang@starfivetech.com>"); diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h index 3f20b7ff96dd..a33d0d4e1382 100644 --- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h +++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h @@ -21,6 +21,7 @@ struct jh7110_pinctrl { /* register read/write mutex */ struct mutex mutex; const struct jh7110_pinctrl_soc_info *info; + u32 *saved_regs; }; struct jh7110_gpio_irq_reg { @@ -50,6 +51,8 @@ struct jh7110_pinctrl_soc_info { const struct jh7110_gpio_irq_reg *irq_reg; + unsigned int nsaved_regs; + /* generic pinmux */ int (*jh7110_set_one_pin_mux)(struct jh7110_pinctrl *sfp, unsigned int pin, @@ -66,5 +69,6 @@ void jh7110_set_gpiomux(struct jh7110_pinctrl *sfp, unsigned int pin, unsigned int din, u32 dout, u32 doen); int jh7110_pinctrl_probe(struct platform_device *pdev); struct jh7110_pinctrl *jh7110_from_irq_desc(struct irq_desc *desc); +extern const struct dev_pm_ops jh7110_pinctrl_pm_ops; #endif /* __PINCTRL_STARFIVE_JH7110_H__ */ diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index cfeda5b3e048..734c71ef005b 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -96,7 +96,6 @@ static const struct cfg_param { {"nvidia,slew-rate-falling", TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING}, {"nvidia,slew-rate-rising", TEGRA_PINCONF_PARAM_SLEW_RATE_RISING}, {"nvidia,drive-type", TEGRA_PINCONF_PARAM_DRIVE_TYPE}, - {"nvidia,function", TEGRA_PINCONF_PARAM_FUNCTION}, }; static int tegra_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, @@ -471,12 +470,6 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx, *bit = g->drvtype_bit; *width = 2; break; - case TEGRA_PINCONF_PARAM_FUNCTION: - 
*bank = g->mux_bank; - *reg = g->mux_reg; - *bit = g->mux_bit; - *width = 2; - break; default: dev_err(pmx->dev, "Invalid config param %04x\n", param); return -ENOTSUPP; @@ -640,16 +633,8 @@ static void tegra_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, val >>= bit; val &= (1 << width) - 1; - if (cfg_params[i].param == TEGRA_PINCONF_PARAM_FUNCTION) { - u8 idx = pmx->soc->groups[group].funcs[val]; - - seq_printf(s, "\n\t%s=%s", - strip_prefix(cfg_params[i].property), - pmx->functions[idx].name); - } else { - seq_printf(s, "\n\t%s=%u", - strip_prefix(cfg_params[i].property), val); - } + seq_printf(s, "\n\t%s=%u", + strip_prefix(cfg_params[i].property), val); } } diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h index e728efeaa4de..b3289bdf727d 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.h +++ b/drivers/pinctrl/tegra/pinctrl-tegra.h @@ -54,8 +54,6 @@ enum tegra_pinconf_param { TEGRA_PINCONF_PARAM_SLEW_RATE_RISING, /* argument: Integer, range is HW-dependant */ TEGRA_PINCONF_PARAM_DRIVE_TYPE, - /* argument: pinmux settings */ - TEGRA_PINCONF_PARAM_FUNCTION, }; enum tegra_pinconf_pull { diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index f3696a54a2bd..fd38d8c8371e 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -53,7 +53,7 @@ struct mlxbf_tmfifo; /** - * mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring + * struct mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring * @va: virtual address of the ring * @dma: dma address of the ring * @vq: pointer to the virtio virtqueue @@ -113,12 +113,13 @@ enum { }; /** - * mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device + * struct mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device * @vdev: virtio device, in which the vdev.id.device field has the * VIRTIO_ID_xxx id to distinguish the virtual device. * @status: status of the device * @features: supported features of the device * @vrings: array of tmfifo vrings of this device + * @config: non-anonymous union for cons and net * @config.cons: virtual console config - * select if vdev.id.device is VIRTIO_ID_CONSOLE * @config.net: virtual network config - @@ -138,7 +139,7 @@ struct mlxbf_tmfifo_vdev { }; /** - * mlxbf_tmfifo_irq_info - Structure of the interrupt information + * struct mlxbf_tmfifo_irq_info - Structure of the interrupt information * @fifo: pointer to the tmfifo structure * @irq: interrupt number * @index: index into the interrupt array @@ -150,7 +151,7 @@ struct mlxbf_tmfifo_irq_info { }; /** - * mlxbf_tmfifo_io - Structure of the TmFifo IO resource (for both rx & tx) + * struct mlxbf_tmfifo_io - Structure of the TmFifo IO resource (for both rx & tx) * @ctl: control register offset (TMFIFO_RX_CTL / TMFIFO_TX_CTL) * @sts: status register offset (TMFIFO_RX_STS / TMFIFO_TX_STS) * @data: data register offset (TMFIFO_RX_DATA / TMFIFO_TX_DATA) @@ -162,7 +163,7 @@ struct mlxbf_tmfifo_io { }; /** - * mlxbf_tmfifo - Structure of the TmFifo + * struct mlxbf_tmfifo - Structure of the TmFifo * @vdev: array of the virtual devices running over the TmFifo * @lock: lock to protect the TmFifo access * @res0: mapped resource block 0 @@ -198,7 +199,7 @@ struct mlxbf_tmfifo { }; /** - * mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header + * struct mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header * @type: message type * @len: payload length in network byte order. 
Messages sent into the FIFO * will be read by the other side as data stream in the same byte order. @@ -208,6 +209,7 @@ struct mlxbf_tmfifo { struct mlxbf_tmfifo_msg_hdr { u8 type; __be16 len; + /* private: */ u8 unused[5]; } __packed __aligned(sizeof(u64)); diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c index 8c4f9e12f018..5798b49ddaba 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c +++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c @@ -659,7 +659,7 @@ static int hp_init_bios_package_attribute(enum hp_wmi_data_type attr_type, const char *guid, int min_elements, int instance_id) { - struct kobject *attr_name_kobj; + struct kobject *attr_name_kobj, *duplicate; union acpi_object *elements; struct kset *temp_kset; @@ -704,8 +704,11 @@ static int hp_init_bios_package_attribute(enum hp_wmi_data_type attr_type, } /* All duplicate attributes found are ignored */ - if (kset_find_obj(temp_kset, str_value)) { + duplicate = kset_find_obj(temp_kset, str_value); + if (duplicate) { pr_debug("Duplicate attribute name found - %s\n", str_value); + /* kset_find_obj() returns a reference */ + kobject_put(duplicate); goto pack_attr_exit; } @@ -768,7 +771,7 @@ static int hp_init_bios_buffer_attribute(enum hp_wmi_data_type attr_type, const char *guid, int min_elements, int instance_id) { - struct kobject *attr_name_kobj; + struct kobject *attr_name_kobj, *duplicate; struct kset *temp_kset; char str[MAX_BUFF_SIZE]; @@ -794,8 +797,11 @@ static int hp_init_bios_buffer_attribute(enum hp_wmi_data_type attr_type, temp_kset = bioscfg_drv.main_dir_kset; /* All duplicate attributes found are ignored */ - if (kset_find_obj(temp_kset, str)) { + duplicate = kset_find_obj(temp_kset, str); + if (duplicate) { pr_debug("Duplicate attribute name found - %s\n", str); + /* kset_find_obj() returns a reference */ + kobject_put(duplicate); goto buff_attr_exit; } diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c index e76e5458db35..8ebb7be52ee7 100644 --- a/drivers/platform/x86/hp/hp-wmi.c +++ b/drivers/platform/x86/hp/hp-wmi.c @@ -1548,7 +1548,13 @@ static const struct dev_pm_ops hp_wmi_pm_ops = { .restore = hp_wmi_resume_handler, }; -static struct platform_driver hp_wmi_driver = { +/* + * hp_wmi_bios_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound at + * runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. 
+ */ +static struct platform_driver hp_wmi_driver __refdata = { .driver = { .name = "hp-wmi", .pm = &hp_wmi_pm_ops, diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index 1061eb7ec399..43c864add778 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -331,14 +331,15 @@ int do_core_test(int cpu, struct device *dev) switch (test->test_num) { case IFS_TYPE_SAF: if (!ifsd->loaded) - return -EPERM; - ifs_test_core(cpu, dev); + ret = -EPERM; + else + ifs_test_core(cpu, dev); break; case IFS_TYPE_ARRAY_BIST: ifs_array_test_core(cpu, dev); break; default: - return -EINVAL; + ret = -EINVAL; } out: cpus_read_unlock(); diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c index 79346881cadb..aee869769843 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/think-lmi.c @@ -1248,6 +1248,24 @@ static void tlmi_release_attr(void) kset_unregister(tlmi_priv.authentication_kset); } +static int tlmi_validate_setting_name(struct kset *attribute_kset, char *name) +{ + struct kobject *duplicate; + + if (!strcmp(name, "Reserved")) + return -EINVAL; + + duplicate = kset_find_obj(attribute_kset, name); + if (duplicate) { + pr_debug("Duplicate attribute name found - %s\n", name); + /* kset_find_obj() returns a reference */ + kobject_put(duplicate); + return -EBUSY; + } + + return 0; +} + static int tlmi_sysfs_init(void) { int i, ret; @@ -1276,10 +1294,8 @@ static int tlmi_sysfs_init(void) continue; /* check for duplicate or reserved values */ - if (kset_find_obj(tlmi_priv.attribute_kset, tlmi_priv.setting[i]->display_name) || - !strcmp(tlmi_priv.setting[i]->display_name, "Reserved")) { - pr_debug("duplicate or reserved attribute name found - %s\n", - tlmi_priv.setting[i]->display_name); + if (tlmi_validate_setting_name(tlmi_priv.attribute_kset, + tlmi_priv.setting[i]->display_name) < 0) { kfree(tlmi_priv.setting[i]->possible_values); kfree(tlmi_priv.setting[i]); tlmi_priv.setting[i] = NULL; diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index f9301a9382e7..0c6733772698 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -42,6 +42,21 @@ static const struct ts_dmi_data archos_101_cesium_educ_data = { .properties = archos_101_cesium_educ_props, }; +static const struct property_entry bush_bush_windows_tablet_props[] = { + PROPERTY_ENTRY_U32("touchscreen-size-x", 1850), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-bush-bush-windows-tablet.fw"), + { } +}; + +static const struct ts_dmi_data bush_bush_windows_tablet_data = { + .acpi_name = "MSSL1680:00", + .properties = bush_bush_windows_tablet_props, +}; + static const struct property_entry chuwi_hi8_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1665), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), @@ -756,6 +771,21 @@ static const struct ts_dmi_data pipo_w11_data = { .properties = pipo_w11_props, }; +static const struct property_entry positivo_c4128b_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 4), + PROPERTY_ENTRY_U32("touchscreen-min-y", 13), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1915), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1269), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-positivo-c4128b.fw"), + 
PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data positivo_c4128b_data = { + .acpi_name = "MSSL1680:00", + .properties = positivo_c4128b_props, +}; + static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 32), PROPERTY_ENTRY_U32("touchscreen-min-y", 16), @@ -1071,6 +1101,13 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* Bush Windows tablet */ + .driver_data = (void *)&bush_bush_windows_tablet_data, + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "Bush Windows tablet"), + }, + }, + { /* Chuwi Hi8 */ .driver_data = (void *)&chuwi_hi8_data, .matches = { @@ -1481,6 +1518,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* Positivo C4128B */ + .driver_data = (void *)&positivo_c4128b_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"), + DMI_MATCH(DMI_PRODUCT_NAME, "C4128B-1"), + }, + }, + { /* Point of View mobii wintab p800w (v2.0) */ .driver_data = (void *)&pov_mobii_wintab_p800w_v20_data, .matches = { diff --git a/drivers/pmdomain/imx/scu-pd.c b/drivers/pmdomain/imx/scu-pd.c index 2f693b67ddb4..891c1d925a9d 100644 --- a/drivers/pmdomain/imx/scu-pd.c +++ b/drivers/pmdomain/imx/scu-pd.c @@ -150,7 +150,8 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { { "mclk-out-1", IMX_SC_R_MCLK_OUT_1, 1, false, 0 }, { "dma0-ch", IMX_SC_R_DMA_0_CH0, 32, true, 0 }, { "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 }, - { "dma2-ch", IMX_SC_R_DMA_2_CH0, 32, true, 0 }, + { "dma2-ch-0", IMX_SC_R_DMA_2_CH0, 5, true, 0 }, + { "dma2-ch-1", IMX_SC_R_DMA_2_CH5, 27, true, 0 }, { "dma3-ch", IMX_SC_R_DMA_3_CH0, 32, true, 0 }, { "asrc0", IMX_SC_R_ASRC_0, 1, false, 0 }, { "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 }, diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c index de77df97b3a4..ec163d1bcd18 100644 --- a/drivers/power/supply/qcom_battmgr.c +++ b/drivers/power/supply/qcom_battmgr.c @@ -105,7 +105,7 @@ struct qcom_battmgr_property_request { struct qcom_battmgr_update_request { struct pmic_glink_hdr hdr; - u32 battery_id; + __le32 battery_id; }; struct qcom_battmgr_charge_time_request { @@ -1282,9 +1282,9 @@ static void qcom_battmgr_enable_worker(struct work_struct *work) { struct qcom_battmgr *battmgr = container_of(work, struct qcom_battmgr, enable_work); struct qcom_battmgr_enable_request req = { - .hdr.owner = PMIC_GLINK_OWNER_BATTMGR, - .hdr.type = PMIC_GLINK_NOTIFY, - .hdr.opcode = BATTMGR_REQUEST_NOTIFICATION, + .hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR), + .hdr.type = cpu_to_le32(PMIC_GLINK_NOTIFY), + .hdr.opcode = cpu_to_le32(BATTMGR_REQUEST_NOTIFICATION), }; int ret; diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 20a974ced8d6..a7a6947ab4bc 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -3998,7 +3998,6 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev) return 0; out: - ptp_ocp_dev_release(&bp->dev); put_device(&bp->dev); return err; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index d8e1caaf207e..3137e40fcd3e 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -5542,6 +5542,8 @@ regulator_register(struct device *dev, goto rinse; } device_initialize(&rdev->dev); + dev_set_drvdata(&rdev->dev, rdev); + rdev->dev.class = &regulator_class; spin_lock_init(&rdev->err_lock); /* @@ -5603,11 +5605,9 @@ regulator_register(struct device *dev, rdev->supply_name = regulator_desc->supply_name; /* register with sysfs */ - 
rdev->dev.class = &regulator_class; rdev->dev.parent = config->dev; dev_set_name(&rdev->dev, "regulator.%lu", (unsigned long) atomic_inc_return(&regulator_no)); - dev_set_drvdata(&rdev->dev, rdev); /* set regulator constraints */ if (init_data) @@ -5724,15 +5724,11 @@ wash: mutex_lock(&regulator_list_mutex); regulator_ena_gpio_free(rdev); mutex_unlock(&regulator_list_mutex); - put_device(&rdev->dev); - rdev = NULL; clean: if (dangling_of_gpiod) gpiod_put(config->ena_gpiod); - if (rdev && rdev->dev.of_node) - of_node_put(rdev->dev.of_node); - kfree(rdev); kfree(config); + put_device(&rdev->dev); rinse: if (dangling_cfg_gpiod) gpiod_put(cfg->ena_gpiod); diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c index b9cda2210c33..65fbd95f1dbb 100644 --- a/drivers/regulator/mt6358-regulator.c +++ b/drivers/regulator/mt6358-regulator.c @@ -43,7 +43,7 @@ struct mt6358_regulator_info { .desc = { \ .name = #vreg, \ .of_match = of_match_ptr(match), \ - .ops = &mt6358_volt_range_ops, \ + .ops = &mt6358_buck_ops, \ .type = REGULATOR_VOLTAGE, \ .id = MT6358_ID_##vreg, \ .owner = THIS_MODULE, \ @@ -139,7 +139,7 @@ struct mt6358_regulator_info { .desc = { \ .name = #vreg, \ .of_match = of_match_ptr(match), \ - .ops = &mt6358_volt_range_ops, \ + .ops = &mt6358_buck_ops, \ .type = REGULATOR_VOLTAGE, \ .id = MT6366_ID_##vreg, \ .owner = THIS_MODULE, \ @@ -450,7 +450,7 @@ static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev) } } -static const struct regulator_ops mt6358_volt_range_ops = { +static const struct regulator_ops mt6358_buck_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .set_voltage_sel = regulator_set_voltage_sel_regmap, @@ -464,6 +464,18 @@ static const struct regulator_ops mt6358_volt_range_ops = { .get_mode = mt6358_regulator_get_mode, }; +static const struct regulator_ops mt6358_volt_range_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = mt6358_get_buck_voltage_sel, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .get_status = mt6358_get_status, +}; + static const struct regulator_ops mt6358_volt_table_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_iterate, diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 74760c1a163b..4902d45e929c 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -102,7 +102,7 @@ config CCWGROUP config ISM tristate "Support for ISM vPCI Adapter" - depends on PCI && SMC + depends on PCI default n help Select this option if you want to use the Internal Shared Memory diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index df782646e856..ab2f35bc294d 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -518,12 +518,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, if (port) { put_device(&port->dev); retval = -EEXIST; - goto err_out; + goto err_put; } port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); if (!port) - goto err_out; + goto err_put; rwlock_init(&port->unit_list_lock); INIT_LIST_HEAD(&port->unit_list); @@ -546,7 +546,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, if (dev_set_name(&port->dev, "0x%016llx", (unsigned 
long long)wwpn)) { kfree(port); - goto err_out; + goto err_put; } retval = -EINVAL; @@ -563,7 +563,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, return port; -err_out: +err_put: zfcp_ccw_adapter_put(adapter); +err_out: return ERR_PTR(retval); } diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 93c68931a593..22cef283b2b9 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -27,7 +27,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.6.0.56" +#define DRV_VERSION "1.6.0.57" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " @@ -237,6 +237,8 @@ struct fnic { unsigned int cq_count; struct mutex sgreset_mutex; + spinlock_t sgreset_lock; /* lock for sgreset */ + struct scsi_cmnd *sgreset_sc; struct dentry *fnic_stats_debugfs_host; struct dentry *fnic_stats_debugfs_file; struct dentry *fnic_reset_debugfs_file; diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h index f4c8769df312..5895ead20e14 100644 --- a/drivers/scsi/fnic/fnic_io.h +++ b/drivers/scsi/fnic/fnic_io.h @@ -52,6 +52,8 @@ struct fnic_io_req { unsigned long start_time; /* in jiffies */ struct completion *abts_done; /* completion for abts */ struct completion *dr_done; /* completion for device reset */ + unsigned int tag; + struct scsi_cmnd *sc; /* midlayer's cmd pointer */ }; enum fnic_port_speeds { diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 984bc5fc55e2..f27f9319e0b2 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -754,6 +754,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < FNIC_IO_LOCKS; i++) spin_lock_init(&fnic->io_req_lock[i]); + spin_lock_init(&fnic->sgreset_lock); + err = -ENOMEM; fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); if (!fnic->io_req_pool) diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 9761b2c9db48..416d81954819 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -1047,9 +1047,9 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, { u8 type; u8 hdr_status; - struct fcpio_tag tag; + struct fcpio_tag ftag; u32 id; - struct scsi_cmnd *sc; + struct scsi_cmnd *sc = NULL; struct fnic_io_req *io_req; struct fnic_stats *fnic_stats = &fnic->fnic_stats; struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats; @@ -1058,27 +1058,43 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned long flags; spinlock_t *io_lock; unsigned long start_time; + unsigned int tag; - fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); - fcpio_tag_id_dec(&tag, &id); + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag); + fcpio_tag_id_dec(&ftag, &id); - if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) { + tag = id & FNIC_TAG_MASK; + if (tag == fnic->fnic_max_tag_id) { + if (!(id & FNIC_TAG_DEV_RST)) { + shost_printk(KERN_ERR, fnic->lport->host, + "Tag out of range id 0x%x hdr status = %s\n", + id, fnic_fcpio_status_to_str(hdr_status)); + return; + } + } else if (tag > fnic->fnic_max_tag_id) { shost_printk(KERN_ERR, fnic->lport->host, - "Tag out of range tag %x hdr status = %s\n", - id, fnic_fcpio_status_to_str(hdr_status)); + "Tag out of range tag 0x%x hdr status = %s\n", + tag, fnic_fcpio_status_to_str(hdr_status)); return; } - sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); + if ((tag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) { + 
sc = fnic->sgreset_sc; + io_lock = &fnic->sgreset_lock; + } else { + sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); + io_lock = fnic_io_lock_hash(fnic, sc); + } + WARN_ON_ONCE(!sc); if (!sc) { atomic64_inc(&fnic_stats->io_stats.sc_null); shost_printk(KERN_ERR, fnic->lport->host, "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", - fnic_fcpio_status_to_str(hdr_status), id); + fnic_fcpio_status_to_str(hdr_status), tag); return; } - io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); io_req = fnic_priv(sc)->io_req; WARN_ON_ONCE(!io_req); @@ -1089,7 +1105,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, shost_printk(KERN_ERR, fnic->lport->host, "itmf_cmpl io_req is null - " "hdr status = %s tag = 0x%x sc 0x%p\n", - fnic_fcpio_status_to_str(hdr_status), id, sc); + fnic_fcpio_status_to_str(hdr_status), tag, sc); return; } start_time = io_req->start_time; @@ -1938,6 +1954,10 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, struct scsi_lun fc_lun; int ret = 0; unsigned long intr_flags; + unsigned int tag = scsi_cmd_to_rq(sc)->tag; + + if (tag == SCSI_NO_TAG) + tag = io_req->tag; spin_lock_irqsave(host->host_lock, intr_flags); if (unlikely(fnic_chk_state_flags_locked(fnic, @@ -1964,7 +1984,8 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, /* fill in the lun info */ int_to_scsilun(sc->device->lun, &fc_lun); - fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST, + tag |= FNIC_TAG_DEV_RST; + fnic_queue_wq_copy_desc_itmf(wq, tag, 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG, fc_lun.scsi_lun, io_req->port_id, fnic->config.ra_tov, fnic->config.ed_tov); @@ -2146,8 +2167,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, .ret = SUCCESS, }; - if (new_sc) - iter_data.lr_sc = lr_sc; + iter_data.lr_sc = lr_sc; scsi_host_busy_iter(fnic->lport->host, fnic_pending_aborts_iter, &iter_data); @@ -2230,8 +2250,14 @@ int fnic_device_reset(struct scsi_cmnd *sc) mutex_lock(&fnic->sgreset_mutex); tag = fnic->fnic_max_tag_id; new_sc = 1; - } - io_lock = fnic_io_lock_hash(fnic, sc); + fnic->sgreset_sc = sc; + io_lock = &fnic->sgreset_lock; + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "fcid: 0x%x lun: 0x%llx flags: 0x%x tag: 0x%x Issuing sgreset\n", + rport->port_id, sc->device->lun, fnic_priv(sc)->flags, tag); + } else + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); io_req = fnic_priv(sc)->io_req; @@ -2247,6 +2273,8 @@ int fnic_device_reset(struct scsi_cmnd *sc) } memset(io_req, 0, sizeof(*io_req)); io_req->port_id = rport->port_id; + io_req->tag = tag; + io_req->sc = sc; fnic_priv(sc)->io_req = io_req; } io_req->dr_done = &tm_done; @@ -2400,8 +2428,10 @@ fnic_device_reset_end: (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), fnic_flags_and_state(sc)); - if (new_sc) + if (new_sc) { + fnic->sgreset_sc = NULL; mutex_unlock(&fnic->sgreset_mutex); + } FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Returning from device reset %s\n", diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 902655d75947..44680f65ea14 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1627,12 +1627,13 @@ int scsi_rescan_device(struct scsi_device *sdev) device_lock(dev); /* - * Bail out if the device is not running. Otherwise, the rescan may - * block waiting for commands to be executed, with us holding the - * device lock. This can result in a potential deadlock in the power - * management core code when system resume is on-going. 
+ * Bail out if the device or its queue are not running. Otherwise, + * the rescan may block waiting for commands to be executed, with us + * holding the device lock. This can result in a potential deadlock + * in the power management core code when system resume is on-going. */ - if (sdev->sdev_state != SDEV_RUNNING) { + if (sdev->sdev_state != SDEV_RUNNING || + blk_queue_pm_only(sdev->request_queue)) { ret = -EWOULDBLOCK; goto unlock; } diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 5a75ab64d1ed..12040ce116a5 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -333,6 +333,7 @@ if RISCV config ARCH_R9A07G043 bool "RISC-V Platform support for RZ/Five" + depends on NONPORTABLE select ARCH_RZG2L select AX45MP_L2_CACHE if RISCV_DMA_NONCOHERENT select DMA_GLOBAL_POOL diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile index c3d3ab3262d3..657f5888a77b 100644 --- a/drivers/soundwire/Makefile +++ b/drivers/soundwire/Makefile @@ -15,6 +15,10 @@ ifdef CONFIG_DEBUG_FS soundwire-bus-y += debugfs.o endif +ifdef CONFIG_IRQ_DOMAIN +soundwire-bus-y += irq.o +endif + #AMD driver soundwire-amd-y := amd_manager.o obj-$(CONFIG_SOUNDWIRE_AMD) += soundwire-amd.o diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 1720031f35a3..0e7bc3c40f9d 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -3,13 +3,13 @@ #include <linux/acpi.h> #include <linux/delay.h> -#include <linux/irq.h> #include <linux/mod_devicetable.h> #include <linux/pm_runtime.h> #include <linux/soundwire/sdw_registers.h> #include <linux/soundwire/sdw.h> #include <linux/soundwire/sdw_type.h> #include "bus.h" +#include "irq.h" #include "sysfs_local.h" static DEFINE_IDA(sdw_bus_ida); @@ -25,23 +25,6 @@ static int sdw_get_id(struct sdw_bus *bus) return 0; } -static int sdw_irq_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - struct sdw_bus *bus = h->host_data; - - irq_set_chip_data(virq, bus); - irq_set_chip(virq, &bus->irq_chip); - irq_set_nested_thread(virq, 1); - irq_set_noprobe(virq); - - return 0; -} - -static const struct irq_domain_ops sdw_domain_ops = { - .map = sdw_irq_map, -}; - /** * sdw_bus_master_add() - add a bus Master instance * @bus: bus instance @@ -168,13 +151,9 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, bus->params.curr_bank = SDW_BANK0; bus->params.next_bank = SDW_BANK1; - bus->irq_chip.name = dev_name(bus->dev); - bus->domain = irq_domain_create_linear(fwnode, SDW_MAX_DEVICES, - &sdw_domain_ops, bus); - if (!bus->domain) { - dev_err(bus->dev, "Failed to add IRQ domain\n"); - return -EINVAL; - } + ret = sdw_irq_create(bus, fwnode); + if (ret) + return ret; return 0; } @@ -213,7 +192,7 @@ void sdw_bus_master_delete(struct sdw_bus *bus) { device_for_each_child(bus->dev, NULL, sdw_delete_slave); - irq_domain_remove(bus->domain); + sdw_irq_delete(bus); sdw_master_device_del(bus); diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c index fafbc284e82d..9fa93bb923d7 100644 --- a/drivers/soundwire/bus_type.c +++ b/drivers/soundwire/bus_type.c @@ -7,6 +7,7 @@ #include <linux/soundwire/sdw.h> #include <linux/soundwire/sdw_type.h> #include "bus.h" +#include "irq.h" #include "sysfs_local.h" /** @@ -122,11 +123,8 @@ static int sdw_drv_probe(struct device *dev) if (drv->ops && drv->ops->read_prop) drv->ops->read_prop(slave); - if (slave->prop.use_domain_irq) { - slave->irq = irq_create_mapping(slave->bus->domain, slave->dev_num); - if (!slave->irq) - dev_warn(dev, 
"Failed to map IRQ\n"); - } + if (slave->prop.use_domain_irq) + sdw_irq_create_mapping(slave); /* init the sysfs as we have properties now */ ret = sdw_slave_sysfs_init(slave); @@ -176,8 +174,7 @@ static int sdw_drv_remove(struct device *dev) slave->probed = false; if (slave->prop.use_domain_irq) - irq_dispose_mapping(irq_find_mapping(slave->bus->domain, - slave->dev_num)); + sdw_irq_dispose_mapping(slave); mutex_unlock(&slave->sdw_dev_lock); diff --git a/drivers/soundwire/irq.c b/drivers/soundwire/irq.c new file mode 100644 index 000000000000..0c08cebb1235 --- /dev/null +++ b/drivers/soundwire/irq.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2023 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. + +#include <linux/device.h> +#include <linux/fwnode.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/soundwire/sdw.h> +#include "irq.h" + +static int sdw_irq_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct sdw_bus *bus = h->host_data; + + irq_set_chip_data(virq, bus); + irq_set_chip(virq, &bus->irq_chip); + irq_set_nested_thread(virq, 1); + irq_set_noprobe(virq); + + return 0; +} + +static const struct irq_domain_ops sdw_domain_ops = { + .map = sdw_irq_map, +}; + +int sdw_irq_create(struct sdw_bus *bus, + struct fwnode_handle *fwnode) +{ + bus->irq_chip.name = dev_name(bus->dev); + + bus->domain = irq_domain_create_linear(fwnode, SDW_MAX_DEVICES, + &sdw_domain_ops, bus); + if (!bus->domain) { + dev_err(bus->dev, "Failed to add IRQ domain\n"); + return -EINVAL; + } + + return 0; +} + +void sdw_irq_delete(struct sdw_bus *bus) +{ + irq_domain_remove(bus->domain); +} + +void sdw_irq_create_mapping(struct sdw_slave *slave) +{ + slave->irq = irq_create_mapping(slave->bus->domain, slave->dev_num); + if (!slave->irq) + dev_warn(&slave->dev, "Failed to map IRQ\n"); +} + +void sdw_irq_dispose_mapping(struct sdw_slave *slave) +{ + irq_dispose_mapping(irq_find_mapping(slave->bus->domain, slave->dev_num)); +} diff --git a/drivers/soundwire/irq.h b/drivers/soundwire/irq.h new file mode 100644 index 000000000000..58a58046d92b --- /dev/null +++ b/drivers/soundwire/irq.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. 
+ */ + +#ifndef __SDW_IRQ_H +#define __SDW_IRQ_H + +#include <linux/soundwire/sdw.h> +#include <linux/fwnode.h> + +#if IS_ENABLED(CONFIG_IRQ_DOMAIN) + +int sdw_irq_create(struct sdw_bus *bus, + struct fwnode_handle *fwnode); +void sdw_irq_delete(struct sdw_bus *bus); +void sdw_irq_create_mapping(struct sdw_slave *slave); +void sdw_irq_dispose_mapping(struct sdw_slave *slave); + +#else /* CONFIG_IRQ_DOMAIN */ + +static inline int sdw_irq_create(struct sdw_bus *bus, + struct fwnode_handle *fwnode) +{ + return 0; +} + +static inline void sdw_irq_delete(struct sdw_bus *bus) +{ +} + +static inline void sdw_irq_create_mapping(struct sdw_slave *slave) +{ +} + +static inline void sdw_irq_dispose_mapping(struct sdw_slave *slave) +{ +} + +#endif /* CONFIG_IRQ_DOMAIN */ + +#endif /* __SDW_IRQ_H */ diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig index 5d8917160d41..75c985da75b5 100644 --- a/drivers/staging/media/atomisp/Kconfig +++ b/drivers/staging/media/atomisp/Kconfig @@ -12,12 +12,12 @@ menuconfig INTEL_ATOMISP config VIDEO_ATOMISP tristate "Intel Atom Image Signal Processor Driver" depends on VIDEO_DEV && INTEL_ATOMISP + depends on IPU_BRIDGE depends on MEDIA_PCI_SUPPORT depends on PMIC_OPREGION depends on I2C select V4L2_FWNODE select IOSF_MBI - select IPU_BRIDGE select VIDEOBUF2_VMALLOC select VIDEO_V4L2_SUBDEV_API help diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c index e98b3010520e..94171e62dee9 100644 --- a/drivers/staging/media/tegra-video/vi.c +++ b/drivers/staging/media/tegra-video/vi.c @@ -1455,17 +1455,18 @@ static int __maybe_unused vi_runtime_suspend(struct device *dev) } /* - * Graph Management + * Find the entity matching a given fwnode in an v4l2_async_notifier list */ static struct tegra_vi_graph_entity * -tegra_vi_graph_find_entity(struct tegra_vi_channel *chan, +tegra_vi_graph_find_entity(struct list_head *list, const struct fwnode_handle *fwnode) { struct tegra_vi_graph_entity *entity; struct v4l2_async_connection *asd; - list_for_each_entry(asd, &chan->notifier.done_list, asc_entry) { + list_for_each_entry(asd, list, asc_entry) { entity = to_tegra_vi_graph_entity(asd); + if (entity->asd.match.fwnode == fwnode) return entity; } @@ -1532,7 +1533,8 @@ static int tegra_vi_graph_build(struct tegra_vi_channel *chan, } /* find the remote entity from notifier list */ - ent = tegra_vi_graph_find_entity(chan, link.remote_node); + ent = tegra_vi_graph_find_entity(&chan->notifier.done_list, + link.remote_node); if (!ent) { dev_err(vi->dev, "no entity found for %pOF\n", to_of_node(link.remote_node)); @@ -1664,7 +1666,8 @@ static int tegra_vi_graph_notify_bound(struct v4l2_async_notifier *notifier, * Locate the entity corresponding to the bound subdev and store the * subdev pointer. 
*/ - entity = tegra_vi_graph_find_entity(chan, subdev->fwnode); + entity = tegra_vi_graph_find_entity(&chan->notifier.waiting_list, + subdev->fwnode); if (!entity) { dev_err(vi->dev, "no entity for subdev %s\n", subdev->name); return -EINVAL; @@ -1713,7 +1716,8 @@ static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan, /* skip entities that are already processed */ if (device_match_fwnode(vi->dev, remote) || - tegra_vi_graph_find_entity(chan, remote)) { + tegra_vi_graph_find_entity(&chan->notifier.waiting_list, + remote)) { fwnode_handle_put(remote); continue; } diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index b7ac60f4a219..b6523d4b9259 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -843,7 +843,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) EXPORT_SYMBOL(target_to_linux_sector); struct devices_idr_iter { - struct config_item *prev_item; int (*fn)(struct se_device *dev, void *data); void *data; }; @@ -853,11 +852,9 @@ static int target_devices_idr_iter(int id, void *p, void *data) { struct devices_idr_iter *iter = data; struct se_device *dev = p; + struct config_item *item; int ret; - config_item_put(iter->prev_item); - iter->prev_item = NULL; - /* * We add the device early to the idr, so it can be used * by backend modules during configuration. We do not want @@ -867,12 +864,13 @@ static int target_devices_idr_iter(int id, void *p, void *data) if (!target_dev_configured(dev)) return 0; - iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item); - if (!iter->prev_item) + item = config_item_get_unless_zero(&dev->dev_group.cg_item); + if (!item) return 0; mutex_unlock(&device_mutex); ret = iter->fn(dev, iter->data); + config_item_put(item); mutex_lock(&device_mutex); return ret; @@ -895,7 +893,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data), mutex_lock(&device_mutex); ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter); mutex_unlock(&device_mutex); - config_item_put(iter.prev_item); return ret; } diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c index 372d64756ed6..3c15f6a9e91c 100644 --- a/drivers/tee/amdtee/core.c +++ b/drivers/tee/amdtee/core.c @@ -217,12 +217,12 @@ unlock: return rc; } +/* mutex must be held by caller */ static void destroy_session(struct kref *ref) { struct amdtee_session *sess = container_of(ref, struct amdtee_session, refcount); - mutex_lock(&session_list_mutex); list_del(&sess->list_node); mutex_unlock(&session_list_mutex); kfree(sess); @@ -272,7 +272,8 @@ int amdtee_open_session(struct tee_context *ctx, if (arg->ret != TEEC_SUCCESS) { pr_err("open_session failed %d\n", arg->ret); handle_unload_ta(ta_handle); - kref_put(&sess->refcount, destroy_session); + kref_put_mutex(&sess->refcount, destroy_session, + &session_list_mutex); goto out; } @@ -290,7 +291,8 @@ int amdtee_open_session(struct tee_context *ctx, pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS); handle_close_session(ta_handle, session_info); handle_unload_ta(ta_handle); - kref_put(&sess->refcount, destroy_session); + kref_put_mutex(&sess->refcount, destroy_session, + &session_list_mutex); rc = -ENOMEM; goto out; } @@ -331,7 +333,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session) handle_close_session(ta_handle, session_info); handle_unload_ta(ta_handle); - kref_put(&sess->refcount, destroy_session); + kref_put_mutex(&sess->refcount, destroy_session, &session_list_mutex); return 0; 
} diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index dbdcad8d73bf..d8b9c734abd3 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -41,6 +41,7 @@ #define PHY_PORT_CS1_LINK_STATE_SHIFT 26 #define ICM_TIMEOUT 5000 /* ms */ +#define ICM_RETRIES 3 #define ICM_APPROVE_TIMEOUT 10000 /* ms */ #define ICM_MAX_LINK 4 @@ -296,10 +297,9 @@ static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) static int icm_request(struct tb *tb, const void *request, size_t request_size, void *response, size_t response_size, size_t npackets, - unsigned int timeout_msec) + int retries, unsigned int timeout_msec) { struct icm *icm = tb_priv(tb); - int retries = 3; do { struct tb_cfg_request *req; @@ -410,7 +410,7 @@ static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) return -ENOMEM; ret = icm_request(tb, &request, sizeof(request), switches, - sizeof(*switches), npackets, ICM_TIMEOUT); + sizeof(*switches), npackets, ICM_RETRIES, ICM_TIMEOUT); if (ret) goto err_free; @@ -463,7 +463,7 @@ icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level, memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) return ret; @@ -488,7 +488,7 @@ static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw) memset(&reply, 0, sizeof(reply)); /* Use larger timeout as establishing tunnels can take some time */ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_APPROVE_TIMEOUT); + 1, ICM_RETRIES, ICM_APPROVE_TIMEOUT); if (ret) return ret; @@ -515,7 +515,7 @@ static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw) memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) return ret; @@ -543,7 +543,7 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) return ret; @@ -577,7 +577,7 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) return ret; @@ -1020,7 +1020,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, 20000); + 1, 10, 2000); if (ret) return ret; @@ -1053,7 +1053,7 @@ static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw) memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_APPROVE_TIMEOUT); + 1, ICM_RETRIES, ICM_APPROVE_TIMEOUT); if (ret) return ret; @@ -1081,7 +1081,7 @@ static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw) memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) return ret; @@ -1110,7 +1110,7 @@ static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) 
return ret; @@ -1144,7 +1144,7 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) return ret; @@ -1170,7 +1170,7 @@ static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd, memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) return ret; @@ -1496,7 +1496,7 @@ icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level, memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) return ret; @@ -1522,7 +1522,7 @@ static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) return ret; @@ -1543,7 +1543,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids) memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) return ret; @@ -1604,7 +1604,7 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids, memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) return ret; @@ -1626,7 +1626,7 @@ icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, 20000); + 1, ICM_RETRIES, 20000); if (ret) return ret; @@ -2298,7 +2298,7 @@ static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, ICM_TIMEOUT); + 1, ICM_RETRIES, ICM_TIMEOUT); if (ret) return ret; diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 43171cc1cc2d..bd5815f8f23b 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -2725,6 +2725,13 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw) !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL)) return 0; + /* + * Both lanes need to be in CL0. Here we assume lane 0 already be in + * CL0 and check just for lane 1. 
+ */ + if (tb_wait_for_port(down->dual_link_port, false) <= 0) + return -ENOTCONN; + ret = tb_port_lane_bonding_enable(up); if (ret) { tb_port_warn(up, "failed to enable lane bonding\n"); diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c index 747f88703d5c..11f2aec2a5d3 100644 --- a/drivers/thunderbolt/tmu.c +++ b/drivers/thunderbolt/tmu.c @@ -382,7 +382,7 @@ static int tmu_mode_init(struct tb_switch *sw) } else if (ucap && tb_port_tmu_is_unidirectional(up)) { if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate) sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES; - else if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate) + else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate) sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI; } else if (rate) { sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI; diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index 5b5566862318..9803f0bbf20d 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -703,6 +703,27 @@ out_unlock: mutex_unlock(&xdomain_lock); } +static void start_handshake(struct tb_xdomain *xd) +{ + xd->state = XDOMAIN_STATE_INIT; + queue_delayed_work(xd->tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT)); +} + +/* Can be called from state_work */ +static void __stop_handshake(struct tb_xdomain *xd) +{ + cancel_delayed_work_sync(&xd->properties_changed_work); + xd->properties_changed_retries = 0; + xd->state_retries = 0; +} + +static void stop_handshake(struct tb_xdomain *xd) +{ + cancel_delayed_work_sync(&xd->state_work); + __stop_handshake(xd); +} + static void tb_xdp_handle_request(struct work_struct *work) { struct xdomain_request_work *xw = container_of(work, typeof(*xw), work); @@ -765,6 +786,15 @@ static void tb_xdp_handle_request(struct work_struct *work) case UUID_REQUEST: tb_dbg(tb, "%llx: received XDomain UUID request\n", route); ret = tb_xdp_uuid_response(ctl, route, sequence, uuid); + /* + * If we've stopped the discovery with an error such as + * timing out, we will restart the handshake now that we + * received UUID request from the remote host. 
+ */ + if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) { + dev_dbg(&xd->dev, "restarting handshake\n"); + start_handshake(xd); + } break; case LINK_STATE_STATUS_REQUEST: @@ -1521,6 +1551,13 @@ static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd) msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT)); } +static void tb_xdomain_failed(struct tb_xdomain *xd) +{ + xd->state = XDOMAIN_STATE_ERROR; + queue_delayed_work(xd->tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT)); +} + static void tb_xdomain_state_work(struct work_struct *work) { struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work); @@ -1547,7 +1584,7 @@ static void tb_xdomain_state_work(struct work_struct *work) if (ret) { if (ret == -EAGAIN) goto retry_state; - xd->state = XDOMAIN_STATE_ERROR; + tb_xdomain_failed(xd); } else { tb_xdomain_queue_properties_changed(xd); if (xd->bonding_possible) @@ -1612,7 +1649,7 @@ static void tb_xdomain_state_work(struct work_struct *work) if (ret) { if (ret == -EAGAIN) goto retry_state; - xd->state = XDOMAIN_STATE_ERROR; + tb_xdomain_failed(xd); } else { xd->state = XDOMAIN_STATE_ENUMERATED; } @@ -1623,6 +1660,8 @@ static void tb_xdomain_state_work(struct work_struct *work) break; case XDOMAIN_STATE_ERROR: + dev_dbg(&xd->dev, "discovery failed, stopping handshake\n"); + __stop_handshake(xd); break; default: @@ -1833,21 +1872,6 @@ static void tb_xdomain_release(struct device *dev) kfree(xd); } -static void start_handshake(struct tb_xdomain *xd) -{ - xd->state = XDOMAIN_STATE_INIT; - queue_delayed_work(xd->tb->wq, &xd->state_work, - msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT)); -} - -static void stop_handshake(struct tb_xdomain *xd) -{ - cancel_delayed_work_sync(&xd->properties_changed_work); - cancel_delayed_work_sync(&xd->state_work); - xd->properties_changed_retries = 0; - xd->state_retries = 0; -} - static int __maybe_unused tb_xdomain_suspend(struct device *dev) { stop_handshake(tb_to_xdomain(dev)); diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 26dd089d8e82..ca972fd37725 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -1617,7 +1617,7 @@ static int omap8250_suspend(struct device *dev) { struct omap8250_priv *priv = dev_get_drvdata(dev); struct uart_8250_port *up = serial8250_get_port(priv->line); - int err; + int err = 0; serial8250_suspend_port(priv->line); @@ -1627,7 +1627,8 @@ static int omap8250_suspend(struct device *dev) if (!device_may_wakeup(dev)) priv->wer = 0; serial_out(up, UART_OMAP_WER, priv->wer); - err = pm_runtime_force_suspend(dev); + if (uart_console(&up->port) && console_suspend_enabled) + err = pm_runtime_force_suspend(dev); flush_work(&priv->qos_work); return err; @@ -1636,11 +1637,15 @@ static int omap8250_suspend(struct device *dev) static int omap8250_resume(struct device *dev) { struct omap8250_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(priv->line); int err; - err = pm_runtime_force_resume(dev); - if (err) - return err; + if (uart_console(&up->port) && console_suspend_enabled) { + err = pm_runtime_force_resume(dev); + if (err) + return err; + } + serial8250_resume_port(priv->line); /* Paired with pm_runtime_resume_and_get() in omap8250_suspend() */ pm_runtime_mark_last_busy(dev); @@ -1717,16 +1722,6 @@ static int omap8250_runtime_suspend(struct device *dev) if (priv->line >= 0) up = serial8250_get_port(priv->line); - /* - * When using 'no_console_suspend', the console UART must not be - * 
suspended. Since driver suspend is managed by runtime suspend, - * preventing runtime suspend (by returning error) will keep device - * active during suspend. - */ - if (priv->is_suspending && !console_suspend_enabled) { - if (up && uart_console(&up->port)) - return -EBUSY; - } if (priv->habit & UART_ERRATA_CLOCK_DISABLE) { int ret; diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 7bdc21d5e13b..d5ba6e90bd95 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -156,7 +156,7 @@ static void __uart_start(struct uart_state *state) * enabled, serial_port_runtime_resume() calls start_tx() again * after enabling the device. */ - if (pm_runtime_active(&port_dev->dev)) + if (!pm_runtime_enabled(port->dev) || pm_runtime_active(port->dev)) port->ops->start_tx(port); pm_runtime_mark_last_busy(&port_dev->dev); pm_runtime_put_autosuspend(&port_dev->dev); @@ -1404,12 +1404,18 @@ static void uart_set_rs485_termination(struct uart_port *port, static int uart_rs485_config(struct uart_port *port) { struct serial_rs485 *rs485 = &port->rs485; + unsigned long flags; int ret; + if (!(rs485->flags & SER_RS485_ENABLED)) + return 0; + uart_sanitize_serial_rs485(port, rs485); uart_set_rs485_termination(port, rs485); + spin_lock_irqsave(&port->lock, flags); ret = port->rs485_config(port, NULL, rs485); + spin_unlock_irqrestore(&port->lock, flags); if (ret) memset(rs485, 0, sizeof(*rs485)); @@ -2474,11 +2480,10 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) if (ret == 0) { if (tty) uart_change_line_settings(tty, state, NULL); + uart_rs485_config(uport); spin_lock_irq(&uport->lock); if (!(uport->rs485.flags & SER_RS485_ENABLED)) ops->set_mctrl(uport, uport->mctrl); - else - uart_rs485_config(uport); ops->start_tx(uport); spin_unlock_irq(&uport->lock); tty_port_set_initialized(port, true); @@ -2587,10 +2592,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, port->mctrl &= TIOCM_DTR; if (!(port->rs485.flags & SER_RS485_ENABLED)) port->ops->set_mctrl(port, port->mctrl); - else - uart_rs485_config(port); spin_unlock_irqrestore(&port->lock, flags); + uart_rs485_config(port); + /* * If this driver supports console, and it hasn't been * successfully registered yet, try to re-register it. diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index c2df07545f96..8382e8cfa414 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -6895,7 +6895,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) mask, 0, 1000, 1000); dev_err(hba->dev, "Clearing task management function with tag %d %s\n", - tag, err ? "succeeded" : "failed"); + tag, err < 0 ? 
"failed" : "succeeded"); out: return err; diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c index fff9ec9c391f..4b67749edb99 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.c +++ b/drivers/usb/cdns3/cdnsp-gadget.c @@ -1125,6 +1125,9 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep, unsigned long flags; int ret; + if (request->status != -EINPROGRESS) + return 0; + if (!pep->endpoint.desc) { dev_err(pdev->dev, "%s: can't dequeue to disabled endpoint\n", diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h index 4a4dbc2c1561..81a9c9d6be08 100644 --- a/drivers/usb/cdns3/core.h +++ b/drivers/usb/cdns3/core.h @@ -131,8 +131,7 @@ void cdns_set_active(struct cdns *cdns, u8 set_active); #else /* CONFIG_PM_SLEEP */ static inline int cdns_resume(struct cdns *cdns) { return 0; } -static inline int cdns_set_active(struct cdns *cdns, u8 set_active) -{ return 0; } +static inline void cdns_set_active(struct cdns *cdns, u8 set_active) { } static inline int cdns_suspend(struct cdns *cdns) { return 0; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 3c54b218301c..0ff47eeffb49 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -151,6 +151,10 @@ int usb_device_supports_lpm(struct usb_device *udev) if (udev->quirks & USB_QUIRK_NO_LPM) return 0; + /* Skip if the device BOS descriptor couldn't be read */ + if (!udev->bos) + return 0; + /* USB 2.1 (and greater) devices indicate LPM support through * their USB 2.0 Extended Capabilities BOS descriptor. */ @@ -327,6 +331,10 @@ static void usb_set_lpm_parameters(struct usb_device *udev) if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER) return; + /* Skip if the device BOS descriptor couldn't be read */ + if (!udev->bos) + return; + hub = usb_hub_to_struct_hub(udev->parent); /* It doesn't take time to transition the roothub into U0, since it * doesn't have an upstream link. @@ -2704,13 +2712,17 @@ out_authorized: static enum usb_ssp_rate get_port_ssp_rate(struct usb_device *hdev, u32 ext_portstatus) { - struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap; + struct usb_ssp_cap_descriptor *ssp_cap; u32 attr; u8 speed_id; u8 ssac; u8 lanes; int i; + if (!hdev->bos) + goto out; + + ssp_cap = hdev->bos->ssp_cap; if (!ssp_cap) goto out; @@ -4215,8 +4227,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, enum usb3_link_state state) { int timeout; - __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat; - __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat; + __u8 u1_mel; + __le16 u2_mel; + + /* Skip if the device BOS descriptor couldn't be read */ + if (!udev->bos) + return; + + u1_mel = udev->bos->ss_cap->bU1devExitLat; + u2_mel = udev->bos->ss_cap->bU2DevExitLat; /* If the device says it doesn't have *any* exit latency to come out of * U1 or U2, it's probably lying. 
Assume it doesn't implement that link diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index 37897afd1b64..d44dd7f6623e 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h @@ -153,7 +153,7 @@ static inline int hub_is_superspeedplus(struct usb_device *hdev) { return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS && le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 && - hdev->bos->ssp_cap); + hdev->bos && hdev->bos->ssp_cap); } static inline unsigned hub_power_on_good_delay(struct usb_hub *hub) diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 44ee8526dc28..d25490965b27 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -279,9 +279,46 @@ int dwc3_core_soft_reset(struct dwc3 *dwc) * XHCI driver will reset the host block. If dwc3 was configured for * host-only mode or current role is host, then we can return early. */ - if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) + if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) return 0; + /* + * If the dr_mode is host and the dwc->current_dr_role is not the + * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode + * isn't executed yet. Ensure the phy is ready before the controller + * updates the GCTL.PRTCAPDIR or other settings by soft-resetting + * the phy. + * + * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n + * is port index. If this is a multiport host, then we need to reset + * all active ports. + */ + if (dwc->dr_mode == USB_DR_MODE_HOST) { + u32 usb3_port; + u32 usb2_port; + + usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); + usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST; + dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port); + + usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port); + + /* Small delay for phy reset assertion */ + usleep_range(1000, 2000); + + usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST; + dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port); + + usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port); + + /* Wait for clock synchronization */ + msleep(50); + return 0; + } + reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg |= DWC3_DCTL_CSFTRST; reg &= ~DWC3_DCTL_RUN_STOP; diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index feccf4c8cc4f..e6ab8cc225ff 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1156,7 +1156,8 @@ static int ncm_unwrap_ntb(struct gether *port, struct sk_buff_head *list) { struct f_ncm *ncm = func_to_ncm(&port->func); - __le16 *tmp = (void *) skb->data; + unsigned char *ntb_ptr = skb->data; + __le16 *tmp; unsigned index, index2; int ndp_index; unsigned dg_len, dg_len2; @@ -1169,6 +1170,10 @@ static int ncm_unwrap_ntb(struct gether *port, const struct ndp_parser_opts *opts = ncm->parser_opts; unsigned crc_len = ncm->is_crc ? 
sizeof(uint32_t) : 0; int dgram_counter; + int to_process = skb->len; + +parse_ntb: + tmp = (__le16 *)ntb_ptr; /* dwSignature */ if (get_unaligned_le32(tmp) != opts->nth_sign) { @@ -1215,7 +1220,7 @@ static int ncm_unwrap_ntb(struct gether *port, * walk through NDP * dwSignature */ - tmp = (void *)(skb->data + ndp_index); + tmp = (__le16 *)(ntb_ptr + ndp_index); if (get_unaligned_le32(tmp) != ncm->ndp_sign) { INFO(port->func.config->cdev, "Wrong NDP SIGN\n"); goto err; @@ -1272,11 +1277,11 @@ static int ncm_unwrap_ntb(struct gether *port, if (ncm->is_crc) { uint32_t crc, crc2; - crc = get_unaligned_le32(skb->data + + crc = get_unaligned_le32(ntb_ptr + index + dg_len - crc_len); crc2 = ~crc32_le(~0, - skb->data + index, + ntb_ptr + index, dg_len - crc_len); if (crc != crc2) { INFO(port->func.config->cdev, @@ -1303,7 +1308,7 @@ static int ncm_unwrap_ntb(struct gether *port, dg_len - crc_len); if (skb2 == NULL) goto err; - skb_put_data(skb2, skb->data + index, + skb_put_data(skb2, ntb_ptr + index, dg_len - crc_len); skb_queue_tail(list, skb2); @@ -1316,10 +1321,17 @@ static int ncm_unwrap_ntb(struct gether *port, } while (ndp_len > 2 * (opts->dgram_item_len * 2)); } while (ndp_index); - dev_consume_skb_any(skb); - VDBG(port->func.config->cdev, "Parsed NTB with %d frames\n", dgram_counter); + + to_process -= block_len; + if (to_process != 0) { + ntb_ptr = (unsigned char *)(ntb_ptr + block_len); + goto parse_ntb; + } + + dev_consume_skb_any(skb); + return 0; err: skb_queue_purge(list); diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index 56b8286a8009..74590f93ea61 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -497,11 +497,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req, /* Get the Buffer address and copy the transmit data.*/ eprambase = (u32 __force *)(udc->addr + ep->rambase); if (ep->is_in) { - memcpy(eprambase, bufferptr, bytestosend); + memcpy_toio((void __iomem *)eprambase, bufferptr, + bytestosend); udc->write_fn(udc->addr, ep->offset + XUSB_EP_BUF0COUNT_OFFSET, bufferlen); } else { - memcpy(bufferptr, eprambase, bytestosend); + memcpy_toio((void __iomem *)bufferptr, eprambase, + bytestosend); } /* * Enable the buffer for transmission. @@ -515,11 +517,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req, eprambase = (u32 __force *)(udc->addr + ep->rambase + ep->ep_usb.maxpacket); if (ep->is_in) { - memcpy(eprambase, bufferptr, bytestosend); + memcpy_toio((void __iomem *)eprambase, bufferptr, + bytestosend); udc->write_fn(udc->addr, ep->offset + XUSB_EP_BUF1COUNT_OFFSET, bufferlen); } else { - memcpy(bufferptr, eprambase, bytestosend); + memcpy_toio((void __iomem *)bufferptr, eprambase, + bytestosend); } /* * Enable the buffer for transmission. 
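Stepping back to the f_ncm hunk above: ncm_unwrap_ntb() now walks every NTB packed into a single skb instead of parsing only the first one — after each block is handled, block_len is subtracted from to_process and ntb_ptr is advanced before jumping back to parse_ntb. The standalone sketch below mimics that walk with an invented 4-byte-signature plus 16-bit-length header; the header layout, FAKE_SIGN value and helper names are illustrative stand-ins, not the real NTH16/NDP16 format from the NCM spec or the driver's code.

/*
 * Minimal sketch of the "parse one block, advance by its length, parse
 * again" loop used for skbs that carry several NTBs back to back.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FAKE_SIGN 0x484D434EU	/* stand-in for opts->nth_sign */

static int parse_one_block(const uint8_t *p, size_t avail, uint16_t *block_len)
{
	uint32_t sign;
	uint16_t len;

	if (avail < 6)
		return -1;
	memcpy(&sign, p, sizeof(sign));		/* "dwSignature" */
	memcpy(&len, p + 4, sizeof(len));	/* "wBlockLength" */
	if (sign != FAKE_SIGN || len < 6 || len > avail)
		return -1;

	printf("block of %u bytes, %u byte payload\n",
	       (unsigned)len, (unsigned)(len - 6));
	*block_len = len;
	return 0;
}

int main(void)
{
	/* Two blocks back to back, as a bundled transfer would arrive. */
	uint8_t buf[64] = { 0 };
	uint32_t sign = FAKE_SIGN;
	size_t used = 0;
	uint16_t len;

	for (int i = 0; i < 2; i++) {
		len = 6 + 4 * (i + 1);
		memcpy(buf + used, &sign, 4);
		memcpy(buf + used + 4, &len, 2);
		used += len;
	}

	/* Counterpart of ntb_ptr/to_process in the driver change. */
	const uint8_t *ntb_ptr = buf;
	size_t to_process = used;

	while (to_process) {
		uint16_t block_len;

		if (parse_one_block(ntb_ptr, to_process, &block_len))
			return 1;
		ntb_ptr += block_len;
		to_process -= block_len;
	}
	return 0;
}

The key point the fix makes is that the skb is only consumed (dev_consume_skb_any) once to_process reaches zero, so trailing NTBs are no longer silently dropped.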
@@ -1021,7 +1025,7 @@ static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req) udc->addr); length = req->usb_req.actual = min_t(u32, length, EP0_MAX_PACKET); - memcpy(corebuf, req->usb_req.buf, length); + memcpy_toio((void __iomem *)corebuf, req->usb_req.buf, length); udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, length); udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1); } else { @@ -1752,7 +1756,7 @@ static void xudc_handle_setup(struct xusb_udc *udc) /* Load up the chapter 9 command buffer.*/ ep0rambase = (u32 __force *) (udc->addr + XUSB_SETUP_PKT_ADDR_OFFSET); - memcpy(&setup, ep0rambase, 8); + memcpy_toio((void __iomem *)&setup, ep0rambase, 8); udc->setup = setup; udc->setup.wValue = cpu_to_le16((u16 __force)setup.wValue); @@ -1839,7 +1843,7 @@ static void xudc_ep0_out(struct xusb_udc *udc) (ep0->rambase << 2)); buffer = req->usb_req.buf + req->usb_req.actual; req->usb_req.actual = req->usb_req.actual + bytes_to_rx; - memcpy(buffer, ep0rambase, bytes_to_rx); + memcpy_toio((void __iomem *)buffer, ep0rambase, bytes_to_rx); if (req->usb_req.length == req->usb_req.actual) { /* Data transfer completed get ready for Status stage */ @@ -1915,7 +1919,7 @@ static void xudc_ep0_in(struct xusb_udc *udc) (ep0->rambase << 2)); buffer = req->usb_req.buf + req->usb_req.actual; req->usb_req.actual = req->usb_req.actual + length; - memcpy(ep0rambase, buffer, length); + memcpy_toio((void __iomem *)ep0rambase, buffer, length); } udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, count); udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 0054d02239e2..0df5d807a77e 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1062,19 +1062,19 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status, *status |= USB_PORT_STAT_C_CONFIG_ERROR << 16; /* USB3 specific wPortStatus bits */ - if (portsc & PORT_POWER) { + if (portsc & PORT_POWER) *status |= USB_SS_PORT_STAT_POWER; - /* link state handling */ - if (link_state == XDEV_U0) - bus_state->suspended_ports &= ~(1 << portnum); - } - /* remote wake resume signaling complete */ - if (bus_state->port_remote_wakeup & (1 << portnum) && + /* no longer suspended or resuming */ + if (link_state != XDEV_U3 && link_state != XDEV_RESUME && link_state != XDEV_RECOVERY) { - bus_state->port_remote_wakeup &= ~(1 << portnum); - usb_hcd_end_port_resume(&hcd->self, portnum); + /* remote wake resume signaling complete */ + if (bus_state->port_remote_wakeup & (1 << portnum)) { + bus_state->port_remote_wakeup &= ~(1 << portnum); + usb_hcd_end_port_resume(&hcd->self, portnum); + } + bus_state->suspended_ports &= ~(1 << portnum); } xhci_hub_report_usb3_link_state(xhci, status, portsc); @@ -1131,6 +1131,7 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status, usb_hcd_end_port_resume(&port->rhub->hcd->self, portnum); } port->rexit_active = 0; + bus_state->suspended_ports &= ~(1 << portnum); } } diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 8714ab5bf04d..0a37f0d511cf 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2285,8 +2285,8 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, writel(erst_size, &ir->ir_set->erst_size); erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); - erst_base &= ERST_PTR_MASK; - erst_base |= (ir->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); + erst_base &= ERST_BASE_RSVDP; + erst_base |= ir->erst.erst_dma_addr & 
~ERST_BASE_RSVDP; xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base); /* Set the event ring dequeue address of this interrupter */ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1dde53f6eb31..3e5dc0723a8f 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -798,7 +798,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring, struct xhci_td *td) { - struct device *dev = xhci_to_hcd(xhci)->self.controller; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; struct xhci_segment *seg = td->bounce_seg; struct urb *urb = td->urb; size_t len; @@ -2996,7 +2996,8 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir) */ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, struct xhci_interrupter *ir, - union xhci_trb *event_ring_deq) + union xhci_trb *event_ring_deq, + bool clear_ehb) { u64 temp_64; dma_addr_t deq; @@ -3017,12 +3018,13 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, return; /* Update HC event ring dequeue pointer */ - temp_64 &= ERST_PTR_MASK; + temp_64 &= ERST_DESI_MASK; temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); } /* Clear the event handler busy flag (RW1C) */ - temp_64 |= ERST_EHB; + if (clear_ehb) + temp_64 |= ERST_EHB; xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue); } @@ -3103,7 +3105,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) while (xhci_handle_event(xhci, ir) > 0) { if (event_loop++ < TRBS_PER_SEGMENT / 2) continue; - xhci_update_erst_dequeue(xhci, ir, event_ring_deq); + xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false); event_ring_deq = ir->event_ring->dequeue; /* ring is half-full, force isoc trbs to interrupt more often */ @@ -3113,7 +3115,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) event_loop = 0; } - xhci_update_erst_dequeue(xhci, ir, event_ring_deq); + xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true); ret = IRQ_HANDLED; out: @@ -3469,7 +3471,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, u32 *trb_buff_len, struct xhci_segment *seg) { - struct device *dev = xhci_to_hcd(xhci)->self.controller; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; unsigned int unalign; unsigned int max_pkt; u32 new_buff_len; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 77016338bee1..8342076cb309 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -514,7 +514,7 @@ struct xhci_intr_reg { #define ERST_SIZE_MASK (0xffff << 16) /* erst_base bitmasks */ -#define ERST_BASE_RSVDP (0x3f) +#define ERST_BASE_RSVDP (GENMASK_ULL(5, 0)) /* erst_dequeue bitmasks */ /* Dequeue ERST Segment Index (DESI) - Segment number (or alias) diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c index a22fa3e22584..a341b2fbb7b4 100644 --- a/drivers/usb/misc/onboard_usb_hub.c +++ b/drivers/usb/misc/onboard_usb_hub.c @@ -429,6 +429,7 @@ static const struct usb_device_id onboard_hub_id_table[] = { { USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0620) }, /* Genesys Logic GL3523 USB 3.1 */ + { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */ { USB_DEVICE(VENDOR_ID_MICROCHIP, 
0x2517) }, /* USB2517 USB 2.0 */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */ diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h index a9e2f6023c1c..c4e24a7b9290 100644 --- a/drivers/usb/misc/onboard_usb_hub.h +++ b/drivers/usb/misc/onboard_usb_hub.h @@ -47,6 +47,7 @@ static const struct onboard_hub_pdata vialab_vl817_data = { }; static const struct of_device_id onboard_hub_match[] = { + { .compatible = "usb424,2412", .data = &microchip_usb424_data, }, { .compatible = "usb424,2514", .data = &microchip_usb424_data, }, { .compatible = "usb424,2517", .data = &microchip_usb424_data, }, { .compatible = "usb451,8140", .data = &ti_tusb8041_data, }, diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index 78c726a71b17..2d623284edf6 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c @@ -39,7 +39,7 @@ static const struct musb_register_map musb_regmap[] = { { "IntrUsbE", MUSB_INTRUSBE, 8 }, { "DevCtl", MUSB_DEVCTL, 8 }, { "VControl", 0x68, 32 }, - { "HWVers", 0x69, 16 }, + { "HWVers", MUSB_HWVERS, 16 }, { "LinkInfo", MUSB_LINKINFO, 8 }, { "VPLen", MUSB_VPLEN, 8 }, { "HS_EOF1", MUSB_HS_EOF1, 8 }, diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index a02c29216955..bc4507781167 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -321,10 +321,16 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, musb_giveback(musb, urb, status); qh->is_ready = ready; + /* + * musb->lock had been unlocked in musb_giveback, so qh may + * be freed, need to get it again + */ + qh = musb_ep_get_qh(hw_ep, is_in); + /* reclaim resources (and bandwidth) ASAP; deschedule it, and * invalidate qh as soon as list_empty(&hep->urb_list) */ - if (list_empty(&qh->hep->urb_list)) { + if (qh && list_empty(&qh->hep->urb_list)) { struct list_head *head; struct dma_controller *dma = musb->dma_controller; @@ -2398,6 +2404,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) * and its URB list has emptied, recycle this qh. 
*/ if (ready && list_empty(&qh->hep->urb_list)) { + musb_ep_set_qh(qh->hw_ep, is_in, NULL); qh->hep->hcpriv = NULL; list_del(&qh->ring); kfree(qh); diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index f503cb4cd721..718da02036d8 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -307,6 +307,11 @@ static int dp_altmode_vdm(struct typec_altmode *alt, typec_altmode_update_active(alt, false); dp->data.status = 0; dp->data.conf = 0; + if (dp->hpd) { + drm_connector_oob_hotplug_event(dp->connector_fwnode); + dp->hpd = false; + sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd"); + } break; case DP_CMD_STATUS_UPDATE: dp->data.status = *vdo; diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c index bb0b8479d80f..52c81378e36e 100644 --- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c +++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c @@ -381,10 +381,6 @@ static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdph struct device *dev = pmic_typec_pdphy->dev; int ret; - ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy); - if (ret) - return ret; - /* PD 2.0, DR=TYPEC_DEVICE, PR=TYPEC_SINK */ ret = regmap_update_bits(pmic_typec_pdphy->regmap, pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG, @@ -422,8 +418,6 @@ static int qcom_pmic_typec_pdphy_disable(struct pmic_typec_pdphy *pmic_typec_pdp ret = regmap_write(pmic_typec_pdphy->regmap, pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0); - regulator_disable(pmic_typec_pdphy->vdd_pdphy); - return ret; } @@ -447,6 +441,10 @@ int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy, int i; int ret; + ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy); + if (ret) + return ret; + pmic_typec_pdphy->tcpm_port = tcpm_port; ret = pmic_typec_pdphy_reset(pmic_typec_pdphy); @@ -467,6 +465,8 @@ void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy) disable_irq(pmic_typec_pdphy->irq_data[i].irq); qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy); + + regulator_disable(pmic_typec_pdphy->vdd_pdphy); } struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev) diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c index 384b42267f1f..b35c6e07911e 100644 --- a/drivers/usb/typec/ucsi/psy.c +++ b/drivers/usb/typec/ucsi/psy.c @@ -37,6 +37,15 @@ static int ucsi_psy_get_scope(struct ucsi_connector *con, struct device *dev = con->ucsi->dev; device_property_read_u8(dev, "scope", &scope); + if (scope == POWER_SUPPLY_SCOPE_UNKNOWN) { + u32 mask = UCSI_CAP_ATTR_POWER_AC_SUPPLY | + UCSI_CAP_ATTR_BATTERY_CHARGING; + + if (con->ucsi->cap.attributes & mask) + scope = POWER_SUPPLY_SCOPE_SYSTEM; + else + scope = POWER_SUPPLY_SCOPE_DEVICE; + } val->intval = scope; return 0; } diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index c6dfe3dff346..61b64558f96c 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -787,6 +787,7 @@ static void ucsi_unregister_partner(struct ucsi_connector *con) typec_set_mode(con->port, TYPEC_STATE_SAFE); + typec_partner_set_usb_power_delivery(con->partner, NULL); ucsi_unregister_partner_pdos(con); ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP); typec_unregister_partner(con->partner); @@ -884,6 +885,7 @@ static void ucsi_handle_connector_change(struct work_struct *work) if (ret < 0) { dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS 
failed (%d)\n", __func__, ret); + clear_bit(EVENT_PENDING, &con->ucsi->flags); goto out_unlock; } diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 955d938eb663..7b8fd977f71c 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -123,8 +123,18 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh, done += partlen; len -= partlen; ptr += partlen; + iov->consumed += partlen; + iov->iov[iov->i].iov_len -= partlen; + iov->iov[iov->i].iov_base += partlen; - vringh_kiov_advance(iov, partlen); + if (!iov->iov[iov->i].iov_len) { + /* Fix up old iov element then increment. */ + iov->iov[iov->i].iov_len = iov->consumed; + iov->iov[iov->i].iov_base -= iov->consumed; + + iov->consumed = 0; + iov->i++; + } } return done; } diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 0bb86e6c4d0a..1b2136fe0fa5 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <linux/irqnr.h> #include <linux/pci.h> +#include <linux/rcupdate.h> #include <linux/spinlock.h> #include <linux/cpuhotplug.h> #include <linux/atomic.h> @@ -96,6 +97,7 @@ enum xen_irq_type { struct irq_info { struct list_head list; struct list_head eoi_list; + struct rcu_work rwork; short refcnt; u8 spurious_cnt; u8 is_accounted; @@ -147,22 +149,12 @@ const struct evtchn_ops *evtchn_ops; static DEFINE_MUTEX(irq_mapping_update_lock); /* - * Lock protecting event handling loop against removing event channels. - * Adding of event channels is no issue as the associated IRQ becomes active - * only after everything is setup (before request_[threaded_]irq() the handler - * can't be entered for an event, as the event channel will be unmasked only - * then). - */ -static DEFINE_RWLOCK(evtchn_rwlock); - -/* * Lock hierarchy: * * irq_mapping_update_lock - * evtchn_rwlock - * IRQ-desc lock - * percpu eoi_list_lock - * irq_info->lock + * IRQ-desc lock + * percpu eoi_list_lock + * irq_info->lock */ static LIST_HEAD(xen_irq_list_head); @@ -306,6 +298,22 @@ static void channels_on_cpu_inc(struct irq_info *info) info->is_accounted = 1; } +static void delayed_free_irq(struct work_struct *work) +{ + struct irq_info *info = container_of(to_rcu_work(work), struct irq_info, + rwork); + unsigned int irq = info->irq; + + /* Remove the info pointer only now, with no potential users left. */ + set_info_for_irq(irq, NULL); + + kfree(info); + + /* Legacy IRQ descriptors are managed by the arch. */ + if (irq >= nr_legacy_irqs()) + irq_free_desc(irq); +} + /* Constructors for packed IRQ information. 
*/ static int xen_irq_info_common_setup(struct irq_info *info, unsigned irq, @@ -668,33 +676,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work) eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed); - read_lock_irqsave(&evtchn_rwlock, flags); + rcu_read_lock(); while (true) { - spin_lock(&eoi->eoi_list_lock); + spin_lock_irqsave(&eoi->eoi_list_lock, flags); info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info, eoi_list); - if (info == NULL || now < info->eoi_time) { - spin_unlock(&eoi->eoi_list_lock); + if (info == NULL) + break; + + if (now < info->eoi_time) { + mod_delayed_work_on(info->eoi_cpu, system_wq, + &eoi->delayed, + info->eoi_time - now); break; } list_del_init(&info->eoi_list); - spin_unlock(&eoi->eoi_list_lock); + spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); info->eoi_time = 0; xen_irq_lateeoi_locked(info, false); } - if (info) - mod_delayed_work_on(info->eoi_cpu, system_wq, - &eoi->delayed, info->eoi_time - now); + spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); - read_unlock_irqrestore(&evtchn_rwlock, flags); + rcu_read_unlock(); } static void xen_cpu_init_eoi(unsigned int cpu) @@ -709,16 +720,15 @@ static void xen_cpu_init_eoi(unsigned int cpu) void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags) { struct irq_info *info; - unsigned long flags; - read_lock_irqsave(&evtchn_rwlock, flags); + rcu_read_lock(); info = info_for_irq(irq); if (info) xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS); - read_unlock_irqrestore(&evtchn_rwlock, flags); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(xen_irq_lateeoi); @@ -732,6 +742,7 @@ static void xen_irq_init(unsigned irq) info->type = IRQT_UNBOUND; info->refcnt = -1; + INIT_RCU_WORK(&info->rwork, delayed_free_irq); set_info_for_irq(irq, info); /* @@ -789,31 +800,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) static void xen_free_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); - unsigned long flags; if (WARN_ON(!info)) return; - write_lock_irqsave(&evtchn_rwlock, flags); - if (!list_empty(&info->eoi_list)) lateeoi_list_del(info); list_del(&info->list); - set_info_for_irq(irq, NULL); - WARN_ON(info->refcnt > 0); - write_unlock_irqrestore(&evtchn_rwlock, flags); - - kfree(info); - - /* Legacy IRQ descriptors are managed by the arch. */ - if (irq < nr_legacy_irqs()) - return; - - irq_free_desc(irq); + queue_rcu_work(system_wq, &info->rwork); } /* Not called for lateeoi events. */ @@ -1711,7 +1709,14 @@ int xen_evtchn_do_upcall(void) int cpu = smp_processor_id(); struct evtchn_loop_ctrl ctrl = { 0 }; - read_lock(&evtchn_rwlock); + /* + * When closing an event channel the associated IRQ must not be freed + * until all cpus have left the event handling loop. This is ensured + * by taking the rcu_read_lock() while handling events, as freeing of + * the IRQ is handled via queue_rcu_work() _after_ closing the event + * channel. + */ + rcu_read_lock(); do { vcpu_info->evtchn_upcall_pending = 0; @@ -1724,7 +1729,7 @@ int xen_evtchn_do_upcall(void) } while (vcpu_info->evtchn_upcall_pending); - read_unlock(&evtchn_rwlock); + rcu_read_unlock(); /* * Increment irq_epoch only now to defer EOIs only for |
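The xen/events hunks above replace the global evtchn_rwlock with RCU: the upcall loop and xen_irq_lateeoi() now run under rcu_read_lock(), and xen_free_irq() hands the irq_info to queue_rcu_work(), so the structure is only freed once every CPU has left the event-handling loop. The sketch below shows the same unpublish-then-wait pattern in userspace with liburcu (an assumption for the example — header and flavour naming vary between liburcu versions, and the kernel defers the free to a workqueue rather than blocking in synchronize_rcu()); the trimmed struct irq_info and slot variable are stand-ins, not the driver's data structures.

/*
 * Readers look the object up under rcu_read_lock(); the updater clears
 * the published pointer first and frees only after a grace period, so
 * no reader can still be using the memory. Build with: gcc rcu.c -lurcu
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct irq_info {
	int evtchn;
	int irq;
};

static struct irq_info *info_slot;	/* stand-in for the evtchn -> info lookup */

static void handle_pending_events(void)
{
	struct irq_info *info;

	rcu_read_lock();		/* cf. rcu_read_lock() in xen_evtchn_do_upcall() */
	info = rcu_dereference(info_slot);
	if (info)
		printf("handling event channel %d (irq %d)\n",
		       info->evtchn, info->irq);
	rcu_read_unlock();
}

static void close_event_channel(void)
{
	struct irq_info *info = info_slot;

	rcu_assign_pointer(info_slot, NULL);	/* unpublish first */
	synchronize_rcu();			/* wait for readers to drain */
	free(info);				/* cf. delayed_free_irq() */
}

int main(void)
{
	rcu_register_thread();

	info_slot = calloc(1, sizeof(*info_slot));
	info_slot->evtchn = 7;
	info_slot->irq = 42;

	handle_pending_events();
	close_event_channel();
	handle_pending_events();	/* sees NULL, nothing to do */

	rcu_unregister_thread();
	return 0;
}

Because freeing is ordered after the grace period, the per-event read side needs no lock at all, which is what lets the patch drop evtchn_rwlock and the deadlock-prone lock ordering described in the removed comment.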