Diffstat (limited to 'drivers'): 137 files changed, 1582 insertions(+), 821 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 8351c5638880..f3b639e89dd8 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2295,6 +2295,7 @@ static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, { int ret = 0; struct binder_sg_copy *sgc, *tmpsgc; + struct binder_ptr_fixup *tmppf; struct binder_ptr_fixup *pf = list_first_entry_or_null(pf_head, struct binder_ptr_fixup, node); @@ -2349,7 +2350,11 @@ static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, list_del(&sgc->node); kfree(sgc); } - BUG_ON(!list_empty(pf_head)); + list_for_each_entry_safe(pf, tmppf, pf_head, node) { + BUG_ON(pf->skip_size == 0); + list_del(&pf->node); + kfree(pf); + } BUG_ON(!list_empty(sgc_head)); return ret > 0 ? -EINVAL : ret; @@ -2486,6 +2491,9 @@ static int binder_translate_fd_array(struct list_head *pf_head, struct binder_proc *proc = thread->proc; int ret; + if (fda->num_fds == 0) + return 0; + fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 1d6636ebaac5..f73b836047cf 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -667,6 +667,15 @@ const struct cpumask *cpu_coregroup_mask(int cpu) core_mask = &cpu_topology[cpu].llc_sibling; } + /* + * For systems with no shared cpu-side LLC but with clusters defined, + * extend core_mask to cluster_siblings. The sched domain builder will + * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled. + */ + if (IS_ENABLED(CONFIG_SCHED_CLUSTER) && + cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling)) + core_mask = &cpu_topology[cpu].cluster_sibling; + return core_mask; } @@ -684,7 +693,7 @@ void update_siblings_masks(unsigned int cpuid) for_each_online_cpu(cpu) { cpu_topo = &cpu_topology[cpu]; - if (cpuid_topo->llc_id == cpu_topo->llc_id) { + if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) { cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); } diff --git a/drivers/base/topology.c b/drivers/base/topology.c index e9d1efcda89b..ac6ad9ab67f9 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -152,9 +152,19 @@ static struct attribute *default_attrs[] = { NULL }; +static umode_t topology_is_visible(struct kobject *kobj, + struct attribute *attr, int unused) +{ + if (attr == &dev_attr_ppin.attr && !topology_ppin(kobj_to_dev(kobj)->id)) + return 0; + + return attr->mode; +} + static const struct attribute_group topology_attr_group = { .attrs = default_attrs, .bin_attrs = bin_attrs, + .is_visible = topology_is_visible, .name = "topology" }; diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c index 5e0e4393ce4d..0cfe859a4ac4 100644 --- a/drivers/bus/fsl-mc/fsl-mc-msi.c +++ b/drivers/bus/fsl-mc/fsl-mc-msi.c @@ -224,8 +224,12 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count) if (error) return error; + msi_lock_descs(dev); if (msi_first_desc(dev, MSI_DESC_ALL)) - return -EINVAL; + error = -EINVAL; + msi_unlock_descs(dev); + if (error) + return error; /* * NOTE: Calling this function will trigger the invocation of the diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index 60fbd42041dd..828c66bbaa67 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -352,8 +352,7 @@ static int of_weim_notify(struct 
notifier_block *nb, unsigned long action, pdev = of_find_device_by_node(rd->dn); if (!pdev) { - dev_err(&pdev->dev, - "Could not find platform device for '%pOF'\n", + pr_err("Could not find platform device for '%pOF'\n", rd->dn); ret = notifier_from_errno(-EINVAL); @@ -370,7 +369,7 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action, return ret; } -struct notifier_block weim_of_notifier = { +static struct notifier_block weim_of_notifier = { .notifier_call = of_weim_notify, }; #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index 9527b7d63840..541ced27d941 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -1060,6 +1060,7 @@ static int __maybe_unused mhi_pci_freeze(struct device *dev) * the intermediate restore kernel reinitializes MHI device with new * context. */ + flush_work(&mhi_pdev->recovery_work); if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { mhi_power_down(mhi_cntrl, true); mhi_unprepare_after_power_down(mhi_cntrl); @@ -1085,6 +1086,7 @@ static const struct dev_pm_ops mhi_pci_pm_ops = { .resume = mhi_pci_resume, .freeze = mhi_pci_freeze, .thaw = mhi_pci_restore, + .poweroff = mhi_pci_freeze, .restore = mhi_pci_restore, #endif }; diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 4566e730ef2b..60b082fe2ed0 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -227,6 +227,8 @@ static struct sunxi_rsb_device *sunxi_rsb_device_create(struct sunxi_rsb *rsb, dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev)); + return rdev; + err_device_add: put_device(&rdev->dev); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 54c0ee6dda30..7a1b1f9e4933 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -3232,13 +3232,27 @@ static int sysc_check_disabled_devices(struct sysc *ddata) */ static int sysc_check_active_timer(struct sysc *ddata) { + int error; + if (ddata->cap->type != TI_SYSC_OMAP2_TIMER && ddata->cap->type != TI_SYSC_OMAP4_TIMER) return 0; + /* + * Quirk for omap3 beagleboard revision A to B4 to use gpt12. + * Revision C and later are fixed with commit 23885389dbbb ("ARM: + * dts: Fix timer regression for beagleboard revision c"). This all + * can be dropped if we stop supporting old beagleboard revisions + * A to B4 at some point. + */ + if (sysc_soc->soc == SOC_3430) + error = -ENXIO; + else + error = -EBUSY; + if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) && (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)) - return -ENXIO; + return error; return 0; } diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index c59265146e9c..f1827257ef0e 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -3677,8 +3677,11 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf) void ipmi_unregister_smi(struct ipmi_smi *intf) { struct ipmi_smi_watcher *w; - int intf_num = intf->intf_num, index; + int intf_num, index; + if (!intf) + return; + intf_num = intf->intf_num; mutex_lock(&ipmi_interfaces_mutex); intf->intf_num = -1; intf->in_shutdown = true; @@ -4518,6 +4521,8 @@ return_unspecified: } else /* The message was sent, start the timer. 
*/ intf_start_seq_timer(intf, msg->msgid); + requeue = 0; + goto out; } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) || (msg->rsp[1] != msg->data[1])) { /* diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 64dedb3ef8ec..5604a810fb3d 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2220,10 +2220,7 @@ static void cleanup_one_si(struct smi_info *smi_info) return; list_del(&smi_info->link); - - if (smi_info->intf) - ipmi_unregister_smi(smi_info->intf); - + ipmi_unregister_smi(smi_info->intf); kfree(smi_info); } diff --git a/drivers/char/random.c b/drivers/char/random.c index 3a293f919af9..4c9adb4f3d5d 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -318,6 +318,13 @@ static void crng_reseed(bool force) * the resultant ChaCha state to the user, along with the second * half of the block containing 32 bytes of random data that may * be used; random_data_len may not be greater than 32. + * + * The returned ChaCha state contains within it a copy of the old + * key value, at index 4, so the state should always be zeroed out + * immediately after using in order to maintain forward secrecy. + * If the state cannot be erased in a timely manner, then it is + * safer to set the random_data parameter to &chacha_state[4] so + * that this function overwrites it before returning. */ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], u32 chacha_state[CHACHA_STATE_WORDS], @@ -333,7 +340,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], chacha20_block(chacha_state, first_block); memcpy(key, first_block, CHACHA_KEY_SIZE); - memmove(random_data, first_block + CHACHA_KEY_SIZE, random_data_len); + memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len); memzero_explicit(first_block, sizeof(first_block)); } diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c index aa1561b773d6..070c3b896559 100644 --- a/drivers/clk/microchip/clk-mpfs.c +++ b/drivers/clk/microchip/clk-mpfs.c @@ -11,20 +11,48 @@ #include <dt-bindings/clock/microchip,mpfs-clock.h> /* address offset of control registers */ +#define REG_MSSPLL_REF_CR 0x08u +#define REG_MSSPLL_POSTDIV_CR 0x10u +#define REG_MSSPLL_SSCG_2_CR 0x2Cu #define REG_CLOCK_CONFIG_CR 0x08u +#define REG_RTC_CLOCK_CR 0x0Cu #define REG_SUBBLK_CLOCK_CR 0x84u #define REG_SUBBLK_RESET_CR 0x88u +#define MSSPLL_FBDIV_SHIFT 0x00u +#define MSSPLL_FBDIV_WIDTH 0x0Cu +#define MSSPLL_REFDIV_SHIFT 0x08u +#define MSSPLL_REFDIV_WIDTH 0x06u +#define MSSPLL_POSTDIV_SHIFT 0x08u +#define MSSPLL_POSTDIV_WIDTH 0x07u +#define MSSPLL_FIXED_DIV 4u + struct mpfs_clock_data { void __iomem *base; + void __iomem *msspll_base; struct clk_hw_onecell_data hw_data; }; +struct mpfs_msspll_hw_clock { + void __iomem *base; + unsigned int id; + u32 reg_offset; + u32 shift; + u32 width; + u32 flags; + struct clk_hw hw; + struct clk_init_data init; +}; + +#define to_mpfs_msspll_clk(_hw) container_of(_hw, struct mpfs_msspll_hw_clock, hw) + struct mpfs_cfg_clock { const struct clk_div_table *table; unsigned int id; + u32 reg_offset; u8 shift; u8 width; + u8 flags; }; struct mpfs_cfg_hw_clock { @@ -55,7 +83,7 @@ struct mpfs_periph_hw_clock { */ static DEFINE_SPINLOCK(mpfs_clk_lock); -static const struct clk_parent_data mpfs_cfg_parent[] = { +static const struct clk_parent_data mpfs_ext_ref[] = { { .index = 0 }, }; @@ -69,6 +97,86 @@ static const struct clk_div_table mpfs_div_ahb_table[] = { { 0, 0 } }; +/* + * The only two supported reference clock 
frequencies for the PolarFire SoC are + * 100 and 125 MHz, as the rtc reference is required to be 1 MHz. + * It therefore only needs to have divider table entries corresponding to + * divide by 100 and 125. + */ +static const struct clk_div_table mpfs_div_rtcref_table[] = { + { 100, 100 }, { 125, 125 }, + { 0, 0 } +}; + +static unsigned long mpfs_clk_msspll_recalc_rate(struct clk_hw *hw, unsigned long prate) +{ + struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw); + void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset; + void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR; + void __iomem *postdiv_addr = msspll_hw->base + REG_MSSPLL_POSTDIV_CR; + u32 mult, ref_div, postdiv; + + mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT; + mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH); + ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT; + ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH); + postdiv = readl_relaxed(postdiv_addr) >> MSSPLL_POSTDIV_SHIFT; + postdiv &= clk_div_mask(MSSPLL_POSTDIV_WIDTH); + + return prate * mult / (ref_div * MSSPLL_FIXED_DIV * postdiv); +} + +static const struct clk_ops mpfs_clk_msspll_ops = { + .recalc_rate = mpfs_clk_msspll_recalc_rate, +}; + +#define CLK_PLL(_id, _name, _parent, _shift, _width, _flags, _offset) { \ + .id = _id, \ + .shift = _shift, \ + .width = _width, \ + .reg_offset = _offset, \ + .flags = _flags, \ + .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, &mpfs_clk_msspll_ops, 0), \ +} + +static struct mpfs_msspll_hw_clock mpfs_msspll_clks[] = { + CLK_PLL(CLK_MSSPLL, "clk_msspll", mpfs_ext_ref, MSSPLL_FBDIV_SHIFT, + MSSPLL_FBDIV_WIDTH, 0, REG_MSSPLL_SSCG_2_CR), +}; + +static int mpfs_clk_register_msspll(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hw, + void __iomem *base) +{ + msspll_hw->base = base; + + return devm_clk_hw_register(dev, &msspll_hw->hw); +} + +static int mpfs_clk_register_mssplls(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hws, + unsigned int num_clks, struct mpfs_clock_data *data) +{ + void __iomem *base = data->msspll_base; + unsigned int i; + int ret; + + for (i = 0; i < num_clks; i++) { + struct mpfs_msspll_hw_clock *msspll_hw = &msspll_hws[i]; + + ret = mpfs_clk_register_msspll(dev, msspll_hw, base); + if (ret) + return dev_err_probe(dev, ret, "failed to register msspll id: %d\n", + CLK_MSSPLL); + + data->hw_data.hws[msspll_hw->id] = &msspll_hw->hw; + } + + return 0; +} + +/* + * "CFG" clocks + */ + static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long prate) { struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw); @@ -76,10 +184,10 @@ static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long p void __iomem *base_addr = cfg_hw->sys_base; u32 val; - val = readl_relaxed(base_addr + REG_CLOCK_CONFIG_CR) >> cfg->shift; + val = readl_relaxed(base_addr + cfg->reg_offset) >> cfg->shift; val &= clk_div_mask(cfg->width); - return prate / (1u << val); + return divider_recalc_rate(hw, prate, val, cfg->table, cfg->flags, cfg->width); } static long mpfs_cfg_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) @@ -105,11 +213,10 @@ static int mpfs_cfg_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned return divider_setting; spin_lock_irqsave(&mpfs_clk_lock, flags); - - val = readl_relaxed(base_addr + REG_CLOCK_CONFIG_CR); + val = readl_relaxed(base_addr + cfg->reg_offset); val &= ~(clk_div_mask(cfg->width) << cfg_hw->cfg.shift); val |= divider_setting << cfg->shift; - writel_relaxed(val, base_addr + 
REG_CLOCK_CONFIG_CR); + writel_relaxed(val, base_addr + cfg->reg_offset); spin_unlock_irqrestore(&mpfs_clk_lock, flags); @@ -122,19 +229,33 @@ static const struct clk_ops mpfs_clk_cfg_ops = { .set_rate = mpfs_cfg_clk_set_rate, }; -#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags) { \ - .cfg.id = _id, \ - .cfg.shift = _shift, \ - .cfg.width = _width, \ - .cfg.table = _table, \ - .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, &mpfs_clk_cfg_ops, \ - _flags), \ +#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags, _offset) { \ + .cfg.id = _id, \ + .cfg.shift = _shift, \ + .cfg.width = _width, \ + .cfg.table = _table, \ + .cfg.reg_offset = _offset, \ + .cfg.flags = _flags, \ + .hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \ } static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = { - CLK_CFG(CLK_CPU, "clk_cpu", mpfs_cfg_parent, 0, 2, mpfs_div_cpu_axi_table, 0), - CLK_CFG(CLK_AXI, "clk_axi", mpfs_cfg_parent, 2, 2, mpfs_div_cpu_axi_table, 0), - CLK_CFG(CLK_AHB, "clk_ahb", mpfs_cfg_parent, 4, 2, mpfs_div_ahb_table, 0), + CLK_CFG(CLK_CPU, "clk_cpu", "clk_msspll", 0, 2, mpfs_div_cpu_axi_table, 0, + REG_CLOCK_CONFIG_CR), + CLK_CFG(CLK_AXI, "clk_axi", "clk_msspll", 2, 2, mpfs_div_cpu_axi_table, 0, + REG_CLOCK_CONFIG_CR), + CLK_CFG(CLK_AHB, "clk_ahb", "clk_msspll", 4, 2, mpfs_div_ahb_table, 0, + REG_CLOCK_CONFIG_CR), + { + .cfg.id = CLK_RTCREF, + .cfg.shift = 0, + .cfg.width = 12, + .cfg.table = mpfs_div_rtcref_table, + .cfg.reg_offset = REG_RTC_CLOCK_CR, + .cfg.flags = CLK_DIVIDER_ONE_BASED, + .hw.init = + CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &mpfs_clk_cfg_ops, 0), + } }; static int mpfs_clk_register_cfg(struct device *dev, struct mpfs_cfg_hw_clock *cfg_hw, @@ -160,13 +281,17 @@ static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock * return dev_err_probe(dev, ret, "failed to register clock id: %d\n", cfg_hw->cfg.id); - id = cfg_hws[i].cfg.id; + id = cfg_hw->cfg.id; data->hw_data.hws[id] = &cfg_hw->hw; } return 0; } +/* + * peripheral clocks - devices connected to axi or ahb buses. + */ + static int mpfs_periph_clk_enable(struct clk_hw *hw) { struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw); @@ -200,10 +325,6 @@ static void mpfs_periph_clk_disable(struct clk_hw *hw) spin_lock_irqsave(&mpfs_clk_lock, flags); - reg = readl_relaxed(base_addr + REG_SUBBLK_RESET_CR); - val = reg | (1u << periph->shift); - writel_relaxed(val, base_addr + REG_SUBBLK_RESET_CR); - reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR); val = reg & ~(1u << periph->shift); writel_relaxed(val, base_addr + REG_SUBBLK_CLOCK_CR); @@ -249,8 +370,10 @@ static const struct clk_ops mpfs_periph_clk_ops = { * trap handler * - CLK_MMUART0: reserved by the hss * - CLK_DDRC: provides clock to the ddr subsystem - * - CLK_FICx: these provide clocks for sections of the fpga fabric, disabling them would - * cause the fabric to go into reset + * - CLK_FICx: these provide the processor side clocks to the "FIC" (Fabric InterConnect) + * clock domain crossers which provide the interface to the FPGA fabric. Disabling them + * causes the FPGA fabric to go into reset. + * - CLK_ATHENA: The athena clock is FIC4, which is reserved for the Athena TeraFire. 
*/ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = { @@ -258,7 +381,7 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = { CLK_PERIPH(CLK_MAC0, "clk_periph_mac0", PARENT_CLK(AHB), 1, 0), CLK_PERIPH(CLK_MAC1, "clk_periph_mac1", PARENT_CLK(AHB), 2, 0), CLK_PERIPH(CLK_MMC, "clk_periph_mmc", PARENT_CLK(AHB), 3, 0), - CLK_PERIPH(CLK_TIMER, "clk_periph_timer", PARENT_CLK(AHB), 4, 0), + CLK_PERIPH(CLK_TIMER, "clk_periph_timer", PARENT_CLK(RTCREF), 4, 0), CLK_PERIPH(CLK_MMUART0, "clk_periph_mmuart0", PARENT_CLK(AHB), 5, CLK_IS_CRITICAL), CLK_PERIPH(CLK_MMUART1, "clk_periph_mmuart1", PARENT_CLK(AHB), 6, 0), CLK_PERIPH(CLK_MMUART2, "clk_periph_mmuart2", PARENT_CLK(AHB), 7, 0), @@ -277,11 +400,11 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = { CLK_PERIPH(CLK_GPIO1, "clk_periph_gpio1", PARENT_CLK(AHB), 21, 0), CLK_PERIPH(CLK_GPIO2, "clk_periph_gpio2", PARENT_CLK(AHB), 22, 0), CLK_PERIPH(CLK_DDRC, "clk_periph_ddrc", PARENT_CLK(AHB), 23, CLK_IS_CRITICAL), - CLK_PERIPH(CLK_FIC0, "clk_periph_fic0", PARENT_CLK(AHB), 24, CLK_IS_CRITICAL), - CLK_PERIPH(CLK_FIC1, "clk_periph_fic1", PARENT_CLK(AHB), 25, CLK_IS_CRITICAL), - CLK_PERIPH(CLK_FIC2, "clk_periph_fic2", PARENT_CLK(AHB), 26, CLK_IS_CRITICAL), - CLK_PERIPH(CLK_FIC3, "clk_periph_fic3", PARENT_CLK(AHB), 27, CLK_IS_CRITICAL), - CLK_PERIPH(CLK_ATHENA, "clk_periph_athena", PARENT_CLK(AHB), 28, 0), + CLK_PERIPH(CLK_FIC0, "clk_periph_fic0", PARENT_CLK(AXI), 24, CLK_IS_CRITICAL), + CLK_PERIPH(CLK_FIC1, "clk_periph_fic1", PARENT_CLK(AXI), 25, CLK_IS_CRITICAL), + CLK_PERIPH(CLK_FIC2, "clk_periph_fic2", PARENT_CLK(AXI), 26, CLK_IS_CRITICAL), + CLK_PERIPH(CLK_FIC3, "clk_periph_fic3", PARENT_CLK(AXI), 27, CLK_IS_CRITICAL), + CLK_PERIPH(CLK_ATHENA, "clk_periph_athena", PARENT_CLK(AXI), 28, CLK_IS_CRITICAL), CLK_PERIPH(CLK_CFM, "clk_periph_cfm", PARENT_CLK(AHB), 29, 0), }; @@ -322,8 +445,9 @@ static int mpfs_clk_probe(struct platform_device *pdev) unsigned int num_clks; int ret; - /* CLK_RESERVED is not part of cfg_clks nor periph_clks, so add 1 */ - num_clks = ARRAY_SIZE(mpfs_cfg_clks) + ARRAY_SIZE(mpfs_periph_clks) + 1; + /* CLK_RESERVED is not part of clock arrays, so add 1 */ + num_clks = ARRAY_SIZE(mpfs_msspll_clks) + ARRAY_SIZE(mpfs_cfg_clks) + + ARRAY_SIZE(mpfs_periph_clks) + 1; clk_data = devm_kzalloc(dev, struct_size(clk_data, hw_data.hws, num_clks), GFP_KERNEL); if (!clk_data) @@ -333,8 +457,17 @@ static int mpfs_clk_probe(struct platform_device *pdev) if (IS_ERR(clk_data->base)) return PTR_ERR(clk_data->base); + clk_data->msspll_base = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(clk_data->msspll_base)) + return PTR_ERR(clk_data->msspll_base); + clk_data->hw_data.num = num_clks; + ret = mpfs_clk_register_mssplls(dev, mpfs_msspll_clks, ARRAY_SIZE(mpfs_msspll_clks), + clk_data); + if (ret) + return ret; + ret = mpfs_clk_register_cfgs(dev, mpfs_cfg_clks, ARRAY_SIZE(mpfs_cfg_clks), clk_data); if (ret) return ret; diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index f675fd969c4d..e9c357309fd9 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -818,7 +818,7 @@ EXPORT_SYMBOL_GPL(clk_pixel_ops); static int clk_gfx3d_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { - struct clk_rate_request parent_req = { }; + struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX }; struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw); struct clk_hw *xo, *p0, *p1, *p2; unsigned long p0_rate; diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c 
b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c index 8a10bade7e0d..2f3ddc908ebd 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c @@ -241,6 +241,7 @@ static struct clk_init_data rtc_32k_init_data = { .ops = &ccu_mux_ops, .parent_hws = rtc_32k_parents, .num_parents = ARRAY_SIZE(rtc_32k_parents), /* updated during probe */ + .flags = CLK_IS_CRITICAL, }; static struct ccu_mux rtc_32k_clk = { @@ -346,6 +347,7 @@ static const struct of_device_id sun6i_rtc_ccu_match[] = { .compatible = "allwinner,sun50i-r329-rtc", .data = &sun50i_r329_rtc_ccu_data, }, + {}, }; int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg) diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c index 542b31d6e96d..636bcf2439ef 100644 --- a/drivers/clk/sunxi/clk-sun9i-mmc.c +++ b/drivers/clk/sunxi/clk-sun9i-mmc.c @@ -109,6 +109,8 @@ static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev) spin_lock_init(&data->lock); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) + return -EINVAL; /* one clock/reset pair per word */ count = DIV_ROUND_UP((resource_size(r)), SUN9I_MMC_WIDTH); data->membase = devm_ioremap_resource(&pdev->dev, r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 29e9419a914b..7fd0277b2805 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2395,6 +2395,71 @@ static int amdgpu_pmops_restore(struct device *dev) return amdgpu_device_resume(drm_dev, true); } +static int amdgpu_runtime_idle_check_display(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + + if (adev->mode_info.num_crtc) { + struct drm_connector *list_connector; + struct drm_connector_list_iter iter; + int ret = 0; + + /* XXX: Return busy if any displays are connected to avoid + * possible display wakeups after runtime resume due to + * hotplug events in case any displays were connected while + * the GPU was in suspend. Remove this once that is fixed. 
+ */ + mutex_lock(&drm_dev->mode_config.mutex); + drm_connector_list_iter_begin(drm_dev, &iter); + drm_for_each_connector_iter(list_connector, &iter) { + if (list_connector->status == connector_status_connected) { + ret = -EBUSY; + break; + } + } + drm_connector_list_iter_end(&iter); + mutex_unlock(&drm_dev->mode_config.mutex); + + if (ret) + return ret; + + if (amdgpu_device_has_dc_support(adev)) { + struct drm_crtc *crtc; + + drm_for_each_crtc(crtc, drm_dev) { + drm_modeset_lock(&crtc->mutex, NULL); + if (crtc->state->active) + ret = -EBUSY; + drm_modeset_unlock(&crtc->mutex); + if (ret < 0) + break; + } + } else { + mutex_lock(&drm_dev->mode_config.mutex); + drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL); + + drm_connector_list_iter_begin(drm_dev, &iter); + drm_for_each_connector_iter(list_connector, &iter) { + if (list_connector->dpms == DRM_MODE_DPMS_ON) { + ret = -EBUSY; + break; + } + } + + drm_connector_list_iter_end(&iter); + + drm_modeset_unlock(&drm_dev->mode_config.connection_mutex); + mutex_unlock(&drm_dev->mode_config.mutex); + } + if (ret) + return ret; + } + + return 0; +} + static int amdgpu_pmops_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -2407,6 +2472,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) return -EBUSY; } + ret = amdgpu_runtime_idle_check_display(dev); + if (ret) + return ret; + /* wait for all rings to drain before suspending */ for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; @@ -2516,41 +2585,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev) return -EBUSY; } - if (amdgpu_device_has_dc_support(adev)) { - struct drm_crtc *crtc; - - drm_for_each_crtc(crtc, drm_dev) { - drm_modeset_lock(&crtc->mutex, NULL); - if (crtc->state->active) - ret = -EBUSY; - drm_modeset_unlock(&crtc->mutex); - if (ret < 0) - break; - } - - } else { - struct drm_connector *list_connector; - struct drm_connector_list_iter iter; - - mutex_lock(&drm_dev->mode_config.mutex); - drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL); - - drm_connector_list_iter_begin(drm_dev, &iter); - drm_for_each_connector_iter(list_connector, &iter) { - if (list_connector->dpms == DRM_MODE_DPMS_ON) { - ret = -EBUSY; - break; - } - } - - drm_connector_list_iter_end(&iter); - - drm_modeset_unlock(&drm_dev->mode_config.connection_mutex); - mutex_unlock(&drm_dev->mode_config.mutex); - } - - if (ret == -EBUSY) - DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); + ret = amdgpu_runtime_idle_check_display(dev); pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 5228421b0f72..7c956cf21bc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -1151,6 +1151,16 @@ static int gmc_v10_0_set_clockgating_state(void *handle, int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* + * The issue mmhub can't disconnect from DF with MMHUB clock gating being disabled + * is a new problem observed at DF 3.0.3, however with the same suspend sequence not + * seen any issue on the DF 3.0.2 series platform. 
+ */ + if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) { + dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n"); + return 0; + } + r = adev->mmhub.funcs->set_clockgating(adev, state); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index acf4f7975850..198672264492 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -130,19 +130,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm, } static void increment_queue_count(struct device_queue_manager *dqm, - enum kfd_queue_type type) + struct qcm_process_device *qpd, + struct queue *q) { dqm->active_queue_count++; - if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ) + if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || + q->properties.type == KFD_QUEUE_TYPE_DIQ) dqm->active_cp_queue_count++; + + if (q->properties.is_gws) { + dqm->gws_queue_count++; + qpd->mapped_gws_queue = true; + } } static void decrement_queue_count(struct device_queue_manager *dqm, - enum kfd_queue_type type) + struct qcm_process_device *qpd, + struct queue *q) { dqm->active_queue_count--; - if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ) + if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || + q->properties.type == KFD_QUEUE_TYPE_DIQ) dqm->active_cp_queue_count--; + + if (q->properties.is_gws) { + dqm->gws_queue_count--; + qpd->mapped_gws_queue = false; + } } /* @@ -412,7 +426,7 @@ add_queue_to_list: list_add(&q->list, &qpd->queues_list); qpd->queue_count++; if (q->properties.is_active) - increment_queue_count(dqm, q->properties.type); + increment_queue_count(dqm, qpd, q); /* * Unconditionally increment this counter, regardless of the queue's @@ -601,13 +615,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm, deallocate_vmid(dqm, qpd, q); } qpd->queue_count--; - if (q->properties.is_active) { - decrement_queue_count(dqm, q->properties.type); - if (q->properties.is_gws) { - dqm->gws_queue_count--; - qpd->mapped_gws_queue = false; - } - } + if (q->properties.is_active) + decrement_queue_count(dqm, qpd, q); return retval; } @@ -700,12 +709,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, * dqm->active_queue_count to determine whether a new runlist must be * uploaded. 
*/ - if (q->properties.is_active && !prev_active) - increment_queue_count(dqm, q->properties.type); - else if (!q->properties.is_active && prev_active) - decrement_queue_count(dqm, q->properties.type); - - if (q->gws && !q->properties.is_gws) { + if (q->properties.is_active && !prev_active) { + increment_queue_count(dqm, &pdd->qpd, q); + } else if (!q->properties.is_active && prev_active) { + decrement_queue_count(dqm, &pdd->qpd, q); + } else if (q->gws && !q->properties.is_gws) { if (q->properties.is_active) { dqm->gws_queue_count++; pdd->qpd.mapped_gws_queue = true; @@ -767,11 +775,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; q->properties.is_active = false; - decrement_queue_count(dqm, q->properties.type); - if (q->properties.is_gws) { - dqm->gws_queue_count--; - qpd->mapped_gws_queue = false; - } + decrement_queue_count(dqm, qpd, q); if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n")) continue; @@ -817,7 +821,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, continue; q->properties.is_active = false; - decrement_queue_count(dqm, q->properties.type); + decrement_queue_count(dqm, qpd, q); } pdd->last_evict_timestamp = get_jiffies_64(); retval = execute_queues_cpsch(dqm, @@ -888,11 +892,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; q->properties.is_active = true; - increment_queue_count(dqm, q->properties.type); - if (q->properties.is_gws) { - dqm->gws_queue_count++; - qpd->mapped_gws_queue = true; - } + increment_queue_count(dqm, qpd, q); if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n")) continue; @@ -950,7 +950,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, continue; q->properties.is_active = true; - increment_queue_count(dqm, q->properties.type); + increment_queue_count(dqm, &pdd->qpd, q); } retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); @@ -1378,7 +1378,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, dqm->total_queue_count); list_add(&kq->list, &qpd->priv_queue_list); - increment_queue_count(dqm, kq->queue->properties.type); + increment_queue_count(dqm, qpd, kq->queue); qpd->is_debug = true; execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); dqm_unlock(dqm); @@ -1392,7 +1392,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, { dqm_lock(dqm); list_del(&kq->list); - decrement_queue_count(dqm, kq->queue->properties.type); + decrement_queue_count(dqm, qpd, kq->queue); qpd->is_debug = false; execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); /* @@ -1467,7 +1467,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, qpd->queue_count++; if (q->properties.is_active) { - increment_queue_count(dqm, q->properties.type); + increment_queue_count(dqm, qpd, q); execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); @@ -1683,15 +1683,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, list_del(&q->list); qpd->queue_count--; if (q->properties.is_active) { - decrement_queue_count(dqm, q->properties.type); + decrement_queue_count(dqm, qpd, q); retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); if (retval == -ETIME) qpd->reset_wavefronts = true; - if (q->properties.is_gws) { - dqm->gws_queue_count--; 
- qpd->mapped_gws_queue = false; - } } /* @@ -1932,7 +1928,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, /* Clean all kernel queues */ list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) { list_del(&kq->list); - decrement_queue_count(dqm, kq->queue->properties.type); + decrement_queue_count(dqm, qpd, kq->queue); qpd->is_debug = false; dqm->total_queue_count--; filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES; @@ -1945,13 +1941,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) deallocate_sdma_queue(dqm, q); - if (q->properties.is_active) { - decrement_queue_count(dqm, q->properties.type); - if (q->properties.is_gws) { - dqm->gws_queue_count--; - qpd->mapped_gws_queue = false; - } - } + if (q->properties.is_active) + decrement_queue_count(dqm, qpd, q); dqm->total_queue_count--; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 9967a73d5b0f..8f58fc491b28 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1103,7 +1103,7 @@ struct kfd_criu_queue_priv_data { uint32_t priority; uint32_t q_percent; uint32_t doorbell_id; - uint32_t is_gws; + uint32_t gws; uint32_t sdma_id; uint32_t eop_ring_buffer_size; uint32_t ctx_save_restore_area_size; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 6eca9509f2e3..4f58e671d39b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -636,6 +636,8 @@ static int criu_checkpoint_queue(struct kfd_process_device *pdd, q_data->ctx_save_restore_area_size = q->properties.ctx_save_restore_area_size; + q_data->gws = !!q->gws; + ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack); if (ret) { pr_err("Failed checkpoint queue_mqd (%d)\n", ret); @@ -743,7 +745,6 @@ static void set_queue_properties_from_criu(struct queue_properties *qp, struct kfd_criu_queue_priv_data *q_data) { qp->is_interop = false; - qp->is_gws = q_data->is_gws; qp->queue_percent = q_data->q_percent; qp->priority = q_data->priority; qp->queue_address = q_data->q_address; @@ -826,12 +827,15 @@ int kfd_criu_restore_queue(struct kfd_process *p, NULL); if (ret) { pr_err("Failed to create new queue err:%d\n", ret); - ret = -EINVAL; + goto exit; } + if (q_data->gws) + ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws); + exit: if (ret) - pr_err("Failed to create queue (%d)\n", ret); + pr_err("Failed to restore queue (%d)\n", ret); else pr_debug("Queue id %d was restored successfully\n", queue_id); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 3fe4bfbb98a0..faab59508d82 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -997,6 +997,7 @@ static struct clock_source *dcn21_clock_source_create( return &clk_src->base; } + kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 5504d81c77b7..72e7b5d40af6 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -427,6 +427,7 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) { const struct 
amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int i; if (!adev->pm.dpm_enabled) return; @@ -434,6 +435,15 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) if (!pp_funcs->pm_compute_clocks) return; + if (adev->mode_info.num_crtc) + amdgpu_display_bandwidth_update(adev); + + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (ring && ring->sched.ready) + amdgpu_fence_wait_empty(ring); + } + mutex_lock(&adev->pm.mutex); pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); mutex_unlock(&adev->pm.mutex); @@ -443,6 +453,20 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) { int ret = 0; + if (adev->family == AMDGPU_FAMILY_SI) { + mutex_lock(&adev->pm.mutex); + if (enable) { + adev->pm.dpm.uvd_active = true; + adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; + } else { + adev->pm.dpm.uvd_active = false; + } + mutex_unlock(&adev->pm.mutex); + + amdgpu_dpm_compute_clocks(adev); + return; + } + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); if (ret) DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", @@ -453,6 +477,21 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) { int ret = 0; + if (adev->family == AMDGPU_FAMILY_SI) { + mutex_lock(&adev->pm.mutex); + if (enable) { + adev->pm.dpm.vce_active = true; + /* XXX select vce level based on ring/task */ + adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; + } else { + adev->pm.dpm.vce_active = false; + } + mutex_unlock(&adev->pm.mutex); + + amdgpu_dpm_compute_clocks(adev); + return; + } + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); if (ret) DRM_ERROR("Dpm %s vce failed, ret = %d. \n", diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index 9613c6181c17..d3fe149d8476 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -1028,16 +1028,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) void amdgpu_legacy_dpm_compute_clocks(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i = 0; - - if (adev->mode_info.num_crtc) - amdgpu_display_bandwidth_update(adev); - - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - if (ring && ring->sched.ready) - amdgpu_fence_wait_empty(ring); - } amdgpu_dpm_get_active_displays(adev); diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index caae54487f9c..633dab14f51c 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -3892,40 +3892,6 @@ static int si_set_boot_state(struct amdgpu_device *adev) } #endif -static int si_set_powergating_by_smu(void *handle, - uint32_t block_type, - bool gate) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - switch (block_type) { - case AMD_IP_BLOCK_TYPE_UVD: - if (!gate) { - adev->pm.dpm.uvd_active = true; - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; - } else { - adev->pm.dpm.uvd_active = false; - } - - amdgpu_legacy_dpm_compute_clocks(handle); - break; - case AMD_IP_BLOCK_TYPE_VCE: - if (!gate) { - adev->pm.dpm.vce_active = true; - /* XXX select vce level based on ring/task */ - adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; - } else { - adev->pm.dpm.vce_active = false; - } - - amdgpu_legacy_dpm_compute_clocks(handle); - break; - default: - break; - } - return 0; -} - static int 
si_set_sw_state(struct amdgpu_device *adev) { return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ? @@ -8125,7 +8091,6 @@ static const struct amd_pm_funcs si_dpm_funcs = { .print_power_state = &si_dpm_print_power_state, .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level, .force_performance_level = &si_dpm_force_performance_level, - .set_powergating_by_smu = &si_set_powergating_by_smu, .vblank_too_short = &si_dpm_vblank_too_short, .set_fan_control_mode = &si_dpm_set_fan_control_mode, .get_fan_control_mode = &si_dpm_get_fan_control_mode, diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index a2da46bf3985..71e9c6ce6b1a 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1487,16 +1487,6 @@ static void pp_pm_compute_clocks(void *handle) { struct pp_hwmgr *hwmgr = handle; struct amdgpu_device *adev = hwmgr->adev; - int i = 0; - - if (adev->mode_info.num_crtc) - amdgpu_display_bandwidth_update(adev); - - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - if (ring && ring->sched.ready) - amdgpu_fence_wait_empty(ring); - } if (!amdgpu_device_has_dc_support(adev)) { amdgpu_dpm_get_active_displays(adev); diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 97cf3cac0105..fb6cf30ee628 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -97,6 +97,14 @@ #define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359 +enum intel_dp_aux_backlight_modparam { + INTEL_DP_AUX_BACKLIGHT_AUTO = -1, + INTEL_DP_AUX_BACKLIGHT_OFF = 0, + INTEL_DP_AUX_BACKLIGHT_ON = 1, + INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2, + INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3, +}; + /* Intel EDP backlight callbacks */ static bool intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) @@ -126,6 +134,24 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) return false; } + /* + * If we don't have HDR static metadata there is no way to + * runtime detect used range for nits based control. For now + * do not use Intel proprietary eDP backlight control if we + * don't have this data in panel EDID. In case we find panel + * which supports only nits based control, but doesn't provide + * HDR static metadata we need to start maintaining table of + * ranges for such panels. + */ + if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL && + !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type & + BIT(HDMI_STATIC_METADATA_TYPE1))) { + drm_info(&i915->drm, + "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. 
needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n", + INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL); + return false; + } + panel->backlight.edp.intel.sdr_uses_aux = tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP; @@ -413,14 +439,6 @@ static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = { .get = intel_dp_aux_vesa_get_backlight, }; -enum intel_dp_aux_backlight_modparam { - INTEL_DP_AUX_BACKLIGHT_AUTO = -1, - INTEL_DP_AUX_BACKLIGHT_OFF = 0, - INTEL_DP_AUX_BACKLIGHT_ON = 1, - INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2, - INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3, -}; - int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 87f4af3fd523..3e61a8936245 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1037,7 +1037,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); const struct drm_framebuffer *fb = plane_state->hw.fb; - struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); const struct intel_crtc_state *crtc_state; struct intel_fbc *fbc = plane->fbc; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3c87d77d2cf6..a9354f8f110d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4345,12 +4345,12 @@ #define _DSPAADDR 0x70184 #define _DSPASTRIDE 0x70188 #define _DSPAPOS 0x7018C /* reserved */ -#define DISP_POS_Y_MASK REG_GENMASK(31, 0) +#define DISP_POS_Y_MASK REG_GENMASK(31, 16) #define DISP_POS_Y(y) REG_FIELD_PREP(DISP_POS_Y_MASK, (y)) #define DISP_POS_X_MASK REG_GENMASK(15, 0) #define DISP_POS_X(x) REG_FIELD_PREP(DISP_POS_X_MASK, (x)) #define _DSPASIZE 0x70190 -#define DISP_HEIGHT_MASK REG_GENMASK(31, 0) +#define DISP_HEIGHT_MASK REG_GENMASK(31, 16) #define DISP_HEIGHT(h) REG_FIELD_PREP(DISP_HEIGHT_MASK, (h)) #define DISP_WIDTH_MASK REG_GENMASK(15, 0) #define DISP_WIDTH(w) REG_FIELD_PREP(DISP_WIDTH_MASK, (w)) @@ -5152,7 +5152,7 @@ #define _SEL_FETCH_PLANE_BASE_6_A 0x70940 #define _SEL_FETCH_PLANE_BASE_7_A 0x70960 #define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880 -#define _SEL_FETCH_PLANE_BASE_1_B 0x70990 +#define _SEL_FETCH_PLANE_BASE_1_B 0x71890 #define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \ _SEL_FETCH_PLANE_BASE_1_A, \ diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c index 56ae38389db0..462fae73eae9 100644 --- a/drivers/gpu/drm/sun4i/sun4i_frontend.c +++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c @@ -222,13 +222,11 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend, /* Set the physical address of the buffer in memory */ paddr = drm_fb_cma_get_gem_addr(fb, state, 0); - paddr -= PHYS_OFFSET; DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &paddr); regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, paddr); if (fb->format->num_planes > 1) { paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 
2 : 1); - paddr -= PHYS_OFFSET; DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n", &paddr); regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR1_REG, paddr); @@ -236,7 +234,6 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend, if (fb->format->num_planes > 2) { paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 1 : 2); - paddr -= PHYS_OFFSET; DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n", &paddr); regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR2_REG, paddr); diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index fb6d14d213a1..c67cd037a93f 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -19,6 +19,7 @@ #include <linux/log2.h> #include <linux/kthread.h> #include <linux/regmap.h> +#include <linux/sched.h> #include <linux/slab.h> #include <linux/util_macros.h> @@ -294,11 +295,10 @@ static int adt7470_update_thread(void *p) adt7470_read_temperatures(data); mutex_unlock(&data->lock); - set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) break; - schedule_timeout(msecs_to_jiffies(data->auto_update_interval)); + schedule_timeout_interruptible(msecs_to_jiffies(data->auto_update_interval)); } return 0; diff --git a/drivers/hwmon/asus_wmi_sensors.c b/drivers/hwmon/asus_wmi_sensors.c index 8fdcb62ae52d..9e935e34c998 100644 --- a/drivers/hwmon/asus_wmi_sensors.c +++ b/drivers/hwmon/asus_wmi_sensors.c @@ -71,7 +71,7 @@ static const struct dmi_system_id asus_wmi_dmi_table[] = { DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X399-A"), DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO"), DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI EXTREME"), - DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI HERO"), + DMI_EXACT_MATCH_ASUS_BOARD_NAME("CROSSHAIR VI HERO"), DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI HERO (WI-FI AC)"), DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VII HERO"), DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VII HERO (WI-FI)"), diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 938a8b9ec70d..6830e029995d 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -1578,8 +1578,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, temp *= 125; if (sign) temp -= 128000; - } else - temp = data->temp[nr] * 1000; + } else { + temp = ((s8)data->temp[nr]) * 1000; + } return sprintf(buf, "%d\n", temp); } diff --git a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c index 40dffd9c4cbf..f546f0c12497 100644 --- a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c +++ b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c @@ -14,6 +14,21 @@ #define AHE50DC_PMBUS_READ_TEMP4 0xd0 +static int ahe50dc_fan_write_byte(struct i2c_client *client, int page, u8 value) +{ + /* + * The CLEAR_FAULTS operation seems to sometimes (unpredictably, perhaps + * 5% of the time or so) trigger a problematic phenomenon in which the + * fan speeds surge momentarily and at least some (perhaps all?) of the + * system's power outputs experience a glitch. + * + * However, according to Delta it should be OK to simply not send any + * CLEAR_FAULTS commands (the device doesn't seem to be capable of + * reporting any faults anyway), so just blackhole them unconditionally. + */ + return value == PMBUS_CLEAR_FAULTS ? 
-EOPNOTSUPP : -ENODATA; +} + static int ahe50dc_fan_read_word_data(struct i2c_client *client, int page, int phase, int reg) { /* temp1 in (virtual) page 1 is remapped to mfr-specific temp4 */ @@ -68,6 +83,7 @@ static struct pmbus_driver_info ahe50dc_fan_info = { PMBUS_HAVE_VIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_FAN34 | PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_STATUS_FAN34 | PMBUS_PAGE_VIRTUAL, .func[1] = PMBUS_HAVE_TEMP | PMBUS_PAGE_VIRTUAL, + .write_byte = ahe50dc_fan_write_byte, .read_word_data = ahe50dc_fan_read_word_data, }; diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index b2618b1d529e..d93574d6a1fb 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -2326,6 +2326,9 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, data->has_status_word = true; } + /* Make sure PEC is disabled, will be enabled later if needed */ + client->flags &= ~I2C_CLIENT_PEC; + /* Enable PEC if the controller and bus supports it */ if (!(data->flags & PMBUS_NO_CAPABILITY)) { ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY); diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c index 18fffc5d749b..32bc7736d609 100644 --- a/drivers/hwmon/pmbus/xdpe12284.c +++ b/drivers/hwmon/pmbus/xdpe12284.c @@ -124,7 +124,7 @@ static int xdpe122_identify(struct i2c_client *client, return 0; } -static const struct regulator_desc xdpe122_reg_desc[] = { +static const struct regulator_desc __maybe_unused xdpe122_reg_desc[] = { PMBUS_REGULATOR("vout", 0), PMBUS_REGULATOR("vout", 1), }; diff --git a/drivers/iio/adc/ad7280a.c b/drivers/iio/adc/ad7280a.c index ef9d27759961..ec9acbf12b9a 100644 --- a/drivers/iio/adc/ad7280a.c +++ b/drivers/iio/adc/ad7280a.c @@ -745,7 +745,7 @@ static int ad7280a_write_thresh(struct iio_dev *indio_dev, case IIO_EV_DIR_RISING: addr = AD7280A_CELL_OVERVOLTAGE_REG; ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr, - 1, val); + 1, value); if (ret) break; st->cell_threshhigh = value; @@ -753,7 +753,7 @@ static int ad7280a_write_thresh(struct iio_dev *indio_dev, case IIO_EV_DIR_FALLING: addr = AD7280A_CELL_UNDERVOLTAGE_REG; ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr, - 1, val); + 1, value); if (ret) break; st->cell_threshlow = value; @@ -770,18 +770,18 @@ static int ad7280a_write_thresh(struct iio_dev *indio_dev, case IIO_EV_DIR_RISING: addr = AD7280A_AUX_ADC_OVERVOLTAGE_REG; ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr, - 1, val); + 1, value); if (ret) break; - st->aux_threshhigh = val; + st->aux_threshhigh = value; break; case IIO_EV_DIR_FALLING: addr = AD7280A_AUX_ADC_UNDERVOLTAGE_REG; ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr, - 1, val); + 1, value); if (ret) break; - st->aux_threshlow = val; + st->aux_threshlow = value; break; default: ret = -EINVAL; diff --git a/drivers/iio/chemical/scd4x.c b/drivers/iio/chemical/scd4x.c index 20d4e7584e92..37143b5526ee 100644 --- a/drivers/iio/chemical/scd4x.c +++ b/drivers/iio/chemical/scd4x.c @@ -471,12 +471,15 @@ static ssize_t calibration_forced_value_store(struct device *dev, ret = scd4x_write_and_fetch(state, CMD_FRC, arg, &val, sizeof(val)); mutex_unlock(&state->lock); + if (ret) + return ret; + if (val == 0xff) { dev_err(dev, "forced calibration has failed"); return -EINVAL; } - return ret ?: len; + return len; } static IIO_DEVICE_ATTR_RW(calibration_auto_enable, 0); diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c index 97f13c0b9631..d5ea1a1be122 100644 --- a/drivers/iio/dac/ad3552r.c 
+++ b/drivers/iio/dac/ad3552r.c @@ -656,7 +656,7 @@ static int ad3552r_reset(struct ad3552r_desc *dac) { struct reg_addr_pool addr; int ret; - u16 val; + int val; dac->gpio_reset = devm_gpiod_get_optional(&dac->spi->dev, "reset", GPIOD_OUT_LOW); @@ -809,10 +809,10 @@ static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac, gain_child = fwnode_get_named_child_node(child, "custom-output-range-config"); - if (IS_ERR(gain_child)) { + if (!gain_child) { dev_err(dev, "mandatory custom-output-range-config property missing\n"); - return PTR_ERR(gain_child); + return -EINVAL; } dac->ch_data[ch].range_override = 1; diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c index 14cfabacbea5..fdf824041497 100644 --- a/drivers/iio/dac/ad5446.c +++ b/drivers/iio/dac/ad5446.c @@ -178,7 +178,7 @@ static int ad5446_read_raw(struct iio_dev *indio_dev, switch (m) { case IIO_CHAN_INFO_RAW: - *val = st->cached_val; + *val = st->cached_val >> chan->scan_type.shift; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: *val = st->vref_mv; diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index a424b7220b61..4434c1b2a322 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c @@ -522,7 +522,7 @@ static int ad5592r_alloc_channels(struct iio_dev *iio_dev) if (!ret) st->channel_modes[reg] = tmp; - fwnode_property_read_u32(child, "adi,off-state", &tmp); + ret = fwnode_property_read_u32(child, "adi,off-state", &tmp); if (!ret) st->channel_offstate[reg] = tmp; } diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c index e41861d29767..2f9c384885f4 100644 --- a/drivers/iio/dac/ltc2688.c +++ b/drivers/iio/dac/ltc2688.c @@ -298,7 +298,7 @@ static int ltc2688_read_raw(struct iio_dev *indio_dev, if (ret) return ret; - *val = 16; + *val2 = 16; return IIO_VAL_FRACTIONAL_LOG2; case IIO_CHAN_INFO_CALIBBIAS: ret = regmap_read(st->regmap, diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c index 4a3b8d875518..0b775f943db3 100644 --- a/drivers/iio/dac/ti-dac5571.c +++ b/drivers/iio/dac/ti-dac5571.c @@ -19,6 +19,7 @@ #include <linux/i2c.h> #include <linux/module.h> #include <linux/mod_devicetable.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> enum chip_id { @@ -311,6 +312,7 @@ static int dac5571_probe(struct i2c_client *client, const struct dac5571_spec *spec; struct dac5571_data *data; struct iio_dev *indio_dev; + enum chip_id chip_id; int ret, i; indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); @@ -326,7 +328,13 @@ static int dac5571_probe(struct i2c_client *client, indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = dac5571_channels; - spec = &dac5571_spec[id->driver_data]; + if (dev_fwnode(dev)) + chip_id = (uintptr_t)device_get_match_data(dev); + else + chip_id = id->driver_data; + + spec = &dac5571_spec[chip_id]; + indio_dev->num_channels = spec->num_channels; data->spec = spec; @@ -385,15 +393,15 @@ static int dac5571_remove(struct i2c_client *i2c) } static const struct of_device_id dac5571_of_id[] = { - {.compatible = "ti,dac5571"}, - {.compatible = "ti,dac6571"}, - {.compatible = "ti,dac7571"}, - {.compatible = "ti,dac5574"}, - {.compatible = "ti,dac6574"}, - {.compatible = "ti,dac7574"}, - {.compatible = "ti,dac5573"}, - {.compatible = "ti,dac6573"}, - {.compatible = "ti,dac7573"}, + {.compatible = "ti,dac5571", .data = (void *)single_8bit}, + {.compatible = "ti,dac6571", .data = (void *)single_10bit}, + {.compatible = "ti,dac7571", .data = (void *)single_12bit}, + {.compatible = 
"ti,dac5574", .data = (void *)quad_8bit}, + {.compatible = "ti,dac6574", .data = (void *)quad_10bit}, + {.compatible = "ti,dac7574", .data = (void *)quad_12bit}, + {.compatible = "ti,dac5573", .data = (void *)quad_8bit}, + {.compatible = "ti,dac6573", .data = (void *)quad_10bit}, + {.compatible = "ti,dac7573", .data = (void *)quad_12bit}, {} }; MODULE_DEVICE_TABLE(of, dac5571_of_id); diff --git a/drivers/iio/filter/Kconfig b/drivers/iio/filter/Kconfig index 3ae35817ad82..a85b345ea14e 100644 --- a/drivers/iio/filter/Kconfig +++ b/drivers/iio/filter/Kconfig @@ -8,6 +8,7 @@ menu "Filters" config ADMV8818 tristate "Analog Devices ADMV8818 High-Pass and Low-Pass Filter" depends on SPI && COMMON_CLK && 64BIT + select REGMAP_SPI help Say yes here to build support for Analog Devices ADMV8818 2 GHz to 18 GHz, Digitally Tunable, High-Pass and Low-Pass Filter. diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c index 824b5124a5f5..01336105792e 100644 --- a/drivers/iio/imu/bmi160/bmi160_core.c +++ b/drivers/iio/imu/bmi160/bmi160_core.c @@ -730,7 +730,7 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi) ret = regmap_write(data->regmap, BMI160_REG_CMD, BMI160_CMD_SOFTRESET); if (ret) - return ret; + goto disable_regulator; usleep_range(BMI160_SOFTRESET_USLEEP, BMI160_SOFTRESET_USLEEP + 1); @@ -741,29 +741,37 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi) if (use_spi) { ret = regmap_read(data->regmap, BMI160_REG_DUMMY, &val); if (ret) - return ret; + goto disable_regulator; } ret = regmap_read(data->regmap, BMI160_REG_CHIP_ID, &val); if (ret) { dev_err(dev, "Error reading chip id\n"); - return ret; + goto disable_regulator; } if (val != BMI160_CHIP_ID_VAL) { dev_err(dev, "Wrong chip id, got %x expected %x\n", val, BMI160_CHIP_ID_VAL); - return -ENODEV; + ret = -ENODEV; + goto disable_regulator; } ret = bmi160_set_mode(data, BMI160_ACCEL, true); if (ret) - return ret; + goto disable_regulator; ret = bmi160_set_mode(data, BMI160_GYRO, true); if (ret) - return ret; + goto disable_accel; return 0; + +disable_accel: + bmi160_set_mode(data, BMI160_ACCEL, false); + +disable_regulator: + regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies); + return ret; } static int bmi160_data_rdy_trigger_set_state(struct iio_trigger *trig, diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c index 33d9afb1ba91..d4a692b838d0 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c @@ -18,12 +18,15 @@ static int inv_icm42600_i2c_bus_setup(struct inv_icm42600_state *st) unsigned int mask, val; int ret; - /* setup interface registers */ - ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6, - INV_ICM42600_INTF_CONFIG6_MASK, - INV_ICM42600_INTF_CONFIG6_I3C_EN); - if (ret) - return ret; + /* + * setup interface registers + * This register write to REG_INTF_CONFIG6 enables a spike filter that + * is impacting the line and can prevent the I2C ACK to be seen by the + * controller. So we don't test the return value. 
+ */ + regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6, + INV_ICM42600_INTF_CONFIG6_MASK, + INV_ICM42600_INTF_CONFIG6_I3C_EN); ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4, INV_ICM42600_INTF_CONFIG4_I3C_BUS_ONLY, 0); diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index 088f748b683e..2432e697150c 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -416,6 +416,7 @@ static int ak8975_power_on(const struct ak8975_data *data) if (ret) { dev_warn(&data->client->dev, "Failed to enable specified Vid supply\n"); + regulator_disable(data->vdd); return ret; } diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c index 0d9bbbb50cb4..70c37f664f6d 100644 --- a/drivers/iio/proximity/sx9324.c +++ b/drivers/iio/proximity/sx9324.c @@ -70,13 +70,17 @@ #define SX9324_REG_AFE_PH2 0x2a #define SX9324_REG_AFE_PH3 0x2b #define SX9324_REG_AFE_CTRL8 0x2c -#define SX9324_REG_AFE_CTRL8_RESFILTN_4KOHM 0x02 +#define SX9324_REG_AFE_CTRL8_RESERVED 0x10 +#define SX9324_REG_AFE_CTRL8_RESFILTIN_4KOHM 0x02 #define SX9324_REG_AFE_CTRL9 0x2d #define SX9324_REG_AFE_CTRL9_AGAIN_1 0x08 #define SX9324_REG_PROX_CTRL0 0x30 #define SX9324_REG_PROX_CTRL0_GAIN_MASK GENMASK(5, 3) -#define SX9324_REG_PROX_CTRL0_GAIN_1 0x80 +#define SX9324_REG_PROX_CTRL0_GAIN_SHIFT 3 +#define SX9324_REG_PROX_CTRL0_GAIN_RSVD 0x0 +#define SX9324_REG_PROX_CTRL0_GAIN_1 0x1 +#define SX9324_REG_PROX_CTRL0_GAIN_8 0x4 #define SX9324_REG_PROX_CTRL0_RAWFILT_MASK GENMASK(2, 0) #define SX9324_REG_PROX_CTRL0_RAWFILT_1P50 0x01 #define SX9324_REG_PROX_CTRL1 0x31 @@ -379,7 +383,14 @@ static int sx9324_read_gain(struct sx_common_data *data, if (ret) return ret; - *val = 1 << FIELD_GET(SX9324_REG_PROX_CTRL0_GAIN_MASK, regval); + regval = FIELD_GET(SX9324_REG_PROX_CTRL0_GAIN_MASK, regval); + if (regval) + regval--; + else if (regval == SX9324_REG_PROX_CTRL0_GAIN_RSVD || + regval > SX9324_REG_PROX_CTRL0_GAIN_8) + return -EINVAL; + + *val = 1 << regval; return IIO_VAL_INT; } @@ -725,8 +736,12 @@ static int sx9324_write_gain(struct sx_common_data *data, unsigned int gain, reg; int ret; - gain = ilog2(val); reg = SX9324_REG_PROX_CTRL0 + chan->channel / 2; + + gain = ilog2(val) + 1; + if (val <= 0 || gain > SX9324_REG_PROX_CTRL0_GAIN_8) + return -EINVAL; + gain = FIELD_PREP(SX9324_REG_PROX_CTRL0_GAIN_MASK, gain); mutex_lock(&data->mutex); @@ -781,12 +796,15 @@ static const struct sx_common_reg_default sx9324_default_regs[] = { { SX9324_REG_AFE_PH2, 0x1a }, { SX9324_REG_AFE_PH3, 0x16 }, - { SX9324_REG_AFE_CTRL8, SX9324_REG_AFE_CTRL8_RESFILTN_4KOHM }, + { SX9324_REG_AFE_CTRL8, SX9324_REG_AFE_CTRL8_RESERVED | + SX9324_REG_AFE_CTRL8_RESFILTIN_4KOHM }, { SX9324_REG_AFE_CTRL9, SX9324_REG_AFE_CTRL9_AGAIN_1 }, - { SX9324_REG_PROX_CTRL0, SX9324_REG_PROX_CTRL0_GAIN_1 | + { SX9324_REG_PROX_CTRL0, + SX9324_REG_PROX_CTRL0_GAIN_1 << SX9324_REG_PROX_CTRL0_GAIN_SHIFT | SX9324_REG_PROX_CTRL0_RAWFILT_1P50 }, - { SX9324_REG_PROX_CTRL1, SX9324_REG_PROX_CTRL0_GAIN_1 | + { SX9324_REG_PROX_CTRL1, + SX9324_REG_PROX_CTRL0_GAIN_1 << SX9324_REG_PROX_CTRL0_GAIN_SHIFT | SX9324_REG_PROX_CTRL0_RAWFILT_1P50 }, { SX9324_REG_PROX_CTRL2, SX9324_REG_PROX_CTRL2_AVGNEG_THRESH_16K }, { SX9324_REG_PROX_CTRL3, SX9324_REG_PROX_CTRL3_AVGDEB_2SAMPLES | diff --git a/drivers/iio/proximity/sx_common.c b/drivers/iio/proximity/sx_common.c index a7c07316a0a9..8ad814d96b7e 100644 --- a/drivers/iio/proximity/sx_common.c +++ b/drivers/iio/proximity/sx_common.c @@ -521,6 +521,7 @@ int sx_common_probe(struct 
i2c_client *client, return dev_err_probe(dev, ret, "error reading WHOAMI\n"); ACPI_COMPANION_SET(&indio_dev->dev, ACPI_COMPANION(dev)); + indio_dev->dev.of_node = client->dev.of_node; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = data->chip_info->iio_channels; diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c index 12d59c36df53..5f7c0f85fa8e 100644 --- a/drivers/interconnect/qcom/sc7180.c +++ b/drivers/interconnect/qcom/sc7180.c @@ -47,7 +47,6 @@ DEFINE_QNODE(qnm_mnoc_sf, SC7180_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC7180_SLAVE_GEM DEFINE_QNODE(qnm_snoc_gc, SC7180_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC7180_SLAVE_LLCC); DEFINE_QNODE(qnm_snoc_sf, SC7180_MASTER_SNOC_SF_MEM_NOC, 1, 16, SC7180_SLAVE_LLCC); DEFINE_QNODE(qxm_gpu, SC7180_MASTER_GFX3D, 2, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC); -DEFINE_QNODE(ipa_core_master, SC7180_MASTER_IPA_CORE, 1, 8, SC7180_SLAVE_IPA_CORE); DEFINE_QNODE(llcc_mc, SC7180_MASTER_LLCC, 2, 4, SC7180_SLAVE_EBI1); DEFINE_QNODE(qhm_mnoc_cfg, SC7180_MASTER_CNOC_MNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_MNOC); DEFINE_QNODE(qxm_camnoc_hf0, SC7180_MASTER_CAMNOC_HF0, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC); @@ -129,7 +128,6 @@ DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SC7180_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4); DEFINE_QNODE(qns_gem_noc_snoc, SC7180_SLAVE_GEM_NOC_SNOC, 1, 8, SC7180_MASTER_GEM_NOC_SNOC); DEFINE_QNODE(qns_llcc, SC7180_SLAVE_LLCC, 1, 16, SC7180_MASTER_LLCC); DEFINE_QNODE(srvc_gemnoc, SC7180_SLAVE_SERVICE_GEM_NOC, 1, 4); -DEFINE_QNODE(ipa_core_slave, SC7180_SLAVE_IPA_CORE, 1, 8); DEFINE_QNODE(ebi, SC7180_SLAVE_EBI1, 2, 4); DEFINE_QNODE(qns_mem_noc_hf, SC7180_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_HF_MEM_NOC); DEFINE_QNODE(qns_mem_noc_sf, SC7180_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_SF_MEM_NOC); @@ -160,7 +158,6 @@ DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi); DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc); DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf); DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto); -DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave); DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc); DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9); DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu); @@ -372,22 +369,6 @@ static struct qcom_icc_desc sc7180_gem_noc = { .num_bcms = ARRAY_SIZE(gem_noc_bcms), }; -static struct qcom_icc_bcm *ipa_virt_bcms[] = { - &bcm_ip0, -}; - -static struct qcom_icc_node *ipa_virt_nodes[] = { - [MASTER_IPA_CORE] = &ipa_core_master, - [SLAVE_IPA_CORE] = &ipa_core_slave, -}; - -static struct qcom_icc_desc sc7180_ipa_virt = { - .nodes = ipa_virt_nodes, - .num_nodes = ARRAY_SIZE(ipa_virt_nodes), - .bcms = ipa_virt_bcms, - 
.num_bcms = ARRAY_SIZE(ipa_virt_bcms), -}; - static struct qcom_icc_bcm *mc_virt_bcms[] = { &bcm_acv, &bcm_mc0, @@ -519,8 +500,6 @@ static const struct of_device_id qnoc_of_match[] = { .data = &sc7180_dc_noc}, { .compatible = "qcom,sc7180-gem-noc", .data = &sc7180_gem_noc}, - { .compatible = "qcom,sc7180-ipa-virt", - .data = &sc7180_ipa_virt}, { .compatible = "qcom,sc7180-mc-virt", .data = &sc7180_mc_virt}, { .compatible = "qcom,sc7180-mmss-noc", diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c index 03d604f84cc5..e3ac25a997b7 100644 --- a/drivers/interconnect/qcom/sdx55.c +++ b/drivers/interconnect/qcom/sdx55.c @@ -18,7 +18,6 @@ #include "icc-rpmh.h" #include "sdx55.h" -DEFINE_QNODE(ipa_core_master, SDX55_MASTER_IPA_CORE, 1, 8, SDX55_SLAVE_IPA_CORE); DEFINE_QNODE(llcc_mc, SDX55_MASTER_LLCC, 4, 4, SDX55_SLAVE_EBI_CH0); DEFINE_QNODE(acm_tcu, SDX55_MASTER_TCU_0, 1, 8, SDX55_SLAVE_LLCC, SDX55_SLAVE_MEM_NOC_SNOC, SDX55_SLAVE_MEM_NOC_PCIE_SNOC); DEFINE_QNODE(qnm_snoc_gc, SDX55_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDX55_SLAVE_LLCC); @@ -40,7 +39,6 @@ DEFINE_QNODE(xm_pcie, SDX55_MASTER_PCIE, 1, 8, SDX55_SLAVE_ANOC_SNOC); DEFINE_QNODE(xm_qdss_etr, SDX55_MASTER_QDSS_ETR, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_AOSS, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG); DEFINE_QNODE(xm_sdc1, SDX55_MASTER_SDCC_1, 1, 8, SDX55_SLAVE_AOSS, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP, SDX55_SLAVE_AUDIO); DEFINE_QNODE(xm_usb3, SDX55_MASTER_USB3, 1, 8, SDX55_SLAVE_ANOC_SNOC); -DEFINE_QNODE(ipa_core_slave, SDX55_SLAVE_IPA_CORE, 1, 8); DEFINE_QNODE(ebi, SDX55_SLAVE_EBI_CH0, 1, 4); DEFINE_QNODE(qns_llcc, SDX55_SLAVE_LLCC, 1, 16, SDX55_SLAVE_EBI_CH0); DEFINE_QNODE(qns_memnoc_snoc, SDX55_SLAVE_MEM_NOC_SNOC, 1, 8, SDX55_MASTER_MEM_NOC_SNOC); @@ -82,7 +80,6 @@ DEFINE_QNODE(xs_sys_tcu_cfg, SDX55_SLAVE_TCU, 1, 8); DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi); DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc); DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto); -DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave); DEFINE_QBCM(bcm_pn0, "PN0", false, &qhm_snoc_cfg); DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr); DEFINE_QBCM(bcm_sh4, "SH4", false, &qns_memnoc_snoc, &qns_sys_pcie); @@ -219,22 +216,6 @@ static const struct qcom_icc_desc sdx55_system_noc = { .num_bcms = ARRAY_SIZE(system_noc_bcms), }; -static struct qcom_icc_bcm *ipa_virt_bcms[] = { - &bcm_ip0, -}; - -static struct qcom_icc_node *ipa_virt_nodes[] = { - [MASTER_IPA_CORE] = &ipa_core_master, - [SLAVE_IPA_CORE] = &ipa_core_slave, -}; - -static const struct qcom_icc_desc sdx55_ipa_virt = { - .nodes = ipa_virt_nodes, - .num_nodes = ARRAY_SIZE(ipa_virt_nodes), - .bcms = ipa_virt_bcms, - .num_bcms = ARRAY_SIZE(ipa_virt_bcms), -}; - static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sdx55-mc-virt", .data = &sdx55_mc_virt}, @@ -242,8 +223,6 @@ static const struct of_device_id qnoc_of_match[] = { .data = &sdx55_mem_noc}, { .compatible = "qcom,sdx55-system-noc", .data = &sdx55_system_noc}, - { .compatible = 
"qcom,sdx55-ipa-virt", - .data = &sdx55_ipa_virt}, { } }; MODULE_DEVICE_TABLE(of, qnoc_of_match); diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index decafb07ad08..8af0242a90d9 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -773,6 +773,7 @@ static const struct iommu_ops apple_dart_iommu_ops = { .get_resv_regions = apple_dart_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .pgsize_bitmap = -1UL, /* Restricted during dart probe */ + .owner = THIS_MODULE, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = apple_dart_attach_dev, .detach_dev = apple_dart_detach_dev, @@ -859,16 +860,15 @@ static int apple_dart_probe(struct platform_device *pdev) dart->dev = dev; spin_lock_init(&dart->lock); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(dart->regs)) + return PTR_ERR(dart->regs); + if (resource_size(res) < 0x4000) { dev_err(dev, "MMIO region too small (%pr)\n", res); return -EINVAL; } - dart->regs = devm_ioremap_resource(dev, res); - if (IS_ERR(dart->regs)) - return PTR_ERR(dart->regs); - dart->irq = platform_get_irq(pdev, 0); if (dart->irq < 0) return -ENODEV; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 22ddd05bbdcd..c623dae1e115 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -183,7 +183,14 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn, { struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn); struct arm_smmu_domain *smmu_domain = smmu_mn->domain; - size_t size = end - start + 1; + size_t size; + + /* + * The mm_types defines vm_end as the first byte after the end address, + * different from IOMMU subsystem using the last address of an address + * range. So do a simple translation here by calculating size correctly. + */ + size = end - start; if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c index 01e9b50b10a1..87bf522b9d2e 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c @@ -258,6 +258,34 @@ static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct devi dev_name(dev), err); } +static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain, + struct io_pgtable_cfg *pgtbl_cfg, + struct device *dev) +{ + struct arm_smmu_device *smmu = smmu_domain->smmu; + const struct device_node *np = smmu->dev->of_node; + + /* + * Tegra194 and Tegra234 SoCs have the erratum that causes walk cache + * entries to not be invalidated correctly. The problem is that the walk + * cache index generated for IOVA is not same across translation and + * invalidation requests. This is leading to page faults when PMD entry + * is released during unmap and populated with new PTE table during + * subsequent map request. Disabling large page mappings avoids the + * release of PMD entry and avoid translations seeing stale PMD entry in + * walk cache. + * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and + * Tegra234. 
+ */ + if (of_device_is_compatible(np, "nvidia,tegra234-smmu") || + of_device_is_compatible(np, "nvidia,tegra194-smmu")) { + smmu->pgsize_bitmap = PAGE_SIZE; + pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap; + } + + return 0; +} + static const struct arm_smmu_impl nvidia_smmu_impl = { .read_reg = nvidia_smmu_read_reg, .write_reg = nvidia_smmu_write_reg, @@ -268,10 +296,12 @@ static const struct arm_smmu_impl nvidia_smmu_impl = { .global_fault = nvidia_smmu_global_fault, .context_fault = nvidia_smmu_context_fault, .probe_finalize = nvidia_smmu_probe_finalize, + .init_context = nvidia_smmu_init_context, }; static const struct arm_smmu_impl nvidia_smmu_single_impl = { .probe_finalize = nvidia_smmu_probe_finalize, + .init_context = nvidia_smmu_init_context, }; struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index df5c62ecf942..0ea47e17b379 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1588,7 +1588,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, unsigned long pfn, unsigned int pages, int ih, int map) { - unsigned int mask = ilog2(__roundup_pow_of_two(pages)); + unsigned int aligned_pages = __roundup_pow_of_two(pages); + unsigned int mask = ilog2(aligned_pages); uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; u16 did = domain->iommu_did[iommu->seq_id]; @@ -1600,10 +1601,30 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, if (domain_use_first_level(domain)) { qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih); } else { + unsigned long bitmask = aligned_pages - 1; + + /* + * PSI masks the low order bits of the base address. If the + * address isn't aligned to the mask, then compute a mask value + * needed to ensure the target range is flushed. + */ + if (unlikely(bitmask & pfn)) { + unsigned long end_pfn = pfn + pages - 1, shared_bits; + + /* + * Since end_pfn <= pfn + bitmask, the only way bits + * higher than bitmask can differ in pfn and end_pfn is + * by carrying. This means after masking out bitmask, + * high bits starting with the first set bit in + * shared_bits are all equal in both pfn and end_pfn. + */ + shared_bits = ~(pfn ^ end_pfn) & ~bitmask; + mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG; + } + /* * Fallback to domain selective flush if no PSI support or - * the size is too big. PSI requires page size to be 2 ^ x, - * and the base address is naturally aligned to the size. + * the size is too big. */ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 23a38763c1d1..7ee37d996e15 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -757,6 +757,10 @@ bad_req: goto bad_req; } + /* Drop Stop Marker message. No need for a response. */ + if (unlikely(req->lpig && !req->rd_req && !req->wr_req)) + goto prq_advance; + if (!svm || svm->pasid != req->pasid) { /* * It can't go away, because the driver is not permitted diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index f2c45b85b9fc..857d4c2fd1a2 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -506,6 +506,13 @@ int iommu_get_group_resv_regions(struct iommu_group *group, list_for_each_entry(device, &group->devices, list) { struct list_head dev_resv_regions; + /* + * Non-API groups still expose reserved_regions in sysfs, + * so filter out calls that get here that way. 
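The shared_bits computation added to iommu_flush_iotlb_psi() above is easier to follow with concrete numbers. The standalone sketch below (not driver code; it assumes a 64-bit unsigned long and reimplements ilog2/roundup locally) walks pfn = 0x17, pages = 4 through the same arithmetic: the naive mask of 2 would flush only pfns 0x14-0x17 and miss 0x18-0x1a, while the widened mask of 4 covers 0x10-0x1f.

/*
 * Standalone sketch (not driver code) of the PSI mask computation.
 * Assumes a 64-bit unsigned long; ilog2/roundup are local reimplementations.
 */
#include <stdio.h>

static unsigned int ilog2_ul(unsigned long v)          /* floor(log2(v)), v > 0 */
{
	return 63 - __builtin_clzl(v);
}

static unsigned long roundup_pow_of_two_ul(unsigned long v)
{
	return v == 1 ? 1 : 1UL << (ilog2_ul(v - 1) + 1);
}

int main(void)
{
	unsigned long pfn = 0x17, pages = 4;               /* unaligned request */
	unsigned long aligned_pages = roundup_pow_of_two_ul(pages);
	unsigned long bitmask = aligned_pages - 1;
	unsigned int mask = ilog2_ul(aligned_pages);       /* naive mask: 2 */

	if (bitmask & pfn) {                               /* base not aligned */
		unsigned long end_pfn = pfn + pages - 1;   /* 0x1a */
		unsigned long shared_bits = ~(pfn ^ end_pfn) & ~bitmask;

		mask = shared_bits ? (unsigned int)__builtin_ctzl(shared_bits) : 64;
	}

	/* Prints: mask = 4, flush [0x10, 0x1f] -- which contains 0x17..0x1a. */
	printf("mask = %u, flush [%#lx, %#lx]\n", mask,
	       pfn & ~((1UL << mask) - 1),
	       (pfn & ~((1UL << mask) - 1)) + (1UL << mask) - 1);
	return 0;
}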
+ */ + if (!device->dev->iommu) + break; + INIT_LIST_HEAD(&dev_resv_regions); iommu_get_resv_regions(device->dev, &dev_resv_regions); ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); @@ -3019,7 +3026,7 @@ static ssize_t iommu_group_store_type(struct iommu_group *group, if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; - if (WARN_ON(!group)) + if (WARN_ON(!group) || !group->default_domain) return -EINVAL; if (sysfs_streq(buf, "identity")) diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c index 2e545f473cc6..019a0822bde0 100644 --- a/drivers/memory/renesas-rpc-if.c +++ b/drivers/memory/renesas-rpc-if.c @@ -164,25 +164,39 @@ static const struct regmap_access_table rpcif_volatile_table = { /* - * Custom accessor functions to ensure SMRDR0 and SMWDR0 are always accessed - * with proper width. Requires SMENR_SPIDE to be correctly set before! + * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with + * proper width. Requires rpcif.xfer_size to be correctly set before! */ static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val) { struct rpcif *rpc = context; - if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) { - u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF); - - if (spide == 0x8) { + switch (reg) { + case RPCIF_SMRDR0: + case RPCIF_SMWDR0: + switch (rpc->xfer_size) { + case 1: *val = readb(rpc->base + reg); return 0; - } else if (spide == 0xC) { + + case 2: *val = readw(rpc->base + reg); return 0; - } else if (spide != 0xF) { + + case 4: + case 8: + *val = readl(rpc->base + reg); + return 0; + + default: return -EILSEQ; } + + case RPCIF_SMRDR1: + case RPCIF_SMWDR1: + if (rpc->xfer_size != 8) + return -EILSEQ; + break; } *val = readl(rpc->base + reg); @@ -193,18 +207,34 @@ static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val) { struct rpcif *rpc = context; - if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) { - u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF); - - if (spide == 0x8) { + switch (reg) { + case RPCIF_SMWDR0: + switch (rpc->xfer_size) { + case 1: writeb(val, rpc->base + reg); return 0; - } else if (spide == 0xC) { + + case 2: writew(val, rpc->base + reg); return 0; - } else if (spide != 0xF) { + + case 4: + case 8: + writel(val, rpc->base + reg); + return 0; + + default: return -EILSEQ; } + + case RPCIF_SMWDR1: + if (rpc->xfer_size != 8) + return -EILSEQ; + break; + + case RPCIF_SMRDR0: + case RPCIF_SMRDR1: + return -EPERM; } writel(val, rpc->base + reg); @@ -469,6 +499,7 @@ int rpcif_manual_xfer(struct rpcif *rpc) smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes)); regmap_write(rpc->regmap, RPCIF_SMENR, smenr); + rpc->xfer_size = nbytes; memcpy(data, rpc->buffer + pos, nbytes); if (nbytes == 8) { @@ -533,6 +564,7 @@ int rpcif_manual_xfer(struct rpcif *rpc) regmap_write(rpc->regmap, RPCIF_SMENR, smenr); regmap_write(rpc->regmap, RPCIF_SMCR, rpc->smcr | RPCIF_SMCR_SPIE); + rpc->xfer_size = nbytes; ret = wait_msg_xfer_end(rpc); if (ret) goto err_out; diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 91f96abbb3f9..8d169a35cf13 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -31,6 +31,8 @@ */ #define FM25_SN_LEN 8 /* serial number length */ +#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */ + struct at25_data { struct spi_eeprom chip; struct spi_device *spi; @@ -39,6 +41,7 @@ struct at25_data { struct nvmem_config nvmem_config; struct nvmem_device 
*nvmem; u8 sernum[FM25_SN_LEN]; + u8 command[EE_MAXADDRLEN + 1]; }; #define AT25_WREN 0x06 /* latch the write enable */ @@ -61,8 +64,6 @@ struct at25_data { #define FM25_ID_LEN 9 /* ID length */ -#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */ - /* * Specs often allow 5ms for a page write, sometimes 20ms; * it's important to recover from write timeouts. @@ -78,7 +79,6 @@ static int at25_ee_read(void *priv, unsigned int offset, { struct at25_data *at25 = priv; char *buf = val; - u8 command[EE_MAXADDRLEN + 1]; u8 *cp; ssize_t status; struct spi_transfer t[2]; @@ -92,12 +92,15 @@ static int at25_ee_read(void *priv, unsigned int offset, if (unlikely(!count)) return -EINVAL; - cp = command; + cp = at25->command; instr = AT25_READ; if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) if (offset >= BIT(at25->addrlen * 8)) instr |= AT25_INSTR_BIT3; + + mutex_lock(&at25->lock); + *cp++ = instr; /* 8/16/24-bit address is written MSB first */ @@ -116,7 +119,7 @@ static int at25_ee_read(void *priv, unsigned int offset, spi_message_init(&m); memset(t, 0, sizeof(t)); - t[0].tx_buf = command; + t[0].tx_buf = at25->command; t[0].len = at25->addrlen + 1; spi_message_add_tail(&t[0], &m); @@ -124,8 +127,6 @@ static int at25_ee_read(void *priv, unsigned int offset, t[1].len = count; spi_message_add_tail(&t[1], &m); - mutex_lock(&at25->lock); - /* * Read it all at once. * @@ -152,7 +153,7 @@ static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command, spi_message_init(&m); memset(t, 0, sizeof(t)); - t[0].tx_buf = &command; + t[0].tx_buf = at25->command; t[0].len = 1; spi_message_add_tail(&t[0], &m); @@ -162,6 +163,8 @@ static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command, mutex_lock(&at25->lock); + at25->command[0] = command; + status = spi_sync(at25->spi, &m); dev_dbg(&at25->spi->dev, "read %d aux bytes --> %d\n", len, status); diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index d0c5a7a60daf..5215bd9b2c80 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -241,13 +241,14 @@ struct grcan_device_config { .rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \ } -#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100 +#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100 #define GRLIB_VERSION_MASK 0xffff /* GRCAN private data structure */ struct grcan_priv { struct can_priv can; /* must be the first member */ struct net_device *dev; + struct device *ofdev_dev; struct napi_struct napi; struct grcan_registers __iomem *regs; /* ioremap'ed registers */ @@ -921,7 +922,7 @@ static void grcan_free_dma_buffers(struct net_device *dev) struct grcan_priv *priv = netdev_priv(dev); struct grcan_dma *dma = &priv->dma; - dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf, + dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf, dma->base_handle); memset(dma, 0, sizeof(*dma)); } @@ -946,7 +947,7 @@ static int grcan_allocate_dma_buffers(struct net_device *dev, /* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */ dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT; - dma->base_buf = dma_alloc_coherent(&dev->dev, + dma->base_buf = dma_alloc_coherent(priv->ofdev_dev, dma->base_size, &dma->base_handle, GFP_KERNEL); @@ -1102,8 +1103,10 @@ static int grcan_close(struct net_device *dev) priv->closing = true; if (priv->need_txbug_workaround) { + spin_unlock_irqrestore(&priv->lock, flags); del_timer_sync(&priv->hang_timer); del_timer_sync(&priv->rr_timer); + spin_lock_irqsave(&priv->lock, flags); } netif_stop_queue(dev); grcan_stop_hardware(dev); @@ -1122,7 +1125,7 @@ 
static int grcan_close(struct net_device *dev) return 0; } -static int grcan_transmit_catch_up(struct net_device *dev, int budget) +static void grcan_transmit_catch_up(struct net_device *dev) { struct grcan_priv *priv = netdev_priv(dev); unsigned long flags; @@ -1130,7 +1133,7 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget) spin_lock_irqsave(&priv->lock, flags); - work_done = catch_up_echo_skb(dev, budget, true); + work_done = catch_up_echo_skb(dev, -1, true); if (work_done) { if (!priv->resetting && !priv->closing && !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) @@ -1144,8 +1147,6 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget) } spin_unlock_irqrestore(&priv->lock, flags); - - return work_done; } static int grcan_receive(struct net_device *dev, int budget) @@ -1227,19 +1228,13 @@ static int grcan_poll(struct napi_struct *napi, int budget) struct net_device *dev = priv->dev; struct grcan_registers __iomem *regs = priv->regs; unsigned long flags; - int tx_work_done, rx_work_done; - int rx_budget = budget / 2; - int tx_budget = budget - rx_budget; + int work_done; - /* Half of the budget for receiving messages */ - rx_work_done = grcan_receive(dev, rx_budget); + work_done = grcan_receive(dev, budget); - /* Half of the budget for transmitting messages as that can trigger echo - * frames being received - */ - tx_work_done = grcan_transmit_catch_up(dev, tx_budget); + grcan_transmit_catch_up(dev); - if (rx_work_done < rx_budget && tx_work_done < tx_budget) { + if (work_done < budget) { napi_complete(napi); /* Guarantee no interference with a running reset that otherwise @@ -1256,7 +1251,7 @@ static int grcan_poll(struct napi_struct *napi, int budget) spin_unlock_irqrestore(&priv->lock, flags); } - return rx_work_done + tx_work_done; + return work_done; } /* Work tx bug by waiting while for the risky situation to clear. 
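The grcan_poll() rework above keeps the usual NAPI contract: receive work is bounded by the budget, transmit echo catch-up runs outside it, and polling stops only when fewer than budget frames were handled. A small userspace simulation of that bounded-polling loop, with an invented pending-frame counter standing in for the RX ring, is sketched here:

/*
 * Userspace sketch of the polling contract the grcan_poll() rework follows:
 * consume at most 'budget' RX items per call and signal completion only
 * when fewer than 'budget' items were processed.  The queue and counts
 * below are made up for illustration.
 */
#include <stdio.h>

static int rx_pending = 10;                 /* pretend 10 frames are queued */

static int fake_receive(int budget)         /* stands in for grcan_receive() */
{
	int done = rx_pending < budget ? rx_pending : budget;

	rx_pending -= done;
	return done;
}

int main(void)
{
	const int budget = 4;
	int rounds = 0;

	for (;;) {
		int work_done = fake_receive(budget);

		rounds++;
		/* TX echo catch-up would run here without consuming budget. */
		if (work_done < budget) {
			/* napi_complete() equivalent: stop polling, re-arm IRQs */
			printf("done after %d rounds\n", rounds);
			break;
		}
	}
	return 0;
}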
If that fails, @@ -1587,6 +1582,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev, memcpy(&priv->config, &grcan_module_config, sizeof(struct grcan_device_config)); priv->dev = dev; + priv->ofdev_dev = &ofdev->dev; priv->regs = base; priv->can.bittiming_const = &grcan_bittiming_const; priv->can.do_set_bittiming = grcan_set_bittiming; @@ -1639,6 +1635,7 @@ exit_free_candev: static int grcan_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; + struct device_node *sysid_parent; u32 sysid, ambafreq; int irq, err; void __iomem *base; @@ -1647,10 +1644,15 @@ static int grcan_probe(struct platform_device *ofdev) /* Compare GRLIB version number with the first that does not * have the tx bug (see start_xmit) */ - err = of_property_read_u32(np, "systemid", &sysid); - if (!err && ((sysid & GRLIB_VERSION_MASK) - >= GRCAN_TXBUG_SAFE_GRLIB_VERSION)) - txbug = false; + sysid_parent = of_find_node_by_path("/ambapp0"); + if (sysid_parent) { + of_node_get(sysid_parent); + err = of_property_read_u32(sysid_parent, "systemid", &sysid); + if (!err && ((sysid & GRLIB_VERSION_MASK) >= + GRCAN_TXBUG_SAFE_GRLIB_VERSION)) + txbug = false; + of_node_put(sysid_parent); + } err = of_property_read_u32(np, "freq", &ambafreq); if (err) { diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 77501f9c5915..fbb32aa49b24 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1354,46 +1354,25 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port, config->legacy_pre_march2020 = false; } -int b53_phylink_mac_link_state(struct dsa_switch *ds, int port, - struct phylink_link_state *state) +static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds, + int port, + phy_interface_t interface) { struct b53_device *dev = ds->priv; - int ret = -EOPNOTSUPP; - if ((phy_interface_mode_is_8023z(state->interface) || - state->interface == PHY_INTERFACE_MODE_SGMII) && - dev->ops->serdes_link_state) - ret = dev->ops->serdes_link_state(dev, port, state); + if (!dev->ops->phylink_mac_select_pcs) + return NULL; - return ret; + return dev->ops->phylink_mac_select_pcs(dev, port, interface); } -EXPORT_SYMBOL(b53_phylink_mac_link_state); void b53_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state) { - struct b53_device *dev = ds->priv; - - if (mode == MLO_AN_PHY || mode == MLO_AN_FIXED) - return; - - if ((phy_interface_mode_is_8023z(state->interface) || - state->interface == PHY_INTERFACE_MODE_SGMII) && - dev->ops->serdes_config) - dev->ops->serdes_config(dev, port, mode, state); } EXPORT_SYMBOL(b53_phylink_mac_config); -void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port) -{ - struct b53_device *dev = ds->priv; - - if (dev->ops->serdes_an_restart) - dev->ops->serdes_an_restart(dev, port); -} -EXPORT_SYMBOL(b53_phylink_mac_an_restart); - void b53_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface) @@ -2269,9 +2248,8 @@ static const struct dsa_switch_ops b53_switch_ops = { .phy_write = b53_phy_write16, .adjust_link = b53_adjust_link, .phylink_get_caps = b53_phylink_get_caps, - .phylink_mac_link_state = b53_phylink_mac_link_state, + .phylink_mac_select_pcs = b53_phylink_mac_select_pcs, .phylink_mac_config = b53_phylink_mac_config, - .phylink_mac_an_restart = b53_phylink_mac_an_restart, .phylink_mac_link_down = b53_phylink_mac_link_down, .phylink_mac_link_up = b53_phylink_mac_link_up, 
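The b53 phylink conversion in the hunks that follow embeds a struct phylink_pcs inside a driver-private struct b53_pcs and uses container_of() (pcs_to_b53_pcs()) to get back to the wrapper in each pcs_ops callback. A standalone illustration of that embed-and-recover pattern, with invented types in place of the phylink structures, looks like this:

/*
 * Standalone illustration of the container_of() pattern used by the
 * b53_pcs wrapper added below: a generic object is embedded in a
 * driver-private structure and the callback recovers the outer struct.
 * Types and names here are invented for the example.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_pcs {                  /* stands in for struct phylink_pcs */
	int id;
};

struct driver_pcs {                   /* stands in for struct b53_pcs */
	struct generic_pcs pcs;
	int lane;
};

static void callback(struct generic_pcs *pcs)
{
	struct driver_pcs *priv = container_of(pcs, struct driver_pcs, pcs);

	printf("callback on lane %d\n", priv->lane);
}

int main(void)
{
	struct driver_pcs dp = { .pcs = { .id = 1 }, .lane = 2 };

	callback(&dp.pcs);            /* only the embedded pointer is passed */
	return 0;
}

Passing only the embedded member keeps the generic layer unaware of the driver structure while the driver still reaches its per-lane state directly.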
.port_enable = b53_enable_port, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 3085b6cc7d40..795cbffd5c2b 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -21,7 +21,7 @@ #include <linux/kernel.h> #include <linux/mutex.h> -#include <linux/phy.h> +#include <linux/phylink.h> #include <linux/etherdevice.h> #include <net/dsa.h> @@ -29,7 +29,6 @@ struct b53_device; struct net_device; -struct phylink_link_state; struct b53_io_ops { int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value); @@ -48,13 +47,10 @@ struct b53_io_ops { void (*irq_disable)(struct b53_device *dev, int port); void (*phylink_get_caps)(struct b53_device *dev, int port, struct phylink_config *config); + struct phylink_pcs *(*phylink_mac_select_pcs)(struct b53_device *dev, + int port, + phy_interface_t interface); u8 (*serdes_map_lane)(struct b53_device *dev, int port); - int (*serdes_link_state)(struct b53_device *dev, int port, - struct phylink_link_state *state); - void (*serdes_config)(struct b53_device *dev, int port, - unsigned int mode, - const struct phylink_link_state *state); - void (*serdes_an_restart)(struct b53_device *dev, int port); void (*serdes_link_set)(struct b53_device *dev, int port, unsigned int mode, phy_interface_t interface, bool link_up); @@ -85,8 +81,15 @@ enum { BCM7278_DEVICE_ID = 0x7278, }; +struct b53_pcs { + struct phylink_pcs pcs; + struct b53_device *dev; + u8 lane; +}; + #define B53_N_PORTS 9 #define B53_N_PORTS_25 6 +#define B53_N_PCS 2 struct b53_port { u16 vlan_ctl_mask; @@ -143,6 +146,8 @@ struct b53_device { bool vlan_enabled; unsigned int num_ports; struct b53_port *ports; + + struct b53_pcs pcs[B53_N_PCS]; }; #define b53_for_each_port(dev, i) \ @@ -336,12 +341,9 @@ int b53_br_flags(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack); int b53_setup_devlink_resources(struct dsa_switch *ds); void b53_port_event(struct dsa_switch *ds, int port); -int b53_phylink_mac_link_state(struct dsa_switch *ds, int port, - struct phylink_link_state *state); void b53_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state); -void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port); void b53_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface); diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c index 555e5b372321..0690210770ff 100644 --- a/drivers/net/dsa/b53/b53_serdes.c +++ b/drivers/net/dsa/b53/b53_serdes.c @@ -17,6 +17,11 @@ #include "b53_serdes.h" #include "b53_regs.h" +static inline struct b53_pcs *pcs_to_b53_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct b53_pcs, pcs); +} + static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 block, u16 value) { @@ -60,51 +65,47 @@ static u16 b53_serdes_read(struct b53_device *dev, u8 lane, return b53_serdes_read_blk(dev, offset, block); } -void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode, - const struct phylink_link_state *state) +static int b53_serdes_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) { - u8 lane = b53_serdes_map_lane(dev, port); + struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev; + u8 lane = pcs_to_b53_pcs(pcs)->lane; u16 reg; - if (lane == B53_INVALID_LANE) - return; - reg = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_CONTROL(1), SERDES_DIGITAL_BLK); - if 
(state->interface == PHY_INTERFACE_MODE_1000BASEX) + if (interface == PHY_INTERFACE_MODE_1000BASEX) reg |= FIBER_MODE_1000X; else reg &= ~FIBER_MODE_1000X; b53_serdes_write(dev, lane, B53_SERDES_DIGITAL_CONTROL(1), SERDES_DIGITAL_BLK, reg); + + return 0; } -EXPORT_SYMBOL(b53_serdes_config); -void b53_serdes_an_restart(struct b53_device *dev, int port) +static void b53_serdes_an_restart(struct phylink_pcs *pcs) { - u8 lane = b53_serdes_map_lane(dev, port); + struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev; + u8 lane = pcs_to_b53_pcs(pcs)->lane; u16 reg; - if (lane == B53_INVALID_LANE) - return; - reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR), SERDES_MII_BLK); reg |= BMCR_ANRESTART; b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR), SERDES_MII_BLK, reg); } -EXPORT_SYMBOL(b53_serdes_an_restart); -int b53_serdes_link_state(struct b53_device *dev, int port, - struct phylink_link_state *state) +static void b53_serdes_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) { - u8 lane = b53_serdes_map_lane(dev, port); + struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev; + u8 lane = pcs_to_b53_pcs(pcs)->lane; u16 dig, bmsr; - if (lane == B53_INVALID_LANE) - return 1; - dig = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_STATUS, SERDES_DIGITAL_BLK); bmsr = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMSR), @@ -133,10 +134,7 @@ int b53_serdes_link_state(struct b53_device *dev, int port, state->pause |= MLO_PAUSE_RX; if (dig & PAUSE_RESOLUTION_TX_SIDE) state->pause |= MLO_PAUSE_TX; - - return 0; } -EXPORT_SYMBOL(b53_serdes_link_state); void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode, phy_interface_t interface, bool link_up) @@ -158,6 +156,12 @@ void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode, } EXPORT_SYMBOL(b53_serdes_link_set); +static const struct phylink_pcs_ops b53_pcs_ops = { + .pcs_get_state = b53_serdes_get_state, + .pcs_config = b53_serdes_config, + .pcs_an_restart = b53_serdes_an_restart, +}; + void b53_serdes_phylink_get_caps(struct b53_device *dev, int port, struct phylink_config *config) { @@ -187,9 +191,28 @@ void b53_serdes_phylink_get_caps(struct b53_device *dev, int port, } EXPORT_SYMBOL(b53_serdes_phylink_get_caps); +struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev, + int port, + phy_interface_t interface) +{ + u8 lane = b53_serdes_map_lane(dev, port); + + if (lane == B53_INVALID_LANE || lane >= B53_N_PCS || + !dev->pcs[lane].dev) + return NULL; + + if (!phy_interface_mode_is_8023z(interface) && + interface != PHY_INTERFACE_MODE_SGMII) + return NULL; + + return &dev->pcs[lane].pcs; +} +EXPORT_SYMBOL(b53_serdes_phylink_mac_select_pcs); + int b53_serdes_init(struct b53_device *dev, int port) { u8 lane = b53_serdes_map_lane(dev, port); + struct b53_pcs *pcs; u16 id0, msb, lsb; if (lane == B53_INVALID_LANE) @@ -212,6 +235,11 @@ int b53_serdes_init(struct b53_device *dev, int port) (id0 >> SERDES_ID0_REV_NUM_SHIFT) & SERDES_ID0_REV_NUM_MASK, (u32)msb << 16 | lsb); + pcs = &dev->pcs[lane]; + pcs->dev = dev; + pcs->lane = lane; + pcs->pcs.ops = &b53_pcs_ops; + return 0; } EXPORT_SYMBOL(b53_serdes_init); diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h index f47d5caa7557..ef81f5da5f81 100644 --- a/drivers/net/dsa/b53/b53_serdes.h +++ b/drivers/net/dsa/b53/b53_serdes.h @@ -107,14 +107,11 @@ static inline u8 b53_serdes_map_lane(struct b53_device *dev, int port) return dev->ops->serdes_map_lane(dev, port); } -int 
b53_serdes_get_link(struct b53_device *dev, int port); -int b53_serdes_link_state(struct b53_device *dev, int port, - struct phylink_link_state *state); -void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode, - const struct phylink_link_state *state); -void b53_serdes_an_restart(struct b53_device *dev, int port); void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode, phy_interface_t interface, bool link_up); +struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev, + int port, + phy_interface_t interface); void b53_serdes_phylink_get_caps(struct b53_device *dev, int port, struct phylink_config *config); #if IS_ENABLED(CONFIG_B53_SERDES) diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index c51b716657db..da0b889880f6 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -491,10 +491,8 @@ static const struct b53_io_ops b53_srab_ops = { .irq_disable = b53_srab_irq_disable, .phylink_get_caps = b53_srab_phylink_get_caps, #if IS_ENABLED(CONFIG_B53_SERDES) + .phylink_mac_select_pcs = b53_serdes_phylink_mac_select_pcs, .serdes_map_lane = b53_srab_serdes_map_lane, - .serdes_link_state = b53_serdes_link_state, - .serdes_config = b53_serdes_config, - .serdes_an_restart = b53_serdes_an_restart, .serdes_link_set = b53_serdes_link_set, #endif }; diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 48c90e4cda30..61dd0fa97748 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -896,14 +896,32 @@ static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port, bool ingress, struct netlink_ext_ack *extack) { struct ksz_device *dev = ds->priv; + u8 data; + int p; + + /* Limit to one sniffer port + * Check if any of the port is already set for sniffing + * If yes, instruct the user to remove the previous entry & exit + */ + for (p = 0; p < dev->port_cnt; p++) { + /* Skip the current sniffing port */ + if (p == mirror->to_local_port) + continue; + + ksz_pread8(dev, p, P_MIRROR_CTRL, &data); + + if (data & PORT_MIRROR_SNIFFER) { + NL_SET_ERR_MSG_MOD(extack, + "Sniffer port is already configured, delete existing rules & retry"); + return -EBUSY; + } + } if (ingress) ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true); else ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true); - ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false); - /* configure mirror port */ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, true); @@ -917,16 +935,28 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror) { struct ksz_device *dev = ds->priv; + bool in_use = false; u8 data; + int p; if (mirror->ingress) ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false); else ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false); - ksz_pread8(dev, port, P_MIRROR_CTRL, &data); - if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) + /* Check if any of the port is still referring to sniffer port */ + for (p = 0; p < dev->port_cnt; p++) { + ksz_pread8(dev, p, P_MIRROR_CTRL, &data); + + if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) { + in_use = true; + break; + } + } + + /* delete sniffing if there are no other mirroring rules */ + if (!in_use) ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false); } diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 
e55d962fef9f..2b02d823d497 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2234,6 +2234,7 @@ mt7530_setup(struct dsa_switch *ds) ret = of_get_phy_mode(mac_np, &interface); if (ret && ret != -ENODEV) { of_node_put(mac_np); + of_node_put(phy_node); return ret; } id = of_mdio_parse_addr(ds->dev, phy_node); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0489c1c2e7dd..082518e68579 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2827,6 +2827,10 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) u32 idx = le32_to_cpu(nqcmp->cq_handle_low); struct bnxt_cp_ring_info *cpr2; + /* No more budget for RX work */ + if (budget && work_done >= budget && idx == BNXT_RX_HDL) + break; + cpr2 = cpr->cp_ring_arr[idx]; work_done += __bnxt_poll_work(bp, cpr2, budget - work_done); @@ -11128,7 +11132,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp) if (bp->flags & BNXT_FLAG_CHIP_P5) return bnxt_rfs_supported(bp); - if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) + if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) return false; vnics = 1 + bp->rx_nr_rings; @@ -13382,10 +13386,9 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp) goto init_dflt_ring_err; bp->tx_nr_rings_per_tc = bp->tx_nr_rings; - if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { - bp->flags |= BNXT_FLAG_RFS; - bp->dev->features |= NETIF_F_NTUPLE; - } + + bnxt_set_dflt_rfs(bp); + init_dflt_ring_err: bnxt_ulp_irq_restart(bp, rc); return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 9c2ad5e67a5d..00f2f80c0073 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -846,13 +846,6 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) if (rc) return rc; - if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) { - bnxt_ptp_timecounter_init(bp, false); - rc = bnxt_ptp_init_rtc(bp, phc_cfg); - if (rc) - goto out; - } - if (ptp->ptp_clock && bnxt_pps_config_ok(bp)) return 0; @@ -861,8 +854,14 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS); spin_lock_init(&ptp->ptp_lock); - if (!(bp->fw_cap & BNXT_FW_CAP_PTP_RTC)) + if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) { + bnxt_ptp_timecounter_init(bp, false); + rc = bnxt_ptp_init_rtc(bp, phc_cfg); + if (rc) + goto out; + } else { bnxt_ptp_timecounter_init(bp, true); + } ptp->ptp_info = bnxt_ptp_caps; if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) { diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index f2f1ce81fd9c..0ec65ec634df 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -59,7 +59,7 @@ struct nicpf { /* MSI-X */ u8 num_vec; - bool irq_allocated[NIC_PF_MSIX_VECTORS]; + unsigned int irq_allocated[NIC_PF_MSIX_VECTORS]; char irq_name[NIC_PF_MSIX_VECTORS][20]; }; @@ -1150,7 +1150,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) u64 intr; u8 vf; - if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0)) + if (irq == nic->irq_allocated[NIC_PF_INTR_ID_MBOX0]) mbx = 0; else mbx = 1; @@ -1176,14 +1176,14 @@ static void nic_free_all_interrupts(struct nicpf *nic) for (irq = 0; irq < nic->num_vec; irq++) { if (nic->irq_allocated[irq]) - free_irq(pci_irq_vector(nic->pdev, irq), nic); - nic->irq_allocated[irq] = 
false; + free_irq(nic->irq_allocated[irq], nic); + nic->irq_allocated[irq] = 0; } } static int nic_register_interrupts(struct nicpf *nic) { - int i, ret; + int i, ret, irq; nic->num_vec = pci_msix_vec_count(nic->pdev); /* Enable MSI-X */ @@ -1201,13 +1201,13 @@ static int nic_register_interrupts(struct nicpf *nic) sprintf(nic->irq_name[i], "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0)); - ret = request_irq(pci_irq_vector(nic->pdev, i), - nic_mbx_intr_handler, 0, + irq = pci_irq_vector(nic->pdev, i); + ret = request_irq(irq, nic_mbx_intr_handler, 0, nic->irq_name[i], nic); if (ret) goto fail; - nic->irq_allocated[i] = true; + nic->irq_allocated[i] = irq; } /* Enable mailbox interrupt */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c index 2d9b06d7caad..f7dc7d825f63 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -771,7 +771,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, /* If we only have one page, still need to get shadown wqe when * wqe rolling-over page */ - if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) { + if (curr_pg != end_pg || end_prod_idx < *prod_idx) { void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx); @@ -841,7 +841,10 @@ struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, *cons_idx = curr_cons_idx; - if (curr_pg != end_pg) { + /* If we only have one page, still need to get shadown wqe when + * wqe rolling-over page + */ + if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) { void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c index 32d83421226a..5897940a418b 100644 --- a/drivers/net/ethernet/mediatek/mtk_sgmii.c +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -26,6 +26,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) break; ss->regmap[i] = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(ss->regmap[i])) return PTR_ERR(ss->regmap[i]); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c index 538adab6878b..c5b560a8b026 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c @@ -31,6 +31,7 @@ static const char *const mlx5_rsc_sgmt_name[] = { struct mlx5_rsc_dump { u32 pdn; u32 mkey; + u32 number_of_menu_items; u16 fw_segment_type[MLX5_SGMT_TYPE_NUM]; }; @@ -50,21 +51,37 @@ static int mlx5_rsc_dump_sgmt_get_by_name(char *name) return -EINVAL; } -static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page) +#define MLX5_RSC_DUMP_MENU_HEADER_SIZE (MLX5_ST_SZ_BYTES(resource_dump_info_segment) + \ + MLX5_ST_SZ_BYTES(resource_dump_command_segment) + \ + MLX5_ST_SZ_BYTES(resource_dump_menu_segment)) + +static int mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page, + int read_size, int start_idx) { void *data = page_address(page); enum mlx5_sgmt_type sgmt_idx; int num_of_items; char *sgmt_name; void *member; + int size = 0; void *menu; int i; - menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu); - num_of_items = MLX5_GET(resource_dump_menu_segment, menu, 
num_of_records); + if (!start_idx) { + menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu); + rsc_dump->number_of_menu_items = MLX5_GET(resource_dump_menu_segment, menu, + num_of_records); + size = MLX5_RSC_DUMP_MENU_HEADER_SIZE; + data += size; + } + num_of_items = rsc_dump->number_of_menu_items; + + for (i = 0; start_idx + i < num_of_items; i++) { + size += MLX5_ST_SZ_BYTES(resource_dump_menu_record); + if (size >= read_size) + return start_idx + i; - for (i = 0; i < num_of_items; i++) { - member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]); + member = data + MLX5_ST_SZ_BYTES(resource_dump_menu_record) * i; sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name); sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name); if (sgmt_idx == -EINVAL) @@ -72,6 +89,7 @@ static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record, member, segment_type); } + return 0; } static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, @@ -168,6 +186,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev) struct mlx5_rsc_dump_cmd *cmd = NULL; struct mlx5_rsc_key key = {}; struct page *page; + int start_idx = 0; int size; int err; @@ -189,7 +208,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev) if (err < 0) goto destroy_cmd; - mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page); + start_idx = mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page, size, start_idx); } while (err > 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index 673f1c82d381..c9d5d8d93994 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -309,8 +309,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz, - xoff, &port_buffer, &update_buffer); + err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff, + port_buff_cell_sz, &port_buffer, &update_buffer); if (err) return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c index af37a8d247a1..2755c25ba324 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c @@ -145,8 +145,7 @@ mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state, flow_action_for_each(i, act, flow_action) { tc_act = mlx5e_tc_act_get(act->id, ns_type); - if (!tc_act || !tc_act->post_parse || - !tc_act->can_offload(parse_state, act, i, attr)) + if (!tc_act || !tc_act->post_parse) continue; err = tc_act->post_parse(parse_state, priv, attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c index b9d38fe807df..a829c94289c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c @@ -45,12 +45,41 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state, if (mlx5e_is_eswitch_flow(parse_state->flow)) attr->esw_attr->split_count = attr->esw_attr->out_count; - if (!clear_action) { + if (clear_action) { + parse_state->ct_clear = true; + } else { attr->flags |= MLX5_ATTR_FLAG_CT; flow_flag_set(parse_state->flow, CT); parse_state->ct = true; } - 
parse_state->ct_clear = clear_action; + + return 0; +} + +static int +tc_act_post_parse_ct(struct mlx5e_tc_act_parse_state *parse_state, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5e_tc_mod_hdr_acts *mod_acts = &attr->parse_attr->mod_hdr_acts; + int err; + + /* If ct action exist, we can ignore previous ct_clear actions */ + if (parse_state->ct) + return 0; + + if (parse_state->ct_clear) { + err = mlx5_tc_ct_set_ct_clear_regs(parse_state->ct_priv, mod_acts); + if (err) { + NL_SET_ERR_MSG_MOD(parse_state->extack, + "Failed to set registers for ct clear"); + return err; + } + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + /* Prevent handling of additional, redundant clear actions */ + parse_state->ct_clear = false; + } return 0; } @@ -70,5 +99,6 @@ struct mlx5e_tc_act mlx5e_tc_act_ct = { .can_offload = tc_act_can_offload_ct, .parse_action = tc_act_parse_ct, .is_multi_table_act = tc_act_is_multi_table_act_ct, + .post_parse = tc_act_post_parse_ct, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 89aa0208b5d2..228fbd2c20d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -582,6 +582,12 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv, return 0; } +int mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv, + struct mlx5e_tc_mod_hdr_acts *mod_acts) +{ + return mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0); +} + static int mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act, char *modact) @@ -1410,9 +1416,6 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, const struct flow_action_entry *act, struct netlink_ext_ack *extack) { - bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR; - int err; - if (!priv) { NL_SET_ERR_MSG_MOD(extack, "offload of ct action isn't available"); @@ -1423,17 +1426,6 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, attr->ct_attr.ct_action = act->ct.action; attr->ct_attr.nf_ft = act->ct.flow_table; - if (!clear_action) - goto out; - - err = mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Failed to set registers for ct clear"); - return err; - } - attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - -out: return 0; } @@ -1749,6 +1741,8 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg) static void mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) { + struct mlx5e_priv *priv; + if (!refcount_dec_and_test(&ft->refcount)) return; @@ -1758,6 +1752,8 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) rhashtable_free_and_destroy(&ft->ct_entries_ht, mlx5_tc_ct_flush_ft_entry, ct_priv); + priv = netdev_priv(ct_priv->netdev); + flush_workqueue(priv->wq); mlx5_tc_ct_free_pre_ct_tables(ft); mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id); kfree(ft); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 36d3652bf829..00a3ba862afb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -129,6 +129,10 @@ bool mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv, struct sk_buff *skb, u8 zone_restore_id); +int +mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv, + struct mlx5e_tc_mod_hdr_acts *mod_acts); + #else /* CONFIG_MLX5_TC_CT */ static inline struct mlx5_tc_ct_priv * @@ 
-171,6 +175,13 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec) } static inline int +mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv, + struct mlx5e_tc_mod_hdr_acts *mod_acts) +{ + return -EOPNOTSUPP; +} + +static inline int mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr, struct mlx5e_tc_mod_hdr_acts *mod_acts, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 378fc8e3bd97..d87bbb0be7c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -713,6 +713,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, struct net_device *filter_dev) { struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_int_port *int_port; TC_TUN_ROUTE_ATTR_INIT(attr); u16 vport_num; @@ -747,7 +748,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value, misc_parameters.vxlan_vni); esw_attr->rx_tun_attr->decap_vport = vport_num; - } else if (netif_is_ovs_master(attr.route_dev)) { + } else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) { int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv), attr.route_dev->ifindex, MLX5E_TC_INT_PORT_INGRESS); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 6435517c0bee..2449731b7d79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1191,6 +1191,16 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv) return err; WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state); + if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) { + /* + * Align the driver state with the register state. + * Temporary state change is required to enable the app list reset. 
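On the mlx5e_trust_initialize() hunk just above: per its comment, the app-list reset only takes effect while the trust state reads DSCP, so the driver flips the cached state to DSCP, deletes the stale entries, and restores PCP. A rough userspace sketch of that flip-cleanup-restore sequence, with an invented enum, counter, and delete helper (the gating on DSCP mode is an assumption inferred from the comment), is:

/*
 * Userspace sketch of the reset sequence added to mlx5e_trust_initialize():
 * the delete helper is assumed to act only in DSCP trust mode, so the state
 * is flipped for the duration of the cleanup and then put back to PCP.
 * The enum, counter and helper are invented stand-ins.
 */
#include <stdio.h>

enum trust_state { TRUST_PCP, TRUST_DSCP };

static enum trust_state trust = TRUST_PCP;
static int dscp_app_cnt = 3;                 /* stale entries to clear */

static void delete_apps(void)                /* mimics mlx5e_dcbnl_delete_app() */
{
	if (trust != TRUST_DSCP)                 /* no-op outside DSCP mode */
		return;
	dscp_app_cnt = 0;
}

int main(void)
{
	if (trust == TRUST_PCP && dscp_app_cnt) {
		trust = TRUST_DSCP;              /* temporary state change */
		delete_apps();
		trust = TRUST_PCP;               /* restore the real state */
	}
	printf("remaining app entries: %d\n", dscp_app_cnt);
	return 0;
}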
+ */ + priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP; + mlx5e_dcbnl_delete_app(priv); + priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP; + } + mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params, priv->dcbx_dp.trust_state); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index e3fc15ae7bb1..ac0f73074f7a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2459,6 +2459,17 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, match.key->vlan_priority); *match_level = MLX5_MATCH_L2; + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) && + match.mask->vlan_eth_type && + MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, + ft_field_support.outer_second_vid, + fs_type)) { + MLX5_SET(fte_match_set_misc, misc_c, + outer_second_cvlan_tag, 1); + spec->match_criteria_enable |= + MLX5_MATCH_MISC_PARAMETERS; + } } } else if (*match_level != MLX5_MATCH_NONE) { /* cvlan_tag enabled in match criteria and diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 3f63df127091..3b151332e2f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -139,7 +139,7 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, if (mlx5_esw_indir_table_decap_vport(attr)) vport = mlx5_esw_indir_table_decap_vport(attr); - if (esw_attr->int_port) + if (attr && !attr->chain && esw_attr->int_port) metadata = mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port); else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 4aa22dce9b77..ca1aba845dd6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -155,6 +155,28 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev) } } +static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev) +{ + struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + + del_timer_sync(&fw_reset->timer); +} + +static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health) +{ + struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + + if (!test_and_clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) { + mlx5_core_warn(dev, "Reset request was already cleared\n"); + return -EALREADY; + } + + mlx5_stop_sync_reset_poll(dev); + if (poll_health) + mlx5_start_health_poll(dev); + return 0; +} + static void mlx5_sync_reset_reload_work(struct work_struct *work) { struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, @@ -162,6 +184,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work) struct mlx5_core_dev *dev = fw_reset->dev; int err; + mlx5_sync_reset_clear_reset_requested(dev, false); mlx5_enter_error_state(dev, true); mlx5_unload_one(dev); err = mlx5_health_wait_pci_up(dev); @@ -171,23 +194,6 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work) mlx5_fw_reset_complete_reload(dev); } -static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev) -{ - struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; - - del_timer_sync(&fw_reset->timer); -} - -static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health) -{ - struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; - - 
mlx5_stop_sync_reset_poll(dev); - clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags); - if (poll_health) - mlx5_start_health_poll(dev); -} - #define MLX5_RESET_POLL_INTERVAL (HZ / 10) static void poll_sync_reset(struct timer_list *t) { @@ -202,7 +208,6 @@ static void poll_sync_reset(struct timer_list *t) if (fatal_error) { mlx5_core_warn(dev, "Got Device Reset\n"); - mlx5_sync_reset_clear_reset_requested(dev, false); queue_work(fw_reset->wq, &fw_reset->reset_reload_work); return; } @@ -229,13 +234,17 @@ static int mlx5_fw_reset_set_reset_sync_nack(struct mlx5_core_dev *dev) return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false); } -static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev) +static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + if (test_and_set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) { + mlx5_core_warn(dev, "Reset request was already set\n"); + return -EALREADY; + } mlx5_stop_health_poll(dev, true); - set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags); mlx5_start_sync_reset_poll(dev); + return 0; } static void mlx5_fw_live_patch_event(struct work_struct *work) @@ -264,7 +273,9 @@ static void mlx5_sync_reset_request_event(struct work_struct *work) err ? "Failed" : "Sent"); return; } - mlx5_sync_reset_set_reset_requested(dev); + if (mlx5_sync_reset_set_reset_requested(dev)) + return; + err = mlx5_fw_reset_set_reset_sync_ack(dev); if (err) mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err); @@ -362,7 +373,8 @@ static void mlx5_sync_reset_now_event(struct work_struct *work) struct mlx5_core_dev *dev = fw_reset->dev; int err; - mlx5_sync_reset_clear_reset_requested(dev, false); + if (mlx5_sync_reset_clear_reset_requested(dev, false)) + return; mlx5_core_warn(dev, "Sync Reset now. 
Device is going to reset.\n"); @@ -391,10 +403,8 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work) reset_abort_work); struct mlx5_core_dev *dev = fw_reset->dev; - if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) + if (mlx5_sync_reset_clear_reset_requested(dev, true)) return; - - mlx5_sync_reset_clear_reset_requested(dev, true); mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n"); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c index 4a6ec15ef046..d6c3e6dfd71f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c @@ -100,6 +100,14 @@ static void mlx5_lag_fib_event_flush(struct notifier_block *nb) flush_workqueue(mp->wq); } +static void mlx5_lag_fib_set(struct lag_mp *mp, struct fib_info *fi, u32 dst, int dst_len) +{ + mp->fib.mfi = fi; + mp->fib.priority = fi->fib_priority; + mp->fib.dst = dst; + mp->fib.dst_len = dst_len; +} + struct mlx5_fib_event_work { struct work_struct work; struct mlx5_lag *ldev; @@ -110,10 +118,10 @@ struct mlx5_fib_event_work { }; }; -static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, - unsigned long event, - struct fib_info *fi) +static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event, + struct fib_entry_notifier_info *fen_info) { + struct fib_info *fi = fen_info->fi; struct lag_mp *mp = &ldev->lag_mp; struct fib_nh *fib_nh0, *fib_nh1; unsigned int nhs; @@ -121,13 +129,15 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, /* Handle delete event */ if (event == FIB_EVENT_ENTRY_DEL) { /* stop track */ - if (mp->mfi == fi) - mp->mfi = NULL; + if (mp->fib.mfi == fi) + mp->fib.mfi = NULL; return; } /* Handle multipath entry with lower priority value */ - if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority) + if (mp->fib.mfi && mp->fib.mfi != fi && + (mp->fib.dst != fen_info->dst || mp->fib.dst_len != fen_info->dst_len) && + fi->fib_priority >= mp->fib.priority) return; /* Handle add/replace event */ @@ -143,9 +153,9 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, i++; mlx5_lag_set_port_affinity(ldev, i); + mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len); } - mp->mfi = fi; return; } @@ -165,7 +175,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, } /* First time we see multipath route */ - if (!mp->mfi && !__mlx5_lag_is_active(ldev)) { + if (!mp->fib.mfi && !__mlx5_lag_is_active(ldev)) { struct lag_tracker tracker; tracker = ldev->tracker; @@ -173,7 +183,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, } mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY); - mp->mfi = fi; + mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len); } static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev, @@ -184,7 +194,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev, struct lag_mp *mp = &ldev->lag_mp; /* Check the nh event is related to the route */ - if (!mp->mfi || mp->mfi != fi) + if (!mp->fib.mfi || mp->fib.mfi != fi) return; /* nh added/removed */ @@ -214,7 +224,7 @@ static void mlx5_lag_fib_update(struct work_struct *work) case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_DEL: mlx5_lag_fib_route_event(ldev, fib_work->event, - fib_work->fen_info.fi); + &fib_work->fen_info); fib_info_put(fib_work->fen_info.fi); break; case FIB_EVENT_NH_ADD: @@ -313,7 +323,7 @@ void mlx5_lag_mp_reset(struct mlx5_lag *ldev) /* Clear mfi, as it might become stale when 
a route delete event * has been missed, see mlx5_lag_fib_route_event(). */ - ldev->lag_mp.mfi = NULL; + ldev->lag_mp.fib.mfi = NULL; } int mlx5_lag_mp_init(struct mlx5_lag *ldev) @@ -324,7 +334,7 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev) /* always clear mfi, as it might become stale when a route delete event * has been missed */ - mp->mfi = NULL; + mp->fib.mfi = NULL; if (mp->fib_nb.notifier_call) return 0; @@ -354,5 +364,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) unregister_fib_notifier(&init_net, &mp->fib_nb); destroy_workqueue(mp->wq); mp->fib_nb.notifier_call = NULL; - mp->mfi = NULL; + mp->fib.mfi = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h index 57af962cad29..056a066da604 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h @@ -15,7 +15,12 @@ enum mlx5_lag_port_affinity { struct lag_mp { struct notifier_block fib_nb; - struct fib_info *mfi; /* used in tracking fib events */ + struct { + const void *mfi; /* used in tracking fib events */ + u32 priority; + u32 dst; + int dst_len; + } fib; struct workqueue_struct *wq; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index a6592f9c3c05..5be322528279 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -505,7 +505,7 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev) struct ttc_params ttc_params = {}; mlx5_lag_set_inner_ttc_params(ldev, &ttc_params); - port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params); + port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params); if (IS_ERR(port_sel->inner.ttc)) return PTR_ERR(port_sel->inner.ttc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c index b63dec24747a..b78f2ba25c19 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c @@ -408,6 +408,8 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev, for (tt = 0; tt < MLX5_NUM_TT; tt++) { struct mlx5_ttc_rule *rule = &rules[tt]; + if (test_bit(tt, params->ignore_dests)) + continue; rule->rule = mlx5_generate_inner_ttc_rule(dev, ft, ¶ms->dests[tt], ttc_rules[tt].etype, diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 7a50ba00f8ae..c854efdf1f25 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2431,7 +2431,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) if (irq == -EPROBE_DEFER) { retval = -EPROBE_DEFER; goto out_0; - } else if (irq <= 0) { + } else if (irq < 0) { pr_warn("Could not allocate irq resource\n"); retval = -ENODEV; goto out_0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 63754a9c4ba7..0b0be0898ac5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -454,6 +454,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->has_gmac4 = 1; plat->force_sf_dma_mode = 0; plat->tso_en = 1; + plat->sph_disable = 1; /* Multiplying factor to the clk_eee_i clock time * period to make it closer to 100 ns. 
This value diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index f86cc83003f2..f834472599f7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -907,6 +907,7 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv) ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn, &gmac->mux_handle, priv, priv->mii); + of_node_put(mdio_mux); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7c834c02e084..65ae3ae9582f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7015,7 +7015,7 @@ int stmmac_dvr_probe(struct device *device, dev_info(priv->device, "TSO feature enabled\n"); } - if (priv->dma_cap.sphen) { + if (priv->dma_cap.sphen && !priv->plat->sph_disable) { ndev->hw_features |= NETIF_F_GRO; priv->sph_cap = true; priv->sph = priv->sph_cap; diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index b33781ed760e..299319320830 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1240,8 +1240,10 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw) data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM, sizeof(struct cpsw_slave_data), GFP_KERNEL); - if (!data->slave_data) + if (!data->slave_data) { + of_node_put(tmp_node); return -ENOMEM; + } /* Populate all the child nodes here... */ @@ -1335,6 +1337,7 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw) err_node_put: of_node_put(port_np); + of_node_put(tmp_node); return ret; } diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 7a86ae82fcc1..016a9c4f2c6c 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -803,10 +803,10 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) { struct mii_bus *bus; - int rc; struct resource res; struct device_node *np = of_get_parent(lp->phy_node); struct device_node *npp; + int rc, ret; /* Don't register the MDIO bus if the phy_node or its parent node * can't be found. 
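The dwmac-sun8i, cpsw_probe_dt() and xemaclite_mdio_setup() changes around this point all enforce the same OF reference-counting rule: lookups such as of_get_parent() or of_get_child_by_name() return a node with its refcount raised, and every exit path, including the error paths, has to drop it again with of_node_put(). A minimal sketch of that pattern follows; the helper name and the "example-child" node are illustrative assumptions, not code from any of the patched drivers.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Illustration only: map a child node's first register window while keeping
 * the OF refcount balanced on every return path.
 */
static int example_map_child(struct device *dev, void __iomem **base)
{
	struct device_node *child;
	struct resource res;
	int ret;

	/* of_get_child_by_name() returns the node with its refcount raised */
	child = of_get_child_by_name(dev->of_node, "example-child");
	if (!child)
		return -ENODEV;

	ret = of_address_to_resource(child, 0, &res);
	of_node_put(child);	/* drop the reference before any of the returns below */
	if (ret)
		return ret;

	*base = devm_ioremap_resource(dev, &res);
	return PTR_ERR_OR_ZERO(*base);
}

The xemaclite hunk that follows applies exactly this discipline: it checks the of_address_to_resource() return value and pairs every early return with the of_node_put() calls the old code was missing.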
@@ -816,8 +816,14 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) return -ENODEV; } npp = of_get_parent(np); - - of_address_to_resource(npp, 0, &res); + ret = of_address_to_resource(npp, 0, &res); + of_node_put(npp); + if (ret) { + dev_err(dev, "%s resource error!\n", + dev->of_node->full_name); + of_node_put(np); + return ret; + } if (lp->ndev->mem_start != res.start) { struct phy_device *phydev; @@ -827,6 +833,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) "MDIO of the phy is not registered yet\n"); else put_device(&phydev->mdio.dev); + of_node_put(np); return 0; } @@ -839,6 +846,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) bus = mdiobus_alloc(); if (!bus) { dev_err(dev, "Failed to allocate mdiobus\n"); + of_node_put(np); return -ENOMEM; } @@ -851,6 +859,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) bus->parent = dev; rc = of_mdiobus_register(bus, np); + of_node_put(np); if (rc) { dev_err(dev, "Failed to register mdio bus.\n"); goto err_register; @@ -907,8 +916,6 @@ static int xemaclite_open(struct net_device *dev) xemaclite_disable_interrupts(lp); if (lp->phy_node) { - u32 bmcr; - lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, xemaclite_adjust_link, 0, PHY_INTERFACE_MODE_MII); @@ -919,19 +926,6 @@ static int xemaclite_open(struct net_device *dev) /* EmacLite doesn't support giga-bit speeds */ phy_set_max_speed(lp->phy_dev, SPEED_100); - - /* Don't advertise 1000BASE-T Full/Half duplex speeds */ - phy_write(lp->phy_dev, MII_CTRL1000, 0); - - /* Advertise only 10 and 100mbps full/half duplex speeds */ - phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL | - ADVERTISE_CSMA); - - /* Restart auto negotiation */ - bmcr = phy_read(lp->phy_dev, MII_BMCR); - bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); - phy_write(lp->phy_dev, MII_BMCR, bmcr); - phy_start(lp->phy_dev); } diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c index 6dcbf987d61b..8b444a8eb6b5 100644 --- a/drivers/net/mdio/mdio-mux-bcm6368.c +++ b/drivers/net/mdio/mdio-mux-bcm6368.c @@ -115,7 +115,7 @@ static int bcm6368_mdiomux_probe(struct platform_device *pdev) md->mii_bus = devm_mdiobus_alloc(&pdev->dev); if (!md->mii_bus) { dev_err(&pdev->dev, "mdiomux bus alloc failed\n"); - return ENOMEM; + return -ENOMEM; } bus = md->mii_bus; diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 4dfb79807823..9a5d5a10560f 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -250,6 +250,7 @@ struct sfp { struct sfp_eeprom_id id; unsigned int module_power_mW; unsigned int module_t_start_up; + bool tx_fault_ignore; #if IS_ENABLED(CONFIG_HWMON) struct sfp_diag diag; @@ -1956,6 +1957,12 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) else sfp->module_t_start_up = T_START_UP; + if (!memcmp(id.base.vendor_name, "HUAWEI ", 16) && + !memcmp(id.base.vendor_pn, "MA5671A ", 16)) + sfp->tx_fault_ignore = true; + else + sfp->tx_fault_ignore = false; + return 0; } @@ -2409,7 +2416,10 @@ static void sfp_check_state(struct sfp *sfp) mutex_lock(&sfp->st_mutex); state = sfp_get_state(sfp); changed = state ^ sfp->state; - changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; + if (sfp->tx_fault_ignore) + changed &= SFP_F_PRESENT | SFP_F_LOS; + else + changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; for (i = 0; i < GPIO_MAX; i++) if (changed & BIT(i)) diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c index 2fcf545012b1..1a5284de4341 
100644 --- a/drivers/nfc/nfcmrvl/main.c +++ b/drivers/nfc/nfcmrvl/main.c @@ -183,6 +183,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) { struct nci_dev *ndev = priv->ndev; + nci_unregister_device(ndev); if (priv->ndev->nfc_dev->fw_download_in_progress) nfcmrvl_fw_dnld_abort(priv); @@ -191,7 +192,6 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) if (gpio_is_valid(priv->config.reset_n_io)) gpio_free(priv->config.reset_n_io); - nci_unregister_device(ndev); nci_free_device(ndev); kfree(priv); } diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c index 5b471ab80fe2..54d65a6f0fcc 100644 --- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c +++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c @@ -414,19 +414,19 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev) ret = clk_prepare_enable(priv->clk_ref); if (ret) - goto err_disable_clk_ref; + return ret; priv->reset = devm_reset_control_array_get_exclusive(dev); - if (IS_ERR(priv->reset)) - return PTR_ERR(priv->reset); + if (IS_ERR(priv->reset)) { + ret = PTR_ERR(priv->reset); + goto err_disable_clk_ref; + } priv->phy = devm_phy_create(dev, np, &phy_g12a_usb3_pcie_ops); if (IS_ERR(priv->phy)) { ret = PTR_ERR(priv->phy); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to create PHY\n"); - - return ret; + dev_err_probe(dev, ret, "failed to create PHY\n"); + goto err_disable_clk_ref; } phy_set_drvdata(priv->phy, priv); @@ -434,8 +434,12 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev) phy_provider = devm_of_phy_provider_register(dev, phy_g12a_usb3_pcie_xlate); + if (IS_ERR(phy_provider)) { + ret = PTR_ERR(phy_provider); + goto err_disable_clk_ref; + } - return PTR_ERR_OR_ZERO(phy_provider); + return 0; err_disable_clk_ref: clk_disable_unprepare(priv->clk_ref); diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c index 5172971f4c36..3cd4d51c247c 100644 --- a/drivers/phy/motorola/phy-mapphone-mdm6600.c +++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c @@ -629,7 +629,8 @@ idle: cleanup: if (error < 0) phy_mdm6600_device_power_off(ddata); - + pm_runtime_disable(ddata->dev); + pm_runtime_dont_use_autosuspend(ddata->dev); return error; } diff --git a/drivers/phy/samsung/phy-exynos5250-sata.c b/drivers/phy/samsung/phy-exynos5250-sata.c index 9ec234243f7c..595adba5fb8f 100644 --- a/drivers/phy/samsung/phy-exynos5250-sata.c +++ b/drivers/phy/samsung/phy-exynos5250-sata.c @@ -187,6 +187,7 @@ static int exynos_sata_phy_probe(struct platform_device *pdev) return -EINVAL; sata_phy->client = of_find_i2c_device_by_node(node); + of_node_put(node); if (!sata_phy->client) return -EPROBE_DEFER; @@ -195,20 +196,21 @@ static int exynos_sata_phy_probe(struct platform_device *pdev) sata_phy->phyclk = devm_clk_get(dev, "sata_phyctrl"); if (IS_ERR(sata_phy->phyclk)) { dev_err(dev, "failed to get clk for PHY\n"); - return PTR_ERR(sata_phy->phyclk); + ret = PTR_ERR(sata_phy->phyclk); + goto put_dev; } ret = clk_prepare_enable(sata_phy->phyclk); if (ret < 0) { dev_err(dev, "failed to enable source clk\n"); - return ret; + goto put_dev; } sata_phy->phy = devm_phy_create(dev, NULL, &exynos_sata_phy_ops); if (IS_ERR(sata_phy->phy)) { - clk_disable_unprepare(sata_phy->phyclk); dev_err(dev, "failed to create PHY\n"); - return PTR_ERR(sata_phy->phy); + ret = PTR_ERR(sata_phy->phy); + goto clk_disable; } phy_set_drvdata(sata_phy->phy, sata_phy); @@ -216,11 +218,18 @@ static int exynos_sata_phy_probe(struct 
platform_device *pdev) phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); if (IS_ERR(phy_provider)) { - clk_disable_unprepare(sata_phy->phyclk); - return PTR_ERR(phy_provider); + ret = PTR_ERR(phy_provider); + goto clk_disable; } return 0; + +clk_disable: + clk_disable_unprepare(sata_phy->phyclk); +put_dev: + put_device(&sata_phy->client->dev); + + return ret; } static const struct of_device_id exynos_sata_phy_of_match[] = { diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c index c1211c4f863c..0be727bb9f79 100644 --- a/drivers/phy/ti/phy-am654-serdes.c +++ b/drivers/phy/ti/phy-am654-serdes.c @@ -838,7 +838,7 @@ static int serdes_am654_probe(struct platform_device *pdev) clk_err: of_clk_del_provider(node); - + pm_runtime_disable(dev); return ret; } diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c index 3a505fe5715a..31a775877f6e 100644 --- a/drivers/phy/ti/phy-omap-usb2.c +++ b/drivers/phy/ti/phy-omap-usb2.c @@ -215,7 +215,7 @@ static int omap_usb2_enable_clocks(struct omap_usb *phy) return 0; err1: - clk_disable(phy->wkupclk); + clk_disable_unprepare(phy->wkupclk); err0: return ret; diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c index 2cbc91e535d4..f502c36f3be5 100644 --- a/drivers/phy/ti/phy-ti-pipe3.c +++ b/drivers/phy/ti/phy-ti-pipe3.c @@ -696,6 +696,7 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy) } control_pdev = of_find_device_by_node(control_node); + of_node_put(control_node); if (!control_pdev) { dev_err(dev, "Failed to get control device\n"); return -EINVAL; diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c index a0cdbcadf09e..c3ab4b69ea68 100644 --- a/drivers/phy/ti/phy-tusb1210.c +++ b/drivers/phy/ti/phy-tusb1210.c @@ -155,7 +155,7 @@ static int tusb1210_set_mode(struct phy *phy, enum phy_mode mode, int submode) } #ifdef CONFIG_POWER_SUPPLY -const char * const tusb1210_chg_det_states[] = { +static const char * const tusb1210_chg_det_states[] = { "CHG_DET_CONNECTING", "CHG_DET_START_DET", "CHG_DET_READ_DET", @@ -537,12 +537,18 @@ static int tusb1210_probe(struct ulpi *ulpi) tusb1210_probe_charger_detect(tusb); tusb->phy = ulpi_phy_create(ulpi, &phy_ops); - if (IS_ERR(tusb->phy)) - return PTR_ERR(tusb->phy); + if (IS_ERR(tusb->phy)) { + ret = PTR_ERR(tusb->phy); + goto err_remove_charger; + } phy_set_drvdata(tusb->phy, tusb); ulpi_set_drvdata(ulpi, tusb); return 0; + +err_remove_charger: + tusb1210_remove_charger_detect(tusb); + return ret; } static void tusb1210_remove(struct ulpi *ulpi) diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c index 122f9c884b38..ccd0577a771e 100644 --- a/drivers/soc/imx/imx8m-blk-ctrl.c +++ b/drivers/soc/imx/imx8m-blk-ctrl.c @@ -50,7 +50,7 @@ struct imx8m_blk_ctrl_domain_data { u32 mipi_phy_rst_mask; }; -#define DOMAIN_MAX_CLKS 3 +#define DOMAIN_MAX_CLKS 4 struct imx8m_blk_ctrl_domain { struct generic_pm_domain genpd; diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index ff292b75e23f..60dafe4c581b 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -588,7 +588,7 @@ static void pscsi_destroy_device(struct se_device *dev) } static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status, - unsigned char *req_sense) + unsigned char *req_sense, int valid_data) { struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev); struct scsi_device *sd = pdv->pdv_sd; @@ -681,7 +681,7 @@ after_mode_select: * back despite 
framework assumption that a * check condition means there is no data */ - if (sd->type == TYPE_TAPE && + if (sd->type == TYPE_TAPE && valid_data && cmd->data_direction == DMA_FROM_DEVICE) { /* * is sense data valid, fixed format, @@ -1032,6 +1032,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status) struct se_cmd *cmd = req->end_io_data; struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); enum sam_status scsi_status = scmd->result & 0xff; + int valid_data = cmd->data_length - scmd->resid_len; u8 *cdb = cmd->priv; if (scsi_status != SAM_STAT_GOOD) { @@ -1039,12 +1040,11 @@ static void pscsi_req_done(struct request *req, blk_status_t status) " 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result); } - pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer); + pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer, valid_data); switch (host_byte(scmd->result)) { case DID_OK: - target_complete_cmd_with_length(cmd, scsi_status, - cmd->data_length - scmd->resid_len); + target_complete_cmd_with_length(cmd, scsi_status, valid_data); break; default: pr_debug("PSCSI Host Byte exception at cmd: %p CDB:" diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index a5eb4ef46971..c9b3b2cfb2b2 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -865,6 +865,7 @@ err_rhashtable_free: rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL); optee_supp_uninit(&optee->supp); mutex_destroy(&optee->call_queue.mutex); + mutex_destroy(&optee->ffa.mutex); err_unreg_supp_teedev: tee_device_unregister(optee->supp_teedev); err_unreg_teedev: diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index fa92f727fdf8..a38b922bcbc1 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -73,6 +73,8 @@ module_param(debug, int, 0600); */ #define MAX_MRU 1500 #define MAX_MTU 1500 +/* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */ +#define PROT_OVERHEAD 7 #define GSM_NET_TX_TIMEOUT (HZ*10) /* @@ -219,7 +221,6 @@ struct gsm_mux { int encoding; u8 control; u8 fcs; - u8 received_fcs; u8 *txframe; /* TX framing buffer */ /* Method for the receiver side */ @@ -231,6 +232,7 @@ struct gsm_mux { int initiator; /* Did we initiate connection */ bool dead; /* Has the mux been shut down */ struct gsm_dlci *dlci[NUM_DLCI]; + int old_c_iflag; /* termios c_iflag value before attach */ bool constipated; /* Asked by remote to shut up */ spinlock_t tx_lock; @@ -271,10 +273,6 @@ static DEFINE_SPINLOCK(gsm_mux_lock); static struct tty_driver *gsm_tty_driver; -/* Save dlci open address */ -static int addr_open[256] = { 0 }; -/* Save dlci open count */ -static int addr_cnt; /* * This section of the driver logic implements the GSM encodings * both the basic and the 'advanced'. Reliable transport is not @@ -369,6 +367,7 @@ static const u8 gsm_fcs8[256] = { #define GOOD_FCS 0xCF static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len); +static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk); /** * gsm_fcs_add - update FCS @@ -832,7 +831,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci) break; case 2: /* Unstructed with modem bits. 
Always one byte as we never send inline break data */ - *dp++ = gsm_encode_modem(dlci); + *dp++ = (gsm_encode_modem(dlci) << 1) | EA; break; } WARN_ON(kfifo_out_locked(&dlci->fifo, dp , len, &dlci->lock) != len); @@ -917,6 +916,66 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, } /** + * gsm_dlci_modem_output - try and push modem status out of a DLCI + * @gsm: mux + * @dlci: the DLCI to pull modem status from + * @brk: break signal + * + * Push an empty frame in to the transmit queue to update the modem status + * bits and to transmit an optional break. + * + * Caller must hold the tx_lock of the mux. + */ + +static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci, + u8 brk) +{ + u8 *dp = NULL; + struct gsm_msg *msg; + int size = 0; + + /* for modem bits without break data */ + switch (dlci->adaption) { + case 1: /* Unstructured */ + break; + case 2: /* Unstructured with modem bits. */ + size++; + if (brk > 0) + size++; + break; + default: + pr_err("%s: unsupported adaption %d\n", __func__, + dlci->adaption); + return -EINVAL; + } + + msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); + if (!msg) { + pr_err("%s: gsm_data_alloc error", __func__); + return -ENOMEM; + } + dp = msg->data; + switch (dlci->adaption) { + case 1: /* Unstructured */ + break; + case 2: /* Unstructured with modem bits. */ + if (brk == 0) { + *dp++ = (gsm_encode_modem(dlci) << 1) | EA; + } else { + *dp++ = gsm_encode_modem(dlci) << 1; + *dp++ = (brk << 4) | 2 | EA; /* Length, Break, EA */ + } + break; + default: + /* Handled above */ + break; + } + + __gsm_data_queue(dlci, msg); + return size; +} + +/** * gsm_dlci_data_sweep - look for data to send * @gsm: the GSM mux * @@ -1093,7 +1152,6 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) { unsigned int addr = 0; unsigned int modem = 0; - unsigned int brk = 0; struct gsm_dlci *dlci; int len = clen; int slen; @@ -1123,17 +1181,8 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) return; } len--; - if (len > 0) { - while (gsm_read_ea(&brk, *dp++) == 0) { - len--; - if (len == 0) - return; - } - modem <<= 7; - modem |= (brk & 0x7f); - } tty = tty_port_tty_get(&dlci->port); - gsm_process_modem(tty, dlci, modem, slen); + gsm_process_modem(tty, dlci, modem, slen - len); if (tty) { tty_wakeup(tty); tty_kref_put(tty); @@ -1193,7 +1242,6 @@ static void gsm_control_rls(struct gsm_mux *gsm, const u8 *data, int clen) } static void gsm_dlci_begin_close(struct gsm_dlci *dlci); -static void gsm_dlci_close(struct gsm_dlci *dlci); /** * gsm_control_message - DLCI 0 control processing @@ -1212,28 +1260,15 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, { u8 buf[1]; unsigned long flags; - struct gsm_dlci *dlci; - int i; - int address; switch (command) { case CMD_CLD: { - if (addr_cnt > 0) { - for (i = 0; i < addr_cnt; i++) { - address = addr_open[i]; - dlci = gsm->dlci[address]; - gsm_dlci_close(dlci); - addr_open[i] = 0; - } - } + struct gsm_dlci *dlci = gsm->dlci[0]; /* Modem wishes to close down */ - dlci = gsm->dlci[0]; if (dlci) { dlci->dead = true; gsm->dead = true; - gsm_dlci_close(dlci); - addr_cnt = 0; - gsm_response(gsm, 0, UA|PF); + gsm_dlci_begin_close(dlci); } } break; @@ -1326,11 +1361,12 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command, static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl) { - struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1, gsm->ftype); + struct gsm_msg *msg = 
gsm_data_alloc(gsm, 0, ctrl->len + 2, gsm->ftype); if (msg == NULL) return; - msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */ - memcpy(msg->data + 1, ctrl->data, ctrl->len); + msg->data[0] = (ctrl->cmd << 1) | CR | EA; /* command */ + msg->data[1] = (ctrl->len << 1) | EA; + memcpy(msg->data + 2, ctrl->data, ctrl->len); gsm_data_queue(gsm->dlci[0], msg); } @@ -1353,7 +1389,6 @@ static void gsm_control_retransmit(struct timer_list *t) spin_lock_irqsave(&gsm->control_lock, flags); ctrl = gsm->pending_cmd; if (ctrl) { - gsm->cretries--; if (gsm->cretries == 0) { gsm->pending_cmd = NULL; ctrl->error = -ETIMEDOUT; @@ -1362,6 +1397,7 @@ static void gsm_control_retransmit(struct timer_list *t) wake_up(&gsm->event); return; } + gsm->cretries--; gsm_control_transmit(gsm, ctrl); mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); } @@ -1402,7 +1438,7 @@ retry: /* If DLCI0 is in ADM mode skip retries, it won't respond */ if (gsm->dlci[0]->mode == DLCI_MODE_ADM) - gsm->cretries = 1; + gsm->cretries = 0; else gsm->cretries = gsm->n2; @@ -1450,20 +1486,22 @@ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control) static void gsm_dlci_close(struct gsm_dlci *dlci) { + unsigned long flags; + del_timer(&dlci->t1); if (debug & 8) pr_debug("DLCI %d goes closed.\n", dlci->addr); dlci->state = DLCI_CLOSED; if (dlci->addr != 0) { tty_port_tty_hangup(&dlci->port, false); + spin_lock_irqsave(&dlci->lock, flags); kfifo_reset(&dlci->fifo); + spin_unlock_irqrestore(&dlci->lock, flags); /* Ensure that gsmtty_open() can return. */ tty_port_set_initialized(&dlci->port, 0); wake_up_interruptible(&dlci->port.open_wait); } else dlci->gsm->dead = true; - /* Unregister gsmtty driver,report gsmtty dev remove uevent for user */ - tty_unregister_device(gsm_tty_driver, dlci->addr); wake_up(&dlci->gsm->event); /* A DLCI 0 close is a MUX termination so we need to kick that back to userspace somehow */ @@ -1485,8 +1523,9 @@ static void gsm_dlci_open(struct gsm_dlci *dlci) dlci->state = DLCI_OPEN; if (debug & 8) pr_debug("DLCI %d goes open.\n", dlci->addr); - /* Register gsmtty driver,report gsmtty dev add uevent for user */ - tty_register_device(gsm_tty_driver, dlci->addr, NULL); + /* Send current modem state */ + if (dlci->addr) + gsm_modem_update(dlci, 0); wake_up(&dlci->gsm->event); } @@ -1623,6 +1662,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) tty = tty_port_tty_get(port); if (tty) { gsm_process_modem(tty, dlci, modem, slen); + tty_wakeup(tty); tty_kref_put(tty); } fallthrough; @@ -1793,19 +1833,7 @@ static void gsm_queue(struct gsm_mux *gsm) struct gsm_dlci *dlci; u8 cr; int address; - int i, j, k, address_tmp; - /* We have to sneak a look at the packet body to do the FCS. - A somewhat layering violation in the spec */ - if ((gsm->control & ~PF) == UI) - gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len); - if (gsm->encoding == 0) { - /* WARNING: gsm->received_fcs is used for - gsm->encoding = 0 only. 
- In this case it contain the last piece of data - required to generate final CRC */ - gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs); - } if (gsm->fcs != GOOD_FCS) { gsm->bad_fcs++; if (debug & 4) @@ -1836,11 +1864,6 @@ static void gsm_queue(struct gsm_mux *gsm) else { gsm_response(gsm, address, UA|PF); gsm_dlci_open(dlci); - /* Save dlci open address */ - if (address) { - addr_open[addr_cnt] = address; - addr_cnt++; - } } break; case DISC|PF: @@ -1851,35 +1874,9 @@ static void gsm_queue(struct gsm_mux *gsm) return; } /* Real close complete */ - if (!address) { - if (addr_cnt > 0) { - for (i = 0; i < addr_cnt; i++) { - address = addr_open[i]; - dlci = gsm->dlci[address]; - gsm_dlci_close(dlci); - addr_open[i] = 0; - } - } - dlci = gsm->dlci[0]; - gsm_dlci_close(dlci); - addr_cnt = 0; - gsm_response(gsm, 0, UA|PF); - } else { - gsm_response(gsm, address, UA|PF); - gsm_dlci_close(dlci); - /* clear dlci address */ - for (j = 0; j < addr_cnt; j++) { - address_tmp = addr_open[j]; - if (address_tmp == address) { - for (k = j; k < addr_cnt; k++) - addr_open[k] = addr_open[k+1]; - addr_cnt--; - break; - } - } - } + gsm_response(gsm, address, UA|PF); + gsm_dlci_close(dlci); break; - case UA: case UA|PF: if (cr == 0 || dlci == NULL) break; @@ -1993,19 +1990,25 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c) break; case GSM_DATA: /* Data */ gsm->buf[gsm->count++] = c; - if (gsm->count == gsm->len) + if (gsm->count == gsm->len) { + /* Calculate final FCS for UI frames over all data */ + if ((gsm->control & ~PF) != UIH) { + gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, + gsm->count); + } gsm->state = GSM_FCS; + } break; case GSM_FCS: /* FCS follows the packet */ - gsm->received_fcs = c; - gsm_queue(gsm); + gsm->fcs = gsm_fcs_add(gsm->fcs, c); gsm->state = GSM_SSOF; break; case GSM_SSOF: - if (c == GSM0_SOF) { - gsm->state = GSM_SEARCH; - break; - } + gsm->state = GSM_SEARCH; + if (c == GSM0_SOF) + gsm_queue(gsm); + else + gsm->bad_size++; break; default: pr_debug("%s: unhandled state: %d\n", __func__, gsm->state); @@ -2023,12 +2026,35 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c) static void gsm1_receive(struct gsm_mux *gsm, unsigned char c) { + /* handle XON/XOFF */ + if ((c & ISO_IEC_646_MASK) == XON) { + gsm->constipated = true; + return; + } else if ((c & ISO_IEC_646_MASK) == XOFF) { + gsm->constipated = false; + /* Kick the link in case it is idling */ + gsm_data_kick(gsm, NULL); + return; + } if (c == GSM1_SOF) { - /* EOF is only valid in frame if we have got to the data state - and received at least one byte (the FCS) */ - if (gsm->state == GSM_DATA && gsm->count) { - /* Extract the FCS */ + /* EOF is only valid in frame if we have got to the data state */ + if (gsm->state == GSM_DATA) { + if (gsm->count < 1) { + /* Missing FSC */ + gsm->malformed++; + gsm->state = GSM_START; + return; + } + /* Remove the FCS from data */ gsm->count--; + if ((gsm->control & ~PF) != UIH) { + /* Calculate final FCS for UI frames over all + * data but FCS + */ + gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, + gsm->count); + } + /* Add the FCS itself to test against GOOD_FCS */ gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]); gsm->len = gsm->count; gsm_queue(gsm); @@ -2037,7 +2063,8 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c) } /* Any partial frame was a runt so go back to start */ if (gsm->state != GSM_START) { - gsm->malformed++; + if (gsm->state != GSM_SEARCH) + gsm->malformed++; gsm->state = GSM_START; } /* A SOF in GSM_START means we 
are still reading idling or @@ -2106,74 +2133,43 @@ static void gsm_error(struct gsm_mux *gsm) gsm->io_error++; } -static int gsm_disconnect(struct gsm_mux *gsm) -{ - struct gsm_dlci *dlci = gsm->dlci[0]; - struct gsm_control *gc; - - if (!dlci) - return 0; - - /* In theory disconnecting DLCI 0 is sufficient but for some - modems this is apparently not the case. */ - gc = gsm_control_send(gsm, CMD_CLD, NULL, 0); - if (gc) - gsm_control_wait(gsm, gc); - - del_timer_sync(&gsm->t2_timer); - /* Now we are sure T2 has stopped */ - - gsm_dlci_begin_close(dlci); - wait_event_interruptible(gsm->event, - dlci->state == DLCI_CLOSED); - - if (signal_pending(current)) - return -EINTR; - - return 0; -} - /** * gsm_cleanup_mux - generic GSM protocol cleanup * @gsm: our mux + * @disc: disconnect link? * * Clean up the bits of the mux which are the same for all framing * protocols. Remove the mux from the mux table, stop all the timers * and then shut down each device hanging up the channels as we go. */ -static void gsm_cleanup_mux(struct gsm_mux *gsm) +static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) { int i; struct gsm_dlci *dlci = gsm->dlci[0]; struct gsm_msg *txq, *ntxq; gsm->dead = true; + mutex_lock(&gsm->mutex); - spin_lock(&gsm_mux_lock); - for (i = 0; i < MAX_MUX; i++) { - if (gsm_mux[i] == gsm) { - gsm_mux[i] = NULL; - break; + if (dlci) { + if (disc && dlci->state != DLCI_CLOSED) { + gsm_dlci_begin_close(dlci); + wait_event(gsm->event, dlci->state == DLCI_CLOSED); } + dlci->dead = true; } - spin_unlock(&gsm_mux_lock); - /* open failed before registering => nothing to do */ - if (i == MAX_MUX) - return; + /* Finish outstanding timers, making sure they are done */ del_timer_sync(&gsm->t2_timer); - /* Now we are sure T2 has stopped */ - if (dlci) - dlci->dead = true; - /* Free up any link layer users */ - mutex_lock(&gsm->mutex); - for (i = 0; i < NUM_DLCI; i++) + /* Free up any link layer users and finally the control channel */ + for (i = NUM_DLCI - 1; i >= 0; i--) if (gsm->dlci[i]) gsm_dlci_release(gsm->dlci[i]); mutex_unlock(&gsm->mutex); /* Now wipe the queues */ + tty_ldisc_flush(gsm->tty); list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list) kfree(txq); INIT_LIST_HEAD(&gsm->tx_list); @@ -2191,7 +2187,6 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm) static int gsm_activate_mux(struct gsm_mux *gsm) { struct gsm_dlci *dlci; - int i = 0; timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0); init_waitqueue_head(&gsm->event); @@ -2203,18 +2198,6 @@ static int gsm_activate_mux(struct gsm_mux *gsm) else gsm->receive = gsm1_receive; - spin_lock(&gsm_mux_lock); - for (i = 0; i < MAX_MUX; i++) { - if (gsm_mux[i] == NULL) { - gsm->num = i; - gsm_mux[i] = gsm; - break; - } - } - spin_unlock(&gsm_mux_lock); - if (i == MAX_MUX) - return -EBUSY; - dlci = gsm_dlci_alloc(gsm, 0); if (dlci == NULL) return -ENOMEM; @@ -2230,6 +2213,15 @@ static int gsm_activate_mux(struct gsm_mux *gsm) */ static void gsm_free_mux(struct gsm_mux *gsm) { + int i; + + for (i = 0; i < MAX_MUX; i++) { + if (gsm == gsm_mux[i]) { + gsm_mux[i] = NULL; + break; + } + } + mutex_destroy(&gsm->mutex); kfree(gsm->txframe); kfree(gsm->buf); kfree(gsm); @@ -2249,12 +2241,20 @@ static void gsm_free_muxr(struct kref *ref) static inline void mux_get(struct gsm_mux *gsm) { + unsigned long flags; + + spin_lock_irqsave(&gsm_mux_lock, flags); kref_get(&gsm->ref); + spin_unlock_irqrestore(&gsm_mux_lock, flags); } static inline void mux_put(struct gsm_mux *gsm) { + unsigned long flags; + + spin_lock_irqsave(&gsm_mux_lock, 
flags); kref_put(&gsm->ref, gsm_free_muxr); + spin_unlock_irqrestore(&gsm_mux_lock, flags); } static inline unsigned int mux_num_to_base(struct gsm_mux *gsm) @@ -2275,6 +2275,7 @@ static inline unsigned int mux_line_to_num(unsigned int line) static struct gsm_mux *gsm_alloc_mux(void) { + int i; struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL); if (gsm == NULL) return NULL; @@ -2283,7 +2284,7 @@ static struct gsm_mux *gsm_alloc_mux(void) kfree(gsm); return NULL; } - gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL); + gsm->txframe = kmalloc(2 * (MAX_MTU + PROT_OVERHEAD - 1), GFP_KERNEL); if (gsm->txframe == NULL) { kfree(gsm->buf); kfree(gsm); @@ -2304,6 +2305,26 @@ static struct gsm_mux *gsm_alloc_mux(void) gsm->mtu = 64; gsm->dead = true; /* Avoid early tty opens */ + /* Store the instance to the mux array or abort if no space is + * available. + */ + spin_lock(&gsm_mux_lock); + for (i = 0; i < MAX_MUX; i++) { + if (!gsm_mux[i]) { + gsm_mux[i] = gsm; + gsm->num = i; + break; + } + } + spin_unlock(&gsm_mux_lock); + if (i == MAX_MUX) { + mutex_destroy(&gsm->mutex); + kfree(gsm->txframe); + kfree(gsm->buf); + kfree(gsm); + return NULL; + } + return gsm; } @@ -2339,7 +2360,7 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) /* Check the MRU/MTU range looks sane */ if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8) return -EINVAL; - if (c->n2 < 3) + if (c->n2 > 255) return -EINVAL; if (c->encapsulation > 1) /* Basic, advanced, no I */ return -EINVAL; @@ -2370,19 +2391,11 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) /* * Close down what is needed, restart and initiate the new - * configuration + * configuration. On the first time there is no DLCI[0] + * and closing or cleaning up is not necessary. */ - - if (gsm->initiator && (need_close || need_restart)) { - int ret; - - ret = gsm_disconnect(gsm); - - if (ret) - return ret; - } - if (need_restart) - gsm_cleanup_mux(gsm); + if (need_close || need_restart) + gsm_cleanup_mux(gsm, true); gsm->initiator = c->initiator; gsm->mru = c->mru; @@ -2450,25 +2463,26 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) int ret, i; gsm->tty = tty_kref_get(tty); + /* Turn off tty XON/XOFF handling to handle it explicitly. */ + gsm->old_c_iflag = tty->termios.c_iflag; + tty->termios.c_iflag &= (IXON | IXOFF); ret = gsm_activate_mux(gsm); if (ret != 0) tty_kref_put(gsm->tty); else { /* Don't register device 0 - this is the control channel and not a usable tty interface */ - if (gsm->initiator) { - base = mux_num_to_base(gsm); /* Base for this MUX */ - for (i = 1; i < NUM_DLCI; i++) { - struct device *dev; + base = mux_num_to_base(gsm); /* Base for this MUX */ + for (i = 1; i < NUM_DLCI; i++) { + struct device *dev; - dev = tty_register_device(gsm_tty_driver, + dev = tty_register_device(gsm_tty_driver, base + i, NULL); - if (IS_ERR(dev)) { - for (i--; i >= 1; i--) - tty_unregister_device(gsm_tty_driver, - base + i); - return PTR_ERR(dev); - } + if (IS_ERR(dev)) { + for (i--; i >= 1; i--) + tty_unregister_device(gsm_tty_driver, + base + i); + return PTR_ERR(dev); } } } @@ -2490,11 +2504,10 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) int i; WARN_ON(tty != gsm->tty); - if (gsm->initiator) { - for (i = 1; i < NUM_DLCI; i++) - tty_unregister_device(gsm_tty_driver, base + i); - } - gsm_cleanup_mux(gsm); + for (i = 1; i < NUM_DLCI; i++) + tty_unregister_device(gsm_tty_driver, base + i); + /* Restore tty XON/XOFF handling. 
*/ + gsm->tty->termios.c_iflag = gsm->old_c_iflag; tty_kref_put(gsm->tty); gsm->tty = NULL; } @@ -2559,6 +2572,12 @@ static void gsmld_close(struct tty_struct *tty) { struct gsm_mux *gsm = tty->disc_data; + /* The ldisc locks and closes the port before calling our close. This + * means we have no way to do a proper disconnect. We will not bother + * to do one. + */ + gsm_cleanup_mux(gsm, false); + gsmld_detach_gsm(tty, gsm); gsmld_flush_buffer(tty); @@ -2597,7 +2616,7 @@ static int gsmld_open(struct tty_struct *tty) ret = gsmld_attach_gsm(tty, gsm); if (ret != 0) { - gsm_cleanup_mux(gsm); + gsm_cleanup_mux(gsm, false); mux_put(gsm); } return ret; @@ -2954,26 +2973,78 @@ static struct tty_ldisc_ops tty_ldisc_packet = { #define TX_SIZE 512 -static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk) +/** + * gsm_modem_upd_via_data - send modem bits via convergence layer + * @dlci: channel + * @brk: break signal + * + * Send an empty frame to signal mobile state changes and to transmit the + * break signal for adaption 2. + */ + +static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk) { - u8 modembits[5]; + struct gsm_mux *gsm = dlci->gsm; + unsigned long flags; + + if (dlci->state != DLCI_OPEN || dlci->adaption != 2) + return; + + spin_lock_irqsave(&gsm->tx_lock, flags); + gsm_dlci_modem_output(gsm, dlci, brk); + spin_unlock_irqrestore(&gsm->tx_lock, flags); +} + +/** + * gsm_modem_upd_via_msc - send modem bits via control frame + * @dlci: channel + * @brk: break signal + */ + +static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk) +{ + u8 modembits[3]; struct gsm_control *ctrl; int len = 2; - if (brk) - len++; + if (dlci->gsm->encoding != 0) + return 0; - modembits[0] = len << 1 | EA; /* Data bytes */ - modembits[1] = dlci->addr << 2 | 3; /* DLCI, EA, 1 */ - modembits[2] = gsm_encode_modem(dlci) << 1 | EA; - if (brk) - modembits[3] = brk << 4 | 2 | EA; /* Valid, EA */ - ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1); + modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */ + if (!brk) { + modembits[1] = (gsm_encode_modem(dlci) << 1) | EA; + } else { + modembits[1] = gsm_encode_modem(dlci) << 1; + modembits[2] = (brk << 4) | 2 | EA; /* Length, Break, EA */ + len++; + } + ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len); if (ctrl == NULL) return -ENOMEM; return gsm_control_wait(dlci->gsm, ctrl); } +/** + * gsm_modem_update - send modem status line state + * @dlci: channel + * @brk: break signal + */ + +static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk) +{ + if (dlci->adaption == 2) { + /* Send convergence layer type 2 empty data frame. */ + gsm_modem_upd_via_data(dlci, brk); + return 0; + } else if (dlci->gsm->encoding == 0) { + /* Send as MSC control message. */ + return gsm_modem_upd_via_msc(dlci, brk); + } + + /* Modem status lines are not supported. 
*/ + return -EPROTONOSUPPORT; +} + static int gsm_carrier_raised(struct tty_port *port) { struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); @@ -3006,7 +3077,7 @@ static void gsm_dtr_rts(struct tty_port *port, int onoff) modem_tx &= ~(TIOCM_DTR | TIOCM_RTS); if (modem_tx != dlci->modem_tx) { dlci->modem_tx = modem_tx; - gsmtty_modem_update(dlci, 0); + gsm_modem_update(dlci, 0); } } @@ -3155,13 +3226,17 @@ static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty) static void gsmtty_flush_buffer(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; + unsigned long flags; + if (dlci->state == DLCI_CLOSED) return; /* Caution needed: If we implement reliable transport classes then the data being transmitted can't simply be junked once it has first hit the stack. Until then we can just blow it away */ + spin_lock_irqsave(&dlci->lock, flags); kfifo_reset(&dlci->fifo); + spin_unlock_irqrestore(&dlci->lock, flags); /* Need to unhook this DLCI from the transmit queue logic */ } @@ -3193,7 +3268,7 @@ static int gsmtty_tiocmset(struct tty_struct *tty, if (modem_tx != dlci->modem_tx) { dlci->modem_tx = modem_tx; - return gsmtty_modem_update(dlci, 0); + return gsm_modem_update(dlci, 0); } return 0; } @@ -3254,7 +3329,7 @@ static void gsmtty_throttle(struct tty_struct *tty) dlci->modem_tx &= ~TIOCM_RTS; dlci->throttled = true; /* Send an MSC with RTS cleared */ - gsmtty_modem_update(dlci, 0); + gsm_modem_update(dlci, 0); } static void gsmtty_unthrottle(struct tty_struct *tty) @@ -3266,7 +3341,7 @@ static void gsmtty_unthrottle(struct tty_struct *tty) dlci->modem_tx |= TIOCM_RTS; dlci->throttled = false; /* Send an MSC with RTS set */ - gsmtty_modem_update(dlci, 0); + gsm_modem_update(dlci, 0); } static int gsmtty_break_ctl(struct tty_struct *tty, int state) @@ -3284,7 +3359,7 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state) if (encode > 0x0F) encode = 0x0F; /* Best effort */ } - return gsmtty_modem_update(dlci, encode); + return gsm_modem_update(dlci, encode); } static void gsmtty_cleanup(struct tty_struct *tty) diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index e17e97ea86fa..a293e9f107d0 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -2667,7 +2667,7 @@ enum pci_board_num_t { pbn_panacom2, pbn_panacom4, pbn_plx_romulus, - pbn_endrun_2_4000000, + pbn_endrun_2_3906250, pbn_oxsemi, pbn_oxsemi_1_3906250, pbn_oxsemi_2_3906250, @@ -3195,10 +3195,10 @@ static struct pciserial_board pci_boards[] = { * signal now many ports are available * 2 port 952 Uart support */ - [pbn_endrun_2_4000000] = { + [pbn_endrun_2_3906250] = { .flags = FL_BASE0, .num_ports = 2, - .base_baud = 4000000, + .base_baud = 3906250, .uart_offset = 0x200, .first_offset = 0x1000, }, @@ -4115,7 +4115,7 @@ static const struct pci_device_id serial_pci_tbl[] = { */ { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_endrun_2_4000000 }, + pbn_endrun_2_3906250 }, /* * Quatech cards. These actually have configurable clocks but for * now we just use the default. 
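For context on the EndRun constant change at the end of the 8250_pci.c hunk above: base_baud is the UART input clock divided by 16, so moving from 4000000 to 3906250 amounts to describing the part as clocked at 62.5 MHz rather than 64 MHz (a reading inferred from the constants, not stated in the hunk). The serial core normally derives the divisor as DIV_ROUND_CLOSEST(uartclk, 16 * baud) with uartclk = base_baud * 16, so an overstated clock programs a divisor that is too large for the real oscillator. A small stand-alone illustration of the effect:

/* Illustration only, not driver code: compare the divisor each table entry
 * yields and the rate the real 62.5 MHz oscillator then produces.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long real_clk  = 3906250UL * 16;	/* 62.5 MHz, new table */
	const unsigned long wrong_clk = 4000000UL * 16;	/* 64 MHz, old table */
	const unsigned long baud = 115200;

	/* (x + 8*baud) / (16*baud) mirrors DIV_ROUND_CLOSEST(x, 16 * baud) */
	unsigned long div_new = (real_clk  + 8 * baud) / (16 * baud);
	unsigned long div_old = (wrong_clk + 8 * baud) / (16 * baud);

	printf("new table: divisor %lu -> %lu baud on the wire\n",
	       div_new, real_clk / (16 * div_new));
	printf("old table: divisor %lu -> %lu baud on the wire\n",
	       div_old, real_clk / (16 * div_old));
	return 0;
}

With the corrected entry the 115200 case lands within about 0.3% of the requested rate, while the old value picks a divisor one step too large and misses by roughly 3%, typically enough to break the link.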
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 318af6f13605..1fbd5bf264be 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1675,11 +1675,11 @@ static void serial8250_start_tx(struct uart_port *port) struct uart_8250_port *up = up_to_u8250p(port); struct uart_8250_em485 *em485 = up->em485; - serial8250_rpm_get_tx(up); - if (!port->x_char && uart_circ_empty(&port->state->xmit)) return; + serial8250_rpm_get_tx(up); + if (em485 && em485->active_timer == &em485->start_tx_timer) return; @@ -3329,7 +3329,7 @@ static void serial8250_console_restore(struct uart_8250_port *up) serial8250_set_divisor(port, baud, quot, frac); serial_port_out(port, UART_LCR, up->lcr); - serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS); + serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); } /* diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 51ecb050ae40..4d11a3e547f9 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1255,13 +1255,18 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) static void pl011_rs485_tx_stop(struct uart_amba_port *uap) { + /* + * To be on the safe side only time out after twice as many iterations + * as fifo size. + */ + const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2; struct uart_port *port = &uap->port; int i = 0; u32 cr; /* Wait until hardware tx queue is empty */ while (!pl011_tx_empty(port)) { - if (i == port->fifosize) { + if (i > MAX_TX_DRAIN_ITERS) { dev_warn(port->dev, "timeout while draining hardware tx queue\n"); break; @@ -2052,7 +2057,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios, * with the given baud rate. We use this as the poll interval when we * wait for the tx queue to empty. 
*/ - uap->rs485_tx_drain_interval = (bits * 1000 * 1000) / baud; + uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud); pl011_setup_status_masks(port, termios); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index fd38e6ed4fda..a2100be8d554 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1448,7 +1448,7 @@ static int imx_uart_startup(struct uart_port *port) imx_uart_writel(sport, ucr1, UCR1); ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR); - if (!sport->dma_is_enabled) + if (!dma_is_inited) ucr4 |= UCR4_OREN; if (sport->inverted_rx) ucr4 |= UCR4_INVR; diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index e857fb61efbf..5fb201c1b563 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -1238,12 +1238,10 @@ static void sc16is7xx_shutdown(struct uart_port *port) /* Disable all interrupts */ sc16is7xx_port_write(port, SC16IS7XX_IER_REG, 0); - /* Disable TX/RX, clear auto RS485 and RTS invert */ + /* Disable TX/RX */ sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, SC16IS7XX_EFCR_RXDISABLE_BIT | - SC16IS7XX_EFCR_TXDISABLE_BIT | - SC16IS7XX_EFCR_AUTO_RS485_BIT | - SC16IS7XX_EFCR_RTS_INVERT_BIT, + SC16IS7XX_EFCR_TXDISABLE_BIT, SC16IS7XX_EFCR_RXDISABLE_BIT | SC16IS7XX_EFCR_TXDISABLE_BIT); diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index f9af7ebe003d..d6d515d598dc 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -2684,6 +2684,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) struct usb_request *request; struct cdns3_request *priv_req; struct cdns3_trb *trb = NULL; + struct cdns3_trb trb_tmp; int ret; int val; @@ -2693,8 +2694,10 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) if (request) { priv_req = to_cdns3_request(request); trb = priv_req->trb; - if (trb) + if (trb) { + trb_tmp = *trb; trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); + } } writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); @@ -2709,7 +2712,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) if (request) { if (trb) - trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); + *trb = trb_tmp; cdns3_rearm_transfer(priv_ep, 1); } diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 6abb7294e919..b5b85bf80329 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1209,12 +1209,16 @@ static int do_proc_control(struct usb_dev_state *ps, usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &actlen); + + /* Linger a bit, prior to the next control message. */ + if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) + msleep(200); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, tbuf, actlen); if (!i && actlen) { if (copy_to_user(ctrl->data, tbuf, actlen)) { ret = -EFAULT; - goto recv_fault; + goto done; } } } else { @@ -1231,6 +1235,10 @@ static int do_proc_control(struct usb_dev_state *ps, usb_unlock_device(dev); i = usbfs_start_wait_urb(urb, tmo, &actlen); + + /* Linger a bit, prior to the next control message. */ + if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) + msleep(200); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, NULL, 0); } @@ -1242,10 +1250,6 @@ static int do_proc_control(struct usb_dev_state *ps, } ret = (i < 0 ? i : actlen); - recv_fault: - /* Linger a bit, prior to the next control message. 
*/ - if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) - msleep(200); done: kfree(dr); usb_free_urb(urb); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index d3c14b5ed4a1..97b44a68668a 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -404,6 +404,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0b05, 0x17e0), .driver_info = USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* Realtek Semiconductor Corp. Mass Storage Device (Multicard Reader)*/ + { USB_DEVICE(0x0bda, 0x0151), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Realtek hub in Dell WD19 (Type-C) */ { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM }, @@ -507,6 +510,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, + /* VCOM device */ + { USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 1170b800acdc..d28cd1a6709b 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -274,7 +274,8 @@ int dwc3_core_soft_reset(struct dwc3 *dwc) reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg |= DWC3_DCTL_CSFTRST; - dwc3_writel(dwc->regs, DWC3_DCTL, reg); + reg &= ~DWC3_DCTL_RUN_STOP; + dwc3_gadget_dctl_write_safe(dwc, reg); /* * For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit @@ -1377,10 +1378,10 @@ static void dwc3_get_properties(struct dwc3 *dwc) u8 lpm_nyet_threshold; u8 tx_de_emphasis; u8 hird_threshold; - u8 rx_thr_num_pkt_prd; - u8 rx_max_burst_prd; - u8 tx_thr_num_pkt_prd; - u8 tx_max_burst_prd; + u8 rx_thr_num_pkt_prd = 0; + u8 rx_max_burst_prd = 0; + u8 tx_thr_num_pkt_prd = 0; + u8 tx_max_burst_prd = 0; u8 tx_fifo_resize_max_num; const char *usb_psy_name; int ret; @@ -1690,21 +1691,44 @@ static int dwc3_probe(struct platform_device *pdev) /* * Clocks are optional, but new DT platforms should support all * clocks as required by the DT-binding. + * Some devices have different clock names in legacy device trees, + * check for them to retain backwards compatibility. 
*/ dwc->bus_clk = devm_clk_get_optional(dev, "bus_early"); if (IS_ERR(dwc->bus_clk)) return dev_err_probe(dev, PTR_ERR(dwc->bus_clk), "could not get bus clock\n"); + if (dwc->bus_clk == NULL) { + dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk"); + if (IS_ERR(dwc->bus_clk)) + return dev_err_probe(dev, PTR_ERR(dwc->bus_clk), + "could not get bus clock\n"); + } + dwc->ref_clk = devm_clk_get_optional(dev, "ref"); if (IS_ERR(dwc->ref_clk)) return dev_err_probe(dev, PTR_ERR(dwc->ref_clk), "could not get ref clock\n"); + if (dwc->ref_clk == NULL) { + dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk"); + if (IS_ERR(dwc->ref_clk)) + return dev_err_probe(dev, PTR_ERR(dwc->ref_clk), + "could not get ref clock\n"); + } + dwc->susp_clk = devm_clk_get_optional(dev, "suspend"); if (IS_ERR(dwc->susp_clk)) return dev_err_probe(dev, PTR_ERR(dwc->susp_clk), "could not get suspend clock\n"); + + if (dwc->susp_clk == NULL) { + dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk"); + if (IS_ERR(dwc->susp_clk)) + return dev_err_probe(dev, PTR_ERR(dwc->susp_clk), + "could not get suspend clock\n"); + } } ret = reset_control_deassert(dwc->reset); diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c index b60b5f7b6dff..8cad9e7d3368 100644 --- a/drivers/usb/dwc3/drd.c +++ b/drivers/usb/dwc3/drd.c @@ -584,16 +584,15 @@ int dwc3_drd_init(struct dwc3 *dwc) { int ret, irq; + if (ROLE_SWITCH && + device_property_read_bool(dwc->dev, "usb-role-switch")) + return dwc3_setup_role_switch(dwc); + dwc->edev = dwc3_get_extcon(dwc); if (IS_ERR(dwc->edev)) return PTR_ERR(dwc->edev); - if (ROLE_SWITCH && - device_property_read_bool(dwc->dev, "usb-role-switch")) { - ret = dwc3_setup_role_switch(dwc); - if (ret < 0) - return ret; - } else if (dwc->edev) { + if (dwc->edev) { dwc->edev_nb.notifier_call = dwc3_drd_notifier; ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST, &dwc->edev_nb); diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 33f657d83246..2e19e0e4ea53 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -45,6 +45,8 @@ #define PCI_DEVICE_ID_INTEL_ADLM 0x54ee #define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1 #define PCI_DEVICE_ID_INTEL_RPLS 0x7a61 +#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1 +#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e #define PCI_DEVICE_ID_INTEL_TGL 0x9a15 #define PCI_DEVICE_ID_AMD_MR 0x163a @@ -456,6 +458,12 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS), (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP), + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL), + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL), (kernel_ulong_t) &dwc3_pci_intel_swnode, }, diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index ab725d2262d6..0b9c2493844a 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -3274,6 +3274,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, const struct dwc3_event_depevt *event, struct dwc3_request *req, int status) { + int request_status; int ret; if (req->request.num_mapped_sgs) @@ -3294,7 +3295,35 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, req->needs_extra_trb = false; } - dwc3_gadget_giveback(dep, req, status); + /* + * The event status only reflects the status of the TRB with IOC set. 
+ * For the requests that don't set interrupt on completion, the driver + * needs to check and return the status of the completed TRBs associated + * with the request. Use the status of the last TRB of the request. + */ + if (req->request.no_interrupt) { + struct dwc3_trb *trb; + + trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue); + switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) { + case DWC3_TRBSTS_MISSED_ISOC: + /* Isoc endpoint only */ + request_status = -EXDEV; + break; + case DWC3_TRB_STS_XFER_IN_PROG: + /* Applicable when End Transfer with ForceRM=0 */ + case DWC3_TRBSTS_SETUP_PENDING: + /* Control endpoint only */ + case DWC3_TRBSTS_OK: + default: + request_status = 0; + break; + } + } else { + request_status = status; + } + + dwc3_gadget_giveback(dep, req, request_status); out: return ret; diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 1fb837d9271e..84b73cb03f87 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1438,6 +1438,8 @@ static void configfs_composite_unbind(struct usb_gadget *gadget) usb_ep_autoconfig_reset(cdev->gadget); spin_lock_irqsave(&gi->spinlock, flags); cdev->gadget = NULL; + cdev->deactivations = 0; + gadget->deactivated = false; set_gadget_data(gadget, NULL); spin_unlock_irqrestore(&gi->spinlock, flags); } diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c index d852ac9e47e7..2cda982f3765 100644 --- a/drivers/usb/gadget/function/uvc_queue.c +++ b/drivers/usb/gadget/function/uvc_queue.c @@ -264,6 +264,8 @@ void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect) buf->state = UVC_BUF_STATE_ERROR; vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR); } + queue->buf_used = 0; + /* This must be protected by the irqlock spinlock to avoid race * conditions between uvc_queue_buffer and the disconnection event that * could result in an interruptible wait in uvc_dequeue_buffer. Do not diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 3d82e0b853be..684164fa9716 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -1103,6 +1103,26 @@ static void ehci_remove_device(struct usb_hcd *hcd, struct usb_device *udev) #ifdef CONFIG_PM +/* Clear wakeup signal locked in zhaoxin platform when device plug in. */ +static void ehci_zx_wakeup_clear(struct ehci_hcd *ehci) +{ + u32 __iomem *reg = &ehci->regs->port_status[4]; + u32 t1 = ehci_readl(ehci, reg); + + t1 &= (u32)~0xf0000; + t1 |= PORT_TEST_FORCE; + ehci_writel(ehci, t1, reg); + t1 = ehci_readl(ehci, reg); + msleep(1); + t1 &= (u32)~0xf0000; + ehci_writel(ehci, t1, reg); + ehci_readl(ehci, reg); + msleep(1); + t1 = ehci_readl(ehci, reg); + ehci_writel(ehci, t1 | PORT_CSC, reg); + ehci_readl(ehci, reg); +} + /* suspend/resume, section 4.3 */ /* These routines handle the generic parts of controller suspend/resume */ @@ -1154,6 +1174,9 @@ int ehci_resume(struct usb_hcd *hcd, bool force_reset) if (ehci->shutdown) return 0; /* Controller is dead */ + if (ehci->zx_wakeup_clear_needed) + ehci_zx_wakeup_clear(ehci); + /* * If CF is still set and reset isn't forced * then we maintained suspend power. 
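The new ehci_zx_wakeup_clear() helper above briefly forces the port-test field (PORTSC bits 19:16) of the fifth port to "force enable", restores it, and then acknowledges any latched connect-status change, which per the accompanying comment releases a wakeup signal the Zhaoxin controller leaves asserted after a device is plugged in; the PCI-side detection that sets zx_wakeup_clear_needed follows in the ehci-pci.c hunk below. Because the PORTSC change bits are write-1-to-clear, any read-modify-write of that register can acknowledge events as a side effect. A minimal sketch of a careful acknowledge, with hypothetical mmio_read32()/mmio_write32() accessors standing in for ehci_readl()/ehci_writel():

#include <stdint.h>

/* EHCI PORTSC change bits are write-1-to-clear: CSC, PEC, OCC. */
#define PORTSC_W1C_MASK		0x0000002au
#define PORTSC_CSC		0x00000002u	/* connect status change */

/* Hypothetical MMIO accessors used only for this sketch. */
static inline uint32_t mmio_read32(volatile uint32_t *reg)
{
	return *reg;
}

static inline void mmio_write32(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;
}

/* Acknowledge a single latched change bit without clearing the others. */
static void portsc_ack(volatile uint32_t *portsc, uint32_t change_bit)
{
	uint32_t val = mmio_read32(portsc);

	val &= ~PORTSC_W1C_MASK;		/* keep the other events latched */
	mmio_write32(portsc, val | change_bit);
	(void)mmio_read32(portsc);		/* read back to post the write */
}

Calling portsc_ack(reg, PORTSC_CSC) would mirror the final step of the quirk sequence above while leaving any other latched port events untouched.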
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 638f03b89739..9937c5a7efc2 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -231,6 +231,10 @@ static int ehci_pci_setup(struct usb_hcd *hcd) ehci->is_aspeed = 1; } break; + case PCI_VENDOR_ID_ZHAOXIN: + if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x90) + ehci->zx_wakeup_clear_needed = 1; + break; } /* optional debug port, normally in the first BAR */ diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index fdd073cc053b..ad3f13a3eaf1 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -220,6 +220,7 @@ struct ehci_hcd { /* one per controller */ unsigned imx28_write_fix:1; /* For Freescale i.MX28 */ unsigned spurious_oc:1; unsigned is_aspeed:1; + unsigned zx_wakeup_clear_needed:1; /* required for usb32 quirk */ #define OHCI_CTRL_HCFS (3 << 6) diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 1e7dc130c39a..f65f1ba2b592 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1434,7 +1434,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, } spin_unlock_irqrestore(&xhci->lock, flags); if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex], - msecs_to_jiffies(100))) + msecs_to_jiffies(500))) xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n", hcd->self.busnum, wIndex + 1); spin_lock_irqsave(&xhci->lock, flags); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 5c351970cdf1..d7e0e6ebf080 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -59,6 +59,7 @@ #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 @@ -266,7 +267,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI)) + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI)) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (pdev->vendor == PCI_VENDOR_ID_ETRON && diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index d0b6806275e0..f9707997969d 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -3141,6 +3141,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) if (event_loop++ < TRBS_PER_SEGMENT / 2) continue; xhci_update_erst_dequeue(xhci, event_ring_deq); + event_ring_deq = xhci->event_ring->dequeue; /* ring is half-full, force isoc trbs to interrupt more often */ if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN) diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index c8af2cd2216d..996958a6565c 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1034,13 +1034,13 @@ static int tegra_xusb_unpowergate_partitions(struct tegra_xusb *tegra) int rc; if (tegra->use_genpd) { - rc = pm_runtime_get_sync(tegra->genpd_dev_ss); + rc = pm_runtime_resume_and_get(tegra->genpd_dev_ss); if (rc < 0) { dev_err(dev, "failed to enable XUSB SS partition\n"); return rc; } - rc = pm_runtime_get_sync(tegra->genpd_dev_host); + rc = 
pm_runtime_resume_and_get(tegra->genpd_dev_host); if (rc < 0) { dev_err(dev, "failed to enable XUSB Host partition\n"); pm_runtime_put_sync(tegra->genpd_dev_ss); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 642610c78f58..25b87e99b4dd 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -781,6 +781,17 @@ void xhci_shutdown(struct usb_hcd *hcd) if (xhci->quirks & XHCI_SPURIOUS_REBOOT) usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev)); + /* Don't poll the roothubs after shutdown. */ + xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", + __func__, hcd->self.busnum); + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); + del_timer_sync(&hcd->rh_timer); + + if (xhci->shared_hcd) { + clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); + del_timer_sync(&xhci->shared_hcd->rh_timer); + } + spin_lock_irq(&xhci->lock); xhci_halt(xhci); /* Workaround for spurious wakeups at shutdown with HSW */ diff --git a/drivers/usb/misc/qcom_eud.c b/drivers/usb/misc/qcom_eud.c index f929bffdc5d1..b7f13df00764 100644 --- a/drivers/usb/misc/qcom_eud.c +++ b/drivers/usb/misc/qcom_eud.c @@ -186,16 +186,16 @@ static int eud_probe(struct platform_device *pdev) chip->dev = &pdev->dev; - ret = devm_add_action_or_reset(chip->dev, eud_role_switch_release, chip); - if (ret) - return dev_err_probe(chip->dev, ret, - "failed to add role switch release action\n"); - chip->role_sw = usb_role_switch_get(&pdev->dev); if (IS_ERR(chip->role_sw)) return dev_err_probe(chip->dev, PTR_ERR(chip->role_sw), "failed to get role switch\n"); + ret = devm_add_action_or_reset(chip->dev, eud_role_switch_release, chip); + if (ret) + return dev_err_probe(chip->dev, ret, + "failed to add role switch release action\n"); + chip->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(chip->base)) return PTR_ERR(chip->base); diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 748139d26263..0be8efcda15d 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -71,6 +71,7 @@ static void destroy_priv(struct kref *kref) dev_dbg(&priv->usbdev->dev, "destroying priv datastructure\n"); usb_put_dev(priv->usbdev); + priv->usbdev = NULL; kfree(priv); } @@ -736,7 +737,6 @@ static int uss720_probe(struct usb_interface *intf, parport_announce_port(pp); usb_set_intfdata(intf, pp); - usb_put_dev(usbdev); return 0; probe_abort: @@ -754,7 +754,6 @@ static void uss720_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); if (pp) { priv = pp->private_data; - priv->usbdev = NULL; priv->pp = NULL; dev_dbg(&intf->dev, "parport_remove_port\n"); parport_remove_port(pp); diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c index a6b04831b20b..9b8aded3d95e 100644 --- a/drivers/usb/mtu3/mtu3_dr.c +++ b/drivers/usb/mtu3/mtu3_dr.c @@ -21,10 +21,8 @@ static inline struct ssusb_mtk *otg_sx_to_ssusb(struct otg_switch_mtk *otg_sx) static void toggle_opstate(struct ssusb_mtk *ssusb) { - if (!ssusb->otg_switch.is_u3_drd) { - mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION); - mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN); - } + mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION); + mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN); } /* only port0 supports dual-role mode */ diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index 661a229c105d..34b9f8140187 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c @@ -268,6 +268,13 @@ int usb_phy_gen_create_phy(struct device *dev, 
struct usb_phy_generic *nop) return -EPROBE_DEFER; } + nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus"); + if (PTR_ERR(nop->vbus_draw) == -ENODEV) + nop->vbus_draw = NULL; + if (IS_ERR(nop->vbus_draw)) + return dev_err_probe(dev, PTR_ERR(nop->vbus_draw), + "could not get vbus regulator\n"); + nop->dev = dev; nop->phy.dev = nop->dev; nop->phy.label = "nop-xceiv"; diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index a27f7efcec6a..c374620a486f 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -194,6 +194,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */ { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */ + { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */ + { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e7755d9cfc61..1364ce7f0abf 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -432,6 +432,8 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_CLS8 0x00b0 #define CINTERION_PRODUCT_MV31_MBIM 0x00b3 #define CINTERION_PRODUCT_MV31_RMNET 0x00b7 +#define CINTERION_PRODUCT_MV32_WA 0x00f1 +#define CINTERION_PRODUCT_MV32_WB 0x00f2 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -1217,6 +1219,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */ .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1057, 0xff), /* Telit FN980 */ + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1058, 0xff), /* Telit FN980 (PCIe) */ + .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */ @@ -1233,6 +1239,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */ .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */ + .driver_info = RSVD(0) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), @@ -1969,6 +1977,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(3)}, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), .driver_info = RSVD(0)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff), + .driver_info = RSVD(3)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff), + .driver_info = RSVD(3)}, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), .driver_info = RSVD(4) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, 
OLIVETTI_PRODUCT_OLICARD120), diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index da65d14c9ed5..06aad0d727dd 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -584,9 +584,8 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command, switch (command) { case WHITEHEAT_GET_DTR_RTS: info = usb_get_serial_port_data(port); - memcpy(&info->mcr, command_info->result_buffer, - sizeof(struct whiteheat_dr_info)); - break; + info->mcr = command_info->result_buffer[0]; + break; } } exit: diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig index 8f921213b17d..ba24847fb245 100644 --- a/drivers/usb/typec/Kconfig +++ b/drivers/usb/typec/Kconfig @@ -56,6 +56,7 @@ config TYPEC_RT1719 tristate "Richtek RT1719 Sink Only Type-C controller driver" depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH depends on I2C + depends on POWER_SUPPLY select REGMAP_I2C help Say Y or M here if your system has Richtek RT1719 sink only diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index f0c2fa19f3e0..a6045aef0d04 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -949,6 +949,8 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role) role == TYPEC_HOST)) goto out_unlock; + reinit_completion(&con->complete); + command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num); command |= UCSI_SET_UOR_ROLE(role); command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS; @@ -956,14 +958,18 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role) if (ret < 0) goto out_unlock; + mutex_unlock(&con->lock); + if (!wait_for_completion_timeout(&con->complete, - msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) - ret = -ETIMEDOUT; + msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) + return -ETIMEDOUT; + + return 0; out_unlock: mutex_unlock(&con->lock); - return ret < 0 ? ret : 0; + return ret; } static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) @@ -985,6 +991,8 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) if (cur_role == role) goto out_unlock; + reinit_completion(&con->complete); + command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num); command |= UCSI_SET_PDR_ROLE(role); command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS; @@ -992,11 +1000,13 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) if (ret < 0) goto out_unlock; + mutex_unlock(&con->lock); + if (!wait_for_completion_timeout(&con->complete, - msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) { - ret = -ETIMEDOUT; - goto out_unlock; - } + msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) + return -ETIMEDOUT; + + mutex_lock(&con->lock); /* Something has gone wrong while swapping the role */ if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) != |
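The ucsi_dr_swap()/ucsi_pr_swap() rework above re-arms the completion before issuing the command and drops the connector lock for the duration of wait_for_completion_timeout(), presumably because the notification path that eventually calls complete(&con->complete) needs the same lock, so sleeping with it held could only end in a timeout. A minimal, self-contained sketch of that lock-around-command, wait-unlocked pattern using the generic kernel primitives; the demo_* structure and function names are illustrative, not taken from the driver:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>

#define DEMO_SWAP_TIMEOUT_MS	5000	/* arbitrary value for this sketch */

struct demo_connector {
	struct mutex lock;		/* serializes commands on this connector */
	struct completion complete;	/* signalled by the event handler */
};

/* Notification path: takes the connector lock, then wakes the waiter. */
static void demo_handle_event(struct demo_connector *con)
{
	mutex_lock(&con->lock);
	/* ...consume the event, update connector state... */
	complete(&con->complete);
	mutex_unlock(&con->lock);
}

/* Command path: never sleep on the completion while holding the lock. */
static int demo_swap(struct demo_connector *con)
{
	mutex_lock(&con->lock);
	reinit_completion(&con->complete);	/* discard any stale completion */
	/* ...send the role-swap command here... */
	mutex_unlock(&con->lock);

	if (!wait_for_completion_timeout(&con->complete,
					 msecs_to_jiffies(DEMO_SWAP_TIMEOUT_MS)))
		return -ETIMEDOUT;

	return 0;
}

Both swap paths above follow this shape; they differ only in the command issued and in pr_swap() re-taking the lock afterwards to verify the resulting power operation mode.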