Diffstat (limited to 'drivers')
81 files changed, 655 insertions, 328 deletions
diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c index 4cdff387deff..440cf9fb91aa 100644 --- a/drivers/acpi/riscv/cppc.c +++ b/drivers/acpi/riscv/cppc.c @@ -37,10 +37,8 @@ static int __init sbi_cppc_init(void) { if (sbi_spec_version >= sbi_mk_version(2, 0) && sbi_probe_extension(SBI_EXT_CPPC) > 0) { - pr_info("SBI CPPC extension detected\n"); cppc_ext_present = true; } else { - pr_info("SBI CPPC extension NOT detected!!\n"); cppc_ext_present = false; } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index a6ab666ef48a..7a50af416cac 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1280,6 +1280,22 @@ static void dpm_async_suspend_parent(struct device *dev, async_func_t func) dpm_async_with_cleanup(dev->parent, func); } +static void dpm_async_suspend_complete_all(struct list_head *device_list) +{ + struct device *dev; + + guard(mutex)(&async_wip_mtx); + + list_for_each_entry_reverse(dev, device_list, power.entry) { + /* + * In case the device is being waited for and async processing + * has not started for it yet, let the waiters make progress. + */ + if (!dev->power.work_in_progress) + complete_all(&dev->power.completion); + } +} + /** * resume_event - Return a "resume" message for given "suspend" sleep state. * @sleep_state: PM message representing a sleep state. @@ -1456,6 +1472,7 @@ static int dpm_noirq_suspend_devices(pm_message_t state) mutex_lock(&dpm_list_mtx); if (error || async_error) { + dpm_async_suspend_complete_all(&dpm_late_early_list); /* * Move all devices to the target list to resume them * properly. @@ -1658,6 +1675,7 @@ int dpm_suspend_late(pm_message_t state) mutex_lock(&dpm_list_mtx); if (error || async_error) { + dpm_async_suspend_complete_all(&dpm_suspended_list); /* * Move all devices to the target list to resume them * properly. @@ -1951,6 +1969,7 @@ int dpm_suspend(pm_message_t state) mutex_lock(&dpm_list_mtx); if (error || async_error) { + dpm_async_suspend_complete_all(&dpm_prepared_list); /* * Move all devices to the target list to resume them * properly. 
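The PM core fix above pairs guard(mutex)() scope-based locking with complete_all() so suspend waiters are unblocked when an async suspend aborts. A minimal sketch of that pattern, using hypothetical demo_* names rather than the PM core's own types:

#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct demo_dev {
	struct list_head entry;
	bool work_in_progress;
	struct completion done;
};

static DEFINE_MUTEX(demo_mtx);

static void demo_complete_idle(struct list_head *devices)
{
	struct demo_dev *d;

	guard(mutex)(&demo_mtx);	/* dropped automatically at scope exit */

	list_for_each_entry(d, devices, entry) {
		/* let waiters make progress for devices never started async */
		if (!d->work_in_progress)
			complete_all(&d->done);
	}
}

guard() from <linux/cleanup.h> removes the unlock-on-every-return bookkeeping an early-exit loop like this would otherwise need.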
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 500840e4a74e..8d994cae3b83 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -308,14 +308,13 @@ end_io: static void lo_rw_aio_do_completion(struct loop_cmd *cmd) { struct request *rq = blk_mq_rq_from_pdu(cmd); - struct loop_device *lo = rq->q->queuedata; if (!atomic_dec_and_test(&cmd->ref)) return; kfree(cmd->bvec); cmd->bvec = NULL; if (req_op(rq) == REQ_OP_WRITE) - file_end_write(lo->lo_backing_file); + kiocb_end_write(&cmd->iocb); if (likely(!blk_should_fake_timeout(rq->q))) blk_mq_complete_request(rq); } @@ -391,7 +390,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, } if (rw == ITER_SOURCE) { - file_start_write(lo->lo_backing_file); + kiocb_start_write(&cmd->iocb); ret = file->f_op->write_iter(&cmd->iocb, &iter); } else ret = file->f_op->read_iter(&cmd->iocb, &iter); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 7bdc7eb808ea..2592bd19ebc1 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -2198,9 +2198,7 @@ again: goto out; } } - ret = nbd_start_device(nbd); - if (ret) - goto out; + if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) { nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER], GFP_KERNEL); @@ -2216,6 +2214,8 @@ again: goto out; } set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags); + + ret = nbd_start_device(nbd); out: mutex_unlock(&nbd->config_lock); if (!ret) { diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index bf490967241a..2505df1f4e69 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -720,11 +720,6 @@ static const struct pci_device_id agp_amd64_pci_table[] = { MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); -static const struct pci_device_id agp_amd64_pci_promisc_table[] = { - { PCI_DEVICE_CLASS(0, 0) }, - { } -}; - static DEFINE_SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, NULL, agp_amd64_resume); static struct pci_driver agp_amd64_pci_driver = { @@ -739,6 +734,7 @@ static struct pci_driver agp_amd64_pci_driver = { /* Not static due to IOMMU code calling it early. 
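The loop.c hunk above replaces file_start_write()/file_end_write() on the backing file with kiocb_start_write()/kiocb_end_write(), tying write-freeze protection to the kiocb so the completion path needs no loop_device back-pointer. A condensed sketch of the pairing, with hypothetical names and the async case reduced to a comment:

#include <linux/fs.h>
#include <linux/uio.h>

/* assumes iocb->ki_filp == file, as in lo_rw_aio() */
static ssize_t demo_issue_write(struct file *file, struct kiocb *iocb,
				struct iov_iter *from)
{
	ssize_t ret;

	kiocb_start_write(iocb);	/* claims sb write access for the iocb */
	ret = file->f_op->write_iter(iocb, from);
	if (ret != -EIOCBQUEUED)
		kiocb_end_write(iocb);	/* completed synchronously */
	/* an async completion handler would call kiocb_end_write() instead */
	return ret;
}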
*/ int __init agp_amd64_init(void) { + struct pci_dev *pdev = NULL; int err = 0; if (agp_off) @@ -767,9 +763,13 @@ int __init agp_amd64_init(void) } /* Look for any AGP bridge */ - agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; - err = driver_attach(&agp_amd64_pci_driver.driver); - if (err == 0 && agp_bridges_found == 0) { + for_each_pci_dev(pdev) + if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) + pci_add_dynid(&agp_amd64_pci_driver, + pdev->vendor, pdev->device, + pdev->subsystem_vendor, + pdev->subsystem_device, 0, 0, 0); + if (agp_bridges_found == 0) { pci_unregister_driver(&agp_amd64_pci_driver); err = -ENODEV; } diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index 15510c2ff21c..1b1561c84127 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -404,6 +404,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev) const struct scmi_handle *handle = sdev->handle; struct scmi_protocol_handle *ph; const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {}; + struct scmi_clk *sclks; if (!handle) return -ENODEV; @@ -430,18 +431,21 @@ static int scmi_clocks_probe(struct scmi_device *sdev) transport_is_atomic = handle->is_transport_atomic(handle, &atomic_threshold_us); + sclks = devm_kcalloc(dev, count, sizeof(*sclks), GFP_KERNEL); + if (!sclks) + return -ENOMEM; + + for (idx = 0; idx < count; idx++) + hws[idx] = &sclks[idx].hw; + for (idx = 0; idx < count; idx++) { - struct scmi_clk *sclk; + struct scmi_clk *sclk = &sclks[idx]; const struct clk_ops *scmi_ops; - sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL); - if (!sclk) - return -ENOMEM; - sclk->info = scmi_proto_clk_ops->info_get(ph, idx); if (!sclk->info) { dev_dbg(dev, "invalid clock info for idx %d\n", idx); - devm_kfree(dev, sclk); + hws[idx] = NULL; continue; } @@ -479,13 +483,11 @@ static int scmi_clocks_probe(struct scmi_device *sdev) if (err) { dev_err(dev, "failed to register clock %d\n", idx); devm_kfree(dev, sclk->parent_data); - devm_kfree(dev, sclk); hws[idx] = NULL; } else { dev_dbg(dev, "Registered clock:%s%s\n", sclk->info->name, scmi_ops->enable ? 
" (atomic ops)" : ""); - hws[idx] = &sclk->hw; } } diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c index 25974947ad0c..cc2ee2be1819 100644 --- a/drivers/clk/imx/clk-imx95-blk-ctl.c +++ b/drivers/clk/imx/clk-imx95-blk-ctl.c @@ -219,11 +219,15 @@ static const struct imx95_blk_ctl_dev_data lvds_csr_dev_data = { .clk_reg_offset = 0, }; +static const char * const disp_engine_parents[] = { + "videopll1", "dsi_pll", "ldb_pll_div7" +}; + static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = { [IMX95_CLK_DISPMIX_ENG0_SEL] = { .name = "disp_engine0_sel", - .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", }, - .num_parents = 4, + .parent_names = disp_engine_parents, + .num_parents = ARRAY_SIZE(disp_engine_parents), .reg = 0, .bit_idx = 0, .bit_width = 2, @@ -232,8 +236,8 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = { }, [IMX95_CLK_DISPMIX_ENG1_SEL] = { .name = "disp_engine1_sel", - .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", }, - .num_parents = 4, + .parent_names = disp_engine_parents, + .num_parents = ARRAY_SIZE(disp_engine_parents), .reg = 0, .bit_idx = 2, .bit_width = 2, diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index 4e1ba35deda9..b19bc60cc627 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -45,7 +45,6 @@ struct psci_cpuidle_domain_state { static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data); static DEFINE_PER_CPU(struct psci_cpuidle_domain_state, psci_domain_state); static bool psci_cpuidle_use_syscore; -static bool psci_cpuidle_use_cpuhp; void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx, u32 state) @@ -124,8 +123,12 @@ static int psci_idle_cpuhp_up(unsigned int cpu) { struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev); - if (pd_dev) - pm_runtime_get_sync(pd_dev); + if (pd_dev) { + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + pm_runtime_get_sync(pd_dev); + else + dev_pm_genpd_resume(pd_dev); + } return 0; } @@ -135,7 +138,11 @@ static int psci_idle_cpuhp_down(unsigned int cpu) struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev); if (pd_dev) { - pm_runtime_put_sync(pd_dev); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + pm_runtime_put_sync(pd_dev); + else + dev_pm_genpd_suspend(pd_dev); + /* Clear domain state to start fresh at next online. */ psci_clear_domain_state(); } @@ -196,9 +203,6 @@ static void psci_idle_init_cpuhp(void) { int err; - if (!psci_cpuidle_use_cpuhp) - return; - err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING, "cpuidle/psci:online", psci_idle_cpuhp_up, @@ -259,10 +263,8 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv, * s2ram and s2idle. 
*/ drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state; - if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) drv->states[state_count - 1].enter = psci_enter_domain_idle_state; - psci_cpuidle_use_cpuhp = true; - } return 0; } @@ -339,7 +341,6 @@ static void psci_cpu_deinit_idle(int cpu) dt_idle_detach_cpu(data->dev); psci_cpuidle_use_syscore = false; - psci_cpuidle_use_cpuhp = false; } static int psci_idle_init_cpu(struct device *dev, int cpu) diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index af37477ffd8d..be21e4e2016c 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -314,30 +314,30 @@ static int chcr_compute_partial_hash(struct shash_desc *desc, if (digest_size == SHA1_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?: - crypto_shash_export(desc, (void *)&sha1_st); + crypto_shash_export_core(desc, &sha1_st); memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE); } else if (digest_size == SHA224_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: - crypto_shash_export(desc, (void *)&sha256_st); + crypto_shash_export_core(desc, &sha256_st); memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); } else if (digest_size == SHA256_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: - crypto_shash_export(desc, (void *)&sha256_st); + crypto_shash_export_core(desc, &sha256_st); memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); } else if (digest_size == SHA384_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: - crypto_shash_export(desc, (void *)&sha512_st); + crypto_shash_export_core(desc, &sha512_st); memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); } else if (digest_size == SHA512_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: - crypto_shash_export(desc, (void *)&sha512_st); + crypto_shash_export_core(desc, &sha512_st); memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); } else { error = -EINVAL; diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c index 3c4bba4a8779..c03a69851114 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c @@ -5,11 +5,11 @@ #include <linux/crypto.h> #include <crypto/internal/aead.h> #include <crypto/internal/cipher.h> +#include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/aes.h> #include <crypto/sha1.h> #include <crypto/sha2.h> -#include <crypto/hash.h> #include <crypto/hmac.h> #include <crypto/algapi.h> #include <crypto/authenc.h> @@ -154,19 +154,19 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, switch (ctx->qat_hash_alg) { case ICP_QAT_HW_AUTH_ALGO_SHA1: - if (crypto_shash_export(shash, &ctx->sha1)) + if (crypto_shash_export_core(shash, &ctx->sha1)) return -EFAULT; for (i = 0; i < digest_size >> 2; i++, hash_state_out++) *hash_state_out = cpu_to_be32(ctx->sha1.state[i]); break; case ICP_QAT_HW_AUTH_ALGO_SHA256: - if (crypto_shash_export(shash, &ctx->sha256)) + if (crypto_shash_export_core(shash, &ctx->sha256)) return -EFAULT; for (i = 0; i < digest_size >> 2; i++, hash_state_out++) *hash_state_out = cpu_to_be32(ctx->sha256.state[i]); break; case 
ICP_QAT_HW_AUTH_ALGO_SHA512: - if (crypto_shash_export(shash, &ctx->sha512)) + if (crypto_shash_export_core(shash, &ctx->sha512)) return -EFAULT; for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]); @@ -190,19 +190,19 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, switch (ctx->qat_hash_alg) { case ICP_QAT_HW_AUTH_ALGO_SHA1: - if (crypto_shash_export(shash, &ctx->sha1)) + if (crypto_shash_export_core(shash, &ctx->sha1)) return -EFAULT; for (i = 0; i < digest_size >> 2; i++, hash_state_out++) *hash_state_out = cpu_to_be32(ctx->sha1.state[i]); break; case ICP_QAT_HW_AUTH_ALGO_SHA256: - if (crypto_shash_export(shash, &ctx->sha256)) + if (crypto_shash_export_core(shash, &ctx->sha256)) return -EFAULT; for (i = 0; i < digest_size >> 2; i++, hash_state_out++) *hash_state_out = cpu_to_be32(ctx->sha256.state[i]); break; case ICP_QAT_HW_AUTH_ALGO_SHA512: - if (crypto_shash_export(shash, &ctx->sha512)) + if (crypto_shash_export_core(shash, &ctx->sha512)) return -EFAULT; for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]); diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c index 49f09998e5c0..3371e0a76d3c 100644 --- a/drivers/dma/dw-edma/dw-edma-pcie.c +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -161,12 +161,16 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; - struct dw_edma_pcie_data vsec_data; + struct dw_edma_pcie_data *vsec_data __free(kfree) = NULL; struct device *dev = &pdev->dev; struct dw_edma_chip *chip; int err, nr_irqs; int i, mask; + vsec_data = kmalloc(sizeof(*vsec_data), GFP_KERNEL); + if (!vsec_data) + return -ENOMEM; + /* Enable PCI device */ err = pcim_enable_device(pdev); if (err) { @@ -174,23 +178,23 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, return err; } - memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data)); + memcpy(vsec_data, pdata, sizeof(struct dw_edma_pcie_data)); /* * Tries to find if exists a PCIe Vendor-Specific Extended Capability * for the DMA, if one exists, then reconfigures it. 
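Both crypto drivers above switch to crypto_shash_export_core(), which exports only the bare state words for hardware to resume from. A sketch of the HMAC ipad precompute they share, assuming a SHA-256 shash desc was already set up (crypto_shash_export_core() comes from <crypto/internal/hash.h>, as the qat include change reflects):

#include <crypto/internal/hash.h>
#include <crypto/sha2.h>

static int demo_partial_sha256(struct shash_desc *desc,
			       const u8 ipad[SHA256_BLOCK_SIZE],
			       struct sha256_state *st)
{
	/* hash one padded key block, then pull out the raw state words */
	return crypto_shash_init(desc) ?:
	       crypto_shash_update(desc, ipad, SHA256_BLOCK_SIZE) ?:
	       crypto_shash_export_core(desc, st);
}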
*/ - dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data); + dw_edma_pcie_get_vsec_dma_data(pdev, vsec_data); /* Mapping PCI BAR regions */ - mask = BIT(vsec_data.rg.bar); - for (i = 0; i < vsec_data.wr_ch_cnt; i++) { - mask |= BIT(vsec_data.ll_wr[i].bar); - mask |= BIT(vsec_data.dt_wr[i].bar); + mask = BIT(vsec_data->rg.bar); + for (i = 0; i < vsec_data->wr_ch_cnt; i++) { + mask |= BIT(vsec_data->ll_wr[i].bar); + mask |= BIT(vsec_data->dt_wr[i].bar); } - for (i = 0; i < vsec_data.rd_ch_cnt; i++) { - mask |= BIT(vsec_data.ll_rd[i].bar); - mask |= BIT(vsec_data.dt_rd[i].bar); + for (i = 0; i < vsec_data->rd_ch_cnt; i++) { + mask |= BIT(vsec_data->ll_rd[i].bar); + mask |= BIT(vsec_data->dt_rd[i].bar); } err = pcim_iomap_regions(pdev, mask, pci_name(pdev)); if (err) { @@ -213,7 +217,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, return -ENOMEM; /* IRQs allocation */ - nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs, + nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data->irqs, PCI_IRQ_MSI | PCI_IRQ_MSIX); if (nr_irqs < 1) { pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n", @@ -224,22 +228,22 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, /* Data structure initialization */ chip->dev = dev; - chip->mf = vsec_data.mf; + chip->mf = vsec_data->mf; chip->nr_irqs = nr_irqs; chip->ops = &dw_edma_pcie_plat_ops; - chip->ll_wr_cnt = vsec_data.wr_ch_cnt; - chip->ll_rd_cnt = vsec_data.rd_ch_cnt; + chip->ll_wr_cnt = vsec_data->wr_ch_cnt; + chip->ll_rd_cnt = vsec_data->rd_ch_cnt; - chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar]; + chip->reg_base = pcim_iomap_table(pdev)[vsec_data->rg.bar]; if (!chip->reg_base) return -ENOMEM; for (i = 0; i < chip->ll_wr_cnt; i++) { struct dw_edma_region *ll_region = &chip->ll_region_wr[i]; struct dw_edma_region *dt_region = &chip->dt_region_wr[i]; - struct dw_edma_block *ll_block = &vsec_data.ll_wr[i]; - struct dw_edma_block *dt_block = &vsec_data.dt_wr[i]; + struct dw_edma_block *ll_block = &vsec_data->ll_wr[i]; + struct dw_edma_block *dt_block = &vsec_data->dt_wr[i]; ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar]; if (!ll_region->vaddr.io) @@ -263,8 +267,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, for (i = 0; i < chip->ll_rd_cnt; i++) { struct dw_edma_region *ll_region = &chip->ll_region_rd[i]; struct dw_edma_region *dt_region = &chip->dt_region_rd[i]; - struct dw_edma_block *ll_block = &vsec_data.ll_rd[i]; - struct dw_edma_block *dt_block = &vsec_data.dt_rd[i]; + struct dw_edma_block *ll_block = &vsec_data->ll_rd[i]; + struct dw_edma_block *dt_block = &vsec_data->dt_rd[i]; ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar]; if (!ll_region->vaddr.io) @@ -298,31 +302,31 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf); pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n", - vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz, + vsec_data->rg.bar, vsec_data->rg.off, vsec_data->rg.sz, chip->reg_base); for (i = 0; i < chip->ll_wr_cnt; i++) { pci_dbg(pdev, "L. 
List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", - i, vsec_data.ll_wr[i].bar, - vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz, + i, vsec_data->ll_wr[i].bar, + vsec_data->ll_wr[i].off, chip->ll_region_wr[i].sz, chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr); pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", - i, vsec_data.dt_wr[i].bar, - vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz, + i, vsec_data->dt_wr[i].bar, + vsec_data->dt_wr[i].off, chip->dt_region_wr[i].sz, chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr); } for (i = 0; i < chip->ll_rd_cnt; i++) { pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", - i, vsec_data.ll_rd[i].bar, - vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz, + i, vsec_data->ll_rd[i].bar, + vsec_data->ll_rd[i].off, chip->ll_region_rd[i].sz, chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr); pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", - i, vsec_data.dt_rd[i].bar, - vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz, + i, vsec_data->dt_rd[i].bar, + vsec_data->dt_rd[i].off, chip->dt_region_rd[i].sz, chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr); } diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c index 47c8adfdc155..9f0c41ca7770 100644 --- a/drivers/dma/mediatek/mtk-cqdma.c +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -449,9 +449,9 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c, return ret; spin_lock_irqsave(&cvc->pc->lock, flags); - spin_lock_irqsave(&cvc->vc.lock, flags); + spin_lock(&cvc->vc.lock); vd = mtk_cqdma_find_active_desc(c, cookie); - spin_unlock_irqrestore(&cvc->vc.lock, flags); + spin_unlock(&cvc->vc.lock); spin_unlock_irqrestore(&cvc->pc->lock, flags); if (vd) { diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 0d6324c4e2be..7a2488a0d6a3 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1351,7 +1351,7 @@ static int nbpf_probe(struct platform_device *pdev) if (irqs == 1) { eirq = irqbuf[0]; - for (i = 0; i <= num_channels; i++) + for (i = 0; i < num_channels; i++) nbpf->chan[i].irq = irqbuf[0]; } else { eirq = platform_get_irq_byname(pdev, "error"); @@ -1361,16 +1361,15 @@ static int nbpf_probe(struct platform_device *pdev) if (irqs == num_channels + 1) { struct nbpf_channel *chan; - for (i = 0, chan = nbpf->chan; i <= num_channels; + for (i = 0, chan = nbpf->chan; i < num_channels; i++, chan++) { /* Skip the error IRQ */ if (irqbuf[i] == eirq) i++; + if (i >= ARRAY_SIZE(irqbuf)) + return -EINVAL; chan->irq = irqbuf[i]; } - - if (chan != nbpf->chan + num_channels) - return -EINVAL; } else { /* 2 IRQs and more than one channel */ if (irqbuf[0] == eirq) @@ -1378,7 +1377,7 @@ static int nbpf_probe(struct platform_device *pdev) else irq = irqbuf[0]; - for (i = 0; i <= num_channels; i++) + for (i = 0; i < num_channels; i++) nbpf->chan[i].irq = irq; } } diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c index 219667315b2c..c13545dce349 100644 --- a/drivers/gpio/gpiolib-acpi-quirks.c +++ b/drivers/gpio/gpiolib-acpi-quirks.c @@ -331,6 +331,19 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = { .ignore_interrupt = "AMDI0030:00@11", }, }, + { + /* + * Wakeup only works when keyboard backlight is turned off + * https://gitlab.freedesktop.org/drm/amd/-/issues/4169 + */ + .matches = { + 
DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 15"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_interrupt = "AMDI0030:00@8", + }, + }, {} /* Terminating entry */ }; diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c index 4d5f83b17624..72422c5db364 100644 --- a/drivers/gpio/gpiolib-devres.c +++ b/drivers/gpio/gpiolib-devres.c @@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_unhinge); */ void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs) { - devm_remove_action(dev, devm_gpiod_release_array, descs); + devm_release_action(dev, devm_gpiod_release_array, descs); } EXPORT_SYMBOL_GPL(devm_gpiod_put_array); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 426834806fbf..6ac0ce361a2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -427,6 +427,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, { unsigned long flags; ktime_t deadline; + bool ret; if (unlikely(ring->adev->debug_disable_soft_recovery)) return false; @@ -441,12 +442,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, dma_fence_set_error(fence, -ENODATA); spin_unlock_irqrestore(fence->lock, flags); - atomic_inc(&ring->adev->gpu_reset_counter); while (!dma_fence_is_signaled(fence) && ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) ring->funcs->soft_recovery(ring, vmid); - return dma_fence_is_signaled(fence); + ret = dma_fence_is_signaled(fence); + /* increment the counter only if soft reset worked */ + if (ret) + atomic_inc(&ring->adev->gpu_reset_counter); + + return ret; } /* diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5ee2237d8ee8..bc983ecf3d99 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4640,6 +4640,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); } return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 87058271b00c..2551823382f8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -728,7 +728,16 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, * support programmable degamma anywhere. */ is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; - drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0, + /* Dont't enable DRM CRTC degamma property for DCN401 since the + * pre-blending degamma LUT doesn't apply to cursor, and therefore + * can't work similar to a post-blending degamma LUT as in other hw + * versions. + * TODO: revisit it once KMS plane color API is merged. + */ + drm_crtc_enable_color_mgmt(&acrtc->base, + (is_dcn && + dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ? 
+ MAX_COLOR_LUT_ENTRIES : 0, true, MAX_COLOR_LUT_ENTRIES); drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c index a3b8e3d4a429..4b17d2fcd565 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c @@ -1565,7 +1565,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct( clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL); if (!clk_mgr->base.bw_params) { BREAK_TO_DEBUGGER(); - kfree(clk_mgr); + kfree(clk_mgr401); return NULL; } @@ -1576,6 +1576,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct( if (!clk_mgr->wm_range_table) { BREAK_TO_DEBUGGER(); kfree(clk_mgr->base.bw_params); + kfree(clk_mgr401); return NULL; } diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index dc622c78db9d..ea78c6c8ca7a 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -725,7 +725,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, * monitor doesn't power down exactly after the throw away read. */ if (!aux->is_remote) { - ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS); + ret = drm_dp_dpcd_probe(aux, DP_TRAINING_PATTERN_SET); if (ret < 0) return ret; } diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index b781601946db..63a70f285cce 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -862,11 +862,23 @@ EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_framebuffer_free); int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs) { + unsigned int i; int ret; + bool exists; if (WARN_ON_ONCE(fb->dev != dev || !fb->format)) return -EINVAL; + for (i = 0; i < fb->format->num_planes; i++) { + if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))) + fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + if (fb->obj[i]) { + exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]); + if (exists) + fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + } + } + INIT_LIST_HEAD(&fb->filp_head); fb->funcs = funcs; @@ -875,7 +887,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB, false, drm_framebuffer_free); if (ret) - goto out; + goto err; mutex_lock(&dev->mode_config.fb_lock); dev->mode_config.num_fb++; @@ -883,7 +895,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, mutex_unlock(&dev->mode_config.fb_lock); drm_mode_object_register(dev, &fb->base); -out: + + return 0; + +err: + for (i = 0; i < fb->format->num_planes; i++) { + if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) { + drm_gem_object_handle_put_unlocked(fb->obj[i]); + fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + } + } return ret; } EXPORT_SYMBOL(drm_framebuffer_init); @@ -960,6 +981,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private); void drm_framebuffer_cleanup(struct drm_framebuffer *fb) { struct drm_device *dev = fb->dev; + unsigned int i; + + for (i = 0; i < fb->format->num_planes; i++) { + if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) + drm_gem_object_handle_put_unlocked(fb->obj[i]); + } mutex_lock(&dev->mode_config.fb_lock); 
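The gpiolib-devres one-liner above is easy to misread: devm_remove_action() only drops the devres entry, while devm_release_action() drops it and runs the release callback immediately, which is what a put-style API needs. Illustrated with a hypothetical action:

#include <linux/device.h>

static void demo_release(void *data)
{
	/* undo whatever was acquired for @data */
}

static void demo_put_now(struct device *dev, void *res)
{
	/*
	 * devm_remove_action(dev, demo_release, res) would silently skip
	 * the cleanup; releasing runs it right here:
	 */
	devm_release_action(dev, demo_release, res);
}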
list_del(&fb->head); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 4bf0a76bb35e..ac0524595bd6 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -223,23 +223,34 @@ static void drm_gem_object_handle_get(struct drm_gem_object *obj) } /** - * drm_gem_object_handle_get_unlocked - acquire reference on user-space handles + * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any * @obj: GEM object * - * Acquires a reference on the GEM buffer object's handle. Required - * to keep the GEM object alive. Call drm_gem_object_handle_put_unlocked() - * to release the reference. + * Acquires a reference on the GEM buffer object's handle. Required to keep + * the GEM object alive. Call drm_gem_object_handle_put_if_exists_unlocked() + * to release the reference. Does nothing if the buffer object has no handle. + * + * Returns: + * True if a handle exists, or false otherwise */ -void drm_gem_object_handle_get_unlocked(struct drm_gem_object *obj) +bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; guard(mutex)(&dev->object_name_lock); - drm_WARN_ON(dev, !obj->handle_count); /* first ref taken in create-tail helper */ + /* + * First ref taken during GEM object creation, if any. Some + * drivers set up internal framebuffers with GEM objects that + * do not have a GEM handle. Hence, this counter can be zero. + */ + if (!obj->handle_count) + return false; + drm_gem_object_handle_get(obj); + + return true; } -EXPORT_SYMBOL(drm_gem_object_handle_get_unlocked); /** * drm_gem_object_handle_free - release resources bound to userspace handles @@ -272,7 +283,7 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) } /** - * drm_gem_object_handle_put_unlocked - releases reference on user-space handles + * drm_gem_object_handle_put_unlocked - releases reference on user-space handle * @obj: GEM object * * Releases a reference on the GEM buffer object's handle. Possibly releases @@ -283,14 +294,14 @@ void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) struct drm_device *dev = obj->dev; bool final = false; - if (WARN_ON(READ_ONCE(obj->handle_count) == 0)) + if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0)) return; /* - * Must bump handle count first as this may be the last - * ref, in which case the object would disappear before we - * checked for a name - */ + * Must bump handle count first as this may be the last + * ref, in which case the object would disappear before + * we checked for a name. 
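The drm_gem.c change above reserves the handle with a NULL pointer and publishes the object via idr_replace() only after setup succeeds, so a concurrent lookup can never see a half-initialized entry. The reserve-then-publish IDR pattern on its own, with illustrative names:

#include <linux/bug.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(demo_idr);
static DEFINE_SPINLOCK(demo_lock);

static int demo_publish(void *obj)
{
	void *old;
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&demo_lock);
	id = idr_alloc(&demo_idr, NULL, 1, 0, GFP_NOWAIT); /* reserve id only */
	spin_unlock(&demo_lock);
	idr_preload_end();
	if (id < 0)
		return id;

	/* ... complete setup; on failure, idr_remove(&demo_idr, id) ... */

	spin_lock(&demo_lock);
	old = idr_replace(&demo_idr, obj, id);	/* publish */
	spin_unlock(&demo_lock);
	WARN_ON(old != NULL);

	return id;
}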
+ */ mutex_lock(&dev->object_name_lock); if (--obj->handle_count == 0) { @@ -303,7 +314,6 @@ void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) if (final) drm_gem_object_put(obj); } -EXPORT_SYMBOL(drm_gem_object_handle_put_unlocked); /* * Called at device or object close to release the file's @@ -315,6 +325,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) struct drm_file *file_priv = data; struct drm_gem_object *obj = ptr; + if (drm_WARN_ON(obj->dev, !data)) + return 0; + if (obj->funcs->close) obj->funcs->close(obj, file_priv); @@ -435,7 +448,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, idr_preload(GFP_KERNEL); spin_lock(&file_priv->table_lock); - ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); + ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT); spin_unlock(&file_priv->table_lock); idr_preload_end(); @@ -456,6 +469,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, goto err_revoke; } + /* mirrors drm_gem_handle_delete to avoid races */ + spin_lock(&file_priv->table_lock); + obj = idr_replace(&file_priv->object_idr, obj, handle); + WARN_ON(obj != NULL); + spin_unlock(&file_priv->table_lock); *handlep = handle; return 0; diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 14a87788695d..6f72e7a0f427 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -99,7 +99,7 @@ void drm_gem_fb_destroy(struct drm_framebuffer *fb) unsigned int i; for (i = 0; i < fb->format->num_planes; i++) - drm_gem_object_handle_put_unlocked(fb->obj[i]); + drm_gem_object_put(fb->obj[i]); drm_framebuffer_cleanup(fb); kfree(fb); @@ -182,10 +182,8 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev, if (!objs[i]) { drm_dbg_kms(dev, "Failed to lookup GEM object\n"); ret = -ENOENT; - goto err_gem_object_handle_put_unlocked; + goto err_gem_object_put; } - drm_gem_object_handle_get_unlocked(objs[i]); - drm_gem_object_put(objs[i]); min_size = (height - 1) * mode_cmd->pitches[i] + drm_format_info_min_pitch(info, i, width) @@ -195,22 +193,22 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev, drm_dbg_kms(dev, "GEM object size (%zu) smaller than minimum size (%u) for plane %d\n", objs[i]->size, min_size, i); - drm_gem_object_handle_put_unlocked(objs[i]); + drm_gem_object_put(objs[i]); ret = -EINVAL; - goto err_gem_object_handle_put_unlocked; + goto err_gem_object_put; } } ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs); if (ret) - goto err_gem_object_handle_put_unlocked; + goto err_gem_object_put; return 0; -err_gem_object_handle_put_unlocked: +err_gem_object_put: while (i > 0) { --i; - drm_gem_object_handle_put_unlocked(objs[i]); + drm_gem_object_put(objs[i]); } return ret; } diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index be77d61a16ce..60c282881958 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -161,7 +161,7 @@ void drm_sysfs_lease_event(struct drm_device *dev); /* drm_gem.c */ int drm_gem_init(struct drm_device *dev); -void drm_gem_object_handle_get_unlocked(struct drm_gem_object *obj); +bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj); void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj); int drm_gem_handle_create_tail(struct drm_file *file_priv, struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs index dd55b1cb764d..18492daae4b3 
100644 --- a/drivers/gpu/drm/drm_panic_qr.rs +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -27,7 +27,7 @@ //! * <https://github.com/erwanvivien/fast_qr> //! * <https://github.com/bjguillot/qr> -use kernel::{prelude::*, str::CStr}; +use kernel::prelude::*; #[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)] struct Version(usize); diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index ba7b8938b17c..166ee11831ab 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1938,7 +1938,7 @@ static int get_init_otp_deassert_fragment_len(struct intel_display *display, int index, len; if (drm_WARN_ON(display->drm, - !data || panel->vbt.dsi.seq_version != 1)) + !data || panel->vbt.dsi.seq_version >= 3)) return 0; /* index = 1 to skip sequence byte */ @@ -1961,7 +1961,7 @@ static int get_init_otp_deassert_fragment_len(struct intel_display *display, } /* - * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence. + * Some v1/v2 VBT MIPI sequences do the deassert in the init OTP sequence. * The deassert must be done before calling intel_dsi_device_ready, so for * these devices we split the init OTP sequence into a deassert sequence and * the actual init OTP part. @@ -1972,9 +1972,9 @@ static void vlv_fixup_mipi_sequences(struct intel_display *display, u8 *init_otp; int len; - /* Limit this to v1 vid-mode sequences */ + /* Limit this to v1/v2 vid-mode sequences */ if (panel->vbt.dsi.config->is_cmd_mode || - panel->vbt.dsi.seq_version != 1) + panel->vbt.dsi.seq_version >= 3) return; /* Only do this if there are otp and assert seqs and no deassert seq */ diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c index 41f5d89e78b8..3e349d039fc0 100644 --- a/drivers/gpu/drm/imagination/pvr_power.c +++ b/drivers/gpu/drm/imagination/pvr_power.c @@ -386,13 +386,13 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset) if (!err) { if (hard_reset) { pvr_dev->fw_dev.booted = false; - WARN_ON(pm_runtime_force_suspend(from_pvr_device(pvr_dev)->dev)); + WARN_ON(pvr_power_device_suspend(from_pvr_device(pvr_dev)->dev)); err = pvr_fw_hard_reset(pvr_dev); if (err) goto err_device_lost; - err = pm_runtime_force_resume(from_pvr_device(pvr_dev)->dev); + err = pvr_power_device_resume(from_pvr_device(pvr_dev)->dev); pvr_dev->fw_dev.booted = true; if (err) goto err_device_lost; diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c index 8f6fba4217ec..bc7527542fdc 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -719,6 +719,39 @@ int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, return 0; } +void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state); + int i; + + /* no need to wait for disabling the plane by CPU */ + if (!mtk_crtc->cmdq_client.chan) + return; + + if (!mtk_crtc->enabled) + return; + + /* set pending plane state to disabled */ + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *mtk_plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(mtk_plane->state); + + if (mtk_plane->index == plane->index) { + memcpy(mtk_plane_state, plane_state, sizeof(*plane_state)); + break; + } + } + mtk_crtc_update_config(mtk_crtc, false); + + /* wait 
for planes to be disabled by CMDQ */ + wait_event_timeout(mtk_crtc->cb_blocking_queue, + mtk_crtc->cmdq_vblank_cnt == 0, + msecs_to_jiffies(500)); +#endif +} + void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, struct drm_atomic_state *state) { @@ -930,7 +963,8 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev, mtk_ddp_comp_supported_rotations(comp), mtk_ddp_comp_get_blend_modes(comp), mtk_ddp_comp_get_formats(comp), - mtk_ddp_comp_get_num_formats(comp), i); + mtk_ddp_comp_get_num_formats(comp), + mtk_ddp_comp_is_afbc_supported(comp), i); if (ret) return ret; diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.h b/drivers/gpu/drm/mediatek/mtk_crtc.h index 388e900b6f4d..828f109b83e7 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_crtc.h @@ -21,6 +21,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path, unsigned int num_conn_routes); int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, struct mtk_plane_state *state); +void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane); void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, struct drm_atomic_state *plane_state); struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c index edc6417639e6..ac6620e10262 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c @@ -366,6 +366,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = { .get_blend_modes = mtk_ovl_get_blend_modes, .get_formats = mtk_ovl_get_formats, .get_num_formats = mtk_ovl_get_num_formats, + .is_afbc_supported = mtk_ovl_is_afbc_supported, }; static const struct mtk_ddp_comp_funcs ddp_postmask = { diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h index 39720b27f4e9..7289b3dcf22f 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h @@ -83,6 +83,7 @@ struct mtk_ddp_comp_funcs { u32 (*get_blend_modes)(struct device *dev); const u32 *(*get_formats)(struct device *dev); size_t (*get_num_formats)(struct device *dev); + bool (*is_afbc_supported)(struct device *dev); void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next); void (*disconnect)(struct device *dev, struct device *mmsys_dev, unsigned int next); void (*add)(struct device *dev, struct mtk_mutex *mutex); @@ -294,6 +295,14 @@ size_t mtk_ddp_comp_get_num_formats(struct mtk_ddp_comp *comp) return 0; } +static inline bool mtk_ddp_comp_is_afbc_supported(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->is_afbc_supported) + return comp->funcs->is_afbc_supported(comp->dev); + + return false; +} + static inline bool mtk_ddp_comp_add(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex) { if (comp->funcs && comp->funcs->add) { diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h index 04217a36939c..679d413bf10b 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -106,6 +106,7 @@ void mtk_ovl_disable_vblank(struct device *dev); u32 mtk_ovl_get_blend_modes(struct device *dev); const u32 *mtk_ovl_get_formats(struct device *dev); size_t mtk_ovl_get_num_formats(struct device *dev); +bool mtk_ovl_is_afbc_supported(struct device *dev); void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex); void 
mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index d0581c4e3c99..e0236353d499 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -236,6 +236,13 @@ size_t mtk_ovl_get_num_formats(struct device *dev) return ovl->data->num_formats; } +bool mtk_ovl_is_afbc_supported(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return ovl->data->supports_afbc; +} + int mtk_ovl_clk_enable(struct device *dev) { struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 6fb85bc6487a..a2fdceadf209 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -1095,7 +1095,6 @@ static const u32 mt8183_output_fmts[] = { }; static const u32 mt8195_dpi_output_fmts[] = { - MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_RGB888_2X12_LE, MEDIA_BUS_FMT_RGB888_2X12_BE, @@ -1103,18 +1102,19 @@ static const u32 mt8195_dpi_output_fmts[] = { MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV10_1X20, MEDIA_BUS_FMT_YUYV12_1X24, + MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_YUV8_1X24, MEDIA_BUS_FMT_YUV10_1X30, }; static const u32 mt8195_dp_intf_output_fmts[] = { - MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_RGB888_2X12_LE, MEDIA_BUS_FMT_RGB888_2X12_BE, MEDIA_BUS_FMT_RGB101010_1X30, MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV10_1X20, + MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_YUV8_1X24, MEDIA_BUS_FMT_YUV10_1X30, }; diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c index 655106bbb76d..cbc4f37da8ba 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -285,9 +285,14 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane, struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + mtk_plane_state->pending.enable = false; wmb(); /* Make sure the above parameter is set before update */ mtk_plane_state->pending.dirty = true; + + mtk_crtc_plane_disable(old_state->crtc, plane); } static void mtk_plane_atomic_update(struct drm_plane *plane, @@ -321,7 +326,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, unsigned int supported_rotations, const u32 blend_modes, - const u32 *formats, size_t num_formats, unsigned int plane_idx) + const u32 *formats, size_t num_formats, + bool supports_afbc, unsigned int plane_idx) { int err; @@ -332,7 +338,9 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, err = drm_universal_plane_init(dev, plane, possible_crtcs, &mtk_plane_funcs, formats, - num_formats, modifiers, type, NULL); + num_formats, + supports_afbc ? 
modifiers : NULL, + type, NULL); if (err) { DRM_ERROR("failed to initialize plane\n"); return err; diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h index 3b13b89989c7..95c5fa5295d8 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_plane.h @@ -49,5 +49,6 @@ to_mtk_plane_state(struct drm_plane_state *state) int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, unsigned int supported_rotations, const u32 blend_modes, - const u32 *formats, size_t num_formats, unsigned int plane_idx); + const u32 *formats, size_t num_formats, + bool supports_afbc, unsigned int plane_idx); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 200e65a7cefc..c7869a639bef 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -314,14 +314,10 @@ nouveau_debugfs_fini(struct nouveau_drm *drm) drm->debugfs = NULL; } -int +void nouveau_module_debugfs_init(void) { nouveau_debugfs_root = debugfs_create_dir("nouveau", NULL); - if (IS_ERR(nouveau_debugfs_root)) - return PTR_ERR(nouveau_debugfs_root); - - return 0; } void diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h index b7617b344ee2..d05ed0e641c4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h @@ -24,7 +24,7 @@ extern void nouveau_debugfs_fini(struct nouveau_drm *); extern struct dentry *nouveau_debugfs_root; -int nouveau_module_debugfs_init(void); +void nouveau_module_debugfs_init(void); void nouveau_module_debugfs_fini(void); #else static inline void @@ -42,10 +42,9 @@ nouveau_debugfs_fini(struct nouveau_drm *drm) { } -static inline int +static inline void nouveau_module_debugfs_init(void) { - return 0; } static inline void diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 0c82a63cd49d..7bb64fcdd497 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1284,6 +1284,9 @@ nouveau_ioctls[] = { DRM_IOCTL_DEF_DRV(NOUVEAU_EXEC, nouveau_exec_ioctl_exec, DRM_RENDER_ALLOW), }; +#define DRM_IOCTL_NOUVEAU_NVIF _IOC(_IOC_READ | _IOC_WRITE, DRM_IOCTL_BASE, \ + DRM_COMMAND_BASE + DRM_NOUVEAU_NVIF, 0) + long nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -1297,14 +1300,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; } - switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) { - case DRM_NOUVEAU_NVIF: + if ((cmd & ~IOCSIZE_MASK) == DRM_IOCTL_NOUVEAU_NVIF) ret = nouveau_abi16_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd)); - break; - default: + else ret = drm_ioctl(file, cmd, arg); - break; - } pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); @@ -1461,9 +1460,7 @@ nouveau_drm_init(void) if (!nouveau_modeset) return 0; - ret = nouveau_module_debugfs_init(); - if (ret) - return ret; + nouveau_module_debugfs_init(); #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER platform_driver_register(&nouveau_platform_driver); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index baf42339f93e..588cb4ab85cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -719,7 +719,6 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA 
*caps) union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = 4, - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), }, *obj; caps->status = 0xffff; @@ -727,17 +726,22 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) return; + argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL); + if (!argv4.buffer.pointer) + return; + obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); if (!obj) - return; + goto done; if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) - return; + goto done; caps->status = 0; caps->optimusCaps = *(u32 *)obj->buffer.pointer; +done: ACPI_FREE(obj); kfree(argv4.buffer.pointer); @@ -754,24 +758,28 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = sizeof(caps), - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), }, *obj; jt->status = 0xffff; + argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL); + if (!argv4.buffer.pointer) + return; + obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); if (!obj) - return; + goto done; if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) - return; + goto done; jt->status = 0; jt->jtCaps = *(u32 *)obj->buffer.pointer; jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; jt->bSBIOSCaps = 0; +done: ACPI_FREE(obj); kfree(argv4.buffer.pointer); @@ -1744,6 +1752,13 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); return ret; } + + /* + * TODO: Debug the GSP firmware / RPC handling to find out why + * without this Turing (but none of the other architectures) + * ends up resetting all channels after resume. 
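The two nouveau ACPI fixes above follow one pattern: allocate the _DSM argument buffer explicitly, check it, and route every exit through a single label so both the returned object and the buffer are always freed. Condensed sketch with hypothetical names:

#include <linux/acpi.h>
#include <linux/slab.h>

static u32 demo_dsm_u32(acpi_handle handle, const guid_t *guid, u64 func)
{
	union acpi_object argv4 = {
		.buffer.type   = ACPI_TYPE_BUFFER,
		.buffer.length = 4,
	}, *obj;
	u32 val = 0;

	argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL);
	if (!argv4.buffer.pointer)
		return 0;	/* nothing evaluated, nothing to free */

	obj = acpi_evaluate_dsm(handle, guid, 1, func, &argv4);
	if (!obj)
		goto done;

	if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == 4)
		val = *(u32 *)obj->buffer.pointer;

	ACPI_FREE(obj);
done:
	kfree(argv4.buffer.pointer);
	return val;
}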
+ */ + msleep(50); } ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 5657106c2f7d..15e2d505550f 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -841,7 +841,6 @@ int panfrost_job_init(struct panfrost_device *pfdev) .num_rqs = DRM_SCHED_PRIORITY_COUNT, .credit_limit = 2, .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), - .timeout_wq = pfdev->reset.wq, .name = "pan_js", .dev = pfdev->dev, }; @@ -879,6 +878,7 @@ int panfrost_job_init(struct panfrost_device *pfdev) pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0); if (!pfdev->reset.wq) return -ENOMEM; + args.timeout_wq = pfdev->reset.wq; for (j = 0; j < NUM_JOB_SLOTS; j++) { js->queue[j].fence_context = dma_fence_context_alloc(1); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index bbd39348a7ab..7a3e510327b7 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -26,7 +26,6 @@ * Jerome Glisse */ -#include <linux/console.h> #include <linux/efi.h> #include <linux/pci.h> #include <linux/pm_runtime.h> @@ -1635,11 +1634,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, pci_set_power_state(pdev, PCI_D3hot); } - if (notify_clients) { - console_lock(); - drm_client_dev_suspend(dev, true); - console_unlock(); - } + if (notify_clients) + drm_client_dev_suspend(dev, false); + return 0; } @@ -1661,17 +1658,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (notify_clients) { - console_lock(); - } if (resume) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - if (pci_enable_device(pdev)) { - if (notify_clients) - console_unlock(); + if (pci_enable_device(pdev)) return -1; - } } /* resume AGP if in use */ radeon_agp_resume(rdev); @@ -1747,10 +1738,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients) if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) radeon_pm_compute_clocks(rdev); - if (notify_clients) { - drm_client_dev_resume(dev, true); - console_unlock(); - } + if (notify_clients) + drm_client_dev_resume(dev, false); return 0; } diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c index 2d9a0a3f6c38..7a38664e890e 100644 --- a/drivers/gpu/drm/tegra/nvdec.c +++ b/drivers/gpu/drm/tegra/nvdec.c @@ -261,10 +261,8 @@ static int nvdec_load_falcon_firmware(struct nvdec *nvdec) if (!client->group) { virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL); - - err = dma_mapping_error(nvdec->dev, iova); - if (err < 0) - return err; + if (!virt) + return -ENOMEM; } else { virt = tegra_drm_alloc(tegra, size, &iova); if (IS_ERR(virt)) diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 7a8af2311318..11e60d687572 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -171,14 +171,32 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss) #define XE_DEVCOREDUMP_CHUNK_MAX (SZ_512M + SZ_1G) +/** + * xe_devcoredump_read() - Read data from the Xe device coredump snapshot + * @buffer: Destination buffer to copy the coredump data into + * @offset: Offset in the coredump data to start reading from + * @count: Number of bytes to read + * @data: Pointer to the xe_devcoredump structure + * @datalen: Length of 
the data (unused) + * + * Reads a chunk of the coredump snapshot data into the provided buffer. + * If the devcoredump is smaller than 1.5 GB (XE_DEVCOREDUMP_CHUNK_MAX), + * it is read directly from a pre-written buffer. For larger devcoredumps, + * the pre-written buffer must be periodically repopulated from the snapshot + * state due to kmalloc size limitations. + * + * Return: Number of bytes copied on success, or a negative error code on failure. + */ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) { struct xe_devcoredump *coredump = data; struct xe_devcoredump_snapshot *ss; - ssize_t byte_copied; + ssize_t byte_copied = 0; u32 chunk_offset; ssize_t new_chunk_position; + bool pm_needed = false; + int ret = 0; if (!coredump) return -ENODEV; @@ -188,20 +206,19 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, /* Ensure delayed work is captured before continuing */ flush_work(&ss->work); - if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) + pm_needed = ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX; + if (pm_needed) xe_pm_runtime_get(gt_to_xe(ss->gt)); mutex_lock(&coredump->lock); if (!ss->read.buffer) { - mutex_unlock(&coredump->lock); - return -ENODEV; + ret = -ENODEV; + goto unlock; } - if (offset >= ss->read.size) { - mutex_unlock(&coredump->lock); - return 0; - } + if (offset >= ss->read.size) + goto unlock; new_chunk_position = div_u64_rem(offset, XE_DEVCOREDUMP_CHUNK_MAX, @@ -221,12 +238,13 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, ss->read.size - offset; memcpy(buffer, ss->read.buffer + chunk_offset, byte_copied); +unlock: mutex_unlock(&coredump->lock); - if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) + if (pm_needed) xe_pm_runtime_put(gt_to_xe(ss->gt)); - return byte_copied; + return byte_copied ? 
byte_copied : ret; } static void xe_devcoredump_free(void *data) diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 6c4cb9576fb6..e3517ce2e18c 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -417,6 +417,8 @@ int xe_gt_init_early(struct xe_gt *gt) if (err) return err; + xe_mocs_init_early(gt); + return 0; } @@ -630,17 +632,15 @@ int xe_gt_init(struct xe_gt *gt) if (err) return err; - err = xe_gt_pagefault_init(gt); + err = xe_gt_sysfs_init(gt); if (err) return err; - xe_mocs_init_early(gt); - - err = xe_gt_sysfs_init(gt); + err = gt_fw_domain_init(gt); if (err) return err; - err = gt_fw_domain_init(gt); + err = xe_gt_pagefault_init(gt); if (err) return err; @@ -839,6 +839,9 @@ static int gt_reset(struct xe_gt *gt) goto err_out; } + if (IS_SRIOV_PF(gt_to_xe(gt))) + xe_gt_sriov_pf_stop_prepare(gt); + xe_uc_gucrc_disable(>->uc); xe_uc_stop_prepare(>->uc); xe_gt_pagefault_reset(gt); diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index 10622ca471a2..6717a636b1d9 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -444,6 +444,7 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue) #define PF_MULTIPLIER 8 pf_queue->num_dw = (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER; + pf_queue->num_dw = roundup_pow_of_two(pf_queue->num_dw); #undef PF_MULTIPLIER pf_queue->gt = gt; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c index c08efca6420e..35489fa81825 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c @@ -172,6 +172,25 @@ void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid) pf_clear_vf_scratch_regs(gt, vfid); } +static void pf_cancel_restart(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + + if (cancel_work_sync(>->sriov.pf.workers.restart)) + xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n"); +} + +/** + * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support. + * @gt: the &xe_gt + * + * This function can only be called on the PF. 
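The xe_gt_pagefault hunk above rounds the queue size up to a power of two; that is what keeps cheap mask-based wrapping of producer/consumer indices valid. A two-function illustration:

#include <linux/log2.h>

static u32 demo_queue_alloc_size(u32 num_dw)
{
	return roundup_pow_of_two(num_dw);	/* e.g. 1480 -> 2048 */
}

static u32 demo_queue_next(u32 idx, u32 size_pow2)
{
	return (idx + 1) & (size_pow2 - 1);	/* wrap without division */
}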
+ */ +void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt) +{ + pf_cancel_restart(gt); +} + static void pf_restart(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h index f474509411c0..e2b2ff8132dc 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h @@ -13,6 +13,7 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt); int xe_gt_sriov_pf_init(struct xe_gt *gt); void xe_gt_sriov_pf_init_hw(struct xe_gt *gt); void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid); +void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt); void xe_gt_sriov_pf_restart(struct xe_gt *gt); #else static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt) @@ -29,6 +30,10 @@ static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt) { } +static inline void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt) +{ +} + static inline void xe_gt_sriov_pf_restart(struct xe_gt *gt) { } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 2420a548cacc..53a44702c04a 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -2364,6 +2364,21 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid, return err; } +static int pf_push_self_config(struct xe_gt *gt) +{ + int err; + + err = pf_push_full_vf_config(gt, PFID); + if (err) { + xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n", + ERR_PTR(err)); + return err; + } + + xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n"); + return 0; +} + static void fini_config(void *arg) { struct xe_gt *gt = arg; @@ -2387,9 +2402,17 @@ static void fini_config(void *arg) int xe_gt_sriov_pf_config_init(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); + int err; xe_gt_assert(gt, IS_SRIOV_PF(xe)); + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + err = pf_push_self_config(gt); + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + + if (err) + return err; + return devm_add_action_or_reset(xe->drm.dev, fini_config, gt); } @@ -2407,6 +2430,10 @@ void xe_gt_sriov_pf_config_restart(struct xe_gt *gt) unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt)); unsigned int fail = 0, skip = 0; + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + pf_push_self_config(gt); + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + for (n = 1; n <= total_vfs; n++) { if (xe_gt_sriov_pf_config_is_empty(gt, n)) skip++; diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index 63db66df064b..023ed6a6b49d 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -78,6 +78,9 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level } lmtt_assert(lmtt, xe_bo_is_vram(bo)); + lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE)); + + xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, bo->size); pt->level = level; pt->bo = bo; @@ -91,6 +94,9 @@ out: static void lmtt_pt_free(struct xe_lmtt_pt *pt) { + lmtt_debug(&pt->bo->tile->sriov.pf.lmtt, "level=%u addr=%llx\n", + pt->level, (u64)xe_bo_main_addr(pt->bo, XE_PAGE_SIZE)); + xe_bo_unpin_map_no_vm(pt->bo); kfree(pt); } @@ -226,9 +232,14 @@ static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt, switch (lmtt->ops->lmtt_pte_size(level)) { case sizeof(u32): + lmtt_assert(lmtt, !overflows_type(pte, u32)); + lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32)); 
+ xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte); break; case sizeof(u64): + lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64)); + xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte); break; default: diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 7acdc4c78866..07a5161c7d5b 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -863,7 +863,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it)) xe_res_next(&src_it, src_L0); else - emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs, + emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat, &src_it, src_L0, src); if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it)) @@ -1817,8 +1817,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, xe_bo_assert_held(bo); /* Use bounce buffer for small access and unaligned access */ - if (len & XE_CACHELINE_MASK || - ((uintptr_t)buf | offset) & XE_CACHELINE_MASK) { + if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) || + !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) { int buf_offset = 0; /* @@ -1848,7 +1848,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, err = xe_migrate_access_memory(m, bo, offset & ~XE_CACHELINE_MASK, (void *)ptr, - sizeof(bounce), 0); + sizeof(bounce), write); if (err) return err; } else { diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c index e4742e27e2cd..da6793c2f991 100644 --- a/drivers/gpu/drm/xe/xe_module.c +++ b/drivers/gpu/drm/xe/xe_module.c @@ -20,7 +20,7 @@ struct xe_modparam xe_modparam = { .probe_display = true, - .guc_log_level = 3, + .guc_log_level = IS_ENABLED(CONFIG_DRM_XE_DEBUG) ? 3 : 1, .force_probe = CONFIG_DRM_XE_FORCE_PROBE, .wedged_mode = 1, .svm_notifier_size = 512, diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index ac4beaed58ff..278af53c74dc 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -140,7 +140,6 @@ static const struct xe_graphics_desc graphics_xelpg = { .has_asid = 1, \ .has_atomic_enable_pte_bit = 1, \ .has_flat_ccs = 1, \ - .has_indirect_ring_state = 1, \ .has_range_tlb_invalidation = 1, \ .has_usm = 1, \ .has_64bit_timestamp = 1, \ diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index ff749edc005b..ad263de44111 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -134,7 +134,7 @@ int xe_pm_suspend(struct xe_device *xe) /* FIXME: Super racey... */ err = xe_bo_evict_all(xe); if (err) - goto err_pxp; + goto err_display; for_each_gt(gt, xe, id) { err = xe_gt_suspend(gt); @@ -151,7 +151,6 @@ int xe_pm_suspend(struct xe_device *xe) err_display: xe_display_pm_resume(xe); -err_pxp: xe_pxp_pm_resume(xe->pxp); err: drm_dbg(&xe->drm, "Device suspend failed %d\n", err); @@ -753,11 +752,13 @@ void xe_pm_assert_unbounded_bridge(struct xe_device *xe) } /** - * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold + * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold * @xe: xe device instance - * @threshold: VRAM size in bites for the D3cold threshold + * @threshold: VRAM size in MiB for the D3cold threshold * - * Returns 0 for success, negative error code otherwise. 
+ * Return: + * * 0 - success + * * -EINVAL - invalid argument */ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold) { diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index bc1689db4cd7..7b50c7c1ee21 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -110,13 +110,14 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i) return i; } -static int emit_flush_invalidate(u32 *dw, int i) +static int emit_flush_invalidate(u32 addr, u32 val, u32 *dw, int i) { dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW | - MI_FLUSH_IMM_DW | MI_FLUSH_DW_STORE_INDEX; - dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR; - dw[i++] = 0; + MI_FLUSH_IMM_DW; + + dw[i++] = addr | MI_FLUSH_DW_USE_GTT; dw[i++] = 0; + dw[i++] = val; return i; } @@ -397,23 +398,20 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job, static void emit_migration_job_gen12(struct xe_sched_job *job, struct xe_lrc *lrc, u32 seqno) { + u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc); u32 dw[MAX_JOB_SIZE_DW], i = 0; i = emit_copy_timestamp(lrc, dw, i); - i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc), - seqno, dw, i); + i = emit_store_imm_ggtt(saddr, seqno, dw, i); dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */ i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i); - if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) { - /* XXX: Do we need this? Leaving for now. */ - dw[i++] = preparser_disable(true); - i = emit_flush_invalidate(dw, i); - dw[i++] = preparser_disable(false); - } + dw[i++] = preparser_disable(true); + i = emit_flush_invalidate(saddr, seqno, dw, i); + dw[i++] = preparser_disable(false); i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i); diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index 2741849bbf4d..a6612105201a 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -114,10 +114,10 @@ struct fw_blobs_by_type { #define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED #define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \ - fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 44, 1)) \ - fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 44, 1)) \ + fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 45, 2)) \ + fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 45, 2)) \ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \ - fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 44, 1)) \ + fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 45, 2)) \ fw_def(DG1, GT_TYPE_ANY, major_ver(i915, guc, dg1, 70, 44, 1)) \ fw_def(ALDERLAKE_N, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \ fw_def(ALDERLAKE_P, GT_TYPE_ANY, major_ver(i915, guc, adlp, 70, 44, 1)) \ diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index 69c1d7fc695e..6d70109fcc43 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -38,10 +38,10 @@ GRAPHICS_VERSION(2004) GRAPHICS_VERSION_RANGE(3000, 3001) 22019338487 MEDIA_VERSION(2000) - GRAPHICS_VERSION(2001) + GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf) MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf) 22019338487_display PLATFORM(LUNARLAKE) -16023588340 GRAPHICS_VERSION(2001) +16023588340 GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf) 14019789679 GRAPHICS_VERSION(1255) GRAPHICS_VERSION_RANGE(1270, 2004) no_media_l3 
MEDIA_VERSION(3000) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index b348d0464314..b31b8a2fd540 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1883,9 +1883,12 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) /* * 7 extra bytes are necessary to achieve proper functionality * of implement() working on 8 byte chunks + * 1 extra byte for the report ID if it is null (not used) so + * we can reserve that extra byte in the first position of the buffer + * when sending it to .raw_request() */ - u32 len = hid_report_len(report) + 7; + u32 len = hid_report_len(report) + 7 + (report->id == 0); return kzalloc(len, flags); } @@ -1973,7 +1976,7 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum, int __hid_request(struct hid_device *hid, struct hid_report *report, enum hid_class_request reqtype) { - char *buf; + char *buf, *data_buf; int ret; u32 len; @@ -1981,13 +1984,19 @@ int __hid_request(struct hid_device *hid, struct hid_report *report, if (!buf) return -ENOMEM; + data_buf = buf; len = hid_report_len(report); + if (report->id == 0) { + /* reserve the first byte for the report ID */ + data_buf++; + len++; + } + if (reqtype == HID_REQ_SET_REPORT) - hid_output_report(report, buf); + hid_output_report(report, data_buf); - ret = hid->ll_driver->raw_request(hid, report->id, buf, len, - report->type, reqtype); + ret = hid_hw_raw_request(hid, report->id, buf, len, report->type, reqtype); if (ret < 0) { dbg_hid("unable to complete request: %d\n", ret); goto out; diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index c6b6b1029540..4424c0512bae 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -3299,7 +3299,7 @@ static const char *keys[KEY_MAX + 1] = { [BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap", [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_TOOL_QUADTAP] = "ToolQuadrupleTap", [BTN_GEAR_DOWN] = "BtnGearDown", [BTN_GEAR_UP] = "BtnGearUp", - [BTN_WHEEL] = "BtnWheel", [KEY_OK] = "Ok", + [KEY_OK] = "Ok", [KEY_SELECT] = "Select", [KEY_GOTO] = "Goto", [KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2", [KEY_OPTION] = "Option", [KEY_INFO] = "Info", diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index ec84ba5e93e5..ff7595caf440 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -2742,7 +2742,11 @@ static unsigned long __evict_a_few(unsigned long nr_buffers) __make_buffer_clean(b); __free_buffer_wake(b); - cond_resched(); + if (need_resched()) { + dm_bufio_unlock(c); + cond_resched(); + dm_bufio_lock(c); + } } dm_bufio_unlock(c); diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index bd694910b01b..7f524a26cebc 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -2366,8 +2366,7 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats) if (!bitmap) return -ENOENT; - if (!bitmap->mddev->bitmap_info.external && - !bitmap->storage.sb_page) + if (!bitmap->storage.sb_page) return -EINVAL; sb = kmap_local_page(bitmap->storage.sb_page); stats->sync_size = le64_to_cpu(sb->sync_size); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 19c5a0ce5a40..64b8176907a9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1399,7 +1399,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, } read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp, &mddev->bio_set); - + read_bio->bi_opf &= ~REQ_NOWAIT; r1_bio->bios[rdisk] = read_bio; read_bio->bi_iter.bi_sector = r1_bio->sector + @@ -1649,6 +1649,7 
@@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, wait_for_serialization(rdev, r1_bio); } + mbio->bi_opf &= ~REQ_NOWAIT; r1_bio->bios[i] = mbio; mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset); @@ -3428,6 +3429,7 @@ static int raid1_reshape(struct mddev *mddev) /* ok, everything is stopped */ oldpool = conf->r1bio_pool; conf->r1bio_pool = newpool; + init_waitqueue_head(&conf->r1bio_pool.wait); for (d = d2 = 0; d < conf->raid_disks; d++) { struct md_rdev *rdev = conf->mirrors[d].rdev; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index b74780af4c22..c9bd2005bfd0 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1182,8 +1182,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, } } - if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) + if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) { + raid_end_bio_io(r10_bio); return; + } + rdev = read_balance(conf, r10_bio, &max_sectors); if (!rdev) { if (err_rdev) { @@ -1221,6 +1224,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, r10_bio->master_bio = bio; } read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set); + read_bio->bi_opf &= ~REQ_NOWAIT; r10_bio->devs[slot].bio = read_bio; r10_bio->devs[slot].rdev = rdev; @@ -1256,6 +1260,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, conf->mirrors[devnum].rdev; mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set); + mbio->bi_opf &= ~REQ_NOWAIT; if (replacement) r10_bio->devs[n_copy].repl_bio = mbio; else @@ -1370,8 +1375,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, } sectors = r10_bio->sectors; - if (!regular_request_wait(mddev, conf, bio, sectors)) + if (!regular_request_wait(mddev, conf, bio, sectors)) { + raid_end_bio_io(r10_bio); return; + } + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && (mddev->reshape_backwards ? 
(bio->bi_iter.bi_sector < conf->reshape_safe && diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index 043b9ec756ff..7f3f47db4c98 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -324,7 +324,7 @@ EXPORT_SYMBOL(memstick_init_req); static int h_memstick_read_dev_id(struct memstick_dev *card, struct memstick_request **mrq) { - struct ms_id_register id_reg; + struct ms_id_register id_reg = {}; if (!(*mrq)) { memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg, diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index def054ddd256..4fced9b36c80 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -503,7 +503,8 @@ void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data) DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { - dma_unmap_sg(dma_chan->device->dev, data->sg, sg_len, dir_data); + dma_unmap_sg(dma_chan->device->dev, data->sg, data->sg_len, + dir_data); return; } diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 13a84b9309e0..e3877a1c72a9 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -913,7 +913,8 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) { return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && (dmi_match(DMI_BIOS_VENDOR, "LENOVO") || - dmi_match(DMI_SYS_VENDOR, "IRBIS")); + dmi_match(DMI_SYS_VENDOR, "IRBIS") || + dmi_match(DMI_SYS_VENDOR, "Positivo Tecnologia SA")); } static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot) diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 73385ff4c0f3..9e94998e8df7 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -613,7 +613,8 @@ static const struct sdhci_ops sdhci_am654_ops = { static const struct sdhci_pltfm_data sdhci_am654_pdata = { .ops = &sdhci_am654_ops, .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_DISABLE_HW_TIMEOUT, }; static const struct sdhci_am654_driver_data sdhci_am654_sr1_drvdata = { @@ -643,7 +644,8 @@ static const struct sdhci_ops sdhci_j721e_8bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = { .ops = &sdhci_j721e_8bit_ops, .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_DISABLE_HW_TIMEOUT, }; static const struct sdhci_am654_driver_data sdhci_j721e_8bit_drvdata = { @@ -667,7 +669,8 @@ static const struct sdhci_ops sdhci_j721e_4bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = { .ops = &sdhci_j721e_4bit_ops, .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_DISABLE_HW_TIMEOUT, }; static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = { diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 7493e5aa984c..895fb163d48e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -381,12 +381,12 @@ static void nvme_log_err_passthru(struct request *req) nr->status & NVME_SC_MASK, /* Status Code */ nr->status & NVME_STATUS_MORE ? "MORE " : "", nr->status & NVME_STATUS_DNR ? 
"DNR " : "", - nr->cmd->common.cdw10, - nr->cmd->common.cdw11, - nr->cmd->common.cdw12, - nr->cmd->common.cdw13, - nr->cmd->common.cdw14, - nr->cmd->common.cdw15); + le32_to_cpu(nr->cmd->common.cdw10), + le32_to_cpu(nr->cmd->common.cdw11), + le32_to_cpu(nr->cmd->common.cdw12), + le32_to_cpu(nr->cmd->common.cdw13), + le32_to_cpu(nr->cmd->common.cdw14), + le32_to_cpu(nr->cmd->common.cdw15)); } enum nvme_disposition { @@ -764,6 +764,10 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) return BLK_STS_RESOURCE; + + if (!(rq->rq_flags & RQF_DONTPREP)) + nvme_clear_nvme_request(rq); + return nvme_host_path_error(rq); } EXPORT_SYMBOL_GPL(nvme_fail_nonready_command); @@ -3537,15 +3541,6 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) if (ret) goto out_free; } - - if (le16_to_cpu(id->awupf) != ctrl->subsys->awupf) { - dev_err_ratelimited(ctrl->device, - "inconsistent AWUPF, controller not added (%u/%u).\n", - le16_to_cpu(id->awupf), ctrl->subsys->awupf); - ret = -EINVAL; - goto out_free; - } - memcpy(ctrl->subsys->firmware_rev, id->fr, sizeof(ctrl->subsys->firmware_rev)); @@ -4077,7 +4072,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) return; } } - list_add(&ns->list, &ns->ctrl->namespaces); + list_add_rcu(&ns->list, &ns->ctrl->namespaces); } static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 688033b88d38..470bf37e5a63 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -1928,10 +1928,10 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, struct sock *sk = queue->sock->sk; /* Restore the default callbacks before starting upcall */ - read_lock_bh(&sk->sk_callback_lock); + write_lock_bh(&sk->sk_callback_lock); sk->sk_user_data = NULL; sk->sk_data_ready = port->data_ready; - read_unlock_bh(&sk->sk_callback_lock); + write_unlock_bh(&sk->sk_callback_lock); if (!nvmet_tcp_try_peek_pdu(queue)) { if (!nvmet_tcp_tls_handshake(queue)) return; diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index b0992325dd65..b37052863847 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -64,13 +64,13 @@ int pci_host_common_init(struct platform_device *pdev, of_pci_check_probe_only(); + platform_set_drvdata(pdev, bridge); + /* Parse and map our Configuration Space windows */ cfg = gen_pci_init(dev, bridge, ops); if (IS_ERR(cfg)) return PTR_ERR(cfg); - platform_set_drvdata(pdev, bridge); - bridge->sysdata = cfg; bridge->ops = (struct pci_ops *)&ops->pci_ops; bridge->enable_device = ops->enable_device; diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index 77fe73976654..0380d300adca 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -187,6 +187,7 @@ struct apple_pcie { const struct hw_info *hw; unsigned long *bitmap; struct list_head ports; + struct list_head entry; struct completion event; struct irq_fwspec fwspec; u32 nvecs; @@ -205,6 +206,9 @@ struct apple_pcie_port { int idx; }; +static LIST_HEAD(pcie_list); +static DEFINE_MUTEX(pcie_list_lock); + static void rmw_set(u32 set, void __iomem *addr) { writel_relaxed(readl_relaxed(addr) | set, addr); @@ -720,13 +724,45 @@ static int apple_msi_init(struct apple_pcie *pcie) return 0; } +static void 
apple_pcie_register(struct apple_pcie *pcie) +{ + guard(mutex)(&pcie_list_lock); + + list_add_tail(&pcie->entry, &pcie_list); +} + +static void apple_pcie_unregister(struct apple_pcie *pcie) +{ + guard(mutex)(&pcie_list_lock); + + list_del(&pcie->entry); +} + +static struct apple_pcie *apple_pcie_lookup(struct device *dev) +{ + struct apple_pcie *pcie; + + guard(mutex)(&pcie_list_lock); + + list_for_each_entry(pcie, &pcie_list, entry) { + if (pcie->dev == dev) + return pcie; + } + + return NULL; +} + static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev) { struct pci_config_window *cfg = pdev->sysdata; - struct apple_pcie *pcie = cfg->priv; + struct apple_pcie *pcie; struct pci_dev *port_pdev; struct apple_pcie_port *port; + pcie = apple_pcie_lookup(cfg->parent); + if (WARN_ON(!pcie)) + return NULL; + /* Find the root port this device is on */ port_pdev = pcie_find_root_port(pdev); @@ -806,10 +842,14 @@ static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci static int apple_pcie_init(struct pci_config_window *cfg) { - struct apple_pcie *pcie = cfg->priv; struct device *dev = cfg->parent; + struct apple_pcie *pcie; int ret; + pcie = apple_pcie_lookup(dev); + if (WARN_ON(!pcie)) + return -ENOENT; + for_each_available_child_of_node_scoped(dev->of_node, of_port) { ret = apple_pcie_setup_port(pcie, of_port); if (ret) { @@ -852,13 +892,18 @@ static int apple_pcie_probe(struct platform_device *pdev) mutex_init(&pcie->lock); INIT_LIST_HEAD(&pcie->ports); - dev_set_drvdata(dev, pcie); ret = apple_msi_init(pcie); if (ret) return ret; - return pci_host_common_init(pdev, &apple_pcie_cfg_ecam_ops); + apple_pcie_register(pcie); + + ret = pci_host_common_init(pdev, &apple_pcie_cfg_ecam_ops); + if (ret) + apple_pcie_unregister(pcie); + + return ret; } static const struct of_device_id apple_pcie_of_match[] = { diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c index 2c5e6446e00e..260b7de2dbd5 100644 --- a/drivers/pci/ecam.c +++ b/drivers/pci/ecam.c @@ -84,8 +84,6 @@ struct pci_config_window *pci_ecam_create(struct device *dev, goto err_exit_iomap; } - cfg->priv = dev_get_drvdata(dev); - if (ops->init) { err = ops->init(cfg); if (err) diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index 6ede55a7c5e6..d686488f4111 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -934,10 +934,12 @@ int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag) if (!pdev->msix_enabled) return -ENXIO; - guard(msi_descs_lock)(&pdev->dev); virq = msi_get_virq(&pdev->dev, index); if (!virq) return -ENXIO; + + guard(msi_descs_lock)(&pdev->dev); + /* * This is a horrible hack, but short of implementing a PCI * specific interrupt chip callback and a huge pile of diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 8e2daea81666..04a5a34e7a95 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -994,7 +994,8 @@ struct phy *phy_create(struct device *dev, struct device_node *node, } device_initialize(&phy->dev); - mutex_init(&phy->mutex); + lockdep_register_key(&phy->lockdep_key); + mutex_init_with_key(&phy->mutex, &phy->lockdep_key); phy->dev.class = &phy_class; phy->dev.parent = dev; @@ -1259,6 +1260,8 @@ static void phy_release(struct device *dev) dev_vdbg(dev, "releasing '%s'\n", dev_name(dev)); debugfs_remove_recursive(phy->debugfs); regulator_put(phy->pwr); + mutex_destroy(&phy->mutex); + lockdep_unregister_key(&phy->lockdep_key); ida_free(&phy_ida, phy->id); kfree(phy); } diff --git a/drivers/phy/phy-snps-eusb2.c 
b/drivers/phy/phy-snps-eusb2.c index b73a1d7e57b3..751b6d8ba2be 100644 --- a/drivers/phy/phy-snps-eusb2.c +++ b/drivers/phy/phy-snps-eusb2.c @@ -567,9 +567,11 @@ static int snps_eusb2_hsphy_probe(struct platform_device *pdev) } } - if (IS_ERR_OR_NULL(phy->ref_clk)) - return dev_err_probe(dev, PTR_ERR(phy->ref_clk), + if (IS_ERR_OR_NULL(phy->ref_clk)) { + ret = phy->ref_clk ? PTR_ERR(phy->ref_clk) : -ENOENT; + return dev_err_probe(dev, ret, "failed to get ref clk\n"); + } num = ARRAY_SIZE(phy->vregs); for (i = 0; i < num; i++) diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c index 23a23f2d64e5..e818f6c3980e 100644 --- a/drivers/phy/tegra/xusb-tegra186.c +++ b/drivers/phy/tegra/xusb-tegra186.c @@ -648,14 +648,15 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl) udelay(100); } - if (padctl->soc->trk_hw_mode) { - value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2); - value |= USB2_TRK_HW_MODE; + value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2); + if (padctl->soc->trk_update_on_idle) value &= ~CYA_TRK_CODE_UPDATE_ON_IDLE; - padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2); - } else { + if (padctl->soc->trk_hw_mode) + value |= USB2_TRK_HW_MODE; + padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2); + + if (!padctl->soc->trk_hw_mode) clk_disable_unprepare(priv->usb2_trk_clk); - } } static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl) @@ -782,13 +783,15 @@ static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl, } static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl, - bool status) + struct tegra_xusb_usb2_port *port, bool status) { - u32 value; + u32 value, id_override; + int err = 0; dev_dbg(padctl->dev, "%s id override\n", status ? "set" : "clear"); value = padctl_readl(padctl, USB2_VBUS_ID); + id_override = value & ID_OVERRIDE(~0); if (status) { if (value & VBUS_OVERRIDE) { @@ -799,15 +802,35 @@ static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl, value = padctl_readl(padctl, USB2_VBUS_ID); } - value &= ~ID_OVERRIDE(~0); - value |= ID_OVERRIDE_GROUNDED; + if (id_override != ID_OVERRIDE_GROUNDED) { + value &= ~ID_OVERRIDE(~0); + value |= ID_OVERRIDE_GROUNDED; + padctl_writel(padctl, value, USB2_VBUS_ID); + + err = regulator_enable(port->supply); + if (err) { + dev_err(padctl->dev, "Failed to enable regulator: %d\n", err); + return err; + } + } } else { - value &= ~ID_OVERRIDE(~0); - value |= ID_OVERRIDE_FLOATING; + if (id_override == ID_OVERRIDE_GROUNDED) { + /* + * The regulator is disabled only when the role transitions + * from USB_ROLE_HOST to USB_ROLE_NONE. 
+ */ + err = regulator_disable(port->supply); + if (err) { + dev_err(padctl->dev, "Failed to disable regulator: %d\n", err); + return err; + } + + value &= ~ID_OVERRIDE(~0); + value |= ID_OVERRIDE_FLOATING; + padctl_writel(padctl, value, USB2_VBUS_ID); + } } - padctl_writel(padctl, value, USB2_VBUS_ID); - return 0; } @@ -826,27 +849,20 @@ static int tegra186_utmi_phy_set_mode(struct phy *phy, enum phy_mode mode, if (mode == PHY_MODE_USB_OTG) { if (submode == USB_ROLE_HOST) { - tegra186_xusb_padctl_id_override(padctl, true); - - err = regulator_enable(port->supply); + err = tegra186_xusb_padctl_id_override(padctl, port, true); + if (err) + goto out; } else if (submode == USB_ROLE_DEVICE) { tegra186_xusb_padctl_vbus_override(padctl, true); } else if (submode == USB_ROLE_NONE) { - /* - * When port is peripheral only or role transitions to - * USB_ROLE_NONE from USB_ROLE_DEVICE, regulator is not - * enabled. - */ - if (regulator_is_enabled(port->supply)) - regulator_disable(port->supply); - - tegra186_xusb_padctl_id_override(padctl, false); + err = tegra186_xusb_padctl_id_override(padctl, port, false); + if (err) + goto out; tegra186_xusb_padctl_vbus_override(padctl, false); } } - +out: mutex_unlock(&padctl->lock); - return err; } @@ -1710,7 +1726,8 @@ const struct tegra_xusb_padctl_soc tegra234_xusb_padctl_soc = { .num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names), .supports_gen2 = true, .poll_trk_completed = true, - .trk_hw_mode = true, + .trk_hw_mode = false, + .trk_update_on_idle = true, .supports_lp_cfg_en = true, }; EXPORT_SYMBOL_GPL(tegra234_xusb_padctl_soc); diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h index 6e45d194c689..d2b5f9565132 100644 --- a/drivers/phy/tegra/xusb.h +++ b/drivers/phy/tegra/xusb.h @@ -434,6 +434,7 @@ struct tegra_xusb_padctl_soc { bool need_fake_usb3_port; bool poll_trk_completed; bool trk_hw_mode; + bool trk_update_on_idle; bool supports_lp_cfg_en; }; diff --git a/drivers/pmdomain/governor.c b/drivers/pmdomain/governor.c index c1e148657c87..39359811a930 100644 --- a/drivers/pmdomain/governor.c +++ b/drivers/pmdomain/governor.c @@ -8,6 +8,7 @@ #include <linux/pm_domain.h> #include <linux/pm_qos.h> #include <linux/hrtimer.h> +#include <linux/cpu.h> #include <linux/cpuidle.h> #include <linux/cpumask.h> #include <linux/ktime.h> @@ -349,6 +350,8 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) struct cpuidle_device *dev; ktime_t domain_wakeup, next_hrtimer; ktime_t now = ktime_get(); + struct device *cpu_dev; + s64 cpu_constraint, global_constraint; s64 idle_duration_ns; int cpu, i; @@ -359,6 +362,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN)) return true; + global_constraint = cpu_latency_qos_limit(); /* * Find the next wakeup for any of the online CPUs within the PM domain * and its subdomains. Note, we only need the genpd->cpus, as it already @@ -372,8 +376,16 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) if (ktime_before(next_hrtimer, domain_wakeup)) domain_wakeup = next_hrtimer; } + + cpu_dev = get_cpu_device(cpu); + if (cpu_dev) { + cpu_constraint = dev_pm_qos_raw_resume_latency(cpu_dev); + if (cpu_constraint < global_constraint) + global_constraint = cpu_constraint; + } } + global_constraint *= NSEC_PER_USEC; /* The minimum idle duration is from now - until the next wakeup. 
*/ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now)); if (idle_duration_ns <= 0) @@ -389,8 +401,10 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) */ i = genpd->state_idx; do { - if (idle_duration_ns >= (genpd->states[i].residency_ns + - genpd->states[i].power_off_latency_ns)) { + if ((idle_duration_ns >= (genpd->states[i].residency_ns + + genpd->states[i].power_off_latency_ns)) && + (global_constraint >= (genpd->states[i].power_on_latency_ns + + genpd->states[i].power_off_latency_ns))) { genpd->state_idx = i; genpd->gd->last_enter = now; genpd->gd->reflect_residency = true; diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c index ef8f355589a5..fc3a2c41cc10 100644 --- a/drivers/soc/aspeed/aspeed-lpc-snoop.c +++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c @@ -58,6 +58,7 @@ struct aspeed_lpc_snoop_model_data { }; struct aspeed_lpc_snoop_channel { + bool enabled; struct kfifo fifo; wait_queue_head_t wq; struct miscdevice miscdev; @@ -190,6 +191,9 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop, const struct aspeed_lpc_snoop_model_data *model_data = of_device_get_match_data(dev); + if (WARN_ON(lpc_snoop->chan[channel].enabled)) + return -EBUSY; + init_waitqueue_head(&lpc_snoop->chan[channel].wq); /* Create FIFO datastructure */ rc = kfifo_alloc(&lpc_snoop->chan[channel].fifo, @@ -236,6 +240,8 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop, regmap_update_bits(lpc_snoop->regmap, HICRB, hicrb_en, hicrb_en); + lpc_snoop->chan[channel].enabled = true; + return 0; err_misc_deregister: @@ -248,6 +254,9 @@ err_free_fifo: static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop, int channel) { + if (!lpc_snoop->chan[channel].enabled) + return; + switch (channel) { case 0: regmap_update_bits(lpc_snoop->regmap, HICR5, @@ -263,8 +272,10 @@ static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop, return; } - kfifo_free(&lpc_snoop->chan[channel].fifo); + lpc_snoop->chan[channel].enabled = false; + /* Consider improving safety wrt concurrent reader(s) */ misc_deregister(&lpc_snoop->chan[channel].miscdev); + kfifo_free(&lpc_snoop->chan[channel].fifo); } static int aspeed_lpc_snoop_probe(struct platform_device *pdev) diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c index a12c68b93b1c..7a671a786197 100644 --- a/drivers/soundwire/amd_manager.c +++ b/drivers/soundwire/amd_manager.c @@ -238,7 +238,7 @@ static u64 amd_sdw_send_cmd_get_resp(struct amd_sdw_manager *amd_manager, u32 lo if (sts & AMD_SDW_IMM_RES_VALID) { dev_err(amd_manager->dev, "SDW%x manager is in bad state\n", amd_manager->instance); - writel(0x00, amd_manager->mmio + ACP_SW_IMM_CMD_STS); + writel(AMD_SDW_IMM_RES_VALID, amd_manager->mmio + ACP_SW_IMM_CMD_STS); } writel(upper_data, amd_manager->mmio + ACP_SW_IMM_CMD_UPPER_WORD); writel(lower_data, amd_manager->mmio + ACP_SW_IMM_CMD_LOWER_QWORD); @@ -1209,6 +1209,7 @@ static int __maybe_unused amd_suspend(struct device *dev) } if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) { + cancel_work_sync(&amd_manager->amd_sdw_work); amd_sdw_wake_enable(amd_manager, false); if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) { ret = amd_sdw_host_wake_enable(amd_manager, false); @@ -1219,6 +1220,7 @@ static int __maybe_unused amd_suspend(struct device *dev) if (ret) return ret; } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) { + cancel_work_sync(&amd_manager->amd_sdw_work); amd_sdw_wake_enable(amd_manager, false); if 
(amd_manager->acp_rev >= ACP70_PCI_REV_ID) { ret = amd_sdw_host_wake_enable(amd_manager, false); diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index 295a46dc2be7..0f45e3404756 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -156,7 +156,6 @@ struct qcom_swrm_port_config { u8 word_length; u8 blk_group_count; u8 lane_control; - u8 ch_mask; }; /* @@ -1049,13 +1048,9 @@ static int qcom_swrm_port_enable(struct sdw_bus *bus, { u32 reg = SWRM_DP_PORT_CTRL_BANK(enable_ch->port_num, bank); struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus); - struct qcom_swrm_port_config *pcfg; u32 val; - pcfg = &ctrl->pconfig[enable_ch->port_num]; ctrl->reg_read(ctrl, reg, &val); - if (pcfg->ch_mask != SWR_INVALID_PARAM && pcfg->ch_mask != 0) - enable_ch->ch_mask = pcfg->ch_mask; if (enable_ch->enable) val |= (enable_ch->ch_mask << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT); @@ -1275,26 +1270,6 @@ static void *qcom_swrm_get_sdw_stream(struct snd_soc_dai *dai, int direction) return ctrl->sruntime[dai->id]; } -static int qcom_swrm_set_channel_map(struct snd_soc_dai *dai, - unsigned int tx_num, const unsigned int *tx_slot, - unsigned int rx_num, const unsigned int *rx_slot) -{ - struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev); - int i; - - if (tx_slot) { - for (i = 0; i < tx_num; i++) - ctrl->pconfig[i].ch_mask = tx_slot[i]; - } - - if (rx_slot) { - for (i = 0; i < rx_num; i++) - ctrl->pconfig[i].ch_mask = rx_slot[i]; - } - - return 0; -} - static int qcom_swrm_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { @@ -1331,7 +1306,6 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = { .shutdown = qcom_swrm_shutdown, .set_stream = qcom_swrm_set_sdw_stream, .get_stream = qcom_swrm_get_sdw_stream, - .set_channel_map = qcom_swrm_set_channel_map, }; static const struct snd_soc_component_driver qcom_swrm_dai_component = {
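
A standalone sketch of the chunked-read arithmetic described in the xe_devcoredump_read() kernel-doc above: a flat offset into the coredump is split into a chunk index and an offset within that chunk, mirroring the kernel's div_u64_rem(offset, XE_DEVCOREDUMP_CHUNK_MAX, &chunk_offset). This is illustrative userspace code, not the driver's implementation; the 1.5 GiB constant follows the comment, and the sample offset is made up.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_MAX ((uint64_t)1536 << 20)	/* 1.5 GiB, per the kernel-doc */

int main(void)
{
	uint64_t offset = CHUNK_MAX + 4096;		/* hypothetical read position */
	uint64_t chunk_index = offset / CHUNK_MAX;	/* which repopulation of the buffer */
	uint32_t chunk_offset = (uint32_t)(offset % CHUNK_MAX);	/* position inside it */

	printf("chunk=%" PRIu64 " offset-in-chunk=%" PRIu32 "\n",
	       chunk_index, chunk_offset);
	return 0;
}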
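
The xe_gt_pagefault.c hunk rounds pf_queue->num_dw up with roundup_pow_of_two(). A plausible reason, sketched below under that assumption, is that a power-of-two queue size lets the circular-buffer indices wrap with a cheap mask rather than a modulo. The helper and the operand values here are local stand-ins, not the kernel's.

#include <stdio.h>

/* Local stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int roundup_p2(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	/* Hypothetical (num_eus + engines) * msg_len * multiplier product. */
	unsigned int num_dw = (96 + 8) * 4 * 8;		/* 3328, not a power of two */
	unsigned int size = roundup_p2(num_dw);		/* 4096 */

	/* With a power-of-two size, wraparound is a single AND. */
	unsigned int tail = size - 2;
	unsigned int next = (tail + 4) & (size - 1);	/* wraps to 2 */

	printf("num_dw=%u size=%u next=%u\n", num_dw, size, next);
	return 0;
}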
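
The dm-bufio.c hunk stops calling cond_resched() unconditionally inside the eviction loop and instead cycles the bufio lock only when need_resched() reports a pending reschedule. A userspace analogue of that pattern, with a pthread mutex standing in for dm_bufio_lock()/dm_bufio_unlock() and sched_yield() for cond_resched():

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for need_resched(); pretend a reschedule is due every 64 loops. */
static bool reschedule_pending(int iteration)
{
	return (iteration % 64) == 0;
}

static void evict_a_few(int nr)
{
	pthread_mutex_lock(&lock);
	for (int i = 1; i <= nr; i++) {
		/* ... evict one buffer while holding the lock ... */

		/*
		 * Mirror of the fix: drop and retake the lock only when
		 * someone actually needs the CPU, instead of yielding on
		 * every iteration while still holding the lock.
		 */
		if (reschedule_pending(i)) {
			pthread_mutex_unlock(&lock);
			sched_yield();	/* userspace cond_resched() analogue */
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	evict_a_few(256);
	return 0;
}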
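
The pmdomain governor hunk makes cpu_power_down_ok() honor per-CPU resume-latency QoS: the effective limit is the minimum of the global cpu_latency_qos_limit() and each online CPU's dev_pm_qos_raw_resume_latency(), scaled to nanoseconds before it is compared against a domain state's power_off + power_on latency. A minimal sketch of that reduction, with made-up constraint and latency values:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000LL

int main(void)
{
	/* Hypothetical constraints, in microseconds. */
	int64_t global_constraint = 2000;		/* cpu_latency_qos_limit() */
	const int64_t per_cpu[] = { 5000, 800, 3000 };	/* per-CPU resume latencies */

	for (unsigned int i = 0; i < sizeof(per_cpu) / sizeof(per_cpu[0]); i++)
		if (per_cpu[i] < global_constraint)
			global_constraint = per_cpu[i];

	global_constraint *= NSEC_PER_USEC;

	/* A state is usable only if its off+on latency fits the constraint. */
	int64_t power_off_latency_ns = 300000;
	int64_t power_on_latency_ns = 400000;
	int allowed = global_constraint >=
		      power_off_latency_ns + power_on_latency_ns;

	printf("constraint=%lld ns, state %s\n",
	       (long long)global_constraint, allowed ? "allowed" : "rejected");
	return 0;
}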