Diffstat (limited to 'drivers')
535 files changed, 7990 insertions, 7027 deletions
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a85c351589be..b62c87b8ce4a 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -998,7 +998,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
 {
 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
-	struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx];
+	struct cpc_register_resource *reg;
+
+	if (!cpc_desc) {
+		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+		return -ENODEV;
+	}
+
+	reg = &cpc_desc->cpc_regs[reg_idx];
 
 	if (CPC_IN_PCC(reg)) {
 		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 7cd0009e7ff3..ef104809f27b 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -347,28 +347,3 @@ void acpi_device_notify_remove(struct device *dev)
 
 	acpi_unbind_one(dev);
 }
-
-int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used)
-{
-	struct acpi_device *adev = to_acpi_device(dev);
-
-	/*
-	 * Skip device objects with device IDs, because they may be in use even
-	 * if they are not companions of any physical device objects.
-	 */
-	if (adev->pnp.type.hardware_id)
-		return 0;
-
-	mutex_lock(&adev->physical_node_lock);
-
-	/*
-	 * Device objects without device IDs are not in use if they have no
-	 * corresponding physical device objects.
-	 */
-	if (list_empty(&adev->physical_node_list))
-		acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
-
-	mutex_unlock(&adev->physical_node_lock);
-
-	return 0;
-}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 8fbdc172864b..d91b560e8867 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -117,7 +117,6 @@ bool acpi_device_is_battery(struct acpi_device *adev);
 bool acpi_device_is_first_physical_node(struct acpi_device *adev,
 					const struct device *dev);
 int acpi_bus_register_early_device(int type);
-int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used);
 
 /* --------------------------------------------------------------------------
                         Device Matching and Notification
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e312ebaed8db..2366f54d8e9c 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1084,21 +1084,17 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
 * Returns parent node of an ACPI device or data firmware node or %NULL if
 * not available.
*/ -struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode) +static struct fwnode_handle * +acpi_node_get_parent(const struct fwnode_handle *fwnode) { if (is_acpi_data_node(fwnode)) { /* All data nodes have parent pointer so just return that */ return to_acpi_data_node(fwnode)->parent; } else if (is_acpi_device_node(fwnode)) { - acpi_handle handle, parent_handle; - - handle = to_acpi_device_node(fwnode)->handle; - if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) { - struct acpi_device *adev; + struct device *dev = to_acpi_device_node(fwnode)->dev.parent; - if (!acpi_bus_get_device(parent_handle, &adev)) - return acpi_fwnode_handle(adev); - } + if (dev) + return acpi_fwnode_handle(to_acpi_device(dev)); } return NULL; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index a50f1967c73d..2c80765670bc 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2564,12 +2564,6 @@ int __init acpi_scan_init(void) } } - /* - * Make sure that power management resources are not blocked by ACPI - * device objects with no users. - */ - bus_for_each_dev(&acpi_bus_type, NULL, NULL, acpi_dev_turn_off_if_unused); - acpi_turn_off_unused_power_resources(); acpi_scan_initialized = true; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 49fb74196d02..cffbe57a8e08 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2710,7 +2710,7 @@ static void binder_transaction(struct binder_proc *proc, t->from = thread; else t->from = NULL; - t->sender_euid = proc->cred->euid; + t->sender_euid = task_euid(proc->tsk); t->to_proc = target_proc; t->to_thread = target_thread; t->code = tr->code; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index d60f34718b5d..1e1167e725a4 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -438,6 +438,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { /* AMD */ { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */ { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */ + { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */ /* AMD is using RAID class only for ahci controllers */ { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci }, diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 28430c093a7f..f76b8418e6fb 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -131,7 +131,7 @@ const struct attribute_group *ahci_shost_groups[] = { }; EXPORT_SYMBOL_GPL(ahci_shost_groups); -struct attribute *ahci_sdev_attrs[] = { +static struct attribute *ahci_sdev_attrs[] = { &dev_attr_sw_activity.attr, &dev_attr_unload_heads.attr, &dev_attr_ncq_prio_supported.attr, @@ -2323,6 +2323,18 @@ int ahci_port_resume(struct ata_port *ap) EXPORT_SYMBOL_GPL(ahci_port_resume); #ifdef CONFIG_PM +static void ahci_handle_s2idle(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 devslp; + + if (pm_suspend_via_firmware()) + return; + devslp = readl(port_mmio + PORT_DEVSLP); + if ((devslp & PORT_DEVSLP_ADSE)) + ata_msleep(ap, devslp_idle_timeout); +} + static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) { const char *emsg = NULL; @@ -2336,6 +2348,9 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) ata_port_freeze(ap); } + if (acpi_storage_d3(ap->host->dev)) + ahci_handle_s2idle(ap); + ahci_rpm_put_port(ap); return rc; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 3018ca84a3d8..59ad8c979cb3 100644 --- 
a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2031,8 +2031,9 @@ retry: dev->horkage |= ATA_HORKAGE_NO_DMA_LOG; goto retry; } - ata_dev_err(dev, "Read log page 0x%02x failed, Emask 0x%x\n", - (unsigned int)page, err_mask); + ata_dev_err(dev, + "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n", + (unsigned int)log, (unsigned int)page, err_mask); } return err_mask; @@ -2052,8 +2053,19 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page) struct ata_port *ap = dev->link->ap; unsigned int err, i; + if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG) + return false; + if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) { - ata_dev_warn(dev, "ATA Identify Device Log not supported\n"); + /* + * IDENTIFY DEVICE data log is defined as mandatory starting + * with ACS-3 (ATA version 10). Warn about the missing log + * for drives which implement this ATA level or above. + */ + if (ata_id_major_version(dev->id) >= 10) + ata_dev_warn(dev, + "ATA Identify Device Log not supported\n"); + dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG; return false; } @@ -2166,6 +2178,9 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev) struct ata_port *ap = dev->link->ap; unsigned int err_mask; + if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS)) + return; + err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SATA_SETTINGS, @@ -2442,7 +2457,8 @@ static void ata_dev_config_devslp(struct ata_device *dev) * Check device sleep capability. Get DevSlp timing variables * from SATA Settings page of Identify Device Data Log. */ - if (!ata_id_has_devslp(dev->id)) + if (!ata_id_has_devslp(dev->id) || + !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS)) return; err_mask = ata_read_log_page(dev, diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index bf9c4b6c5c3d..1d4a6f1e88cd 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -93,6 +93,12 @@ static const unsigned long ata_eh_identify_timeouts[] = { ULONG_MAX, }; +static const unsigned long ata_eh_revalidate_timeouts[] = { + 15000, /* Some drives are slow to read log pages when waking-up */ + 15000, /* combined time till here is enough even for media access */ + ULONG_MAX, +}; + static const unsigned long ata_eh_flush_timeouts[] = { 15000, /* be generous with flush */ 15000, /* ditto */ @@ -129,6 +135,8 @@ static const struct ata_eh_cmd_timeout_ent ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), .timeouts = ata_eh_identify_timeouts, }, + { .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT), + .timeouts = ata_eh_revalidate_timeouts, }, { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), .timeouts = ata_eh_other_timeouts, }, { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index 4e88597aa9df..5b78e86e3459 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -922,7 +922,7 @@ DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR, ata_ncq_prio_enable_show, ata_ncq_prio_enable_store); EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable); -struct attribute *ata_ncq_sdev_attrs[] = { +static struct attribute *ata_ncq_sdev_attrs[] = { &dev_attr_unload_heads.attr, &dev_attr_ncq_prio_enable.attr, &dev_attr_ncq_prio_supported.attr, diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index 8440203e835e..b29d3f1d64b0 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ 
-469,10 +469,8 @@ static int ahci_highbank_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "no irq\n"); + if (irq < 0) return irq; - } if (!irq) return -EINVAL; diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 981e72a3dafb..ff16a36a908b 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -677,6 +677,8 @@ void remove_cpu_topology(unsigned int cpu) cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); for_each_cpu(sibling, topology_sibling_cpumask(cpu)) cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); + for_each_cpu(sibling, topology_cluster_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling)); for_each_cpu(sibling, topology_llc_cpumask(cpu)) cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 97bf051a50ce..6ae38776e30e 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -316,7 +316,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req = bd->rq; struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); unsigned long flags; - unsigned int num; + int num; int qid = hctx->queue_num; bool notify = false; blk_status_t status; @@ -1049,7 +1049,6 @@ static struct virtio_driver virtio_blk = { .feature_table_size = ARRAY_SIZE(features), .feature_table_legacy = features_legacy, .feature_table_size_legacy = ARRAY_SIZE(features_legacy), - .suppress_used_validation = true, .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 08d7953ec5f1..25071126995b 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1853,12 +1853,14 @@ static const struct block_device_operations zram_devops = { .owner = THIS_MODULE }; +#ifdef CONFIG_ZRAM_WRITEBACK static const struct block_device_operations zram_wb_devops = { .open = zram_open, .submit_bio = zram_submit_bio, .swap_slot_free_notify = zram_slot_free_notify, .owner = THIS_MODULE }; +#endif static DEVICE_ATTR_WO(compact); static DEVICE_ATTR_RW(disksize); diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c index 4c2f7d61cb9b..183d5cc37d42 100644 --- a/drivers/bus/brcmstb_gisb.c +++ b/drivers/bus/brcmstb_gisb.c @@ -485,7 +485,7 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev) list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list); #ifdef CONFIG_MIPS - board_be_handler = brcmstb_bus_error_handler; + mips_set_be_handler(brcmstb_bus_error_handler); #endif if (list_is_singular(&brcmstb_gisb_arb_device_list)) { diff --git a/drivers/clk/actions/owl-factor.c b/drivers/clk/actions/owl-factor.c index f15e2621fa18..64f316cf7cfc 100644 --- a/drivers/clk/actions/owl-factor.c +++ b/drivers/clk/actions/owl-factor.c @@ -10,7 +10,6 @@ #include <linux/clk-provider.h> #include <linux/regmap.h> -#include <linux/slab.h> #include "owl-factor.h" diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c index bc3be5f3eae1..24dab2312bc6 100644 --- a/drivers/clk/clk-ast2600.c +++ b/drivers/clk/clk-ast2600.c @@ -51,6 +51,8 @@ static DEFINE_SPINLOCK(aspeed_g6_clk_lock); static struct clk_hw_onecell_data *aspeed_g6_clk_data; static void __iomem *scu_g6_base; +/* AST2600 revision: A0, A1, A2, etc */ +static u8 soc_rev; /* * Clocks marked with CLK_IS_CRITICAL: @@ -191,9 +193,8 @@ static struct clk_hw *ast2600_calc_pll(const char *name, u32 
val) static struct clk_hw *ast2600_calc_apll(const char *name, u32 val) { unsigned int mult, div; - u32 chip_id = readl(scu_g6_base + ASPEED_G6_SILICON_REV); - if (((chip_id & CHIP_REVISION_ID) >> 16) >= 2) { + if (soc_rev >= 2) { if (val & BIT(24)) { /* Pass through mode */ mult = div = 1; @@ -707,7 +708,7 @@ static const u32 ast2600_a1_axi_ahb200_tbl[] = { static void __init aspeed_g6_cc(struct regmap *map) { struct clk_hw *hw; - u32 val, div, divbits, chip_id, axi_div, ahb_div; + u32 val, div, divbits, axi_div, ahb_div; clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000); @@ -738,8 +739,7 @@ static void __init aspeed_g6_cc(struct regmap *map) axi_div = 2; divbits = (val >> 11) & 0x3; - regmap_read(map, ASPEED_G6_SILICON_REV, &chip_id); - if (chip_id & BIT(16)) { + if (soc_rev >= 1) { if (!divbits) { ahb_div = ast2600_a1_axi_ahb200_tbl[(val >> 8) & 0x3]; if (val & BIT(16)) @@ -784,6 +784,8 @@ static void __init aspeed_g6_cc_init(struct device_node *np) if (!scu_g6_base) return; + soc_rev = (readl(scu_g6_base + ASPEED_G6_SILICON_REV) & CHIP_REVISION_ID) >> 16; + aspeed_g6_clk_data = kzalloc(struct_size(aspeed_g6_clk_data, hws, ASPEED_G6_NUM_CLKS), GFP_KERNEL); if (!aspeed_g6_clk_data) diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c index c04ae0e7e4b4..b9c5f904f535 100644 --- a/drivers/clk/clk-composite.c +++ b/drivers/clk/clk-composite.c @@ -97,6 +97,7 @@ static int clk_composite_determine_rate(struct clk_hw *hw, return ret; req->rate = tmp_req.rate; + req->best_parent_hw = tmp_req.best_parent_hw; req->best_parent_rate = tmp_req.best_parent_rate; return 0; diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index 57e4597cdf4c..93fa8c9e11be 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -1,15 +1,15 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * clk-si5351.c: Silicon Laboratories Si5351A/B/C I2C Clock Generator + * clk-si5351.c: Skyworks / Silicon Labs Si5351A/B/C I2C Clock Generator * * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> * Rabeeh Khoury <rabeeh@solid-run.com> * * References: * [1] "Si5351A/B/C Data Sheet" - * https://www.silabs.com/Support%20Documents/TechnicalDocs/Si5351.pdf - * [2] "Manually Generating an Si5351 Register Map" - * https://www.silabs.com/Support%20Documents/TechnicalDocs/AN619.pdf + * https://www.skyworksinc.com/-/media/Skyworks/SL/documents/public/data-sheets/Si5351-B.pdf + * [2] "AN619: Manually Generating an Si5351 Register Map" + * https://www.skyworksinc.com/-/media/Skyworks/SL/documents/public/application-notes/AN619.pdf */ #include <linux/module.h> diff --git a/drivers/clk/clk-si5351.h b/drivers/clk/clk-si5351.h index 73dc8effc519..e9e2bfdaaedf 100644 --- a/drivers/clk/clk-si5351.h +++ b/drivers/clk/clk-si5351.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * clk-si5351.h: Silicon Laboratories Si5351A/B/C I2C Clock Generator + * clk-si5351.h: Skyworks / Silicon Labs Si5351A/B/C I2C Clock Generator * * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> * Rabeeh Khoury <rabeeh@solid-run.com> diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c index c6d3b1ab3d55..e7be3e54b9be 100644 --- a/drivers/clk/clk-versaclock5.c +++ b/drivers/clk/clk-versaclock5.c @@ -905,7 +905,7 @@ output_error: static const struct of_device_id clk_vc5_of_match[]; -static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int vc5_probe(struct i2c_client *client) { unsigned int oe, sd, src_mask = 0, src_val = 0; struct 
vc5_driver_data *vc5; @@ -1244,7 +1244,7 @@ static struct i2c_driver vc5_driver = { .pm = &vc5_pm_ops, .of_match_table = clk_vc5_of_match, }, - .probe = vc5_probe, + .probe_new = vc5_probe, .remove = vc5_remove, .id_table = vc5_id, }; diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index 819949973db1..7d220a01de1f 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -391,11 +391,11 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, #define imx8m_clk_hw_composite(name, parent_names, reg) \ _imx8m_clk_hw_composite(name, parent_names, reg, \ - IMX_COMPOSITE_CORE, IMX_COMPOSITE_CLK_FLAGS_DEFAULT) + 0, IMX_COMPOSITE_CLK_FLAGS_DEFAULT) #define imx8m_clk_hw_composite_critical(name, parent_names, reg) \ _imx8m_clk_hw_composite(name, parent_names, reg, \ - IMX_COMPOSITE_CORE, IMX_COMPOSITE_CLK_FLAGS_CRITICAL) + 0, IMX_COMPOSITE_CLK_FLAGS_CRITICAL) #define imx8m_clk_hw_composite_bus(name, parent_names, reg) \ _imx8m_clk_hw_composite(name, parent_names, reg, \ diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c index 266c7595d330..af31633a8862 100644 --- a/drivers/clk/ingenic/cgu.c +++ b/drivers/clk/ingenic/cgu.c @@ -453,15 +453,15 @@ ingenic_clk_calc_div(struct clk_hw *hw, } /* Impose hardware constraints */ - div = min_t(unsigned, div, 1 << clk_info->div.bits); - div = max_t(unsigned, div, 1); + div = clamp_t(unsigned int, div, clk_info->div.div, + clk_info->div.div << clk_info->div.bits); /* * If the divider value itself must be divided before being written to * the divider register, we must ensure we don't have any bits set that * would be lost as a result of doing so. */ - div /= clk_info->div.div; + div = DIV_ROUND_UP(div, clk_info->div.div); div *= clk_info->div.div; return div; diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c index 5154b0cf8ad6..744d136b721b 100644 --- a/drivers/clk/ingenic/jz4725b-cgu.c +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -10,7 +10,7 @@ #include <linux/delay.h> #include <linux/of.h> -#include <dt-bindings/clock/jz4725b-cgu.h> +#include <dt-bindings/clock/ingenic,jz4725b-cgu.h> #include "cgu.h" #include "pm.h" diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c index cd878f08aca3..43ffb62c42bb 100644 --- a/drivers/clk/ingenic/jz4740-cgu.c +++ b/drivers/clk/ingenic/jz4740-cgu.c @@ -11,7 +11,7 @@ #include <linux/io.h> #include <linux/of.h> -#include <dt-bindings/clock/jz4740-cgu.h> +#include <dt-bindings/clock/ingenic,jz4740-cgu.h> #include "cgu.h" #include "pm.h" diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c index 14483797a4db..080d492ac95c 100644 --- a/drivers/clk/ingenic/jz4760-cgu.c +++ b/drivers/clk/ingenic/jz4760-cgu.c @@ -12,7 +12,7 @@ #include <linux/clk.h> -#include <dt-bindings/clock/jz4760-cgu.h> +#include <dt-bindings/clock/ingenic,jz4760-cgu.h> #include "cgu.h" #include "pm.h" diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c index 2321742b3471..8c6c1208f462 100644 --- a/drivers/clk/ingenic/jz4770-cgu.c +++ b/drivers/clk/ingenic/jz4770-cgu.c @@ -10,7 +10,7 @@ #include <linux/io.h> #include <linux/of.h> -#include <dt-bindings/clock/jz4770-cgu.h> +#include <dt-bindings/clock/ingenic,jz4770-cgu.h> #include "cgu.h" #include "pm.h" diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c index 0268d23ebe2e..e357c228e0f1 100644 --- a/drivers/clk/ingenic/jz4780-cgu.c +++ b/drivers/clk/ingenic/jz4780-cgu.c @@ -13,7 +13,7 @@ #include <linux/iopoll.h> #include 
<linux/of.h> -#include <dt-bindings/clock/jz4780-cgu.h> +#include <dt-bindings/clock/ingenic,jz4780-cgu.h> #include "cgu.h" #include "pm.h" diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c index 9aa20b52e1c3..3c4d5a77ccbd 100644 --- a/drivers/clk/ingenic/x1000-cgu.c +++ b/drivers/clk/ingenic/x1000-cgu.c @@ -9,7 +9,7 @@ #include <linux/io.h> #include <linux/of.h> -#include <dt-bindings/clock/x1000-cgu.h> +#include <dt-bindings/clock/ingenic,x1000-cgu.h> #include "cgu.h" #include "pm.h" diff --git a/drivers/clk/ingenic/x1830-cgu.c b/drivers/clk/ingenic/x1830-cgu.c index 950aee243364..e01ec2dc7a1a 100644 --- a/drivers/clk/ingenic/x1830-cgu.c +++ b/drivers/clk/ingenic/x1830-cgu.c @@ -9,7 +9,7 @@ #include <linux/io.h> #include <linux/of.h> -#include <dt-bindings/clock/x1830-cgu.h> +#include <dt-bindings/clock/ingenic,x1830-cgu.h> #include "cgu.h" #include "pm.h" diff --git a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c index 0e2ac0a30aa0..4ab312eb26a5 100644 --- a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c +++ b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c @@ -10,8 +10,6 @@ #include <linux/clk-provider.h> #include <linux/platform_device.h> -#include <dt-bindings/clock/mt8195-clk.h> - static const struct mtk_gate_regs imp_iic_wrap_cg_regs = { .set_ofs = 0xe08, .clr_ofs = 0xe04, diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c index 3c3a7ff04562..9b1674b28d45 100644 --- a/drivers/clk/qcom/gcc-msm8996.c +++ b/drivers/clk/qcom/gcc-msm8996.c @@ -2937,20 +2937,6 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = { }, }; -static struct clk_branch gcc_aggre1_pnoc_ahb_clk = { - .halt_reg = 0x82014, - .clkr = { - .enable_reg = 0x82014, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "gcc_aggre1_pnoc_ahb_clk", - .parent_names = (const char *[]){ "periph_noc_clk_src" }, - .num_parents = 1, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_aggre2_ufs_axi_clk = { .halt_reg = 0x83014, .clkr = { @@ -3474,7 +3460,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = { [GCC_AGGRE0_CNOC_AHB_CLK] = &gcc_aggre0_cnoc_ahb_clk.clkr, [GCC_SMMU_AGGRE0_AXI_CLK] = &gcc_smmu_aggre0_axi_clk.clkr, [GCC_SMMU_AGGRE0_AHB_CLK] = &gcc_smmu_aggre0_ahb_clk.clkr, - [GCC_AGGRE1_PNOC_AHB_CLK] = &gcc_aggre1_pnoc_ahb_clk.clkr, [GCC_AGGRE2_UFS_AXI_CLK] = &gcc_aggre2_ufs_axi_clk.clkr, [GCC_AGGRE2_USB3_AXI_CLK] = &gcc_aggre2_usb3_axi_clk.clkr, [GCC_QSPI_AHB_CLK] = &gcc_qspi_ahb_clk.clkr, diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig index 2dfd6a383393..3067bdb6e119 100644 --- a/drivers/clk/rockchip/Kconfig +++ b/drivers/clk/rockchip/Kconfig @@ -80,14 +80,14 @@ config CLK_RK3368 Build the driver for RK3368 Clock Driver. config CLK_RK3399 - tristate "Rockchip RK3399 clock controller support" + bool "Rockchip RK3399 clock controller support" depends on ARM64 || COMPILE_TEST default y help Build the driver for RK3399 Clock Driver. 
config CLK_RK3568 - tristate "Rockchip RK3568 clock controller support" + bool "Rockchip RK3568 clock controller support" depends on ARM64 || COMPILE_TEST default y help diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c index 7924598747b6..306910a3a0d3 100644 --- a/drivers/clk/rockchip/clk-rk3399.c +++ b/drivers/clk/rockchip/clk-rk3399.c @@ -1630,7 +1630,6 @@ static const struct of_device_id clk_rk3399_match_table[] = { }, { } }; -MODULE_DEVICE_TABLE(of, clk_rk3399_match_table); static int __init clk_rk3399_probe(struct platform_device *pdev) { @@ -1656,7 +1655,4 @@ static struct platform_driver clk_rk3399_driver = { .suppress_bind_attrs = true, }, }; -module_platform_driver_probe(clk_rk3399_driver, clk_rk3399_probe); - -MODULE_DESCRIPTION("Rockchip RK3399 Clock Driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver_probe(clk_rk3399_driver, clk_rk3399_probe); diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c index 939e7079c334..69a9e8069a48 100644 --- a/drivers/clk/rockchip/clk-rk3568.c +++ b/drivers/clk/rockchip/clk-rk3568.c @@ -1693,7 +1693,6 @@ static const struct of_device_id clk_rk3568_match_table[] = { }, { } }; -MODULE_DEVICE_TABLE(of, clk_rk3568_match_table); static int __init clk_rk3568_probe(struct platform_device *pdev) { @@ -1719,7 +1718,4 @@ static struct platform_driver clk_rk3568_driver = { .suppress_bind_attrs = true, }, }; -module_platform_driver_probe(clk_rk3568_driver, clk_rk3568_probe); - -MODULE_DESCRIPTION("Rockchip RK3568 Clock Driver"); -MODULE_LICENSE("GPL"); +builtin_platform_driver_probe(clk_rk3568_driver, clk_rk3568_probe); diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c index 12380236d7ab..46c66fac48e6 100644 --- a/drivers/clk/uniphier/clk-uniphier-core.c +++ b/drivers/clk/uniphier/clk-uniphier-core.c @@ -132,6 +132,10 @@ static const struct of_device_id uniphier_clk_match[] = { .compatible = "socionext,uniphier-pxs3-clock", .data = uniphier_pxs3_sys_clk_data, }, + { + .compatible = "socionext,uniphier-nx1-clock", + .data = uniphier_nx1_sys_clk_data, + }, /* Media I/O clock, SD clock */ { .compatible = "socionext,uniphier-ld4-mio-clock", @@ -165,6 +169,10 @@ static const struct of_device_id uniphier_clk_match[] = { .compatible = "socionext,uniphier-pxs3-sd-clock", .data = uniphier_pro5_sd_clk_data, }, + { + .compatible = "socionext,uniphier-nx1-sd-clock", + .data = uniphier_pro5_sd_clk_data, + }, /* Peripheral clock */ { .compatible = "socionext,uniphier-ld4-peri-clock", @@ -198,6 +206,15 @@ static const struct of_device_id uniphier_clk_match[] = { .compatible = "socionext,uniphier-pxs3-peri-clock", .data = uniphier_pro4_peri_clk_data, }, + { + .compatible = "socionext,uniphier-nx1-peri-clock", + .data = uniphier_pro4_peri_clk_data, + }, + /* SoC-glue clock */ + { + .compatible = "socionext,uniphier-pro4-sg-clock", + .data = uniphier_pro4_sg_clk_data, + }, { /* sentinel */ } }; diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c index 32b301724183..0180470b24db 100644 --- a/drivers/clk/uniphier/clk-uniphier-sys.c +++ b/drivers/clk/uniphier/clk-uniphier-sys.c @@ -20,6 +20,10 @@ UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 10), \ UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 15) +#define UNIPHIER_NX1_SYS_CLK_SD \ + UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 4), \ + UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 6) + #define UNIPHIER_LD4_SYS_CLK_NAND(idx) \ UNIPHIER_CLK_FACTOR("nand-50m", -1, "spll", 
1, 32), \ UNIPHIER_CLK_GATE("nand", (idx), "nand-50m", 0x2104, 2) @@ -288,6 +292,8 @@ const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = { UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x210c, 7), UNIPHIER_CLK_GATE("sata1", 29, NULL, 0x210c, 8), UNIPHIER_CLK_GATE("sata-phy", 30, NULL, 0x210c, 21), + UNIPHIER_LD11_SYS_CLK_AIO(40), + UNIPHIER_LD11_SYS_CLK_EXIV(42), /* CPU gears */ UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8), UNIPHIER_CLK_DIV4("spll", 2, 3, 4, 8), @@ -300,3 +306,44 @@ const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = { "spll/4", "spll/8", "s2pll/4", "s2pll/8"), { /* sentinel */ } }; + +const struct uniphier_clk_data uniphier_nx1_sys_clk_data[] = { + UNIPHIER_CLK_FACTOR("cpll", -1, "ref", 100, 1), /* ARM: 2500 MHz */ + UNIPHIER_CLK_FACTOR("spll", -1, "ref", 32, 1), /* 800 MHz */ + UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 6), + UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16), + UNIPHIER_NX1_SYS_CLK_SD, + UNIPHIER_CLK_GATE("emmc", 4, NULL, 0x2108, 8), + UNIPHIER_CLK_GATE("ether", 6, NULL, 0x210c, 0), + UNIPHIER_CLK_GATE("usb30-0", 12, NULL, 0x210c, 16), /* =GIO */ + UNIPHIER_CLK_GATE("usb30-1", 13, NULL, 0x210c, 20), /* =GIO1P */ + UNIPHIER_CLK_GATE("usb30-hsphy0", 16, NULL, 0x210c, 24), + UNIPHIER_CLK_GATE("usb30-ssphy0", 17, NULL, 0x210c, 25), + UNIPHIER_CLK_GATE("usb30-ssphy1", 18, NULL, 0x210c, 26), + UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x210c, 8), + UNIPHIER_CLK_GATE("voc", 52, NULL, 0x2110, 0), + UNIPHIER_CLK_GATE("hdmitx", 58, NULL, 0x2110, 8), + /* CPU gears */ + UNIPHIER_CLK_DIV5("cpll", 2, 4, 8, 16, 32), + UNIPHIER_CLK_CPUGEAR("cpu-ca53", 33, 0x8080, 0xf, 5, + "cpll/2", "cpll/4", "cpll/8", "cpll/16", + "cpll/32"), + { /* sentinel */ } +}; + +const struct uniphier_clk_data uniphier_pro4_sg_clk_data[] = { + UNIPHIER_CLK_DIV("gpll", 4), + { + .name = "sata-ref", + .type = UNIPHIER_CLK_TYPE_MUX, + .idx = 0, + .data.mux = { + .parent_names = { "gpll/4", "ref", }, + .num_parents = 2, + .reg = 0x1a28, + .masks = { 0x1, 0x1, }, + .vals = { 0x0, 0x1, }, + }, + }, + { /* sentinel */ } +}; diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h index 9e30362e55e1..dea0c7829aee 100644 --- a/drivers/clk/uniphier/clk-uniphier.h +++ b/drivers/clk/uniphier/clk-uniphier.h @@ -119,6 +119,10 @@ struct uniphier_clk_data { UNIPHIER_CLK_DIV2(parent, div0, div1), \ UNIPHIER_CLK_DIV2(parent, div2, div3) +#define UNIPHIER_CLK_DIV5(parent, div0, div1, div2, div3, div4) \ + UNIPHIER_CLK_DIV4(parent, div0, div1, div2, div3), \ + UNIPHIER_CLK_DIV(parent, div4) + struct clk_hw *uniphier_clk_register_cpugear(struct device *dev, struct regmap *regmap, const char *name, @@ -146,9 +150,11 @@ extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[]; +extern const struct uniphier_clk_data uniphier_nx1_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_ld4_mio_clk_data[]; extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[]; extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[]; extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[]; +extern const struct uniphier_clk_data uniphier_pro4_sg_clk_data[]; #endif /* __CLK_UNIPHIER_H__ */ diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c index 77fd0ecaf155..d52f976dc875 100644 --- a/drivers/clk/versatile/clk-icst.c +++ 
b/drivers/clk/versatile/clk-icst.c @@ -484,7 +484,7 @@ static void __init of_syscon_icst_setup(struct device_node *np) struct device_node *parent; struct regmap *map; struct clk_icst_desc icst_desc; - const char *name = np->name; + const char *name; const char *parent_name; struct clk *regclk; enum icst_control_type ctype; @@ -533,15 +533,17 @@ static void __init of_syscon_icst_setup(struct device_node *np) icst_desc.params = &icst525_apcp_cm_params; ctype = ICST_INTEGRATOR_CP_CM_MEM; } else { - pr_err("unknown ICST clock %s\n", name); + pr_err("unknown ICST clock %pOF\n", np); return; } /* Parent clock name is not the same as node parent */ parent_name = of_clk_get_parent_name(np, 0); + name = kasprintf(GFP_KERNEL, "%pOFP", np); regclk = icst_clk_setup(NULL, &icst_desc, name, parent_name, map, ctype); if (IS_ERR(regclk)) { + kfree(name); pr_err("error setting up syscon ICST clock %s\n", name); return; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 815df3daae9d..dec2a5649ac1 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -338,6 +338,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work) static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn); +#define CPPC_MAX_PERF U8_MAX + static void intel_pstate_set_itmt_prio(int cpu) { struct cppc_perf_caps cppc_perf; @@ -349,6 +351,14 @@ static void intel_pstate_set_itmt_prio(int cpu) return; /* + * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff. + * In this case we can't use CPPC.highest_perf to enable ITMT. + * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide. + */ + if (cppc_perf.highest_perf == CPPC_MAX_PERF) + cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached)); + + /* * The priorities can be set regardless of whether or not * sched_set_itmt_support(true) has been called and it is valid to * update them at any time after it has been called. @@ -1006,6 +1016,12 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu) */ value &= ~GENMASK_ULL(31, 24); value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached); + /* + * However, make sure that EPP will be set to "performance" when + * the CPU is brought back online again and the "performance" + * scaling algorithm is still in effect. + */ + cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN; } /* @@ -2353,6 +2369,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { X86_MATCH(BROADWELL_D, core_funcs), X86_MATCH(BROADWELL_X, core_funcs), X86_MATCH(SKYLAKE_X, core_funcs), + X86_MATCH(ICELAKE_X, core_funcs), {} }; diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 2ea59cb8edcd..6437b2e978fb 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -67,12 +67,9 @@ static void dma_buf_release(struct dentry *dentry) BUG_ON(dmabuf->vmapping_counter); /* - * Any fences that a dma-buf poll can wait on should be signaled - * before releasing dma-buf. This is the responsibility of each - * driver that uses the reservation objects. - * - * If you hit this BUG() it means someone dropped their ref to the - * dma-buf while still having pending operation to the buffer. 
+ * If you hit this BUG() it could mean: + * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else + * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback */ BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active); @@ -200,6 +197,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; + struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll); unsigned long flags; spin_lock_irqsave(&dcb->poll->lock, flags); @@ -207,21 +205,18 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) dcb->active = 0; spin_unlock_irqrestore(&dcb->poll->lock, flags); dma_fence_put(fence); + /* Paired with get_file in dma_buf_poll */ + fput(dmabuf->file); } -static bool dma_buf_poll_shared(struct dma_resv *resv, +static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write, struct dma_buf_poll_cb_t *dcb) { - struct dma_resv_list *fobj = dma_resv_shared_list(resv); + struct dma_resv_iter cursor; struct dma_fence *fence; - int i, r; - - if (!fobj) - return false; + int r; - for (i = 0; i < fobj->shared_count; ++i) { - fence = rcu_dereference_protected(fobj->shared[i], - dma_resv_held(resv)); + dma_resv_for_each_fence(&cursor, resv, write, fence) { dma_fence_get(fence); r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb); if (!r) @@ -232,24 +227,6 @@ static bool dma_buf_poll_shared(struct dma_resv *resv, return false; } -static bool dma_buf_poll_excl(struct dma_resv *resv, - struct dma_buf_poll_cb_t *dcb) -{ - struct dma_fence *fence = dma_resv_excl_fence(resv); - int r; - - if (!fence) - return false; - - dma_fence_get(fence); - r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb); - if (!r) - return true; - dma_fence_put(fence); - - return false; -} - static __poll_t dma_buf_poll(struct file *file, poll_table *poll) { struct dma_buf *dmabuf; @@ -282,8 +259,10 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) spin_unlock_irq(&dmabuf->poll.lock); if (events & EPOLLOUT) { - if (!dma_buf_poll_shared(resv, dcb) && - !dma_buf_poll_excl(resv, dcb)) + /* Paired with fput in dma_buf_poll_cb */ + get_file(dmabuf->file); + + if (!dma_buf_poll_add_cb(resv, true, dcb)) /* No callback queued, wake up any other waiters */ dma_buf_poll_cb(NULL, &dcb->cb); else @@ -303,7 +282,10 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) spin_unlock_irq(&dmabuf->poll.lock); if (events & EPOLLIN) { - if (!dma_buf_poll_excl(resv, dcb)) + /* Paired with fput in dma_buf_poll_cb */ + get_file(dmabuf->file); + + if (!dma_buf_poll_add_cb(resv, false, dcb)) /* No callback queued, wake up any other waiters */ dma_buf_poll_cb(NULL, &dcb->cb); else @@ -1356,10 +1338,9 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) { struct dma_buf *buf_obj; struct dma_buf_attachment *attach_obj; - struct dma_resv *robj; - struct dma_resv_list *fobj; + struct dma_resv_iter cursor; struct dma_fence *fence; - int count = 0, attach_count, shared_count, i; + int count = 0, attach_count; size_t size = 0; int ret; @@ -1378,6 +1359,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) if (ret) goto error_unlock; + + spin_lock(&buf_obj->name_lock); seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n", buf_obj->size, buf_obj->file->f_flags, buf_obj->file->f_mode, @@ -1385,22 +1368,12 @@ static int dma_buf_debug_show(struct 
seq_file *s, void *unused) buf_obj->exp_name, file_inode(buf_obj->file)->i_ino, buf_obj->name ?: ""); + spin_unlock(&buf_obj->name_lock); - robj = buf_obj->resv; - fence = dma_resv_excl_fence(robj); - if (fence) - seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n", - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), - dma_fence_is_signaled(fence) ? "" : "un"); - - fobj = rcu_dereference_protected(robj->fence, - dma_resv_held(robj)); - shared_count = fobj ? fobj->shared_count : 0; - for (i = 0; i < shared_count; i++) { - fence = rcu_dereference_protected(fobj->shared[i], - dma_resv_held(robj)); - seq_printf(s, "\tShared fence: %s %s %ssignalled\n", + dma_resv_for_each_fence(&cursor, buf_obj->resv, true, fence) { + seq_printf(s, "\t%s fence: %s %s %ssignalled\n", + dma_resv_iter_is_exclusive(&cursor) ? + "Exclusive" : "Shared", fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), dma_fence_is_signaled(fence) ? "" : "un"); diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index a480af9581bd..9eb2baa387d4 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -333,10 +333,14 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) { cursor->seq = read_seqcount_begin(&cursor->obj->seq); cursor->index = -1; - if (cursor->all_fences) + cursor->shared_count = 0; + if (cursor->all_fences) { cursor->fences = dma_resv_shared_list(cursor->obj); - else + if (cursor->fences) + cursor->shared_count = cursor->fences->shared_count; + } else { cursor->fences = NULL; + } cursor->is_restarted = true; } @@ -363,7 +367,7 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) continue; } else if (!cursor->fences || - cursor->index >= cursor->fences->shared_count) { + cursor->index >= cursor->shared_count) { cursor->fence = NULL; break; @@ -424,6 +428,57 @@ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor) EXPORT_SYMBOL(dma_resv_iter_next_unlocked); /** + * dma_resv_iter_first - first fence from a locked dma_resv object + * @cursor: cursor to record the current position + * + * Return the first fence in the dma_resv object while holding the + * &dma_resv.lock. + */ +struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor) +{ + struct dma_fence *fence; + + dma_resv_assert_held(cursor->obj); + + cursor->index = 0; + if (cursor->all_fences) + cursor->fences = dma_resv_shared_list(cursor->obj); + else + cursor->fences = NULL; + + fence = dma_resv_excl_fence(cursor->obj); + if (!fence) + fence = dma_resv_iter_next(cursor); + + cursor->is_restarted = true; + return fence; +} +EXPORT_SYMBOL_GPL(dma_resv_iter_first); + +/** + * dma_resv_iter_next - next fence from a locked dma_resv object + * @cursor: cursor to record the current position + * + * Return the next fences from the dma_resv object while holding the + * &dma_resv.lock. + */ +struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor) +{ + unsigned int idx; + + dma_resv_assert_held(cursor->obj); + + cursor->is_restarted = false; + if (!cursor->fences || cursor->index >= cursor->fences->shared_count) + return NULL; + + idx = cursor->index++; + return rcu_dereference_protected(cursor->fences->shared[idx], + dma_resv_held(cursor->obj)); +} +EXPORT_SYMBOL_GPL(dma_resv_iter_next); + +/** * dma_resv_copy_fences - Copy all fences from src to dst. 
* @dst: the destination reservation object * @src: the source reservation object @@ -448,10 +503,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) dma_resv_list_free(list); dma_fence_put(excl); - if (cursor.fences) { - unsigned int cnt = cursor.fences->shared_count; - - list = dma_resv_list_alloc(cnt); + if (cursor.shared_count) { + list = dma_resv_list_alloc(cursor.shared_count); if (!list) { dma_resv_iter_end(&cursor); return -ENOMEM; @@ -522,7 +575,7 @@ int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl, if (fence_excl) dma_fence_put(*fence_excl); - count = cursor.fences ? cursor.fences->shared_count : 0; + count = cursor.shared_count; count += fence_excl ? 0 : 1; /* Eventually re-allocate the array */ diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c index de416f9e7921..f5219334fd3a 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -34,6 +34,12 @@ struct scmi_msg_resp_base_attributes { __le16 reserved; }; +struct scmi_msg_resp_base_discover_agent { + __le32 agent_id; + u8 name[SCMI_MAX_STR_SIZE]; +}; + + struct scmi_msg_base_error_notify { __le32 event_control; #define BASE_TP_NOTIFY_ALL BIT(0) @@ -225,18 +231,21 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph, int id, char *name) { int ret; + struct scmi_msg_resp_base_discover_agent *agent_info; struct scmi_xfer *t; ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT, - sizeof(__le32), SCMI_MAX_STR_SIZE, &t); + sizeof(__le32), sizeof(*agent_info), &t); if (ret) return ret; put_unaligned_le32(id, t->tx.buf); ret = ph->xops->do_xfer(ph, t); - if (!ret) - strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE); + if (!ret) { + agent_info = t->rx.buf; + strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE); + } ph->xops->xfer_put(ph, t); diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c index 4371fdcd5a73..581d34c95769 100644 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -138,9 +138,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) scmi_pd_data->domains = domains; scmi_pd_data->num_domains = num_domains; - of_genpd_add_provider_onecell(np, scmi_pd_data); - - return 0; + return of_genpd_add_provider_onecell(np, scmi_pd_data); } static const struct scmi_device_id scmi_id_table[] = { diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index 308471586381..cdbb287bd8bc 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -637,7 +637,7 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph, if (ret) return ret; - put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf); + put_unaligned_le32(sensor_id, t->tx.buf); ret = ph->xops->do_xfer(ph, t); if (!ret) { struct sensors_info *si = ph->get_priv(ph); diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 11e8efb71375..87039c5c03fd 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -82,7 +82,8 @@ static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) } static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, - struct scmi_vio_msg *msg) + struct scmi_vio_msg *msg, + struct device *dev) { struct scatterlist sg_in; int rc; @@ -94,8 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); 
if (rc) - dev_err_once(vioch->cinfo->dev, - "failed to add to virtqueue (%d)\n", rc); + dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc); else virtqueue_kick(vioch->vqueue); @@ -108,7 +108,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch, struct scmi_vio_msg *msg) { if (vioch->is_rx) { - scmi_vio_feed_vq_rx(vioch, msg); + scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev); } else { /* Here IRQs are assumed to be already disabled by the caller */ spin_lock(&vioch->lock); @@ -269,7 +269,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, list_add_tail(&msg->list, &vioch->free_list); spin_unlock_irqrestore(&vioch->lock, flags); } else { - scmi_vio_feed_vq_rx(vioch, msg); + scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev); } } diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c index a5048956a0be..ac08e819088b 100644 --- a/drivers/firmware/arm_scmi/voltage.c +++ b/drivers/firmware/arm_scmi/voltage.c @@ -156,7 +156,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, int cnt; cmd->domain_id = cpu_to_le32(v->id); - cmd->level_index = desc_index; + cmd->level_index = cpu_to_le32(desc_index); ret = ph->xops->do_xfer(ph, tl); if (ret) break; diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c index 581aa5e9b077..dd7c3d5e8b0b 100644 --- a/drivers/firmware/smccc/soc_id.c +++ b/drivers/firmware/smccc/soc_id.c @@ -50,7 +50,7 @@ static int __init smccc_soc_init(void) arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_SOC_ID, &res); - if (res.a0 == SMCCC_RET_NOT_SUPPORTED) { + if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) { pr_info("ARCH_SOC_ID not implemented, skipping ....\n"); return 0; } diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 072ed610f9c6..60d9374c72c0 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -523,6 +523,7 @@ config GPIO_REG config GPIO_ROCKCHIP tristate "Rockchip GPIO support" depends on ARCH_ROCKCHIP || COMPILE_TEST + select GENERIC_IRQ_CHIP select GPIOLIB_IRQCHIP default ARCH_ROCKCHIP help diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c index aeec4bf0b625..84f96b78f32a 100644 --- a/drivers/gpio/gpio-virtio.c +++ b/drivers/gpio/gpio-virtio.c @@ -434,7 +434,7 @@ static void virtio_gpio_event_vq(struct virtqueue *vq) ret = generic_handle_domain_irq(vgpio->gc.irq.domain, gpio); if (ret) dev_err(dev, "failed to handle interrupt: %d\n", ret); - }; + } } static void virtio_gpio_request_vq(struct virtqueue *vq) diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 2a926d0de423..0039df26854b 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -100,11 +100,25 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS This has the potential to use a lot of memory and print some very large kernel messages. If in doubt, say "N". +config DRM_DEBUG_MODESET_LOCK + bool "Enable backtrace history for lock contention" + depends on STACKTRACE_SUPPORT + depends on DEBUG_KERNEL + depends on EXPERT + select STACKDEPOT + default y if DEBUG_WW_MUTEX_SLOWPATH + help + Enable debug tracing of failures to gracefully handle drm modeset lock + contention. A history of each drm modeset lock path hitting -EDEADLK + will be saved until gracefully handled, and the backtrace will be + printed when attempting to lock a contended lock. + + If in doubt, say "N". 
+ config DRM_FBDEV_EMULATION bool "Enable legacy fbdev support for your modesetting driver" - depends on DRM - depends on FB=y || FB=DRM - select DRM_KMS_HELPER + depends on DRM_KMS_HELPER + depends on FB=y || FB=DRM_KMS_HELPER select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 751557af09bb..a15a4787c7ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -297,7 +297,7 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd); void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, struct amdgpu_vm *vm); -void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo); +void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo); void amdgpu_amdkfd_reserve_system_mem(uint64_t size); #else static inline @@ -312,7 +312,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, } static inline -void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) +void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) { } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 0e9cfe99ae9e..71a6a9ef54ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -207,7 +207,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev, spin_unlock(&kfd_mem_limit.mem_limit_lock); } -void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) +void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); u32 domain = bo->preferred_domains; @@ -219,6 +219,8 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) } unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg); + + kfree(bo->kfd_bo); } @@ -644,12 +646,6 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, if (IS_ERR(gobj)) return PTR_ERR(gobj); - /* Import takes an extra reference on the dmabuf. Drop it now to - * avoid leaking it. We only need the one reference in - * kgd_mem->dmabuf. - */ - dma_buf_put(mem->dmabuf); - *bo = gem_to_amdgpu_bo(gobj); (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE; (*bo)->parent = amdgpu_bo_ref(mem->bo); @@ -734,14 +730,19 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, } /* Add BO to VM internal data structures */ + ret = amdgpu_bo_reserve(bo[i], false); + if (ret) { + pr_debug("Unable to reserve BO during memory attach"); + goto unwind; + } attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]); + amdgpu_bo_unreserve(bo[i]); if (unlikely(!attachment[i]->bo_va)) { ret = -ENOMEM; pr_err("Failed to add BO object to VM. 
ret == %d\n", ret); goto unwind; } - attachment[i]->va = va; attachment[i]->pte_flags = get_pte_flags(adev, mem); attachment[i]->adev = adev; @@ -757,7 +758,9 @@ unwind: if (!attachment[i]) continue; if (attachment[i]->bo_va) { + amdgpu_bo_reserve(bo[i], true); amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va); + amdgpu_bo_unreserve(bo[i]); list_del(&attachment[i]->list); } if (bo[i]) @@ -1568,12 +1571,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue)); - ret = unreserve_bo_and_vms(&ctx, false, false); - /* Remove from VM internal data structures */ list_for_each_entry_safe(entry, tmp, &mem->attachments, list) kfd_mem_detach(entry); + ret = unreserve_bo_and_vms(&ctx, false, false); + /* Free the sync object */ amdgpu_sync_free(&mem->sync); @@ -1600,9 +1603,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); if (mem->dmabuf) dma_buf_put(mem->dmabuf); - drm_gem_object_put(&mem->bo->tbo.base); mutex_destroy(&mem->lock); - kfree(mem); + + /* If this releases the last reference, it will end up calling + * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why + * this needs to be the last call here. + */ + drm_gem_object_put(&mem->bo->tbo.base); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 96b7bb13a2dd..12a6b1c99c93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1569,6 +1569,18 @@ void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, WREG32(adev->bios_scratch_reg_offset + 3, tmp); } +void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev, + u32 backlight_level) +{ + u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2); + + tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; + tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) & + ATOM_S2_CURRENT_BL_LEVEL_MASK; + + WREG32(adev->bios_scratch_reg_offset + 2, tmp); +} + bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev) { u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 8cc0222dba19..27e74b1fc260 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -185,6 +185,8 @@ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock); void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung); +void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev, + u32 backlight_level); bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev); void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index b9c11c2b2885..0de66f59adb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -827,6 +827,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector) amdgpu_connector_get_edid(connector); ret = amdgpu_connector_ddc_get_modes(connector); + amdgpu_get_native_mode(connector); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6e40cc1bc6dc..d94fa748e6bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2398,10 +2398,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (!adev->gmc.xgmi.pending_reset) amdgpu_amdkfd_device_init(adev); - r = amdgpu_amdkfd_resume_iommu(adev); - if (r) - goto init_failed; - amdgpu_fru_get_product_info(adev); init_failed: @@ -3171,11 +3167,21 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) { switch (asic_type) { #if defined(CONFIG_DRM_AMD_DC) -#if defined(CONFIG_DRM_AMD_DC_SI) case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: + /* + * We have systems in the wild with these ASICs that require + * LVDS and VGA support which is not supported with DC. + * + * Fallback to the non-DC driver here by default so as not to + * cause regressions. + */ +#if defined(CONFIG_DRM_AMD_DC_SI) + return amdgpu_dc > 0; +#else + return false; #endif case CHIP_BONAIRE: case CHIP_KAVERI: @@ -3503,6 +3509,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->rmmio_size = pci_resource_len(adev->pdev, 2); } + for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++) + atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); + adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); if (adev->rmmio == NULL) { return -ENOMEM; @@ -4287,8 +4296,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) return r; - amdgpu_amdkfd_pre_reset(adev); - /* Resume IP prior to SMC */ r = amdgpu_device_ip_reinit_early_sriov(adev); if (r) @@ -4309,7 +4316,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, amdgpu_irq_gpu_reset_resume_helper(adev); r = amdgpu_ib_ring_tests(adev); - amdgpu_amdkfd_post_reset(adev); error: if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { @@ -4850,6 +4856,9 @@ static void amdgpu_device_recheck_guilty_jobs( /* clear job's guilty and depend the folowing step to decide the real one */ drm_sched_reset_karma(s_job); + /* for the real bad job, it will be resubmitted twice, adding a dma_fence_get + * to make sure fence is balanced */ + dma_fence_get(s_job->s_fence->parent); drm_sched_resubmit_jobs_ext(&ring->sched, 1); ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout); @@ -4885,6 +4894,7 @@ retry: /* got the hw fence, signal finished fence */ atomic_dec(ring->sched.score); + dma_fence_put(s_job->s_fence->parent); dma_fence_get(&s_job->s_fence->finished); dma_fence_signal(&s_job->s_fence->finished); dma_fence_put(&s_job->s_fence->finished); @@ -5020,8 +5030,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, cancel_delayed_work_sync(&tmp_adev->delayed_init_work); - if (!amdgpu_sriov_vf(tmp_adev)) - amdgpu_amdkfd_pre_reset(tmp_adev); + amdgpu_amdkfd_pre_reset(tmp_adev); /* * Mark these ASICs to be reseted as untracked first @@ -5079,7 +5088,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter)); /* Actual ASIC resets if needed.*/ - /* TODO Implement XGMI hive reset logic for SRIOV */ + /* Host driver will handle XGMI hive reset for SRIOV */ if (amdgpu_sriov_vf(adev)) { r = amdgpu_device_reset_sriov(adev, job ? 
false : true); if (r) @@ -5139,8 +5148,8 @@ skip_hw_reset: skip_sched_resume: list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - /* unlock kfd: SRIOV would do it separately */ - if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev)) + /* unlock kfd */ + if (!need_emergency_restart) amdgpu_amdkfd_post_reset(tmp_adev); /* kfd_post_reset will do nothing if kfd device is not initialized, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index d7c8d9e3c203..503995c7ff6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -248,8 +248,8 @@ get_from_vram: offset = offsetof(struct binary_header, binary_checksum) + sizeof(bhdr->binary_checksum); - size = bhdr->binary_size - offset; - checksum = bhdr->binary_checksum; + size = le16_to_cpu(bhdr->binary_size) - offset; + checksum = le16_to_cpu(bhdr->binary_checksum); if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, size, checksum)) { @@ -270,7 +270,7 @@ get_from_vram: } if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - ihdr->size, checksum)) { + le16_to_cpu(ihdr->size), checksum)) { DRM_ERROR("invalid ip discovery data table checksum\n"); r = -EINVAL; goto out; @@ -282,7 +282,7 @@ get_from_vram: ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset); if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - ghdr->size, checksum)) { + le32_to_cpu(ghdr->size), checksum)) { DRM_ERROR("invalid gc data table checksum\n"); r = -EINVAL; goto out; @@ -489,10 +489,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset)); for (i = 0; i < 32; i++) { - if (le32_to_cpu(harvest_info->list[i].hw_id) == 0) + if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) break; - switch (le32_to_cpu(harvest_info->list[i].hw_id)) { + switch (le16_to_cpu(harvest_info->list[i].hw_id)) { case VCN_HWID: vcn_harvest_count++; if (harvest_info->list[i].number_instance == 0) @@ -587,6 +587,9 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &nv_common_ip_block); break; default: + dev_err(adev->dev, + "Failed to add common ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); return -EINVAL; } return 0; @@ -619,6 +622,9 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add gmc ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); return -EINVAL; } return 0; @@ -648,6 +654,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); break; default: + dev_err(adev->dev, + "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", + adev->ip_versions[OSSSYS_HWIP][0]); return -EINVAL; } return 0; @@ -688,6 +697,9 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add psp ip block(MP0_HWIP:0x%x)\n", + adev->ip_versions[MP0_HWIP][0]); return -EINVAL; } return 0; @@ -726,6 +738,9 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add smu ip block(MP1_HWIP:0x%x)\n", + adev->ip_versions[MP1_HWIP][0]); 
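
The amdgpu_discovery.c hunks above wrap the binary-header reads (binary_size, binary_checksum, ihdr->size, ghdr->size, and the harvest-table hw_id) in le16_to_cpu()/le32_to_cpu(), so the checksum and table parsing also come out right on big-endian hosts; on little-endian machines the conversions are no-ops, which is why the omission was easy to miss. The standalone sketch below (not part of the patch) shows the same idea of decoding little-endian fields from a byte buffer; the header layout and values are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Decode little-endian integers from a byte buffer regardless of host
     * endianness; this is the guarantee le16_to_cpu()/le32_to_cpu() give in
     * the kernel when a field is declared __le16/__le32. */
    static uint16_t get_le16(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    static uint32_t get_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
        /* Hypothetical header: a 16-bit size followed by a 16-bit checksum,
         * stored little-endian the way firmware tables usually are. */
        const uint8_t hdr[4] = { 0x34, 0x12, 0xcd, 0xab };
        uint16_t size = get_le16(&hdr[0]);      /* 0x1234 */
        uint16_t checksum = get_le16(&hdr[2]);  /* 0xabcd */

        printf("size=0x%04x checksum=0x%04x\n",
               (unsigned)size, (unsigned)checksum);
        return get_le32(hdr) == 0xabcd1234u ? 0 : 1;
    }
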
return -EINVAL; } return 0; @@ -753,6 +768,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dm_ip_block); break; default: + dev_err(adev->dev, + "Failed to add dm ip block(DCE_HWIP:0x%x)\n", + adev->ip_versions[DCE_HWIP][0]); return -EINVAL; } } else if (adev->ip_versions[DCI_HWIP][0]) { @@ -763,6 +781,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dm_ip_block); break; default: + dev_err(adev->dev, + "Failed to add dm ip block(DCI_HWIP:0x%x)\n", + adev->ip_versions[DCI_HWIP][0]); return -EINVAL; } #endif @@ -796,6 +817,9 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add gfx ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); return -EINVAL; } return 0; @@ -829,6 +853,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); break; default: + dev_err(adev->dev, + "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", + adev->ip_versions[SDMA0_HWIP][0]); return -EINVAL; } return 0; @@ -845,6 +872,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", + adev->ip_versions[UVD_HWIP][0]); return -EINVAL; } switch (adev->ip_versions[VCE_HWIP][0]) { @@ -855,6 +885,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", + adev->ip_versions[VCE_HWIP][0]); return -EINVAL; } } else { @@ -867,7 +900,8 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(2, 0, 2): case IP_VERSION(2, 2, 0): amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); break; case IP_VERSION(2, 0, 3): break; @@ -881,6 +915,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) break; case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 16): + case IP_VERSION(3, 0, 64): case IP_VERSION(3, 1, 1): case IP_VERSION(3, 0, 2): amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); @@ -891,6 +926,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", + adev->ip_versions[UVD_HWIP][0]); return -EINVAL; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index a573424a6e0b..a1e63ba4c54a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -60,9 +60,10 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf) goto unlock; } - ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, - TTM_BO_VM_NUM_PREFAULT, 1); - drm_dev_exit(idx); + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT); + + drm_dev_exit(idx); } else { ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 
f3d62e196901..0c7963dfacad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -223,7 +223,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev, */ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - unsigned int count = AMDGPU_IH_MAX_NUM_IVS; + unsigned int count; u32 wptr; if (!ih->enabled || adev->shutdown) @@ -232,6 +232,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) wptr = amdgpu_ih_get_wptr(adev, ih); restart_ih: + count = AMDGPU_IH_MAX_NUM_IVS; DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); /* Order reading of wptr vs. reading of IH ring data */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index dfe667ea8b05..651c7abfde03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1423,6 +1423,8 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) struct drm_amdgpu_info_firmware fw_info; struct drm_amdgpu_query_fw query_fw; struct atom_context *ctx = adev->mode_info.atom_context; + uint8_t smu_minor, smu_debug; + uint16_t smu_major; int ret, i; static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = { @@ -1568,8 +1570,11 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); if (ret) return ret; - seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n", - fw_info.feature, fw_info.ver); + smu_major = (fw_info.ver >> 16) & 0xffff; + smu_minor = (fw_info.ver >> 8) & 0xff; + smu_debug = (fw_info.ver >> 0) & 0xff; + seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x (%d.%d.%d)\n", + fw_info.feature, fw_info.ver, smu_major, smu_minor, smu_debug); /* SDMA */ query_fw.fw_type = AMDGPU_INFO_FW_SDMA; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index aeb92e5677ac..4fcfc2313b8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1274,7 +1274,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) abo = ttm_to_amdgpu_bo(bo); if (abo->kfd_bo) - amdgpu_amdkfd_unreserve_memory_limit(abo); + amdgpu_amdkfd_release_notify(abo); /* We only remove the fence if the resv has individualized. 
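
The amdgpu_kms.c debugfs hunk above unpacks the packed 32-bit SMC firmware version into major.minor.debug before printing it, using the same shifts and masks as this tiny standalone sketch (the example version value is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ver = 0x003a5e00;                   /* made-up packed version */
        unsigned int major = (ver >> 16) & 0xffff;   /* upper 16 bits */
        unsigned int minor = (ver >> 8) & 0xff;      /* middle 8 bits */
        unsigned int debug = ver & 0xff;             /* lowest 8 bits */

        /* Prints "0x003a5e00 -> 58.94.0" for the value above. */
        printf("0x%08x -> %u.%u.%u\n", ver, major, minor, debug);
        return 0;
    }
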
*/ WARN_ON_ONCE(bo->type == ttm_bo_type_kernel diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 2658414c503d..4f7c70845785 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -134,6 +134,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) adev->vcn.indirect_sram = true; break; case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 64): if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) fw_name = FIRMWARE_SIENNA_CICHLID; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 978ac927ac11..567df2db23ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -386,6 +386,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) "%s", "xgmi_hive_info"); if (ret) { dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n"); + kobject_put(&hive->kobj); kfree(hive); hive = NULL; goto pro_end; @@ -806,9 +807,9 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) pcs_clear_status(adev, xgmi23_pcs_err_status_reg_aldebaran[i]); - for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) + for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) pcs_clear_status(adev, - xgmi23_pcs_err_status_reg_aldebaran[i]); + xgmi3x16_pcs_err_status_reg_aldebaran[i]); for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) pcs_clear_status(adev, walf_pcs_err_status_reg_aldebaran[i]); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 90a834dc4008..dbe7442fb25c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -7707,8 +7707,19 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(10, 3, 1): case IP_VERSION(10, 3, 3): - clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) | - ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL); + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh); + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); + hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh); + /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. 
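
The gfx_v10_0 clock-counter hunk above (and the matching gfx_v9_0 Renoir hunk further down) reads the upper half, then the lower half, then the upper half again, and re-reads the low half if the high word changed in between, so a 32-bit carry cannot produce a torn 64-bit value. Below is a rough standalone model of that read sequence; read_hi()/read_lo() and the simulated counter are stand-ins for the GOLDEN_TSC register reads, which the driver performs with preemption disabled.

    #include <stdint.h>
    #include <stdio.h>

    /* Simulated free-running counter, parked just below a 32-bit carry so the
     * retry path is exercised. */
    static uint64_t fake_counter = 0x00000001fffffffeULL;

    static uint32_t read_hi(void)
    {
        return (uint32_t)(fake_counter >> 32);
    }

    static uint32_t read_lo(void)
    {
        fake_counter += 3;          /* the counter keeps ticking between reads */
        return (uint32_t)fake_counter;
    }

    /* hi / lo / hi-again: if the high word moved while the low word was read,
     * re-read the low word so both halves come from the same side of the
     * carry. */
    static uint64_t read_split_counter(void)
    {
        uint32_t hi = read_hi();
        uint32_t lo = read_lo();
        uint32_t hi_check = read_hi();

        if (hi_check != hi) {
            lo = read_lo();
            hi = hi_check;
        }
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        printf("counter = 0x%016llx\n",
               (unsigned long long)read_split_counter());
        return 0;
    }
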
+ */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); break; default: preempt_disable(); @@ -8249,6 +8260,9 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 reg, data; + + amdgpu_gfx_off_ctrl(adev, false); + /* not for *_SOC15 */ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); if (amdgpu_sriov_is_pp_one_vf(adev)) @@ -8263,6 +8277,8 @@ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); else WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); + + amdgpu_gfx_off_ctrl(adev, true); } static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev, @@ -8316,11 +8332,8 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable) if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(10, 3, 1): - data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; - WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); - break; case IP_VERSION(10, 3, 3): - data = 0x1388 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; + data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 37b4a3db6360..d17a6f399347 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3575,12 +3575,16 @@ static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 data; + amdgpu_gfx_off_ctrl(adev, false); + data = RREG32(mmRLC_SPM_VMID); data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; WREG32(mmRLC_SPM_VMID, data); + + amdgpu_gfx_off_ctrl(adev, true); } static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index e0302c23e9a7..5f112efda634 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5624,6 +5624,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 data; + amdgpu_gfx_off_ctrl(adev, false); + if (amdgpu_sriov_is_pp_one_vf(adev)) data = RREG32_NO_KIQ(mmRLC_SPM_VMID); else @@ -5636,6 +5638,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_NO_KIQ(mmRLC_SPM_VMID, data); else WREG32(mmRLC_SPM_VMID, data); + + amdgpu_gfx_off_ctrl(adev, true); } static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 7f944bb11298..34478bcc4d09 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -140,6 +140,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin"); #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0 +#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025 +#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026 +#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1 + enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, @@ -2462,7 +2467,9 @@ static int 
gfx_v9_0_sw_fini(void *handle) amdgpu_gfx_kiq_fini(adev); gfx_v9_0_mec_fini(adev); - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); if (adev->flags & AMD_IS_APU) { amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, &adev->gfx.rlc.cp_table_gpu_addr, @@ -4236,19 +4243,38 @@ failed_kiq_read: static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) { - uint64_t clock; + uint64_t clock, clock_lo, clock_hi, hi_check; - amdgpu_gfx_off_ctrl(adev, false); - mutex_lock(&adev->gfx.gpu_clock_mutex); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) { - clock = gfx_v9_0_kiq_read_clock(adev); - } else { - WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 3, 0): + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir); + hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); + /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. + */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); + break; + default: + amdgpu_gfx_off_ctrl(adev, false); + mutex_lock(&adev->gfx.gpu_clock_mutex); + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) { + clock = gfx_v9_0_kiq_read_clock(adev); + } else { + WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + } + mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); + break; } - mutex_unlock(&adev->gfx.gpu_clock_mutex); - amdgpu_gfx_off_ctrl(adev, true); return clock; } @@ -5102,6 +5128,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 reg, data; + amdgpu_gfx_off_ctrl(adev, false); + reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); if (amdgpu_sriov_is_pp_one_vf(adev)) data = RREG32_NO_KIQ(reg); @@ -5115,6 +5143,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); else WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); + + amdgpu_gfx_off_ctrl(adev, true); } static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index bda1542ef1dd..480e41847d7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -348,6 +348,10 @@ static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev) WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL, i * hub->ctx_distance, 0); + if (amdgpu_sriov_vf(adev)) + /* Avoid write to GMC registers */ + return; + /* Setup TLB control */ tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c index 
497b86c376c6..90f0aefbdb39 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c @@ -54,15 +54,17 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev) seg_size = REG_GET_FIELD( RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE_ALDE), MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; + max_region = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL_ALDE, PF_MAX_REGION); } else { xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL); seg_size = REG_GET_FIELD( RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE), MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; + max_region = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); } - max_region = - REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); switch (adev->asic_type) { @@ -89,9 +91,15 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev) if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes) return -EINVAL; - adev->gmc.xgmi.physical_node_id = - REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, - PF_LFB_REGION); + if (adev->asic_type == CHIP_ALDEBARAN) { + adev->gmc.xgmi.physical_node_id = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL_ALDE, + PF_LFB_REGION); + } else { + adev->gmc.xgmi.physical_node_id = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, + PF_LFB_REGION); + } if (adev->gmc.xgmi.physical_node_id > max_physical_node_id) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 1d8414c3fadb..38241cf0e1f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -160,6 +160,7 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev, tmp = RREG32(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1); /* enable_intr field is only valid in ring0 */ if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 
1 : 0)); @@ -275,10 +276,8 @@ static int navi10_ih_enable_ring(struct amdgpu_device *adev, tmp = navi10_ih_rb_cntl(ih, tmp); if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); - if (ih == &adev->irq.ih1) { - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0); + if (ih == &adev->irq.ih1) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); - } if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { @@ -319,7 +318,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) { struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; u32 ih_chicken; - u32 tmp; int ret; int i; @@ -363,15 +361,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell, ih[0]->doorbell_index); - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); - tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, - CLIENT18_IS_STORM_CLIENT, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp); - - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL); - tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp); - pci_set_master(adev->pdev); /* enable interrupts */ @@ -420,12 +409,19 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, u32 wptr, tmp; struct amdgpu_ih_regs *ih_regs; - wptr = le32_to_cpu(*ih->wptr_cpu); - ih_regs = &ih->ih_regs; + if (ih == &adev->irq.ih) { + /* Only ring0 supports writeback. On other rings fall back + * to register-based code with overflow checking below. + */ + wptr = le32_to_cpu(*ih->wptr_cpu); - if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) - goto out; + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + } + ih_regs = &ih->ih_regs; + + /* Double check that the overflow wasn't already cleared. 
*/ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -513,15 +509,11 @@ static int navi10_ih_self_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t wptr = cpu_to_le32(entry->src_data[0]); - switch (entry->ring_id) { case 1: - *adev->irq.ih1.wptr_cpu = wptr; schedule_work(&adev->irq.ih1_work); break; case 2: - *adev->irq.ih2.wptr_cpu = wptr; schedule_work(&adev->irq.ih2_work); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index 4ecd2b5808ce..ee7cab37dfd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -359,6 +359,10 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } #define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1 diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 0d2d629e2d6a..4bbacf1be25a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -276,6 +276,10 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE(smnPCIE_CI_CNTL, data); + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } static void nbio_v6_1_program_ltr(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index 3c00666a13e1..37a4039fdfc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -273,7 +273,9 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = { static void nbio_v7_0_init_registers(struct amdgpu_device *adev) { - + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = + SOC15_REG_OFFSET(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c index 8f2a315e7c73..3444332ea110 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c @@ -371,6 +371,10 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data); } + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } const struct amdgpu_nbio_funcs nbio_v7_2_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index b8bd03d16dba..dc5e93756fea 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -362,7 +362,9 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = { static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { - + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev) @@ -692,6 +694,9 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) { 
uint32_t def, data; + if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4)) + return; + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index febc903adf58..a6659d9ecdd2 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -182,6 +182,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, { switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 64): if (amdgpu_sriov_vf(adev)) { if (encode) *codecs = &sriov_sc_video_codecs_encode; @@ -730,8 +731,10 @@ static int nv_common_early_init(void *handle) #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; - adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + if (!amdgpu_sriov_vf(adev)) { + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + } adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &nv_pcie_rreg; @@ -1031,7 +1034,7 @@ static int nv_common_hw_init(void *handle) * for the purpose of expose those registers * to process space */ - if (adev->nbio.funcs->remap_hdp_registers) + if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ nv_enable_doorbell_aperture(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0c316a2d42ed..de9b55383e9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -971,8 +971,10 @@ static int soc15_common_early_init(void *handle) #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; - adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + if (!amdgpu_sriov_vf(adev)) { + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + } adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &soc15_pcie_rreg; @@ -1285,7 +1287,7 @@ static int soc15_common_hw_init(void *handle) * for the purpose of expose those registers * to process space */ - if (adev->nbio.funcs->remap_hdp_registers) + if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index d5d023a24269..2d558c2f417d 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -534,6 +534,19 @@ static int uvd_v6_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (RREG32(mmUVD_STATUS) != 0) + uvd_v6_0_stop(adev); + + return 0; +} + +static int uvd_v6_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -558,17 +571,6 @@ static int uvd_v6_0_hw_fini(void *handle) AMD_CG_STATE_GATE); } - if (RREG32(mmUVD_STATUS) != 0) - uvd_v6_0_stop(adev); - - return 0; -} - -static int 
uvd_v6_0_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v6_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 0fffaf859c59..3b119db16003 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -406,7 +406,7 @@ static const struct kfd_device_info aldebaran_device_info = { static const struct kfd_device_info renoir_device_info = { .asic_family = CHIP_RENOIR, .asic_name = "renoir", - .gfx_target_version = 90002, + .gfx_target_version = 90012, .max_pasid_bits = 16, .max_no_of_hqd = 24, .doorbell_size = 8, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 533b27b35fc9..93e33dd84dd4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1226,6 +1226,11 @@ static int stop_cpsch(struct device_queue_manager *dqm) bool hanging; dqm_lock(dqm); + if (!dqm->sched_running) { + dqm_unlock(dqm); + return 0; + } + if (!dqm->is_hws_hang) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); hanging = dqm->is_hws_hang || dqm->is_resetting; @@ -1430,7 +1435,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, if (!dqm->sched_running) return 0; - if (dqm->is_hws_hang) + if (dqm->is_hws_hang || dqm->is_resetting) return -EIO; if (!dqm->active_runlist) return retval; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 2e86692def19..d1388896f9c1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -308,7 +308,7 @@ * 16MB are reserved for kernel use (CWSR trap handler and kernel IB * for now). 
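
The stop_cpsch() hunk above returns early, under the same lock that start/stop take, when the scheduler is already stopped, so calling stop twice no longer tries to unmap queues a second time. A generic sketch of that idempotent-stop shape follows; the struct and function names are invented.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct sched_ctx {
        pthread_mutex_t lock;
        bool running;
    };

    /* Safe to call any number of times: only the first call after start
     * actually tears anything down. */
    static int sched_stop(struct sched_ctx *s)
    {
        pthread_mutex_lock(&s->lock);
        if (!s->running) {
            pthread_mutex_unlock(&s->lock);
            return 0;                   /* already stopped, nothing to do */
        }
        /* ... real teardown (unmap queues, stop the HW scheduler) goes here ... */
        s->running = false;
        pthread_mutex_unlock(&s->lock);
        return 0;
    }

    int main(void)
    {
        struct sched_ctx s = { PTHREAD_MUTEX_INITIALIZER, true };

        sched_stop(&s);
        sched_stop(&s);                 /* second call is a harmless no-op */
        printf("running=%d\n", s.running);
        return 0;
    }
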
*/ -#define SVM_USER_BASE 0x1000000ull +#define SVM_USER_BASE (u64)(KFD_CWSR_TBA_TMA_SIZE + 2*PAGE_SIZE) #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE) #define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index d43bfd8b35ae..9b9c2b9bf2ef 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -281,6 +281,19 @@ static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate) return cpages; } +static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate) +{ + unsigned long upages = 0; + unsigned long i; + + for (i = 0; i < migrate->npages; i++) { + if (migrate->src[i] & MIGRATE_PFN_VALID && + !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) + upages++; + } + return upages; +} + static int svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, @@ -632,10 +645,11 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end) { uint64_t npages = (end - start) >> PAGE_SHIFT; + unsigned long upages = npages; + unsigned long cpages = 0; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate; - unsigned long cpages = 0; dma_addr_t *scratch; size_t size; void *buf; @@ -669,6 +683,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, if (!cpages) { pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n", prange->start, prange->last); + upages = svm_migrate_unsuccessful_pages(&migrate); goto out_free; } if (cpages != npages) @@ -681,8 +696,9 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, scratch, npages); migrate_vma_pages(&migrate); - pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", - svm_migrate_successful_pages(&migrate), cpages, migrate.npages); + upages = svm_migrate_unsuccessful_pages(&migrate); + pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n", + upages, cpages, migrate.npages); svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); @@ -696,9 +712,9 @@ out: if (pdd) WRITE_ONCE(pdd->page_out, pdd->page_out + cpages); - return cpages; + return upages; } - return r; + return r ? 
r : upages; } /** @@ -718,7 +734,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm) unsigned long addr; unsigned long start; unsigned long end; - unsigned long cpages = 0; + unsigned long upages = 0; long r = 0; if (!prange->actual_loc) { @@ -754,12 +770,12 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm) pr_debug("failed %ld to migrate\n", r); break; } else { - cpages += r; + upages += r; } addr = next; } - if (cpages) { + if (!upages) { svm_range_vram_node_free(prange); prange->actual_loc = 0; } @@ -782,7 +798,7 @@ static int svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, struct mm_struct *mm) { - int r; + int r, retries = 3; /* * TODO: for both devices with PCIe large bar or on same xgmi hive, skip @@ -791,9 +807,14 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc); - r = svm_migrate_vram_to_ram(prange, mm); - if (r) - return r; + do { + r = svm_migrate_vram_to_ram(prange, mm); + if (r) + return r; + } while (prange->actual_loc && --retries); + + if (prange->actual_loc) + return -EDEADLK; return svm_migrate_ram_to_vram(prange, best_loc, mm); } @@ -838,6 +859,11 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) pr_debug("failed find process at fault address 0x%lx\n", addr); return VM_FAULT_SIGBUS; } + if (READ_ONCE(p->svms.faulting_task) == current) { + pr_debug("skipping ram migration\n"); + kfd_unref_process(p); + return 0; + } addr >>= PAGE_SHIFT; pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 4104b167e721..8fd48d0ed240 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -766,8 +766,10 @@ struct svm_range_list { struct list_head deferred_range_list; spinlock_t deferred_list_lock; atomic_t evicted_ranges; + atomic_t drain_pagefaults; struct delayed_work restore_work; DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE); + struct task_struct *faulting_task; }; /* Process data */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 457863861d6f..b993011cfa64 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1715,7 +1715,11 @@ int kfd_process_evict_queues(struct kfd_process *p) r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, &pdd->qpd); - if (r) { + /* evict return -EIO if HWS is hang or asic is resetting, in this case + * we would like to set all the queues to be in evicted state to prevent + * them been add back since they actually not be saved right now. 
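
The svm_migrate_vram_to_vram() hunk above retries the VRAM-to-RAM step up to three times and returns -EDEADLK only if memory is still resident in VRAM after that, while hard errors still bail out immediately. The standalone sketch below mirrors that bounded-retry shape; try_once() and the item counts are illustrative.

    #include <errno.h>
    #include <stdio.h>

    /* Pretend each attempt moves at most two of the remaining items; a real
     * migration can likewise leave pages behind without reporting a hard
     * error. */
    static int try_once(int *remaining)
    {
        *remaining = *remaining > 2 ? *remaining - 2 : 0;
        return 0;               /* 0 on success, negative errno on hard failure */
    }

    static int migrate_all(int remaining)
    {
        int retries = 3;
        int r;

        do {
            r = try_once(&remaining);
            if (r)
                return r;       /* hard error: give up immediately */
        } while (remaining && --retries);

        /* Still not finished after the bounded retries: report it rather
         * than looping forever. */
        return remaining ? -EDEADLK : 0;
    }

    int main(void)
    {
        printf("migrate_all(5)  = %d\n", migrate_all(5));   /* 0: finished */
        printf("migrate_all(50) = %d\n", migrate_all(50));  /* -EDEADLK: gave up */
        return 0;
    }
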
+ */ + if (r && r != -EIO) { pr_err("Failed to evict process queues\n"); goto fail; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index b691c8495d66..58b89b53ebe6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1496,9 +1496,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm, next = min(vma->vm_end, end); npages = (next - addr) >> PAGE_SHIFT; + WRITE_ONCE(p->svms.faulting_task, current); r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL, addr, npages, &hmm_range, readonly, true, owner); + WRITE_ONCE(p->svms.faulting_task, NULL); if (r) { pr_debug("failed %d to get svm range pages\n", r); goto unreserve_out; @@ -1966,10 +1968,16 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms) struct kfd_process_device *pdd; struct amdgpu_device *adev; struct kfd_process *p; + int drain; uint32_t i; p = container_of(svms, struct kfd_process, svms); +restart: + drain = atomic_read(&svms->drain_pagefaults); + if (!drain) + return; + for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { pdd = p->pdds[i]; if (!pdd) @@ -1981,6 +1989,8 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms) amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1); pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms); } + if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain) + goto restart; } static void svm_range_deferred_list_work(struct work_struct *work) @@ -1988,35 +1998,41 @@ static void svm_range_deferred_list_work(struct work_struct *work) struct svm_range_list *svms; struct svm_range *prange; struct mm_struct *mm; + struct kfd_process *p; svms = container_of(work, struct svm_range_list, deferred_list_work); pr_debug("enter svms 0x%p\n", svms); + p = container_of(svms, struct kfd_process, svms); + /* Avoid mm is gone when inserting mmu notifier */ + mm = get_task_mm(p->lead_thread); + if (!mm) { + pr_debug("svms 0x%p process mm gone\n", svms); + return; + } +retry: + mmap_write_lock(mm); + + /* Checking for the need to drain retry faults must be inside + * mmap write lock to serialize with munmap notifiers. + */ + if (unlikely(atomic_read(&svms->drain_pagefaults))) { + mmap_write_unlock(mm); + svm_range_drain_retry_fault(svms); + goto retry; + } + spin_lock(&svms->deferred_list_lock); while (!list_empty(&svms->deferred_range_list)) { prange = list_first_entry(&svms->deferred_range_list, struct svm_range, deferred_list); + list_del_init(&prange->deferred_list); spin_unlock(&svms->deferred_list_lock); + pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange, prange->start, prange->last, prange->work_item.op); - /* Make sure no stale retry fault coming after range is freed */ - if (prange->work_item.op == SVM_OP_UNMAP_RANGE) - svm_range_drain_retry_fault(prange->svms); - - mm = prange->work_item.mm; - mmap_write_lock(mm); mutex_lock(&svms->lock); - - /* Remove from deferred_list must be inside mmap write lock, - * otherwise, svm_range_list_lock_and_flush_work may hold mmap - * write lock, and continue because deferred_list is empty, then - * deferred_list handle is blocked by mmap write lock. 
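
In the kfd_svm.c hunks above, the unmap path bumps the new drain_pagefaults counter and svm_range_drain_retry_fault() snapshots it, drains, then tries to swing the counter back to zero with a compare-and-exchange, restarting if another request came in meanwhile. The C11 sketch below models only that request/acknowledge control flow, not the interrupt-ring draining itself.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int drain_requests;

    /* Producer side: ask for (another) drain pass. */
    static void request_drain(void)
    {
        atomic_fetch_add(&drain_requests, 1);
    }

    /* Worker side: keep draining until no new request arrived during the pass. */
    static void drain_worker(void)
    {
        int snap;

    restart:
        snap = atomic_load(&drain_requests);
        if (!snap)
            return;                     /* nobody asked for a drain */

        printf("draining (pending=%d)\n", snap);
        /* ... the actual drain work would go here ... */

        /* Clear the counter only if it still holds the value we drained for;
         * if it changed, another pass was requested, so run again. */
        if (!atomic_compare_exchange_strong(&drain_requests, &snap, 0))
            goto restart;
    }

    int main(void)
    {
        request_drain();
        request_drain();
        drain_worker();                 /* one pass covers both requests */
        return 0;
    }
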
- */ - spin_lock(&svms->deferred_list_lock); - list_del_init(&prange->deferred_list); - spin_unlock(&svms->deferred_list_lock); - mutex_lock(&prange->migrate_mutex); while (!list_empty(&prange->child_list)) { struct svm_range *pchild; @@ -2032,12 +2048,13 @@ static void svm_range_deferred_list_work(struct work_struct *work) svm_range_handle_list_op(svms, prange); mutex_unlock(&svms->lock); - mmap_write_unlock(mm); spin_lock(&svms->deferred_list_lock); } spin_unlock(&svms->deferred_list_lock); + mmap_write_unlock(mm); + mmput(mm); pr_debug("exit svms 0x%p\n", svms); } @@ -2124,6 +2141,12 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms, prange, prange->start, prange->last, start, last); + /* Make sure pending page faults are drained in the deferred worker + * before the range is freed to avoid straggler interrupts on + * unmapped memory causing "phantom faults". + */ + atomic_inc(&svms->drain_pagefaults); + unmap_parent = start <= prange->start && last >= prange->last; list_for_each_entry(pchild, &prange->child_list, child_list) { @@ -2261,7 +2284,7 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, * migration if actual loc is not best location, then update GPU page table * mapping to the best location. * - * If vm fault gpu is range preferred loc, the best_loc is preferred loc. + * If the preferred loc is accessible by faulting GPU, use preferred loc. * If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then * if range actual loc is cpu, best_loc is cpu @@ -2278,7 +2301,7 @@ svm_range_best_restore_location(struct svm_range *prange, struct amdgpu_device *adev, int32_t *gpuidx) { - struct amdgpu_device *bo_adev; + struct amdgpu_device *bo_adev, *preferred_adev; struct kfd_process *p; uint32_t gpuid; int r; @@ -2291,8 +2314,16 @@ svm_range_best_restore_location(struct svm_range *prange, return -1; } - if (prange->preferred_loc == gpuid) + if (prange->preferred_loc == gpuid || + prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) { return prange->preferred_loc; + } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) { + preferred_adev = svm_range_get_adev_by_id(prange, + prange->preferred_loc); + if (amdgpu_xgmi_same_hive(adev, preferred_adev)) + return prange->preferred_loc; + /* fall through */ + } if (test_bit(*gpuidx, prange->bitmap_access)) return gpuid; @@ -2313,7 +2344,8 @@ svm_range_best_restore_location(struct svm_range *prange, static int svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, - unsigned long *start, unsigned long *last) + unsigned long *start, unsigned long *last, + bool *is_heap_stack) { struct vm_area_struct *vma; struct interval_tree_node *node; @@ -2324,6 +2356,12 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, pr_debug("VMA does not exist in address [0x%llx]\n", addr); return -EFAULT; } + + *is_heap_stack = (vma->vm_start <= vma->vm_mm->brk && + vma->vm_end >= vma->vm_mm->start_brk) || + (vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack); + start_limit = max(vma->vm_start >> PAGE_SHIFT, (unsigned long)ALIGN_DOWN(addr, 2UL << 8)); end_limit = min(vma->vm_end >> PAGE_SHIFT, @@ -2353,9 +2391,9 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, *start = start_limit; *last = end_limit - 1; - pr_debug("vma start: 0x%lx start: 0x%lx vma end: 0x%lx last: 
0x%lx\n", - vma->vm_start >> PAGE_SHIFT, *start, - vma->vm_end >> PAGE_SHIFT, *last); + pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n", + vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT, + *start, *last, *is_heap_stack); return 0; } @@ -2420,11 +2458,13 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, struct svm_range *prange = NULL; unsigned long start, last; uint32_t gpuid, gpuidx; + bool is_heap_stack; uint64_t bo_s = 0; uint64_t bo_l = 0; int r; - if (svm_range_get_range_boundaries(p, addr, &start, &last)) + if (svm_range_get_range_boundaries(p, addr, &start, &last, + &is_heap_stack)) return NULL; r = svm_range_check_vm(p, start, last, &bo_s, &bo_l); @@ -2451,6 +2491,9 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, return NULL; } + if (is_heap_stack) + prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM; + svm_range_add_to_svms(prange); svm_range_add_notifier_locked(mm, prange); @@ -2523,20 +2566,13 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, } static bool -svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault) +svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) { unsigned long requested = VM_READ; - struct vm_area_struct *vma; if (write_fault) requested |= VM_WRITE; - vma = find_vma(mm, addr << PAGE_SHIFT); - if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) { - pr_debug("address 0x%llx VMA is removed\n", addr); - return true; - } - pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested, vma->vm_flags); return (vma->vm_flags & requested) == requested; @@ -2554,6 +2590,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, int32_t best_loc; int32_t gpuidx = MAX_GPU_INSTANCE; bool write_locked = false; + struct vm_area_struct *vma; int r = 0; if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) { @@ -2564,7 +2601,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, p = kfd_lookup_process_by_pasid(pasid); if (!p) { pr_debug("kfd process not founded pasid 0x%x\n", pasid); - return -ESRCH; + return 0; } if (!p->xnack_enabled) { pr_debug("XNACK not enabled for pasid 0x%x\n", pasid); @@ -2575,10 +2612,17 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr); + if (atomic_read(&svms->drain_pagefaults)) { + pr_debug("draining retry fault, drop fault 0x%llx\n", addr); + goto out; + } + + /* p->lead_thread is available as kfd_process_wq_release flush the work + * before releasing task ref. + */ mm = get_task_mm(p->lead_thread); if (!mm) { pr_debug("svms 0x%p failed to get mm\n", svms); - r = -ESRCH; goto out; } @@ -2627,7 +2671,17 @@ retry_write_locked: goto out_unlock_range; } - if (!svm_fault_allowed(mm, addr, write_fault)) { + /* __do_munmap removed VMA, return success as we are handling stale + * retry fault. + */ + vma = find_vma(mm, addr << PAGE_SHIFT); + if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) { + pr_debug("address 0x%llx VMA is removed\n", addr); + r = 0; + goto out_unlock_range; + } + + if (!svm_fault_allowed(vma, write_fault)) { pr_debug("fault addr 0x%llx no %s permission\n", addr, write_fault ? 
"write" : "read"); r = -EPERM; @@ -2705,6 +2759,14 @@ void svm_range_list_fini(struct kfd_process *p) /* Ensure list work is finished before process is destroyed */ flush_work(&p->svms.deferred_list_work); + /* + * Ensure no retry fault comes in afterwards, as page fault handler will + * not find kfd process and take mm lock to recover fault. + */ + atomic_inc(&p->svms.drain_pagefaults); + svm_range_drain_retry_fault(&p->svms); + + list_for_each_entry_safe(prange, next, &p->svms.list, list) { svm_range_unlink(prange); svm_range_remove_notifier(prange); @@ -2725,6 +2787,7 @@ int svm_range_list_init(struct kfd_process *p) mutex_init(&svms->lock); INIT_LIST_HEAD(&svms->list); atomic_set(&svms->evicted_ranges, 0); + atomic_set(&svms->drain_pagefaults, 0); INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work); INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work); INIT_LIST_HEAD(&svms->deferred_range_list); @@ -3076,6 +3139,8 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) struct svm_range *prange = list_first_entry(&svm_bo->range_list, struct svm_range, svm_bo_list); + int retries = 3; + list_del_init(&prange->svm_bo_list); spin_unlock(&svm_bo->list_lock); @@ -3083,7 +3148,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) prange->start, prange->last); mutex_lock(&prange->migrate_mutex); - svm_migrate_vram_to_ram(prange, svm_bo->eviction_fence->mm); + do { + svm_migrate_vram_to_ram(prange, + svm_bo->eviction_fence->mm); + } while (prange->actual_loc && --retries); + WARN(prange->actual_loc, "Migration failed during eviction"); mutex_lock(&prange->lock); prange->svm_bo = NULL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 43e983e42c0f..1cd6b9f4a568 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -51,6 +51,7 @@ #include <drm/drm_hdcp.h> #endif #include "amdgpu_pm.h" +#include "amdgpu_atombios.h" #include "amd_shared.h" #include "amdgpu_dm_irq.h" @@ -217,6 +218,7 @@ static const struct drm_format_info * amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); +static void handle_hpd_rx_irq(void *param); static bool is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, @@ -619,7 +621,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base); } -#endif +#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ /** * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command. 
@@ -669,10 +671,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not return; } - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - link_index = notify->link_index; - link = adev->dm.dc->links[link_index]; drm_connector_list_iter_begin(dev, &iter); @@ -685,10 +684,13 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not } } drm_connector_list_iter_end(&iter); - drm_modeset_unlock(&dev->mode_config.connection_mutex); - if (hpd_aconnector) - handle_hpd_irq_helper(hpd_aconnector); + if (hpd_aconnector) { + if (notify->type == DMUB_NOTIFICATION_HPD) + handle_hpd_irq_helper(hpd_aconnector); + else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) + handle_hpd_rx_irq(hpd_aconnector); + } } /** @@ -764,6 +766,10 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) DRM_ERROR("DM: notify type %d invalid!", notify.type); continue; } + if (!dm->dmub_callback[notify.type]) { + DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type); + continue; + } if (dm->dmub_thread_offload[notify.type] == true) { dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); if (!dmub_hpd_wrk) { @@ -813,7 +819,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) if (count > DMUB_TRACE_MAX_READ) DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); } -#endif +#endif /* CONFIG_DRM_AMD_DC_DCN */ static int dm_set_clockgating_state(void *handle, enum amd_clockgating_state state) @@ -1410,7 +1416,15 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(2, 1, 0): init_data.flags.gpu_vm_support = true; - init_data.flags.disable_dmcu = true; + switch (adev->dm.dmcub_fw_version) { + case 0: /* development */ + case 0x1: /* linux-firmware.git hash 6d9f399 */ + case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ + init_data.flags.disable_dmcu = false; + break; + default: + init_data.flags.disable_dmcu = true; + } break; case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): @@ -1556,7 +1570,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu: fail to register dmub hpd callback"); goto error; } -#endif + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + goto error; + } +#endif /* CONFIG_DRM_AMD_DC_DCN */ } if (amdgpu_dm_initialize_drm_device(adev)) { @@ -2544,6 +2562,22 @@ static int dm_resume(void *handle) if (amdgpu_in_reset(adev)) { dc_state = dm->cached_dc_state; + /* + * The dc->current_state is backed up into dm->cached_dc_state + * before we commit 0 streams. + * + * DC will clear link encoder assignments on the real state + * but the changes won't propagate over to the copy we made + * before the 0 streams commit. + * + * DC expects that link encoder assignments are *not* valid + * when committing a state, so as a workaround it needs to be + * cleared here. 
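
The outbox IRQ hunk above skips notification types that have no registered handler, and dmub_hpd_callback() now routes to the HPD or HPD-RX helper based on notify->type. A minimal table-driven dispatch with the same missing-handler guard is sketched below; the type names and handlers are invented.

    #include <stdio.h>

    enum notify_type { NOTIFY_HPD, NOTIFY_HPD_IRQ, NOTIFY_AUX_REPLY, NOTIFY_MAX };

    typedef void (*notify_handler)(int payload);

    static void handle_hpd(int payload)     { printf("hpd %d\n", payload); }
    static void handle_hpd_irq(int payload) { printf("hpd_irq %d\n", payload); }

    /* NOTIFY_AUX_REPLY deliberately has no handler registered. */
    static notify_handler handlers[NOTIFY_MAX] = {
        [NOTIFY_HPD]     = handle_hpd,
        [NOTIFY_HPD_IRQ] = handle_hpd_irq,
    };

    static void dispatch(enum notify_type type, int payload)
    {
        if (type >= NOTIFY_MAX) {
            fprintf(stderr, "invalid notification type %d\n", type);
            return;
        }
        if (!handlers[type]) {
            /* No handler registered: skip instead of calling a NULL pointer. */
            fprintf(stderr, "notification %d skipped, no handler\n", type);
            return;
        }
        handlers[type](payload);
    }

    int main(void)
    {
        dispatch(NOTIFY_HPD, 1);
        dispatch(NOTIFY_AUX_REPLY, 2);  /* skipped, not a crash */
        return 0;
    }
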
+ */ + link_enc_cfg_init(dm->dc, dc_state); + + amdgpu_dm_outbox_init(adev); + r = dm_dmub_hw_init(adev); if (r) DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -2555,8 +2589,8 @@ static int dm_resume(void *handle) for (i = 0; i < dc_state->stream_count; i++) { dc_state->streams[i]->mode_changed = true; - for (j = 0; j < dc_state->stream_status->plane_count; j++) { - dc_state->stream_status->plane_states[j]->update_flags.raw + for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { + dc_state->stream_status[i].plane_states[j]->update_flags.raw = 0xffffffff; } } @@ -3892,6 +3926,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, caps = dm->backlight_caps[bl_idx]; dm->brightness[bl_idx] = user_brightness; + /* update scratch register */ + if (bl_idx == 0) + amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); link = (struct dc_link *)dm->backlight_link[bl_idx]; @@ -4225,7 +4262,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); - + if (dm->num_of_edps) + update_connector_ext_caps(aconnector); if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); } @@ -4565,7 +4603,8 @@ static void get_min_max_dc_plane_scaling(struct drm_device *dev, } -static int fill_dc_scaling_info(const struct drm_plane_state *state, +static int fill_dc_scaling_info(struct amdgpu_device *adev, + const struct drm_plane_state *state, struct dc_scaling_info *scaling_info) { int scale_w, scale_h, min_downscale, max_upscale; @@ -4579,7 +4618,8 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state, /* * For reasons we don't (yet) fully understand a non-zero * src_y coordinate into an NV12 buffer can cause a - * system hang. To avoid hangs (and maybe be overly cautious) + * system hang on DCN1x. + * To avoid hangs (and maybe be overly cautious) * let's reject both non-zero src_x and src_y. * * We currently know of only one use-case to reproduce a @@ -4587,10 +4627,10 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state, * is to gesture the YouTube Android app into full screen * on ChromeOS. 
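
The dm_resume() hunk above fixes an indexing slip: dc_state->stream_status->plane_count always reads element 0, while dc_state->stream_status[i].plane_count reads the status that belongs to stream i. The toy program below (invented struct, same pointer arithmetic) reproduces the difference.

    #include <stdio.h>

    struct stream_status { int plane_count; };

    int main(void)
    {
        struct stream_status status[3] = { {1}, {2}, {3} };
        struct stream_status *p = status;   /* the array seen through a pointer */
        int i;

        for (i = 0; i < 3; i++) {
            int buggy = p->plane_count;     /* p->x is p[0].x, whatever i is */
            int fixed = p[i].plane_count;   /* the element for this iteration */

            printf("i=%d buggy=%d fixed=%d\n", i, buggy, fixed);
        }
        return 0;
    }
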
*/ - if (state->fb && - state->fb->format->format == DRM_FORMAT_NV12 && - (scaling_info->src_rect.x != 0 || - scaling_info->src_rect.y != 0)) + if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || + (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && + (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && + (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) return -EINVAL; scaling_info->src_rect.width = state->src_w >> 16; @@ -5496,7 +5536,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, int ret; bool force_disable_dcc = false; - ret = fill_dc_scaling_info(plane_state, &scaling_info); + ret = fill_dc_scaling_info(adev, plane_state, &scaling_info); if (ret) return ret; @@ -6070,7 +6110,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; } -#endif +#endif /* CONFIG_DRM_AMD_DC_DCN */ /** * DOC: FreeSync Video @@ -7241,8 +7281,8 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, struct drm_connector_state *new_con_state; struct amdgpu_dm_connector *aconnector; struct dm_connector_state *dm_conn_state; - int i, j, clock; - int vcpi, pbn_div, pbn = 0; + int i, j; + int vcpi, pbn_div, pbn, slot_num = 0; for_each_new_connector_in_state(state, connector, new_con_state, i) { @@ -7270,17 +7310,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, if (!stream) continue; - if (stream->timing.flags.DSC != 1) { - drm_dp_mst_atomic_enable_dsc(state, - aconnector->port, - dm_conn_state->pbn, - 0, - false); - continue; - } - pbn_div = dm_mst_get_pbn_divider(stream->link); - clock = stream->timing.pix_clk_100hz / 10; /* pbn is calculated by compute_mst_dsc_configs_for_state*/ for (j = 0; j < dc_state->stream_count; j++) { if (vars[j].aconnector == aconnector) { @@ -7289,6 +7319,23 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, } } + if (j == dc_state->stream_count) + continue; + + slot_num = DIV_ROUND_UP(pbn, pbn_div); + + if (stream->timing.flags.DSC != 1) { + dm_conn_state->pbn = pbn; + dm_conn_state->vcpi_slots = slot_num; + + drm_dp_mst_atomic_enable_dsc(state, + aconnector->port, + dm_conn_state->pbn, + 0, + false); + continue; + } + vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, pbn_div, @@ -7552,7 +7599,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane, if (ret) return ret; - ret = fill_dc_scaling_info(new_plane_state, &scaling_info); + ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info); if (ret) return ret; @@ -9000,7 +9047,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; } - fill_dc_scaling_info(new_plane_state, + fill_dc_scaling_info(dm->adev, new_plane_state, &bundle->scaling_infos[planes_count]); bundle->surface_updates[planes_count].scaling_info = @@ -10787,7 +10834,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ret = drm_atomic_add_affected_connectors(state, crtc); if (ret) - return ret; + goto fail; ret = drm_atomic_add_affected_planes(state, crtc); if (ret) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 3655663e079b..9d43ecb1f692 100644 --- 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -78,12 +78,10 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size, wr_buf_ptr = wr_buf; - r = copy_from_user(wr_buf_ptr, buf, wr_buf_size); - - /* r is bytes not be copied */ - if (r >= wr_buf_size) { - DRM_DEBUG_DRIVER("user data not be read\n"); - return -EINVAL; + /* r is bytes not be copied */ + if (copy_from_user(wr_buf_ptr, buf, wr_buf_size)) { + DRM_DEBUG_DRIVER("user data could not be read successfully\n"); + return -EFAULT; } /* check number of parameters. isspace could not differ space and \n */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 874a49b605c7..32a5ce09a62a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -534,13 +534,14 @@ static int kbps_to_peak_pbn(int kbps) static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, - int count) + int count, + int k) { int i; for (i = 0; i < count; i++) { memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); - if (vars[i].dsc_enabled && dc_dsc_compute_config( + if (vars[i + k].dsc_enabled && dc_dsc_compute_config( params[i].sink->ctx->dc->res_pool->dscs[0], ¶ms[i].sink->dsc_caps.dsc_dec_caps, params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, @@ -553,7 +554,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p if (params[i].bpp_overwrite) params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite; else - params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16; + params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16; if (params[i].num_slices_h) params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h; @@ -586,7 +587,8 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, - int count) + int count, + int k) { int i; bool bpp_increased[MAX_PIPES]; @@ -601,8 +603,9 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link); for (i = 0; i < count; i++) { - if (vars[i].dsc_enabled) { - initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn; + if (vars[i + k].dsc_enabled) { + initial_slack[i] = + kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn; bpp_increased[i] = false; remaining_to_increase += 1; } else { @@ -629,7 +632,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, link_timeslots_used = 0; for (i = 0; i < count; i++) - link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot); + link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot); fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot; @@ -682,7 +685,8 @@ static void try_disable_dsc(struct drm_atomic_state *state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, - int count) + int count, + int k) { int i; bool tried[MAX_PIPES]; @@ -692,8 +696,8 @@ static void try_disable_dsc(struct drm_atomic_state *state, int remaining_to_try = 0; for (i = 0; i < count; i++) { - if (vars[i].dsc_enabled - && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16 + if (vars[i + 
k].dsc_enabled + && vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16 && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) { kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; tried[i] = false; @@ -748,9 +752,10 @@ static void try_disable_dsc(struct drm_atomic_state *state, static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, struct dc_state *dc_state, struct dc_link *dc_link, - struct dsc_mst_fairness_vars *vars) + struct dsc_mst_fairness_vars *vars, + int *link_vars_start_index) { - int i; + int i, k; struct dc_stream_state *stream; struct dsc_mst_fairness_params params[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; @@ -768,11 +773,17 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (stream->link != dc_link) continue; + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + if (!aconnector) + continue; + + if (!aconnector->port) + continue; + stream->timing.flags.DSC = 0; params[count].timing = &stream->timing; params[count].sink = stream->sink; - aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; params[count].aconnector = aconnector; params[count].port = aconnector->port; params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; @@ -794,44 +805,55 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, count++; } + + if (count == 0) { + ASSERT(0); + return true; + } + + /* k is start index of vars for current phy link used by mst hub */ + k = *link_vars_start_index; + /* set vars start index for next mst hub phy link */ + *link_vars_start_index += count; + /* Try no compression */ for (i = 0; i < count; i++) { - vars[i].aconnector = params[i].aconnector; - vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); - vars[i].dsc_enabled = false; - vars[i].bpp_x16 = 0; + vars[i + k].aconnector = params[i].aconnector; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); + vars[i + k].dsc_enabled = false; + vars[i + k].bpp_x16 = 0; if (drm_dp_atomic_find_vcpi_slots(state, params[i].port->mgr, params[i].port, - vars[i].pbn, + vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return false; } if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { - set_dsc_configs_from_fairness_vars(params, vars, count); + set_dsc_configs_from_fairness_vars(params, vars, count, k); return true; } /* Try max compression */ for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { - vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); - vars[i].dsc_enabled = true; - vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); + vars[i + k].dsc_enabled = true; + vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; if (drm_dp_atomic_find_vcpi_slots(state, params[i].port->mgr, params[i].port, - vars[i].pbn, + vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return false; } else { - vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); - vars[i].dsc_enabled = false; - vars[i].bpp_x16 = 0; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); + vars[i + k].dsc_enabled = false; + vars[i + k].bpp_x16 = 0; if (drm_dp_atomic_find_vcpi_slots(state, params[i].port->mgr, params[i].port, - vars[i].pbn, + vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return false; } @@ -840,15 +862,76 @@ static bool 
compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return false; /* Optimize degree of compression */ - increase_dsc_bpp(state, dc_link, params, vars, count); + increase_dsc_bpp(state, dc_link, params, vars, count, k); - try_disable_dsc(state, dc_link, params, vars, count); + try_disable_dsc(state, dc_link, params, vars, count, k); - set_dsc_configs_from_fairness_vars(params, vars, count); + set_dsc_configs_from_fairness_vars(params, vars, count, k); return true; } +static bool is_dsc_need_re_compute( + struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dc_link *dc_link) +{ + int i; + bool is_dsc_need_re_compute = false; + + /* only check phy used by mst branch */ + if (dc_link->type != dc_connection_mst_branch) + return false; + + /* check if there is mode change in new request */ + for (i = 0; i < dc_state->stream_count; i++) { + struct amdgpu_dm_connector *aconnector; + struct dc_stream_state *stream; + struct drm_crtc_state *new_crtc_state; + struct drm_connector_state *new_conn_state; + + stream = dc_state->streams[i]; + + if (!stream) + continue; + + /* check if stream using the same link for mst */ + if (stream->link != dc_link) + continue; + + aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; + if (!aconnector) + continue; + + new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); + + if (!new_conn_state) + continue; + + if (IS_ERR(new_conn_state)) + continue; + + if (!new_conn_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + + if (!new_crtc_state) + continue; + + if (IS_ERR(new_crtc_state)) + continue; + + if (new_crtc_state->enable && new_crtc_state->active) { + if (new_crtc_state->mode_changed || new_crtc_state->active_changed || + new_crtc_state->connectors_changed) + is_dsc_need_re_compute = true; + } + } + + return is_dsc_need_re_compute; +} + bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, struct dc_state *dc_state, struct dsc_mst_fairness_vars *vars) @@ -857,6 +940,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, struct dc_stream_state *stream; bool computed_streams[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; + int link_vars_start_index = 0; for (i = 0; i < dc_state->stream_count; i++) computed_streams[i] = false; @@ -881,8 +965,12 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) return false; + if (!is_dsc_need_re_compute(state, dc_state, stream->link)) + continue; + mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) { + if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, + vars, &link_vars_start_index)) { mutex_unlock(&aconnector->mst_mgr.lock); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 12e5470fa567..0ded4decee05 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1085,6 +1085,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) struct dc_stream_state *old_stream = dc->current_state->res_ctx.pipe_ctx[i].stream; bool should_disable = true; + bool pipe_split_change = + context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe; for (j = 0; j < context->stream_count; j++) { if (old_stream == context->streams[j]) { @@ 
-1092,6 +1094,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) break; } } + if (!should_disable && pipe_split_change) + should_disable = true; + if (should_disable && old_stream) { dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); @@ -1887,6 +1892,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) return false; } +#ifdef CONFIG_DRM_AMD_DC_DCN /* Perform updates here which need to be deferred until next vupdate * * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered @@ -1896,7 +1902,6 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) */ static void process_deferred_updates(struct dc *dc) { -#ifdef CONFIG_DRM_AMD_DC_DCN int i = 0; if (dc->debug.enable_mem_low_power.bits.cm) { @@ -1905,8 +1910,8 @@ static void process_deferred_updates(struct dc *dc) if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); } -#endif } +#endif /* CONFIG_DRM_AMD_DC_DCN */ void dc_post_update_surfaces_to_stream(struct dc *dc) { @@ -1933,7 +1938,9 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); } +#ifdef CONFIG_DRM_AMD_DC_DCN process_deferred_updates(dc); +#endif dc->hwss.optimize_bandwidth(dc, context); @@ -3603,7 +3610,8 @@ bool dc_enable_dmub_notifications(struct dc *dc) #if defined(CONFIG_DRM_AMD_DC_DCN) /* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */ if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && - dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) + dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && + !dc->debug.dpia_debug.bits.disable_dpia) return true; #endif /* dmub aux needs dmub notifications to be enabled */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 2796bdd17de1..60544788e911 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -4279,6 +4279,8 @@ void core_link_enable_stream( */ if (status != DC_FAIL_DP_LINK_TRAINING || pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (false == stream->link->link_status.link_active) + disable_link(stream->link, pipe_ctx->stream->signal); BREAK_TO_DEBUGGER(); return; } @@ -4768,7 +4770,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing( timing->dsc_cfg.bits_per_pixel, timing->dsc_cfg.num_slices_h, timing->dsc_cfg.is_dp); -#endif +#endif /* CONFIG_DRM_AMD_DC_DCN */ switch (timing->display_color_depth) { case COLOR_DEPTH_666: diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index cc25ba0ec7db..cb7bf9148904 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -5329,6 +5329,14 @@ bool dc_link_dp_set_test_pattern( return false; if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) + core_link_write_dpcd(link, + DP_LINK_SQUARE_PATTERN, + p_custom_pattern, + 1); + +#endif /* tell receiver that we are sending qualification * pattern DP 1.2 or later - DP receiver's link quality * pattern is set using DPCD LINK_QUAL_LANEx_SET diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 
72b0f8594b4a..25e48a8cbb78 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -236,6 +236,23 @@ static struct link_encoder *get_link_enc_used_by_link( return link_enc; } +/* Clear all link encoder assignments. */ +static void clear_enc_assignments(struct dc_state *state) +{ + int i; + enum engine_id eng_id; + struct dc_stream_state *stream; + + for (i = 0; i < MAX_PIPES; i++) { + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false; + eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id; + stream = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream; + if (eng_id != ENGINE_ID_UNKNOWN) + state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_id - ENGINE_ID_DIGA] = eng_id; + if (stream) + stream->link_enc = NULL; + } +} void link_enc_cfg_init( struct dc *dc, @@ -250,6 +267,8 @@ void link_enc_cfg_init( state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN; } + clear_enc_assignments(state); + state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; } @@ -265,6 +284,9 @@ void link_enc_cfg_link_encs_assign( ASSERT(state->stream_count == stream_count); + if (stream_count == 0) + clear_enc_assignments(state); + /* Release DIG link encoder resources before running assignment algorithm. */ for (i = 0; i < stream_count; i++) dc->res_pool->funcs->link_enc_unassign(state, streams[i]); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index a5339796902a..3aac3f4a2852 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.159" +#define DC_VER "3.2.160" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -675,6 +675,7 @@ struct dc_debug_options { #endif union mem_low_power_enable_options enable_mem_low_power; union root_clock_optimization_options root_clock_optimization; + bool hpo_optimization; bool force_vblank_alignment; /* Enable dmub aux for legacy ddc */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index bc87ea0adf94..e68e9a86a4d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -898,6 +898,9 @@ struct dpcd_usb4_dp_tunneling_info { #ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT #define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3 #endif +#ifndef DP_LINK_SQUARE_PATTERN +#define DP_LINK_SQUARE_PATTERN 0x10F +#endif #ifndef DP_DSC_CONFIGURATION #define DP_DSC_CONFIGURATION 0x161 #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index 989f5b6907e2..a3fee929cd12 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -671,6 +671,7 @@ struct dce_hwseq_registers { uint32_t MC_VM_FB_LOCATION_BASE; uint32_t MC_VM_FB_LOCATION_TOP; uint32_t MC_VM_FB_OFFSET; + uint32_t HPO_TOP_HW_CONTROL; }; /* set field name */ #define HWS_SF(blk_name, reg_name, field_name, post_fix)\ @@ -1152,7 +1153,8 @@ struct dce_hwseq_registers { type DOMAIN_PGFSM_PWR_STATUS;\ type HPO_HDMISTREAMCLK_G_GATE_DIS;\ type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;\ - type I2C_LIGHT_SLEEP_FORCE; + type I2C_LIGHT_SLEEP_FORCE;\ + type HPO_IO_EN; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index af3e68d3e747..24e47df526f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1244,6 +1244,12 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) #endif if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE); + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dc->hwseq->funcs.setup_hpo_hw_control && is_dp_128b_132b_signal(pipe_ctx)) + dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, false); +#endif + } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a25732d07222..04d7bddc915b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -231,7 +231,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) if (!s->blank_en) DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" - "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" + " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start, dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler, @@ -1637,7 +1637,7 @@ void dcn10_reset_hw_ctx_wrap( dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index cfee456c6c9a..e6af99ae3d9f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2270,7 +2270,7 @@ void dcn20_reset_hw_ctx_wrap( dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -2397,6 +2397,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) * BY this, it is logic clean to separate stream and link */ if (is_dp_128b_132b_signal(pipe_ctx)) { + if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control) + pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control( + pipe_ctx->stream->ctx->dc->hwseq, true); setup_dp_hpo_stream(pipe_ctx, true); pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream( pipe_ctx->stream_res.hpo_dp_stream_enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index a82319f4d081..95149734378b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -1381,13 +1381,11 @@ int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id) } -static void mpc3_mpc_init(struct mpc *mpc) +static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc) { struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); int mpcc_id; - mpc1_mpc_init(mpc); - if 
(mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { if (mpc30->mpc_mask->MPC_RMU0_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPC_RMU1_MEM_LOW_PWR_MODE) { REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, 3); @@ -1405,7 +1403,7 @@ const struct mpc_funcs dcn30_mpc_funcs = { .read_mpcc_state = mpc1_read_mpcc_state, .insert_plane = mpc1_insert_plane, .remove_mpcc = mpc1_remove_mpcc, - .mpc_init = mpc3_mpc_init, + .mpc_init = mpc1_mpc_init, .mpc_init_single_inst = mpc1_mpc_init_single_inst, .update_blending = mpc2_update_blending, .cursor_lock = mpc1_cursor_lock, @@ -1432,6 +1430,7 @@ const struct mpc_funcs dcn30_mpc_funcs = { .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, .set_bg_color = mpc1_set_bg_color, + .set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode, }; void dcn30_mpc_construct(struct dcn30_mpc *mpc30, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index e50c695e3c96..79a66e0c4303 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -2128,10 +2128,10 @@ static noinline void dcn30_calculate_wm_and_dlg_fp( int pipe_cnt, int vlevel) { + int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; int i, pipe_idx; - double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; - bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != - dm_dram_clock_change_unsupported; + double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb]; + bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported; if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk) dcfclk = context->bw_ctx.dml.soc.min_dcfclk; @@ -2207,6 +2207,7 @@ static noinline void dcn30_calculate_wm_and_dlg_fp( context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us; context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us; } + context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index d24ad7754d71..4d4ac4ceb1e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -66,6 +66,45 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name +static void enable_memory_low_power(struct dc *dc) +{ + struct dce_hwseq *hws = dc->hwseq; + int i; + + if (dc->debug.enable_mem_low_power.bits.dmcu) { + // Force ERAM to shutdown if DMCU is not enabled + if (dc->debug.disable_dmcu || dc->config.disable_dmcu) { + REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3); + } + } + + // Set default OPTC memory power states + if (dc->debug.enable_mem_low_power.bits.optc) { + // Shutdown when unassigned and light sleep in VBLANK + REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); + } + + if 
(dc->debug.enable_mem_low_power.bits.vga) { + // Power down VGA memory + REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); + } + + if (dc->debug.enable_mem_low_power.bits.mpc) + dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc); + + + if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) { + // Power down VPGs + for (i = 0; i < dc->res_pool->stream_enc_count; i++) + dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); +#if defined(CONFIG_DRM_AMD_DC_DCN) + for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) + dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); +#endif + } + +} + void dcn31_init_hw(struct dc *dc) { struct abm **abms = dc->res_pool->multiple_abms; @@ -108,35 +147,7 @@ void dcn31_init_hw(struct dc *dc) if (res_pool->dccg->funcs->dccg_init) res_pool->dccg->funcs->dccg_init(res_pool->dccg); - if (dc->debug.enable_mem_low_power.bits.dmcu) { - // Force ERAM to shutdown if DMCU is not enabled - if (dc->debug.disable_dmcu || dc->config.disable_dmcu) { - REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3); - } - } - - // Set default OPTC memory power states - if (dc->debug.enable_mem_low_power.bits.optc) { - // Shutdown when unassigned and light sleep in VBLANK - REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); - } - - if (dc->debug.enable_mem_low_power.bits.vga) { - // Power down VGA memory - REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); - } - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) { - // Power down VPGs - for (i = 0; i < dc->res_pool->stream_enc_count; i++) - dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); -#if defined(CONFIG_DRM_AMD_DC_DP2_0) - for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) - dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); -#endif - } -#endif + enable_memory_low_power(dc); if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = @@ -264,6 +275,9 @@ void dcn31_init_hw(struct dc *dc) if (dc->debug.enable_mem_low_power.bits.i2c) REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1); + if (hws->funcs.setup_hpo_hw_control) + hws->funcs.setup_hpo_hw_control(hws, false); + if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); @@ -588,7 +602,7 @@ void dcn31_reset_hw_ctx_wrap( dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -597,3 +611,9 @@ void dcn31_reset_hw_ctx_wrap( /* New dc_state in the process of being applied to hardware. 
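A small illustrative sketch (hypothetical ops table, not the driver's real hwseq structures) of the guarded-hook pattern used by the dce110_disable_stream() and dcn31_init_hw() hunks above: per-ASIC function tables leave unsupported hooks NULL, so call sites test the pointer before invoking it.

#include <stdbool.h>

struct example_hwseq_funcs {
	void (*setup_hpo_hw_control)(void *hws, bool enable);
};

static void example_disable_hpo(const struct example_hwseq_funcs *funcs, void *hws)
{
	/* ASICs that never populate the hook simply skip this step. */
	if (funcs->setup_hpo_hw_control)
		funcs->setup_hpo_hw_control(hws, false);
}

The DCN3.1 table is the one that wires this hook up (dcn31_setup_hpo_hw_control(), added further down), so existing sequencer tables need no changes.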
*/ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_TRANSIENT; } + +void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable) +{ + if (hws->ctx->dc->debug.hpo_optimization) + REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h index 7ae45dd202d9..edfc01d6ad73 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h @@ -54,5 +54,6 @@ void dcn31_reset_hw_ctx_wrap( bool dcn31_is_abm_supported(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); void dcn31_init_pipes(struct dc *dc, struct dc_state *context); +void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); #endif /* __DC_HWSS_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index c6a737781ad1..05335a8c3c2d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -137,6 +137,7 @@ static const struct hwseq_private_funcs dcn31_private_funcs = { .dccg_init = dcn20_dccg_init, .set_blend_lut = dcn30_set_blend_lut, .set_shaper_3dlut = dcn20_set_shaper_3dlut, + .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, }; void dcn31_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 87b2c2428842..18896294ae12 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -860,7 +860,8 @@ static const struct dccg_mask dccg_mask = { SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING) + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_HW_CONTROL) static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN31_REG_LIST() @@ -898,7 +899,8 @@ static const struct dce_hwseq_registers hwseq_reg = { HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ - HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ + HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index e3d9f1decdfc..f47d82da115c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -3576,16 +3576,9 @@ static double TruncToValidBPP( MinDSCBPP = 8; MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16; } else { - if (Output == dm_hdmi) { - NonDSCBPP0 = 24; - NonDSCBPP1 = 24; - NonDSCBPP2 = 24; - } - else { - NonDSCBPP0 = 16; - NonDSCBPP1 = 20; - NonDSCBPP2 = 24; - } + NonDSCBPP0 = 16; + NonDSCBPP1 = 20; + NonDSCBPP2 = 24; if (Format == dm_n422) { MinDSCBPP = 7; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index d58925cff420..7e937bdcea00 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ 
b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -3892,15 +3892,11 @@ static double TruncToValidBPP( MinDSCBPP = 8; MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16; } else { - if (Output == dm_hdmi) { - NonDSCBPP0 = 24; - NonDSCBPP1 = 24; - NonDSCBPP2 = 24; - } else { - NonDSCBPP0 = 16; - NonDSCBPP1 = 20; - NonDSCBPP2 = 24; - } + + NonDSCBPP0 = 16; + NonDSCBPP1 = 20; + NonDSCBPP2 = 24; + if (Format == dm_n422) { MinDSCBPP = 7; MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 04d6ec3f021f..f5fd2a067323 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -367,6 +367,7 @@ struct mpc_funcs { void (*set_bg_color)(struct mpc *mpc, struct tg_color *bg_color, int mpcc_id); + void (*set_mpc_mem_lp_mode)(struct mpc *mpc); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index f324285394be..c2008258c50a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -143,6 +143,7 @@ struct hwseq_private_funcs { const struct dc_plane_state *plane_state); void (*PLAT_58856_wa)(struct dc_state *context, struct pipe_ctx *pipe_ctx); + void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 717c0e572d2f..cd204eef073b 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -238,6 +238,7 @@ struct dmub_srv_hw_params { bool load_inst_const; bool skip_panel_power_sequence; bool disable_z10; + bool power_optimization; bool dpia_supported; bool disable_dpia; }; diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 0293c58f0701..c29a67ccef17 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -46,10 +46,10 @@ /* Firmware versioning. */ #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0x9525efb5 +#define DMUB_FW_VERSION_GIT_HASH 0x1d82d23e #define DMUB_FW_VERSION_MAJOR 0 #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 90 +#define DMUB_FW_VERSION_REVISION 91 #define DMUB_FW_VERSION_TEST 0 #define DMUB_FW_VERSION_VBIOS 0 #define DMUB_FW_VERSION_HOTFIX 0 diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 10ebf20eaa41..fa0569174aec 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -340,6 +340,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.z10_disable = params->disable_z10; boot_options.bits.dpia_supported = params->dpia_supported; boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1; + boot_options.bits.power_optimization = params->power_optimization; boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 
1 : 0; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index f1a46d16f7ea..4b9e68a79f06 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -98,7 +98,8 @@ enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_ACP, AMD_IP_BLOCK_TYPE_VCN, AMD_IP_BLOCK_TYPE_MES, - AMD_IP_BLOCK_TYPE_JPEG + AMD_IP_BLOCK_TYPE_JPEG, + AMD_IP_BLOCK_TYPE_NUM, }; enum amd_clockgating_state { diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 03581d5b1836..08362d506534 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -927,6 +927,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block { int ret = 0; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; + + if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { + dev_dbg(adev->dev, "IP block%d already in the target %s state!", + block_type, gate ? "gate" : "ungate"); + return 0; + } switch (block_type) { case AMD_IP_BLOCK_TYPE_UVD: @@ -979,6 +986,9 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block break; } + if (!ret) + atomic_set(&adev->pm.pwr_state[block_type], pwr_state); + return ret; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 49fe4155c374..41472ed99253 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2094,6 +2094,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID)) *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { + if (!adev->powerplay.pp_funcs->get_power_profile_mode || + amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) + *states = ATTR_STATE_UNSUPPORTED; } switch (asic_type) { diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 98f1b3d8c1d5..16e3f72d31b9 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -417,6 +417,12 @@ struct amdgpu_dpm { enum amd_dpm_forced_level forced_level; }; +enum ip_power_state { + POWER_STATE_UNKNOWN, + POWER_STATE_ON, + POWER_STATE_OFF, +}; + struct amdgpu_pm { struct mutex mutex; u32 current_sclk; @@ -452,6 +458,8 @@ struct amdgpu_pm { struct i2c_adapter smu_i2c; struct mutex smu_i2c_mutex; struct list_head pm_attr_list; + + atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM]; }; #define R600_SSTU_DFLT 0 diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h index 1d3447991d0c..fc9198846e70 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h @@ -51,7 +51,7 @@ #define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default #define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display #define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz -#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Set active work load type +#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Deprecated (Not to be used) #define PPSMC_MSG_ForcePowerDownGfx 0x0B ///< Force power down GFX, i.e. 
enter GFXOFF #define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload #define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer @@ -63,7 +63,7 @@ #define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK #define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK #define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK) -#define PPSMC_MSG_SPARE0 0x16 ///< Spared +#define PPSMC_MSG_SPARE 0x16 ///< Spare #define PPSMC_MSG_GetGfxclkFrequency 0x17 ///< Get GFX clock frequency #define PPSMC_MSG_GetFclkFrequency 0x18 ///< Get FCLK frequency #define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 321215003643..8d796ed3b7d1 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -875,34 +875,30 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx) static int pp_get_power_profile_mode(void *handle, char *buf) { struct pp_hwmgr *hwmgr = handle; + int ret; - if (!hwmgr || !hwmgr->pm_en || !buf) + if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode) + return -EOPNOTSUPP; + if (!buf) return -EINVAL; - if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); - return snprintf(buf, PAGE_SIZE, "\n"); - } - - return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf); + mutex_lock(&hwmgr->smu_lock); + ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf); + mutex_unlock(&hwmgr->smu_lock); + return ret; } static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size) { struct pp_hwmgr *hwmgr = handle; - int ret = -EINVAL; + int ret = -EOPNOTSUPP; - if (!hwmgr || !hwmgr->pm_en) - return ret; - - if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); + if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode) return ret; - } if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { pr_debug("power profile setting is for manual dpm mode only.\n"); - return ret; + return -EINVAL; } mutex_lock(&hwmgr->smu_lock); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 1de3ae77e03e..1f406f21b452 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1036,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, else i = 1; - size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", + size += sprintf(buf + size, "0: %uMhz %s\n", data->gfx_min_freq_limit/100, i == 0 ? "*" : ""); - size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + size += sprintf(buf + size, "1: %uMhz %s\n", i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK, i == 1 ? "*" : ""); - size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", + size += sprintf(buf + size, "2: %uMhz %s\n", data->gfx_max_freq_limit/100, i == 2 ? 
"*" : ""); break; @@ -1050,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now); for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->entries[i].clk / 100, ((mclk_table->entries[i].clk / 100) @@ -1065,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "1: %10uMhz\n", (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq); } break; @@ -1081,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", min_freq, max_freq); } break; @@ -1456,6 +1456,8 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0], title[1], title[2], title[3], title[4], title[5]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index e7803ce8f67a..611969bf4520 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -4926,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < sclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4941,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4955,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < pcie_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %s %s\n", i, + size += sprintf(buf + size, "%d: %s %s\n", i, (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" : (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : (pcie_table->dpm_levels[i].value == 2) ? 
"8.0GT/s, x16" : "", @@ -4963,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_SCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); for (i = 0; i < odn_sclk_table->num_of_pl; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", + size += sprintf(buf + size, "%d: %10uMHz %10umV\n", i, odn_sclk_table->entries[i].clock/100, odn_sclk_table->entries[i].vddc); } break; case OD_MCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); for (i = 0; i < odn_mclk_table->num_of_pl; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", + size += sprintf(buf + size, "%d: %10uMHz %10umV\n", i, odn_mclk_table->entries[i].clock/100, odn_mclk_table->entries[i].vddc); } break; case OD_RANGE: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); - size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); - size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", data->odn_dpm_table.min_vddc, data->odn_dpm_table.max_vddc); } @@ -5518,6 +5518,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n", title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index b94a77e4e714..03bf8f069222 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -1559,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, CURR_SCLK_INDEX); for (i = 0; i < sclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->entries[i].clk / 100, (i == now) ? "*" : ""); break; @@ -1571,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, CURR_MCLK_INDEX); for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100, (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : ""); break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h index ad33983a8064..2a75da1e9f03 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h @@ -109,6 +109,19 @@ int phm_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +/* + * Helper function to make sysfs_emit_at() happy. Align buf to + * the current page boundary and record the offset. 
+ */ +static inline void phm_get_sysfs_buf(char **buf, int *offset) +{ + if (!*buf || !offset) + return; + + *offset = offset_in_page(*buf); + *buf -= *offset; +} + int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr); void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index c152a61ddd2c..e6336654c565 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -4548,6 +4548,8 @@ static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -4650,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, else count = sclk_table->count; for (i = 0; i < count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4661,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now); for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4672,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now); for (i = 0; i < soc_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, soc_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4684,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now); for (i = 0; i < dcef_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, dcef_table->dpm_levels[i].value / 100, (dcef_table->dpm_levels[i].value / 100 == now) ? "*" : ""); @@ -4698,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, gen_speed = pptable->PcieGenSpeed[i]; lane_width = pptable->PcieLaneCount[i]; - size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i, + size += sprintf(buf + size, "%d: %s %s %s\n", i, (gen_speed == 0) ? "2.5GT/s," : (gen_speed == 1) ? "5.0GT/s," : (gen_speed == 2) ? 
"8.0GT/s," : @@ -4717,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; for (i = 0; i < podn_vdd_dep->count; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", i, podn_vdd_dep->entries[i].clk / 100, podn_vdd_dep->entries[i].vddc); } break; case OD_MCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; for (i = 0; i < podn_vdd_dep->count; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", i, podn_vdd_dep->entries[i].clk/100, podn_vdd_dep->entries[i].vddc); } break; case OD_RANGE: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.gfx_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); - size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.mem_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); - size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", data->odn_dpm_table.min_vddc, data->odn_dpm_table.max_vddc); } @@ -5112,6 +5114,8 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0], title[1], title[2], title[3], title[4], title[5]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index 8558718e15a8..a2f4d6773d45 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -2141,6 +2141,8 @@ static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -2256,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get gfx clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : ""); break; @@ -2272,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get memory clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? 
"*" : ""); break; @@ -2290,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get soc clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); break; @@ -2308,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get dcef clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 0cf39c1244b1..85d55ab4e369 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -3238,6 +3238,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -3372,13 +3374,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_sclks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3390,13 +3392,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_memclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3408,13 +3410,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_socclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3426,7 +3428,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); for (i = 0; i < fclk_dpm_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, fclk_dpm_table->dpm_levels[i].value, fclk_dpm_table->dpm_levels[i].value == (now / 100) ? 
"*" : ""); break; @@ -3438,13 +3440,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_dcefclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3458,7 +3460,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, gen_speed = pptable->PcieGenSpeed[i]; lane_width = pptable->PcieLaneCount[i]; - size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i, + size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, (gen_speed == 0) ? "2.5GT/s," : (gen_speed == 1) ? "5.0GT/s," : (gen_speed == 2) ? "8.0GT/s," : @@ -3479,18 +3481,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", od_table->GfxclkFmin); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "1: %10uMhz\n", od_table->GfxclkFmax); } break; case OD_MCLK: if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "1: %10uMhz\n", od_table->UclkFmax); } @@ -3503,14 +3505,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_VDDC_CURVE"); - size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n", + size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE"); + size += sprintf(buf + size, "0: %10uMhz %10dmV\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1 / VOLTAGE_SCALE); - size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n", + size += sprintf(buf + size, "1: %10uMhz %10dmV\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2 / VOLTAGE_SCALE); - size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n", + size += sprintf(buf + size, "2: %10uMhz %10dmV\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3 / VOLTAGE_SCALE); } @@ -3518,17 +3520,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_RANGE: - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); } if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_UCLK_FMAX].min_value, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); } @@ -3539,22 +3541,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, 
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value); } @@ -4003,6 +4005,8 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n", title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7], title[8], title[9], title[10]); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index b06c59dcc1b4..01168b8955bf 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu) dev_err(adev->dev, "Failed to disable smu features.\n"); } - if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0) && + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) && adev->gfx.rlc.funcs->stop) adev->gfx.rlc.funcs->stop(adev); @@ -2534,13 +2534,15 @@ static int smu_get_power_profile_mode(void *handle, char *buf) struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || + !smu->ppt_funcs->get_power_profile_mode) return -EOPNOTSUPP; + if (!buf) + return -EINVAL; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_power_profile_mode) - ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); + ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); mutex_unlock(&smu->mutex); @@ -2554,7 +2556,8 @@ static int smu_set_power_profile_mode(void *handle, struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || + !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; mutex_lock(&smu->mutex); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c 
index cbc3f99e8573..2238ee19c222 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -309,6 +309,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, { int ret = 0, size = 0; uint32_t cur_value = 0; + int i; smu_cmn_get_sysfs_buf(&buf, &size); @@ -334,8 +335,6 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n", CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); break; - case SMU_GFXCLK: - case SMU_SCLK: case SMU_FCLK: case SMU_MCLK: case SMU_SOCCLK: @@ -346,6 +345,25 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, return ret; size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value); break; + case SMU_SCLK: + case SMU_GFXCLK: + ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + return ret; + if (cur_value == CYAN_SKILLFISH_SCLK_MAX) + i = 2; + else if (cur_value == CYAN_SKILLFISH_SCLK_MIN) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MIN, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : cyan_skillfish_sclk_default, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MAX, + i == 2 ? "*" : ""); + break; default: dev_warn(smu->adev->dev, "Unsupported clock type\n"); return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 71161f6b78fe..60a557068ea4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1265,7 +1265,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { uint16_t *curve_settings; - int i, size = 0, ret = 0; + int i, levels, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1319,14 +1319,17 @@ static int navi10_print_clk_levels(struct smu_context *smu, freq_values[1] = cur_value; mark_index = cur_value == freq_values[0] ? 0 : cur_value == freq_values[2] ? 2 : 1; - if (mark_index != 1) - freq_values[1] = (freq_values[0] + freq_values[2]) / 2; - for (i = 0; i < 3; i++) { + levels = 3; + if (mark_index != 1) { + levels = 2; + freq_values[1] = freq_values[2]; + } + + for (i = 0; i < levels; i++) { size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i], i == mark_index ? 
"*" : ""); } - } break; case SMU_PCIE: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 421f38e8dada..c02ed65ffa38 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -683,6 +683,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; + uint32_t min, max; memset(&metrics, 0, sizeof(metrics)); @@ -743,6 +744,13 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (ret) return ret; break; + case SMU_GFXCLK: + case SMU_SCLK: + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value); + if (ret) { + return ret; + } + break; default: break; } @@ -768,6 +776,24 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (!cur_value_match_level) size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); break; + case SMU_GFXCLK: + case SMU_SCLK: + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + if (cur_value == max) + i = 2; + else if (cur_value == min) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, + i == 2 ? "*" : ""); + break; default: break; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index a403657151ba..caf1775d48ef 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -64,7 +64,6 @@ static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1), MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1), MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1), - MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 1), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1), MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), @@ -135,14 +134,6 @@ static struct cmn2asic_mapping yellow_carp_table_map[SMU_TABLE_COUNT] = { TAB_MAP_VALID(CUSTOM_DPM), TAB_MAP_VALID(DPMCLOCKS), }; - -static struct cmn2asic_mapping yellow_carp_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), -}; static int yellow_carp_init_smc_tables(struct smu_context *smu) { @@ -543,81 +534,6 @@ static int yellow_carp_set_watermarks_table(struct smu_context *smu, return 0; } -static int yellow_carp_get_power_profile_mode(struct smu_context *smu, - char *buf) -{ - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; - uint32_t i, size = 0; - int16_t workload_type = 0; - - if (!buf) - 
return -EINVAL; - - for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { - /* - * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT. - * Not all profile modes are supported on yellow carp. - */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - i); - - if (workload_type < 0) - continue; - - size += sysfs_emit_at(buf, size, "%2d %14s%s\n", - i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); - } - - return size; -} - -static int yellow_carp_set_power_profile_mode(struct smu_context *smu, - long *input, uint32_t size) -{ - int workload_type, ret; - uint32_t profile_mode = input[size]; - - if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); - return -EINVAL; - } - - if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || - profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) - return 0; - - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - profile_mode); - if (workload_type < 0) { - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on YELLOWCARP\n", - profile_mode); - return -EINVAL; - } - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, - NULL); - if (ret) { - dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", - workload_type); - return ret; - } - - smu->power_profile_mode = profile_mode; - - return 0; -} - static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu, void **table) { @@ -781,6 +697,11 @@ static int yellow_carp_get_current_clk_freq(struct smu_context *smu, case SMU_FCLK: return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, value); + case SMU_GFXCLK: + case SMU_SCLK: + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetGfxclkFrequency, 0, value); + break; default: return -EINVAL; } @@ -1051,6 +972,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, { int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; + uint32_t min, max; smu_cmn_get_sysfs_buf(&buf, &size); @@ -1089,6 +1011,27 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, cur_value == value ? "*" : ""); } break; + case SMU_GFXCLK: + case SMU_SCLK: + ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + goto print_clk_out; + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + if (cur_value == max) + i = 2; + else if (cur_value == min) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, + i == 2 ? 
"*" : ""); + break; default: break; } @@ -1238,8 +1181,6 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = { .read_sensor = yellow_carp_read_sensor, .is_dpm_running = yellow_carp_is_dpm_running, .set_watermarks_table = yellow_carp_set_watermarks_table, - .get_power_profile_mode = yellow_carp_get_power_profile_mode, - .set_power_profile_mode = yellow_carp_set_power_profile_mode, .get_gpu_metrics = yellow_carp_get_gpu_metrics, .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, @@ -1261,6 +1202,5 @@ void yellow_carp_set_ppt_funcs(struct smu_context *smu) smu->message_map = yellow_carp_message_map; smu->feature_map = yellow_carp_feature_mask_map; smu->table_map = yellow_carp_table_map; - smu->workload_map = yellow_carp_workload_map; smu->is_apu = true; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h index b3ad8352c68a..a9205a8ea3ad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h @@ -24,5 +24,6 @@ #define __YELLOW_CARP_PPT_H__ extern void yellow_carp_set_ppt_funcs(struct smu_context *smu); +#define YELLOW_CARP_UMD_PSTATE_GFXCLK 1100 #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 843d2cbfc71d..ea6f50c08c5f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -139,9 +139,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, const char *message = smu_get_message_name(smu, msg); switch (reg_c2pmsg_90) { - case SMU_RESP_NONE: + case SMU_RESP_NONE: { + u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66); + u32 prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); dev_err_ratelimited(adev->dev, - "SMU: I'm not done with your previous command!"); + "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X", + msg_idx, prm); + } break; case SMU_RESP_OK: /* The SMU executed the command. 
It completed with a diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index b53fee6f1c17..65f172807a0d 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -291,7 +291,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf) if (rc) return rc; - return sprintf(buf, "%u\n", reg & 1); + return sprintf(buf, "%u\n", reg); } static DEVICE_ATTR_RO(vga_pw); diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 3cac16db970f..010657ea7af7 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -167,9 +167,10 @@ static void lt9611uxc_hpd_work(struct work_struct *work) struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work); bool connected; - if (lt9611uxc->connector.dev) - drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); - else { + if (lt9611uxc->connector.dev) { + if (lt9611uxc->connector.dev->mode_config.funcs) + drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); + } else { mutex_lock(&lt9611uxc->ocm_lock); connected = lt9611uxc->hdmi_connected; @@ -339,6 +340,8 @@ static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc return -ENODEV; } + lt9611uxc->connector.polled = DRM_CONNECTOR_POLL_HPD; + drm_connector_helper_add(&lt9611uxc->connector, &lt9611uxc_bridge_connector_helper_funcs); ret = drm_connector_init(bridge->dev, &lt9611uxc->connector, diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index dcf579a4cf83..ad460b96c0a3 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -12,6 +12,7 @@ #include <linux/platform_device.h> #include <linux/regulator/consumer.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_panel.h> @@ -22,6 +23,7 @@ struct lvds_codec { struct regulator *vcc; struct gpio_desc *powerdown_gpio; u32 connector_type; + unsigned int bus_format; }; static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge) @@ -74,12 +76,50 @@ static const struct drm_bridge_funcs funcs = { .disable = lvds_codec_disable, }; +#define MAX_INPUT_SEL_FORMATS 1 +static u32 * +lvds_codec_atomic_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + struct lvds_codec *lvds_codec = to_lvds_codec(bridge); + u32 *input_fmts; + + *num_input_fmts = 0; + + input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts), + GFP_KERNEL); + if (!input_fmts) + return NULL; + + input_fmts[0] = lvds_codec->bus_format; + *num_input_fmts = MAX_INPUT_SEL_FORMATS; + + return input_fmts; +} + +static const struct drm_bridge_funcs funcs_decoder = { + .attach = lvds_codec_attach, + .enable = lvds_codec_enable, + .disable = lvds_codec_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_get_input_bus_fmts = lvds_codec_atomic_get_input_bus_fmts, +}; + static int lvds_codec_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *panel_node; + struct device_node *bus_node; struct drm_panel *panel; struct lvds_codec *lvds_codec; + const char *mapping; + int ret; lvds_codec = devm_kzalloc(dev,
sizeof(*lvds_codec), GFP_KERNEL); if (!lvds_codec) @@ -119,13 +159,47 @@ static int lvds_codec_probe(struct platform_device *pdev) if (IS_ERR(lvds_codec->panel_bridge)) return PTR_ERR(lvds_codec->panel_bridge); + lvds_codec->bridge.funcs = &funcs; + + /* + * Decoder input LVDS format is a property of the decoder chip or even + * its strapping. Handle data-mapping the same way lvds-panel does. In + * case data-mapping is not present, do nothing, since there are still + * legacy bindings which do not specify this property. + */ + if (lvds_codec->connector_type != DRM_MODE_CONNECTOR_LVDS) { + bus_node = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0); + if (!bus_node) { + dev_dbg(dev, "bus DT node not found\n"); + return -ENXIO; + } + + ret = of_property_read_string(bus_node, "data-mapping", + &mapping); + of_node_put(bus_node); + if (ret < 0) { + dev_warn(dev, "missing 'data-mapping' DT property\n"); + } else { + if (!strcmp(mapping, "jeida-18")) { + lvds_codec->bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG; + } else if (!strcmp(mapping, "jeida-24")) { + lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; + } else if (!strcmp(mapping, "vesa-24")) { + lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG; + } else { + dev_err(dev, "invalid 'data-mapping' DT property\n"); + return -EINVAL; + } + lvds_codec->bridge.funcs = &funcs_decoder; + } + } + /* * The panel_bridge bridge is attached to the panel's of_node, * but we need a bridge attached to our of_node for our user * to look up. */ lvds_codec->bridge.of_node = dev->of_node; - lvds_codec->bridge.funcs = &funcs; drm_bridge_add(&lvds_codec->bridge); platform_set_drvdata(pdev, lvds_codec); diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index ed8ac5059cd2..a7389a0facfb 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -939,6 +939,40 @@ static void nwl_dsi_bridge_detach(struct drm_bridge *bridge) drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0); } +static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + u32 *input_fmts, input_fmt; + + *num_input_fmts = 0; + + switch (output_fmt) { + /* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */ + case MEDIA_BUS_FMT_FIXED: + input_fmt = MEDIA_BUS_FMT_RGB888_1X24; + break; + case MEDIA_BUS_FMT_RGB888_1X24: + case MEDIA_BUS_FMT_RGB666_1X18: + case MEDIA_BUS_FMT_RGB565_1X16: + input_fmt = output_fmt; + break; + default: + return NULL; + } + + input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL); + if (!input_fmts) + return NULL; + input_fmts[0] = input_fmt; + *num_input_fmts = 1; + + return input_fmts; +} + static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, @@ -946,6 +980,7 @@ static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = { .atomic_check = nwl_dsi_bridge_atomic_check, .atomic_enable = nwl_dsi_bridge_atomic_enable, .atomic_disable = nwl_dsi_bridge_atomic_disable, + .atomic_get_input_bus_fmts = nwl_bridge_atomic_get_input_bus_fmts, .mode_set = nwl_dsi_bridge_mode_set, .mode_valid = nwl_dsi_bridge_mode_valid, .attach = nwl_dsi_bridge_attach, diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index a32f70bc68ea..ba1160ec6d6e 
100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -288,6 +288,19 @@ err_dsi_attach: return ret; } +static void sn65dsi83_detach(struct drm_bridge *bridge) +{ + struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); + + if (!ctx->dsi) + return; + + mipi_dsi_detach(ctx->dsi); + mipi_dsi_device_unregister(ctx->dsi); + drm_bridge_remove(&ctx->bridge); + ctx->dsi = NULL; +} + static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { @@ -583,6 +596,7 @@ sn65dsi83_atomic_get_input_bus_fmts(struct drm_bridge *bridge, static const struct drm_bridge_funcs sn65dsi83_funcs = { .attach = sn65dsi83_attach, + .detach = sn65dsi83_detach, .atomic_pre_enable = sn65dsi83_atomic_pre_enable, .atomic_enable = sn65dsi83_atomic_enable, .atomic_disable = sn65dsi83_atomic_disable, @@ -697,9 +711,6 @@ static int sn65dsi83_remove(struct i2c_client *client) { struct sn65dsi83 *ctx = i2c_get_clientdata(client); - mipi_dsi_detach(ctx->dsi); - mipi_dsi_device_unregister(ctx->dsi); - drm_bridge_remove(&ctx->bridge); of_node_put(ctx->host_node); return 0; diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 3bc782b630b9..52e20c68813b 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -625,6 +625,8 @@ int drm_connector_register_all(struct drm_device *dev) * * In contrast to the other drm_get_*_name functions this one here returns a * const pointer and hence is threadsafe. + * + * Returns: connector status string */ const char *drm_get_connector_status_name(enum drm_connector_status status) { @@ -707,7 +709,7 @@ __drm_connector_put_safe(struct drm_connector *conn) * drm_connector_list_iter_next - return next connector * @iter: connector_list iterator * - * Returns the next connector for @iter, or NULL when the list walk has + * Returns: the next connector for @iter, or NULL when the list walk has * completed. */ struct drm_connector * @@ -780,6 +782,8 @@ static const struct drm_prop_enum_list drm_subpixel_enum_list[] = { * * Note you could abuse this and return something out of bounds, but that * would be a caller error. No unscrubbed user data should make it here. + * + * Returns: string describing an enumerated subpixel property */ const char *drm_get_subpixel_order_name(enum subpixel_order order) { @@ -809,6 +813,9 @@ static const struct drm_prop_enum_list drm_link_status_enum_list[] = { * Store the supported bus formats in display info structure. * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for * a full list of available formats. + * + * Returns: + * 0 on success or a negative error code on failure. */ int drm_display_info_set_bus_formats(struct drm_display_info *info, const u32 *formats, @@ -1326,6 +1333,8 @@ int drm_connector_create_standard_properties(struct drm_device *dev) * @dev: DRM device * * Called by a driver the first time a DVI-I connector is made. + * + * Returns: %0 */ int drm_mode_create_dvi_i_properties(struct drm_device *dev) { @@ -1397,6 +1406,8 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property); * Game: * Content type is game * + * The meaning of each content type is defined in CTA-861-G table 15. + * * Drivers can set up this property by calling * drm_connector_attach_content_type_property(). Decoding to * infoframe values is done through drm_hdmi_avi_infoframe_content_type(). 
@@ -1407,6 +1418,8 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property); * @connector: connector to attach content type property on. * * Called by a driver the first time a HDMI connector is made. + * + * Returns: %0 */ int drm_connector_attach_content_type_property(struct drm_connector *connector) { @@ -1487,6 +1500,9 @@ EXPORT_SYMBOL(drm_connector_attach_tv_margin_properties); * creates the TV margin properties for a given device. No need to call this * function for an SDTV connector, it's already called from * drm_mode_create_tv_properties(). + * + * Returns: + * 0 on success or a negative error code on failure. */ int drm_mode_create_tv_margin_properties(struct drm_device *dev) { @@ -1527,6 +1543,9 @@ EXPORT_SYMBOL(drm_mode_create_tv_margin_properties); * the TV specific connector properties for a given device. Caller is * responsible for allocating a list of format names and passing them to * this routine. + * + * Returns: + * 0 on success or a negative error code on failure. */ int drm_mode_create_tv_properties(struct drm_device *dev, unsigned int num_modes, @@ -1622,6 +1641,8 @@ EXPORT_SYMBOL(drm_mode_create_tv_properties); * Atomic drivers should use drm_connector_attach_scaling_mode_property() * instead to correctly assign &drm_connector_state.scaling_mode * in the atomic state. + * + * Returns: %0 */ int drm_mode_create_scaling_mode_property(struct drm_device *dev) { @@ -1939,6 +1960,9 @@ EXPORT_SYMBOL(drm_mode_create_content_type_property); * @dev: DRM device * * Create the suggested x/y offset property for connectors. + * + * Returns: + * 0 on success or a negative error code on failure. */ int drm_mode_create_suggested_offset_properties(struct drm_device *dev) { @@ -2312,8 +2336,8 @@ int drm_connector_set_panel_orientation( EXPORT_SYMBOL(drm_connector_set_panel_orientation); /** - * drm_connector_set_panel_orientation_with_quirk - - * set the connector's panel_orientation after checking for quirks + * drm_connector_set_panel_orientation_with_quirk - set the + * connector's panel_orientation after checking for quirks * @connector: connector for which to init the panel-orientation property. * @panel_orientation: drm_panel_orientation value to set * @width: width in pixels of the panel, used for panel quirk detection @@ -2597,7 +2621,7 @@ struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode) /** * drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector - * @connector: connector to report the event on + * @connector_fwnode: fwnode_handle to report the event on * * On some hardware a hotplug event notification may come from outside the display * driver / device. 
An example of this is some USB Type-C setups where the hardware diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 09c820045859..4dcdec6487bb 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1340,31 +1340,15 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array, struct drm_gem_object *obj, bool write) { - int ret; - struct dma_fence **fences; - unsigned int i, fence_count; - - if (!write) { - struct dma_fence *fence = - dma_resv_get_excl_unlocked(obj->resv); - - return drm_gem_fence_array_add(fence_array, fence); - } + struct dma_resv_iter cursor; + struct dma_fence *fence; + int ret = 0; - ret = dma_resv_get_fences(obj->resv, NULL, - &fence_count, &fences); - if (ret || !fence_count) - return ret; - - for (i = 0; i < fence_count; i++) { - ret = drm_gem_fence_array_add(fence_array, fences[i]); + dma_resv_for_each_fence(&cursor, obj->resv, write, fence) { + ret = drm_gem_fence_array_add(fence_array, fence); if (ret) break; } - - for (; i < fence_count; i++) - dma_fence_put(fences[i]); - kfree(fences); return ret; } EXPORT_SYMBOL(drm_gem_fence_array_add_implicit); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index d53388199f34..9d05674550a4 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -210,8 +210,13 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map); drm_prime_gem_destroy(gem_obj, cma_obj->sgt); } else if (cma_obj->vaddr) { - dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, - cma_obj->vaddr, cma_obj->paddr); + if (cma_obj->map_noncoherent) + dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size, + cma_obj->vaddr, cma_obj->paddr, + DMA_TO_DEVICE); + else + dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, + cma_obj->vaddr, cma_obj->paddr); } drm_gem_object_release(gem_obj); diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index bf8a6e823a15..c97323365675 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -25,6 +25,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_device.h> #include <drm/drm_modeset_lock.h> +#include <drm/drm_print.h> /** * DOC: kms locking @@ -77,6 +78,45 @@ static DEFINE_WW_CLASS(crtc_ww_class); +#if IS_ENABLED(CONFIG_DRM_DEBUG_MODESET_LOCK) +static noinline depot_stack_handle_t __drm_stack_depot_save(void) +{ + unsigned long entries[8]; + unsigned int n; + + n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); + + return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN); +} + +static void __drm_stack_depot_print(depot_stack_handle_t stack_depot) +{ + struct drm_printer p = drm_debug_printer("drm_modeset_lock"); + unsigned long *entries; + unsigned int nr_entries; + char *buf; + + buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN); + if (!buf) + return; + + nr_entries = stack_depot_fetch(stack_depot, &entries); + stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 2); + + drm_printf(&p, "attempting to lock a contended lock without backoff:\n%s", buf); + + kfree(buf); +} +#else /* CONFIG_DRM_DEBUG_MODESET_LOCK */ +static depot_stack_handle_t __drm_stack_depot_save(void) +{ + return 0; +} +static void __drm_stack_depot_print(depot_stack_handle_t stack_depot) +{ +} +#endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */ + /** * drm_modeset_lock_all - take all modeset locks * @dev: DRM device @@ -225,7 +265,9 @@ EXPORT_SYMBOL(drm_modeset_acquire_fini); */ void 
drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx) { - WARN_ON(ctx->contended); + if (WARN_ON(ctx->contended)) + __drm_stack_depot_print(ctx->stack_depot); + while (!list_empty(&ctx->locked)) { struct drm_modeset_lock *lock; @@ -243,7 +285,8 @@ static inline int modeset_lock(struct drm_modeset_lock *lock, { int ret; - WARN_ON(ctx->contended); + if (WARN_ON(ctx->contended)) + __drm_stack_depot_print(ctx->stack_depot); if (ctx->trylock_only) { lockdep_assert_held(&ctx->ww_ctx); @@ -274,6 +317,7 @@ static inline int modeset_lock(struct drm_modeset_lock *lock, ret = 0; } else if (ret == -EDEADLK) { ctx->contended = lock; + ctx->stack_depot = __drm_stack_depot_save(); } return ret; @@ -296,6 +340,7 @@ int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx) struct drm_modeset_lock *contended = ctx->contended; ctx->contended = NULL; + ctx->stack_depot = 0; if (WARN_ON(!contended)) return 0; diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 5b2d0ca03705..838b32b70bce 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -123,7 +123,6 @@ static int drm_plane_helper_check_update(struct drm_plane *plane, .crtc_w = drm_rect_width(dst), .crtc_h = drm_rect_height(dst), .rotation = rotation, - .visible = *visible, }; struct drm_crtc_state crtc_state = { .crtc = crtc, diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index d8ba95744410..c773d3dfb1ab 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -722,11 +722,13 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) if (obj->funcs && obj->funcs->mmap) { vma->vm_ops = obj->funcs->vm_ops; + drm_gem_object_get(obj); ret = obj->funcs->mmap(obj, vma); - if (ret) + if (ret) { + drm_gem_object_put(obj); return ret; + } vma->vm_private_data = obj; - drm_gem_object_get(obj); return 0; } diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index cd818a629183..00e53de4812b 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -225,12 +225,29 @@ static int hyperv_vmbus_remove(struct hv_device *hdev) { struct drm_device *dev = hv_get_drvdata(hdev); struct hyperv_drm_device *hv = to_hv(dev); + struct pci_dev *pdev; drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); vmbus_close(hdev->channel); hv_set_drvdata(hdev, NULL); - vmbus_free_mmio(hv->mem->start, hv->fb_size); + + /* + * Free allocated MMIO memory only on Gen2 VMs. 
+ * On Gen1 VMs, release the PCI device + */ + if (efi_enabled(EFI_BOOT)) { + vmbus_free_mmio(hv->mem->start, hv->fb_size); + } else { + pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT, + PCI_DEVICE_ID_HYPERV_VIDEO, NULL); + if (!pdev) { + drm_err(dev, "Unable to find PCI Hyper-V video\n"); + return -ENODEV; + } + pci_release_region(pdev, 0); + pci_dev_put(pdev); + } return 0; } diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index 88c427f3c346..f5b4dd5b4275 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -584,6 +584,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, else intel_encoder->enable = g4x_enable_hdmi; } + intel_encoder->shutdown = intel_hdmi_encoder_shutdown; intel_encoder->type = INTEL_OUTPUT_HDMI; intel_encoder->power_domain = intel_port_to_power_domain(port); diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 168c84a74d30..71fbdcddd31f 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -696,10 +696,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); for_each_dsi_phy(phy, intel_dsi->phys) { - if (DISPLAY_VER(dev_priv) >= 12) - val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - else - val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); + val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); @@ -1135,8 +1132,6 @@ static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - /* step 4a: power up all lanes of the DDI used by DSI */ gen11_dsi_power_up_lanes(encoder); @@ -1162,8 +1157,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, gen11_dsi_configure_transcoder(encoder, crtc_state); /* Step 4l: Gate DDI clocks */ - if (DISPLAY_VER(dev_priv) == 11) - gen11_dsi_gate_clocks(encoder); + gen11_dsi_gate_clocks(encoder); } static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) @@ -1271,7 +1265,8 @@ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder) if (DISPLAY_VER(i915) == 13) { for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(i915, TGL_DSI_CHKN_REG(port), - TGL_DSI_CHKN_LSHS_GB, 0x4); + TGL_DSI_CHKN_LSHS_GB_MASK, + TGL_DSI_CHKN_LSHS_GB(4)); } } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index b99907c656bb..2b1423a43437 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1707,6 +1707,39 @@ static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata, child->aux_channel = 0; } +static u8 dvo_port_type(u8 dvo_port) +{ + switch (dvo_port) { + case DVO_PORT_HDMIA: + case DVO_PORT_HDMIB: + case DVO_PORT_HDMIC: + case DVO_PORT_HDMID: + case DVO_PORT_HDMIE: + case DVO_PORT_HDMIF: + case DVO_PORT_HDMIG: + case DVO_PORT_HDMIH: + case DVO_PORT_HDMII: + return DVO_PORT_HDMIA; + case DVO_PORT_DPA: + case DVO_PORT_DPB: + case DVO_PORT_DPC: + case DVO_PORT_DPD: + case DVO_PORT_DPE: + case DVO_PORT_DPF: + case DVO_PORT_DPG: + case DVO_PORT_DPH: + case DVO_PORT_DPI: + return DVO_PORT_DPA; + case DVO_PORT_MIPIA: + case DVO_PORT_MIPIB: + case DVO_PORT_MIPIC: + case DVO_PORT_MIPID: + return DVO_PORT_MIPIA; + default: + return dvo_port; + } +} + static enum port __dvo_port_to_port(int n_ports, int n_dvo, const int 
port_mapping[][3], u8 dvo_port) { @@ -1930,50 +1963,6 @@ static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devd } } -static enum port get_edp_port(struct drm_i915_private *i915) -{ - const struct intel_bios_encoder_data *devdata; - enum port port; - - for_each_port(port) { - devdata = i915->vbt.ports[port]; - - if (devdata && intel_bios_encoder_supports_edp(devdata)) - return port; - } - - return PORT_NONE; -} - -/* - * FIXME: The power sequencer and backlight code currently do not support more - * than one set registers, at least not on anything other than VLV/CHV. It will - * clobber the registers. As a temporary workaround, gracefully prevent more - * than one eDP from being registered. - */ -static void sanitize_dual_edp(struct intel_bios_encoder_data *devdata, - enum port port) -{ - struct drm_i915_private *i915 = devdata->i915; - struct child_device_config *child = &devdata->child; - enum port p; - - /* CHV might not clobber PPS registers. */ - if (IS_CHERRYVIEW(i915)) - return; - - p = get_edp_port(i915); - if (p == PORT_NONE) - return; - - drm_dbg_kms(&i915->drm, "both ports %c and %c configured as eDP, " - "disabling port %c eDP\n", port_name(p), port_name(port), - port_name(port)); - - child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT; - child->device_type &= ~DEVICE_TYPE_INTERNAL_CONNECTOR; -} - static bool is_port_valid(struct drm_i915_private *i915, enum port port) { /* @@ -2031,9 +2020,6 @@ static void parse_ddi_port(struct drm_i915_private *i915, supports_typec_usb, supports_tbt, devdata->dsc != NULL); - if (is_edp) - sanitize_dual_edp(devdata, port); - if (is_dvi) sanitize_ddc_pin(devdata, port); @@ -2670,35 +2656,17 @@ bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port) return false; } -static bool child_dev_is_dp_dual_mode(const struct child_device_config *child, - enum port port) +static bool child_dev_is_dp_dual_mode(const struct child_device_config *child) { - static const struct { - u16 dp, hdmi; - } port_mapping[] = { - /* - * Buggy VBTs may declare DP ports as having - * HDMI type dvo_port :( So let's check both. - */ - [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, - [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, - [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, - [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, - [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, - }; - - if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) - return false; - if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) != (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) return false; - if (child->dvo_port == port_mapping[port].dp) + if (dvo_port_type(child->dvo_port) == DVO_PORT_DPA) return true; /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */ - if (child->dvo_port == port_mapping[port].hdmi && + if (dvo_port_type(child->dvo_port) == DVO_PORT_HDMIA && child->aux_channel != 0) return true; @@ -2708,10 +2676,36 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child, bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915, enum port port) { + static const struct { + u16 dp, hdmi; + } port_mapping[] = { + /* + * Buggy VBTs may declare DP ports as having + * HDMI type dvo_port :( So let's check both. 
+ */ + [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, + [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, + [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, + [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, + [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, + }; const struct intel_bios_encoder_data *devdata; + if (HAS_DDI(i915)) { + const struct intel_bios_encoder_data *devdata; + + devdata = intel_bios_encoder_data_lookup(i915, port); + + return devdata && child_dev_is_dp_dual_mode(&devdata->child); + } + + if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) + return false; + list_for_each_entry(devdata, &i915->vbt.display_devices, node) { - if (child_dev_is_dp_dual_mode(&devdata->child, port)) + if ((devdata->child.dvo_port == port_mapping[port].dp || + devdata->child.dvo_port == port_mapping[port].hdmi) && + child_dev_is_dp_dual_mode(&devdata->child)) return true; } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 9e466d829019..868dd43a7542 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2885,7 +2885,7 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) return freq; } -static struct intel_cdclk_funcs tgl_cdclk_funcs = { +static const struct intel_cdclk_funcs tgl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, @@ -2893,7 +2893,7 @@ static struct intel_cdclk_funcs tgl_cdclk_funcs = { .calc_voltage_level = tgl_calc_voltage_level, }; -static struct intel_cdclk_funcs ehl_cdclk_funcs = { +static const struct intel_cdclk_funcs ehl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, @@ -2901,7 +2901,7 @@ static struct intel_cdclk_funcs ehl_cdclk_funcs = { .calc_voltage_level = ehl_calc_voltage_level, }; -static struct intel_cdclk_funcs icl_cdclk_funcs = { +static const struct intel_cdclk_funcs icl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, @@ -2909,7 +2909,7 @@ static struct intel_cdclk_funcs icl_cdclk_funcs = { .calc_voltage_level = icl_calc_voltage_level, }; -static struct intel_cdclk_funcs bxt_cdclk_funcs = { +static const struct intel_cdclk_funcs bxt_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, @@ -2917,54 +2917,54 @@ static struct intel_cdclk_funcs bxt_cdclk_funcs = { .calc_voltage_level = bxt_calc_voltage_level, }; -static struct intel_cdclk_funcs skl_cdclk_funcs = { +static const struct intel_cdclk_funcs skl_cdclk_funcs = { .get_cdclk = skl_get_cdclk, .set_cdclk = skl_set_cdclk, .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, .modeset_calc_cdclk = skl_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs bdw_cdclk_funcs = { +static const struct intel_cdclk_funcs bdw_cdclk_funcs = { .get_cdclk = bdw_get_cdclk, .set_cdclk = bdw_set_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = bdw_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs chv_cdclk_funcs = { +static const struct intel_cdclk_funcs chv_cdclk_funcs = { .get_cdclk = vlv_get_cdclk, .set_cdclk = chv_set_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = vlv_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs vlv_cdclk_funcs = { +static const struct intel_cdclk_funcs vlv_cdclk_funcs = { .get_cdclk = vlv_get_cdclk, .set_cdclk = vlv_set_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 
.modeset_calc_cdclk = vlv_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs hsw_cdclk_funcs = { +static const struct intel_cdclk_funcs hsw_cdclk_funcs = { .get_cdclk = hsw_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; /* SNB, IVB, 965G, 945G */ -static struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = { +static const struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = { .get_cdclk = fixed_400mhz_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs ilk_cdclk_funcs = { +static const struct intel_cdclk_funcs ilk_cdclk_funcs = { .get_cdclk = fixed_450mhz_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs gm45_cdclk_funcs = { +static const struct intel_cdclk_funcs gm45_cdclk_funcs = { .get_cdclk = gm45_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, @@ -2972,7 +2972,7 @@ static struct intel_cdclk_funcs gm45_cdclk_funcs = { /* G45 uses G33 */ -static struct intel_cdclk_funcs i965gm_cdclk_funcs = { +static const struct intel_cdclk_funcs i965gm_cdclk_funcs = { .get_cdclk = i965gm_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, @@ -2980,19 +2980,19 @@ static struct intel_cdclk_funcs i965gm_cdclk_funcs = { /* i965G uses fixed 400 */ -static struct intel_cdclk_funcs pnv_cdclk_funcs = { +static const struct intel_cdclk_funcs pnv_cdclk_funcs = { .get_cdclk = pnv_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs g33_cdclk_funcs = { +static const struct intel_cdclk_funcs g33_cdclk_funcs = { .get_cdclk = g33_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs i945gm_cdclk_funcs = { +static const struct intel_cdclk_funcs i945gm_cdclk_funcs = { .get_cdclk = i945gm_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, @@ -3000,37 +3000,37 @@ static struct intel_cdclk_funcs i945gm_cdclk_funcs = { /* i945G uses fixed 400 */ -static struct intel_cdclk_funcs i915gm_cdclk_funcs = { +static const struct intel_cdclk_funcs i915gm_cdclk_funcs = { .get_cdclk = i915gm_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs i915g_cdclk_funcs = { +static const struct intel_cdclk_funcs i915g_cdclk_funcs = { .get_cdclk = fixed_333mhz_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs i865g_cdclk_funcs = { +static const struct intel_cdclk_funcs i865g_cdclk_funcs = { .get_cdclk = fixed_266mhz_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs i85x_cdclk_funcs = { +static const struct intel_cdclk_funcs i85x_cdclk_funcs = { .get_cdclk = i85x_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs i845g_cdclk_funcs = { +static const struct intel_cdclk_funcs i845g_cdclk_funcs = { .get_cdclk = fixed_200mhz_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = 
fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs i830_cdclk_funcs = { +static const struct intel_cdclk_funcs i830_cdclk_funcs = { .get_cdclk = fixed_133mhz_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 1dcfe31e6c6f..cfb567df71b3 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4361,6 +4361,7 @@ static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder) enum phy phy = intel_port_to_phy(i915, encoder->port); intel_dp_encoder_shutdown(encoder); + intel_hdmi_encoder_shutdown(encoder); if (!intel_phy_is_tc(i915, phy)) return; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ff598b6cd953..ec403e46a328 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -848,9 +848,16 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info int i; for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { + unsigned int plane_size; + + plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height; + if (plane_size == 0) + continue; + if (rem_info->plane_alignment) size = ALIGN(size, rem_info->plane_alignment); - size += rem_info->plane[i].dst_stride * rem_info->plane[i].height; + + size += plane_size; } return size; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 23de500d56b5..be883469d2fc 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -120,6 +120,12 @@ bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state) return crtc_state->port_clock >= 1000000; } +static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) +{ + intel_dp->sink_rates[0] = 162000; + intel_dp->num_sink_rates = 1; +} + /* update sink rates from dpcd */ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) { @@ -281,7 +287,7 @@ intel_dp_max_data_rate(int max_link_rate, int max_lanes) */ int max_link_rate_kbps = max_link_rate * 10; - max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 9671, 10000); + max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000); max_link_rate = max_link_rate_kbps / 8; } @@ -1858,6 +1864,12 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, intel_dp->lane_count = lane_count; } +static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp) +{ + intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); + intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); +} + /* Enable backlight PWM and backlight PP control. 
*/ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) @@ -2017,8 +2029,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder, if (intel_dp->dpcd[DP_DPCD_REV] == 0) intel_dp_get_dpcd(intel_dp); - intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); - intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); + intel_dp_reset_max_link_params(intel_dp); } bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, @@ -2556,6 +2567,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) */ intel_psr_init_dpcd(intel_dp); + /* Clear the default sink rates */ + intel_dp->num_sink_rates = 0; + /* Read the eDP 1.4+ supported link rates. */ if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; @@ -2591,6 +2605,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) intel_dp_set_sink_rates(intel_dp); intel_dp_set_common_rates(intel_dp); + intel_dp_reset_max_link_params(intel_dp); /* Read the eDP DSC DPCD registers */ if (DISPLAY_VER(dev_priv) >= 10) @@ -4332,12 +4347,7 @@ intel_dp_detect(struct drm_connector *connector, * supports link training fallback params. */ if (intel_dp->reset_link_params || intel_dp->is_mst) { - /* Initial max link lane count */ - intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); - - /* Initial max link rate */ - intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); - + intel_dp_reset_max_link_params(intel_dp); intel_dp->reset_link_params = false; } @@ -5003,6 +5013,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, } intel_dp_set_source_rates(intel_dp); + intel_dp_set_default_sink_rates(intel_dp); + intel_dp_set_common_rates(intel_dp); + intel_dp_reset_max_link_params(intel_dp); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index fa1f375e696b..cb511b2b7069 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -378,8 +378,8 @@ static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_pl intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane); intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane); - *w = main_width / main_hsub / hsub; - *h = main_height / main_vsub / vsub; + *w = DIV_ROUND_UP(main_width, main_hsub * hsub); + *h = DIV_ROUND_UP(main_height, main_vsub * vsub); } static u32 intel_adjust_tile_offset(int *x, int *y, diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index d2e61f6c6e08..371736bdc01f 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1246,12 +1246,13 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) { struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); - struct i2c_adapter *adapter = - intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); + struct i2c_adapter *adapter; if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI) return; + adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); + drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n", enable ?
"Enabling" : "Disabling"); @@ -2258,6 +2259,17 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, return 0; } +void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + + /* + * Give a hand to buggy BIOSen which forget to turn + * the TMDS output buffers back on after a reboot. + */ + intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); +} + static void intel_hdmi_unset_edid(struct drm_connector *connector) { diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index b43a180d007e..2bf440eb400a 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -28,6 +28,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); +void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder); bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, struct drm_connector *connector, bool high_tmds_clock_ratio, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index eae09d323049..e8a58c997170 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -9,6 +9,8 @@ #include <linux/dma-resv.h> #include <linux/module.h> +#include <asm/smp.h> + #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index f17383e76eb7..57c97554393b 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -1396,6 +1396,9 @@ remap_pages(struct drm_i915_gem_object *obj, { unsigned int row; + if (!width || !height) + return sg; + if (alignment_pad) { st->nents++; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 524eaf678790..795689eb3fc7 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -301,7 +301,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt) user_forcewake(gt, true); wait_for_suspend(gt); - intel_pxp_suspend(&gt->pxp, false); + intel_pxp_suspend_prepare(&gt->pxp); } static suspend_state_t pm_suspend_target(void) @@ -326,6 +326,7 @@ void intel_gt_suspend_late(struct intel_gt *gt) GEM_BUG_ON(gt->awake); intel_uc_suspend(&gt->uc); + intel_pxp_suspend(&gt->pxp); /* * On disabling the device, we want to turn off HW access to memory @@ -353,7 +354,7 @@ void intel_gt_suspend_late(struct intel_gt *gt) void intel_gt_runtime_suspend(struct intel_gt *gt) { - intel_pxp_suspend(&gt->pxp, true); + intel_pxp_runtime_suspend(&gt->pxp); intel_uc_runtime_suspend(&gt->uc); GT_TRACE(gt, "\n"); @@ -371,7 +372,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt) if (ret) return ret; - intel_pxp_resume(&gt->pxp); + intel_pxp_runtime_resume(&gt->pxp); return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index d7710debcd47..c48557dfa04c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -2373,6 +2373,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) unsigned long flags; bool disabled; + lockdep_assert_held(&guc->submission_state.lock); GEM_BUG_ON(!intel_gt_pm_is_awake(gt)); GEM_BUG_ON(!lrc_desc_registered(guc,
ce->guc_id.id)); GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id)); @@ -2388,7 +2389,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) } spin_unlock_irqrestore(&ce->guc_state.lock, flags); if (unlikely(disabled)) { - release_guc_id(guc, ce); + __release_guc_id(guc, ce); __guc_context_destroy(ce); return; } @@ -3079,8 +3080,8 @@ guc_create_parallel(struct intel_engine_cs **engines, ce = intel_engine_create_virtual(siblings, num_siblings, FORCE_VIRTUAL); - if (!ce) { - err = ERR_PTR(-ENOMEM); + if (IS_ERR(ce)) { + err = ERR_CAST(ce); goto unwind; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index da9055c3ebf0..bcee121bec5a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -11717,7 +11717,9 @@ enum skl_power_gate { #define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \ _TGL_DSI_CHKN_REG_0, \ _TGL_DSI_CHKN_REG_1) -#define TGL_DSI_CHKN_LSHS_GB REG_GENMASK(15, 12) +#define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12) +#define TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \ + (byte_clocks)) /* Display Stream Splitter Control */ #define DSS_CTL1 _MMIO(0x67400) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 2c3cd6e635b5..820a1f38b271 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1537,38 +1537,14 @@ i915_request_await_object(struct i915_request *to, struct drm_i915_gem_object *obj, bool write) { - struct dma_fence *excl; + struct dma_resv_iter cursor; + struct dma_fence *fence; int ret = 0; - if (write) { - struct dma_fence **shared; - unsigned int count, i; - - ret = dma_resv_get_fences(obj->base.resv, &excl, &count, - &shared); + dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) { + ret = i915_request_await_dma_fence(to, fence); if (ret) - return ret; - - for (i = 0; i < count; i++) { - ret = i915_request_await_dma_fence(to, shared[i]); - if (ret) - break; - - dma_fence_put(shared[i]); - } - - for (; i < count; i++) - dma_fence_put(shared[i]); - kfree(shared); - } else { - excl = dma_resv_get_excl_unlocked(obj->base.resv); - } - - if (excl) { - if (ret == 0) - ret = i915_request_await_dma_fence(to, excl); - - dma_fence_put(excl); + break; } return ret; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c index 23fd86de5a24..6a7d4e2ee138 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c @@ -7,26 +7,29 @@ #include "intel_pxp_irq.h" #include "intel_pxp_pm.h" #include "intel_pxp_session.h" +#include "i915_drv.h" -void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime) +void intel_pxp_suspend_prepare(struct intel_pxp *pxp) { if (!intel_pxp_is_enabled(pxp)) return; pxp->arb_is_valid = false; - /* - * Contexts using protected objects keep a runtime PM reference, so we - * can only runtime suspend when all of them have been either closed - * or banned. Therefore, there is no need to invalidate in that - * scenario. 
- */ - if (!runtime) - intel_pxp_invalidate(pxp); + intel_pxp_invalidate(pxp); +} - intel_pxp_fini_hw(pxp); +void intel_pxp_suspend(struct intel_pxp *pxp) +{ + intel_wakeref_t wakeref; - pxp->hw_state_invalidated = false; + if (!intel_pxp_is_enabled(pxp)) + return; + + with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref) { + intel_pxp_fini_hw(pxp); + pxp->hw_state_invalidated = false; + } } void intel_pxp_resume(struct intel_pxp *pxp) @@ -44,3 +47,15 @@ void intel_pxp_resume(struct intel_pxp *pxp) intel_pxp_init_hw(pxp); } + +void intel_pxp_runtime_suspend(struct intel_pxp *pxp) +{ + if (!intel_pxp_is_enabled(pxp)) + return; + + pxp->arb_is_valid = false; + + intel_pxp_fini_hw(pxp); + + pxp->hw_state_invalidated = false; +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h index c89e97a0c3d0..16990a3f2f85 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h @@ -9,16 +9,29 @@ #include "intel_pxp_types.h" #ifdef CONFIG_DRM_I915_PXP -void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime); +void intel_pxp_suspend_prepare(struct intel_pxp *pxp); +void intel_pxp_suspend(struct intel_pxp *pxp); void intel_pxp_resume(struct intel_pxp *pxp); +void intel_pxp_runtime_suspend(struct intel_pxp *pxp); #else -static inline void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime) +static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp) +{ +} + +static inline void intel_pxp_suspend(struct intel_pxp *pxp) { } static inline void intel_pxp_resume(struct intel_pxp *pxp) { } -#endif +static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp) +{ +} +#endif +static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp) +{ + intel_pxp_resume(pxp); +} #endif /* __INTEL_PXP_PM_H__ */ diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 9558e9e1b431..cb685fe2039b 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -81,7 +81,6 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) struct drm_plane_state *old_plane_state, *new_plane_state; bool plane_disabling = false; int i; - bool fence_cookie = dma_fence_begin_signalling(); drm_atomic_helper_commit_modeset_disables(dev, state); @@ -112,7 +111,6 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) } drm_atomic_helper_commit_hw_done(state); - dma_fence_end_signalling(fence_cookie); } static const struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = { diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index 89dd618d78f3..0655582ae8ed 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -88,7 +88,7 @@ static void mxsfb_set_formats(struct mxsfb_drm_private *mxsfb, ctrl |= CTRL_BUS_WIDTH_24; break; default: - dev_err(drm->dev, "Unknown media bus format %d\n", bus_format); + dev_err(drm->dev, "Unknown media bus format 0x%x\n", bus_format); break; } @@ -362,6 +362,12 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc, drm_atomic_get_new_bridge_state(state, mxsfb->bridge); bus_format = bridge_state->input_bus_cfg.format; + if (bus_format == MEDIA_BUS_FMT_FIXED) { + dev_warn_once(drm->dev, + "Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n" + "Please fix bridge driver by handling atomic_get_input_bus_fmts.\n"); + bus_format = MEDIA_BUS_FMT_RGB888_1X24; + } } /* If there is no bridge, use bus format from 
connector */ diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 12b107acb6ee..fa73fe57f97b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1249,7 +1249,6 @@ nouveau_ttm_tt_populate(struct ttm_device *bdev, { struct ttm_tt *ttm_dma = (void *)ttm; struct nouveau_drm *drm; - struct device *dev; bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL); if (ttm_tt_is_populated(ttm)) @@ -1262,7 +1261,6 @@ nouveau_ttm_tt_populate(struct ttm_device *bdev, } drm = nouveau_bdev(bdev); - dev = drm->dev->dev; return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx); } @@ -1272,7 +1270,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { struct nouveau_drm *drm; - struct device *dev; bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL); if (slave) @@ -1281,7 +1278,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev, nouveau_ttm_tt_unbind(bdev, ttm); drm = nouveau_bdev(bdev); - dev = drm->dev->dev; return ttm_pool_free(&drm->ttm.bdev.pool, ttm); } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 6109cd9e3399..e7efd9ede8e4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -562,6 +562,7 @@ nouveau_drm_device_init(struct drm_device *dev) nvkm_dbgopt(nouveau_debug, "DRM"); INIT_LIST_HEAD(&drm->clients); + mutex_init(&drm->clients_lock); spin_lock_init(&drm->tile.lock); /* workaround an odd issue on nvc1 by disabling the device's @@ -632,6 +633,7 @@ fail_alloc: static void nouveau_drm_device_fini(struct drm_device *dev) { + struct nouveau_cli *cli, *temp_cli; struct nouveau_drm *drm = nouveau_drm(dev); if (nouveau_pmops_runtime()) { @@ -656,9 +658,28 @@ nouveau_drm_device_fini(struct drm_device *dev) nouveau_ttm_fini(drm); nouveau_vga_fini(drm); + /* + * There may be existing clients from as-yet unclosed files. For now, + * clean them up here rather than deferring until the file is closed, + * but this likely not correct if we want to support hot-unplugging + * properly. + */ + mutex_lock(&drm->clients_lock); + list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) { + list_del(&cli->head); + mutex_lock(&cli->mutex); + if (cli->abi16) + nouveau_abi16_fini(cli->abi16); + mutex_unlock(&cli->mutex); + nouveau_cli_fini(cli); + kfree(cli); + } + mutex_unlock(&drm->clients_lock); + nouveau_cli_fini(&drm->client); nouveau_cli_fini(&drm->master); nvif_parent_dtor(&drm->parent); + mutex_destroy(&drm->clients_lock); kfree(drm); } @@ -796,7 +817,7 @@ nouveau_drm_device_remove(struct drm_device *dev) struct nvkm_client *client; struct nvkm_device *device; - drm_dev_unregister(dev); + drm_dev_unplug(dev); client = nvxx_client(&drm->client.base); device = nvkm_device_find(client->device); @@ -1090,9 +1111,9 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) fpriv->driver_priv = cli; - mutex_lock(&drm->client.mutex); + mutex_lock(&drm->clients_lock); list_add(&cli->head, &drm->clients); - mutex_unlock(&drm->client.mutex); + mutex_unlock(&drm->clients_lock); done: if (ret && cli) { @@ -1110,6 +1131,16 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) { struct nouveau_cli *cli = nouveau_cli(fpriv); struct nouveau_drm *drm = nouveau_drm(dev); + int dev_index; + + /* + * The device is gone, and as it currently stands all clients are + * cleaned up in the removal codepath. 
In the future this may change + * so that we can support hot-unplugging, but for now we immediately + * return to avoid a double-free situation. + */ + if (!drm_dev_enter(dev, &dev_index)) + return; pm_runtime_get_sync(dev->dev); @@ -1118,14 +1149,15 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) nouveau_abi16_fini(cli->abi16); mutex_unlock(&cli->mutex); - mutex_lock(&drm->client.mutex); + mutex_lock(&drm->clients_lock); list_del(&cli->head); - mutex_unlock(&drm->client.mutex); + mutex_unlock(&drm->clients_lock); nouveau_cli_fini(cli); kfree(cli); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); + drm_dev_exit(dev_index); } static const struct drm_ioctl_desc diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index ba65f136cf48..b2a970aa9bf4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -139,6 +139,11 @@ struct nouveau_drm { struct list_head clients; + /** + * @clients_lock: Protects access to the @clients list of &struct nouveau_cli. + */ + struct mutex clients_lock; + u8 old_pm_cap; struct { diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 8c2ecc282723..9416bee92141 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -56,7 +56,7 @@ static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf) nouveau_bo_del_io_reserve_lru(bo); prot = vm_get_page_prot(vma->vm_flags); - ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1); + ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT); nouveau_bo_add_io_reserve_lru(bo); if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) return ret; @@ -337,7 +337,7 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, struct ttm_buffer_object *bo = &nvbo->bo; uint32_t domains = valid_domains & nvbo->valid_domains & (write_domains ? 
write_domains : read_domains); - uint32_t pref_domains = 0;; + uint32_t pref_domains = 0; if (!domains) return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 1a896a24288a..266809e511e2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -162,10 +162,14 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, */ mm = get_task_mm(current); + if (!mm) { + return -EINVAL; + } mmap_read_lock(mm); if (!cli->svm.svmm) { mmap_read_unlock(mm); + mmput(mm); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c index 704df0f2d1f1..09a112af2f89 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c @@ -78,6 +78,6 @@ int gt215_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_falcon_new_(&gt215_ce, device, type, inst, + return nvkm_falcon_new_(&gt215_ce, device, type, -1, (device->chipset != 0xaf), 0x104000, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index ca75c5f6ecaf..88d262ba648c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2627,6 +2627,27 @@ nv174_chipset = { }; static const struct nvkm_device_chip +nv176_chipset = { + .name = "GA106", + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .devinit = { 0x00000001, ga100_devinit_new }, + .fb = { 0x00000001, ga102_fb_new }, + .gpio = { 0x00000001, ga102_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, ga100_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, ga100_top_new }, + .disp = { 0x00000001, ga102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, ga102_fifo_new }, +}; + +static const struct nvkm_device_chip nv177_chipset = { .name = "GA107", .bar = { 0x00000001, tu102_bar_new }, @@ -3072,6 +3093,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x168: device->chip = &nv168_chipset; break; case 0x172: device->chip = &nv172_chipset; break; case 0x174: device->chip = &nv174_chipset; break; + case 0x176: device->chip = &nv176_chipset; break; case 0x177: device->chip = &nv177_chipset; break; default: if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) { @@ -3147,8 +3169,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, WARN_ON(device->chip->ptr.inst & ~((1 << ARRAY_SIZE(device->ptr)) - 1)); \ for (j = 0; device->chip->ptr.inst && j < ARRAY_SIZE(device->ptr); j++) { \ if ((device->chip->ptr.inst & BIT(j)) && (subdev_mask & BIT_ULL(type))) { \ - int inst = (device->chip->ptr.inst == 1) ? 
-1 : (j); \ - ret = device->chip->ptr.ctor(device, (type), inst, &device->ptr[j]); \ + ret = device->chip->ptr.ctor(device, (type), (j), &device->ptr[j]); \ subdev = nvkm_device_subdev(device, (type), (j)); \ if (ret) { \ nvkm_subdev_del(&subdev); \ diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c index 6e3c450eaace..3ff49344abc7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c @@ -62,7 +62,6 @@ gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet, nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header); nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low); nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high); - nvkm_wr32(device, 0x6f0110 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c index c39e797dc7c9..cf5dcfda7b25 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c @@ -21,7 +21,6 @@ */ #include "priv.h" -#include "priv.h" #include <core/firmware.h> static void * diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c index cdb1ead26d84..82b4c8e1457c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c @@ -207,11 +207,13 @@ int gm200_acr_wpr_parse(struct nvkm_acr *acr) { const struct wpr_header *hdr = (void *)acr->wpr_fw->data; + struct nvkm_acr_lsfw *lsfw; while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) { wpr_header_dump(&acr->subdev, hdr); - if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) - return -ENOMEM; + lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); } return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c index fb9132a39bb1..fd97a935a380 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c @@ -161,11 +161,13 @@ int gp102_acr_wpr_parse(struct nvkm_acr *acr) { const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data; + struct nvkm_acr_lsfw *lsfw; while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) { wpr_header_v1_dump(&acr->subdev, hdr); - if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) - return -ENOMEM; + lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); } return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c index d6a1f8d04c09..186b4e63e559 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c @@ -299,7 +299,7 @@ nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc) page = uvmm->vmm->func->page; for (nr = 0; page[nr].shift; nr++); - if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { + if (!(nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { if ((index = args->v0.index) >= nr) return -EINVAL; type = page[index].type; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 
index b5e733783b5b..17899fc95b2d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c @@ -488,7 +488,7 @@ gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc) struct gp100_vmm_fault_cancel_v0 v0; } *args = argv; int ret = -ENOSYS; - u32 inst, aper; + u32 aper; if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) return ret; @@ -502,7 +502,7 @@ gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc) args->v0.inst |= 0x80000000; if (!WARN_ON(nvkm_gr_ctxsw_pause(device))) { - if ((inst = nvkm_gr_ctxsw_inst(device)) == args->v0.inst) { + if (nvkm_gr_ctxsw_inst(device) == args->v0.inst) { gf100_vmm_invalidate(vmm, 0x0000001b /* CANCEL_TARGETED. */ | (args->v0.hub << 20) | diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 2cb8eba76af8..cfc8d644cedf 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -520,6 +520,16 @@ config DRM_PANEL_SHARP_LS043T1LE01 Say Y here if you want to enable support for Sharp LS043T1LE01 qHD (540x960) DSI panel as found on the Qualcomm APQ8074 Dragonboard +config DRM_PANEL_SHARP_LS060T1SX01 + tristate "Sharp LS060T1SX01 FullHD video mode panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Sharp LS060T1SX01 6.0" + FullHD (1080x1920) DSI panel as found in Dragonboard Display Adapter + Bundle. + config DRM_PANEL_SITRONIX_ST7701 tristate "Sitronix ST7701 panel driver" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 6e30640b9099..bca4cc1f2715 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -53,6 +53,7 @@ obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o +obj-$(CONFIG_DRM_PANEL_SHARP_LS060T1SX01) += panel-sharp-ls060t1sx01.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c index 30f28ad4df6b..31daae1da9c9 100644 --- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c +++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c @@ -8,6 +8,7 @@ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> +#include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/regulator/consumer.h> @@ -220,6 +221,10 @@ static const struct drm_display_mode default_mode_ys = { .height_mm = 130, }; +static const u32 mantix_bus_formats[] = { + MEDIA_BUS_FMT_RGB888_1X24, +}; + static int mantix_get_modes(struct drm_panel *panel, struct drm_connector *connector) { @@ -241,6 +246,10 @@ static int mantix_get_modes(struct drm_panel *panel, connector->display_info.height_mm = mode->height_mm; drm_mode_probed_add(connector, mode); + drm_display_info_set_bus_formats(&connector->display_info, + mantix_bus_formats, + ARRAY_SIZE(mantix_bus_formats)); + return 1; } diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c index e0b1a7e354f3..e0f773678168 100644 --- 
a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c @@ -116,7 +116,8 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi) static int s6e63m0_dsi_remove(struct mipi_dsi_device *dsi) { mipi_dsi_detach(dsi); - return s6e63m0_remove(&dsi->dev); + s6e63m0_remove(&dsi->dev); + return 0; } static const struct of_device_id s6e63m0_dsi_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c index 3669cc3719ce..c178d962b0d5 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c @@ -64,7 +64,8 @@ static int s6e63m0_spi_probe(struct spi_device *spi) static int s6e63m0_spi_remove(struct spi_device *spi) { - return s6e63m0_remove(&spi->dev); + s6e63m0_remove(&spi->dev); + return 0; } static const struct of_device_id s6e63m0_spi_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c index 35d72ac663d6..b34fa4d5de07 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c @@ -749,13 +749,11 @@ int s6e63m0_probe(struct device *dev, void *trsp, } EXPORT_SYMBOL_GPL(s6e63m0_probe); -int s6e63m0_remove(struct device *dev) +void s6e63m0_remove(struct device *dev) { struct s6e63m0 *ctx = dev_get_drvdata(dev); drm_panel_remove(&ctx->panel); - - return 0; } EXPORT_SYMBOL_GPL(s6e63m0_remove); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h index 306605ed1117..c926eca1c817 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h @@ -35,6 +35,6 @@ int s6e63m0_probe(struct device *dev, void *trsp, const u8 *data, size_t len), bool dsi_mode); -int s6e63m0_remove(struct device *dev); +void s6e63m0_remove(struct device *dev); #endif /* _PANEL_SAMSUNG_S6E63M0_H */ diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c new file mode 100644 index 000000000000..e12570561629 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2021 Linaro Ltd. + * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree: + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct sharp_ls060 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct regulator *vddi_supply; + struct regulator *vddh_supply; + struct regulator *avdd_supply; + struct regulator *avee_supply; + struct gpio_desc *reset_gpio; + bool prepared; +}; + +static inline struct sharp_ls060 *to_sharp_ls060(struct drm_panel *panel) +{ + return container_of(panel, struct sharp_ls060, panel); +} + +#define dsi_dcs_write_seq(dsi, seq...) 
({ \ + static const u8 d[] = { seq }; \ + \ + mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ + }) + +static void sharp_ls060_reset(struct sharp_ls060 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(10000, 11000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); +} + +static int sharp_ls060_on(struct sharp_ls060 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + ret = dsi_dcs_write_seq(dsi, 0xbb, 0x13); + if (ret < 0) { + dev_err(dev, "Failed to send command: %d\n", ret); + return ret; + } + + ret = dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START); + if (ret < 0) { + dev_err(dev, "Failed to send command: %d\n", ret); + return ret; + } + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to exit sleep mode: %d\n", ret); + return ret; + } + msleep(120); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display on: %d\n", ret); + return ret; + } + msleep(50); + + return 0; +} + +static int sharp_ls060_off(struct sharp_ls060 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display off: %d\n", ret); + return ret; + } + usleep_range(2000, 3000); + + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to enter sleep mode: %d\n", ret); + return ret; + } + msleep(121); + + return 0; +} + +static int sharp_ls060_prepare(struct drm_panel *panel) +{ + struct sharp_ls060 *ctx = to_sharp_ls060(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + if (ctx->prepared) + return 0; + + ret = regulator_enable(ctx->vddi_supply); + if (ret < 0) + return ret; + + ret = regulator_enable(ctx->avdd_supply); + if (ret < 0) + goto err_avdd; + + usleep_range(1000, 2000); + + ret = regulator_enable(ctx->avee_supply); + if (ret < 0) + goto err_avee; + + usleep_range(10000, 11000); + + ret = regulator_enable(ctx->vddh_supply); + if (ret < 0) + goto err_vddh; + + usleep_range(10000, 11000); + + sharp_ls060_reset(ctx); + + ret = sharp_ls060_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + goto err_on; + } + + ctx->prepared = true; + + return 0; + +err_on: + regulator_disable(ctx->vddh_supply); + + usleep_range(10000, 11000); + +err_vddh: + regulator_disable(ctx->avee_supply); + +err_avee: + regulator_disable(ctx->avdd_supply); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + +err_avdd: + regulator_disable(ctx->vddi_supply); + + return ret; +} + +static int sharp_ls060_unprepare(struct drm_panel *panel) +{ + struct sharp_ls060 *ctx = to_sharp_ls060(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + if (!ctx->prepared) + return 0; + + ret = sharp_ls060_off(ctx); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + regulator_disable(ctx->vddh_supply); + + usleep_range(10000, 11000); + + regulator_disable(ctx->avee_supply); + regulator_disable(ctx->avdd_supply); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + + regulator_disable(ctx->vddi_supply); + + ctx->prepared = false; + return 0; +} + +static const struct drm_display_mode sharp_ls060_mode = { + .clock = (1080 + 96 + 16 + 64) * (1920 + 4 + 1 + 16) * 60 / 1000, + .hdisplay = 1080, + 
.hsync_start = 1080 + 96, + .hsync_end = 1080 + 96 + 16, + .htotal = 1080 + 96 + 16 + 64, + .vdisplay = 1920, + .vsync_start = 1920 + 4, + .vsync_end = 1920 + 4 + 1, + .vtotal = 1920 + 4 + 1 + 16, + .width_mm = 75, + .height_mm = 132, +}; + +static int sharp_ls060_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &sharp_ls060_mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs sharp_ls060_panel_funcs = { + .prepare = sharp_ls060_prepare, + .unprepare = sharp_ls060_unprepare, + .get_modes = sharp_ls060_get_modes, +}; + +static int sharp_ls060_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct sharp_ls060 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->vddi_supply = devm_regulator_get(dev, "vddi"); + if (IS_ERR(ctx->vddi_supply)) + return PTR_ERR(ctx->vddi_supply); + + ctx->vddh_supply = devm_regulator_get(dev, "vddh"); + if (IS_ERR(ctx->vddh_supply)) + return PTR_ERR(ctx->vddh_supply); + + ctx->avdd_supply = devm_regulator_get(dev, "avdd"); + if (IS_ERR(ctx->avdd_supply)) + return PTR_ERR(ctx->avdd_supply); + + ctx->avee_supply = devm_regulator_get(dev, "avee"); + if (IS_ERR(ctx->avee_supply)) + return PTR_ERR(ctx->avee_supply); + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_NO_EOT_PACKET | + MIPI_DSI_CLOCK_NON_CONTINUOUS; + + drm_panel_init(&ctx->panel, dev, &sharp_ls060_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + ret = drm_panel_of_backlight(&ctx->panel); + if (ret) + return dev_err_probe(dev, ret, "Failed to get backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; +} + +static int sharp_ls060_remove(struct mipi_dsi_device *dsi) +{ + struct sharp_ls060 *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&ctx->panel); + + return 0; +} + +static const struct of_device_id sharp_ls060t1sx01_of_match[] = { + { .compatible = "sharp,ls060t1sx01" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sharp_ls060t1sx01_of_match); + +static struct mipi_dsi_driver sharp_ls060_driver = { + .probe = sharp_ls060_probe, + .remove = sharp_ls060_remove, + .driver = { + .name = "panel-sharp-ls060t1sx01", + .of_match_table = sharp_ls060t1sx01_of_match, + }, +}; +module_mipi_dsi_driver(sharp_ls060_driver); + +MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>"); +MODULE_DESCRIPTION("DRM driver for Sharp LS060T1SX01 1080p video mode dsi panel"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 7f3e1b84b5f5..eb475a3a774b 100644 --- 
a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2370,6 +2370,38 @@ static const struct panel_desc logictechno_lt170410_2whc = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; +static const struct drm_display_mode logictechno_lttd800480070_l2rt_mode = { + .clock = 33000, + .hdisplay = 800, + .hsync_start = 800 + 112, + .hsync_end = 800 + 112 + 3, + .htotal = 800 + 112 + 3 + 85, + .vdisplay = 480, + .vsync_start = 480 + 38, + .vsync_end = 480 + 38 + 3, + .vtotal = 480 + 38 + 3 + 29, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static const struct panel_desc logictechno_lttd800480070_l2rt = { + .modes = &logictechno_lttd800480070_l2rt_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 154, + .height = 86, + }, + .delay = { + .prepare = 45, + .enable = 100, + .disable = 100, + .unprepare = 45 + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, +}; + static const struct drm_display_mode logictechno_lttd800480070_l6wh_rt_mode = { .clock = 33000, .hdisplay = 800, @@ -3751,6 +3783,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "logictechno,lt170410-2whc", .data = &logictechno_lt170410_2whc, }, { + .compatible = "logictechno,lttd800480070-l2rt", + .data = &logictechno_lttd800480070_l2rt, + }, { .compatible = "logictechno,lttd800480070-l6wh-rt", .data = &logictechno_lttd800480070_l6wh_rt, }, { diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c index a2c303e5732c..73f69c929a75 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c @@ -453,6 +453,10 @@ disable_vcc: return ret; } +static const u32 mantix_bus_formats[] = { + MEDIA_BUS_FMT_RGB888_1X24, +}; + static int st7703_get_modes(struct drm_panel *panel, struct drm_connector *connector) { @@ -474,6 +478,10 @@ static int st7703_get_modes(struct drm_panel *panel, connector->display_info.height_mm = mode->height_mm; drm_mode_probed_add(connector, mode); + drm_display_info_set_bus_formats(&connector->display_info, + mantix_bus_formats, + ARRAY_SIZE(mantix_bus_formats)); + return 1; } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 458f92a70887..a36a4f2c76b0 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -61,7 +61,7 @@ static vm_fault_t radeon_gem_fault(struct vm_fault *vmf) goto unlock_resv; ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, - TTM_BO_VM_NUM_PREFAULT, 1); + TTM_BO_VM_NUM_PREFAULT); if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) goto unlock_mclk; diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 042c16b5d54a..f91fb31ab7a7 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -699,30 +699,20 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, struct drm_gem_object *obj, bool write) { + struct dma_resv_iter cursor; + struct dma_fence *fence; int ret; - struct dma_fence **fences; - unsigned int i, fence_count; - - if (!write) { - struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv); - - return drm_sched_job_add_dependency(job, fence); - } - - ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences); - if (ret || !fence_count) - return ret; - for (i = 0; i < fence_count; i++) { - ret = 
drm_sched_job_add_dependency(job, fences[i]); - if (ret) - break; + dma_resv_for_each_fence(&cursor, obj->resv, write, fence) { + /* Make sure to grab an additional ref on the added fence */ + dma_fence_get(fence); + ret = drm_sched_job_add_dependency(job, fence); + if (ret) { + dma_fence_put(fence); + return ret; + } } - - for (; i < fence_count; i++) - dma_fence_put(fences[i]); - kfree(fences); - return ret; + return 0; } EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies); diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 5755f0432e77..8c796de53222 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig @@ -46,6 +46,7 @@ config DRM_SUN6I_DSI default MACH_SUN8I select CRC_CCITT select DRM_MIPI_DSI + select RESET_CONTROLLER select PHY_SUN6I_MIPI_DPHY help Choose this option if you want have an Allwinner SoC with diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index d62b2013c367..739f11c0109c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -269,23 +269,15 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) { struct dma_resv *resv = &bo->base._resv; - struct dma_resv_list *fobj; + struct dma_resv_iter cursor; struct dma_fence *fence; - int i; - - rcu_read_lock(); - fobj = dma_resv_shared_list(resv); - fence = dma_resv_excl_fence(resv); - if (fence && !fence->ops->signaled) - dma_fence_enable_sw_signaling(fence); - - for (i = 0; fobj && i < fobj->shared_count; ++i) { - fence = rcu_dereference(fobj->shared[i]); + dma_resv_iter_begin(&cursor, resv, true); + dma_resv_for_each_fence_unlocked(&cursor, fence) { if (!fence->ops->signaled) dma_fence_enable_sw_signaling(fence); } - rcu_read_unlock(); + dma_resv_iter_end(&cursor); } /** @@ -627,7 +619,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, *busy = !ret; } - if (ret && place && !bo->bdev->funcs->eviction_valuable(bo, place)) { + if (ret && place && (bo->resource->mem_type != place->mem_type || + !bo->bdev->funcs->eviction_valuable(bo, place))) { ret = false; if (*locked) { dma_resv_unlock(bo->base.resv); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 33680c94127c..08ba083a80d2 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -173,89 +173,6 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_vm_reserve); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -/** - * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults - * @vmf: Fault data - * @bo: The buffer object - * @page_offset: Page offset from bo start - * @fault_page_size: The size of the fault in pages. - * @pgprot: The page protections. - * Does additional checking whether it's possible to insert a PUD or PMD - * pfn and performs the insertion. - * - * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if - * a huge fault was not possible, or on insertion error. - */ -static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, - struct ttm_buffer_object *bo, - pgoff_t page_offset, - pgoff_t fault_page_size, - pgprot_t pgprot) -{ - pgoff_t i; - vm_fault_t ret; - unsigned long pfn; - pfn_t pfnt; - struct ttm_tt *ttm = bo->ttm; - bool write = vmf->flags & FAULT_FLAG_WRITE; - - /* Fault should not cross bo boundary. 
*/ - page_offset &= ~(fault_page_size - 1); - if (page_offset + fault_page_size > bo->resource->num_pages) - goto out_fallback; - - if (bo->resource->bus.is_iomem) - pfn = ttm_bo_io_mem_pfn(bo, page_offset); - else - pfn = page_to_pfn(ttm->pages[page_offset]); - - /* pfn must be fault_page_size aligned. */ - if ((pfn & (fault_page_size - 1)) != 0) - goto out_fallback; - - /* Check that memory is contiguous. */ - if (!bo->resource->bus.is_iomem) { - for (i = 1; i < fault_page_size; ++i) { - if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i) - goto out_fallback; - } - } else if (bo->bdev->funcs->io_mem_pfn) { - for (i = 1; i < fault_page_size; ++i) { - if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i) - goto out_fallback; - } - } - - pfnt = __pfn_to_pfn_t(pfn, PFN_DEV); - if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT)) - ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write); -#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD - else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT)) - ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write); -#endif - else - WARN_ON_ONCE(ret = VM_FAULT_FALLBACK); - - if (ret != VM_FAULT_NOPAGE) - goto out_fallback; - - return VM_FAULT_NOPAGE; -out_fallback: - count_vm_event(THP_FAULT_FALLBACK); - return VM_FAULT_FALLBACK; -} -#else -static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, - struct ttm_buffer_object *bo, - pgoff_t page_offset, - pgoff_t fault_page_size, - pgprot_t pgprot) -{ - return VM_FAULT_FALLBACK; -} -#endif - /** * ttm_bo_vm_fault_reserved - TTM fault helper * @vmf: The struct vm_fault given as argument to the fault callback @@ -263,7 +180,6 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, * @num_prefault: Maximum number of prefault pages. The caller may want to * specify this based on madvice settings and the size of the GPU object * backed by the memory. - * @fault_page_size: The size of the fault in pages. * * This function inserts one or more page table entries pointing to the * memory backing the buffer object, and then returns a return code @@ -277,8 +193,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, */ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, pgprot_t prot, - pgoff_t num_prefault, - pgoff_t fault_page_size) + pgoff_t num_prefault) { struct vm_area_struct *vma = vmf->vma; struct ttm_buffer_object *bo = vma->vm_private_data; @@ -329,11 +244,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, prot = pgprot_decrypted(prot); } - /* We don't prefault on huge faults. Yet. */ - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) - return ttm_bo_vm_insert_huge(vmf, bo, page_offset, - fault_page_size, prot); - /* * Speculatively prefault a number of pages. Only error on * first page. 
@@ -429,7 +339,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) prot = vma->vm_page_prot; if (drm_dev_enter(ddev, &idx)) { - ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1); + ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT); drm_dev_exit(idx); } else { ret = ttm_bo_vm_dummy_page(vmf, prot); diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index 3750fd216131..930574ad2bca 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -30,7 +30,7 @@ static int udl_get_edid_block(void *data, u8 *buf, unsigned int block, int bval = (i + block * EDID_LENGTH) << 8; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x02, (0x80 | (0x02 << 5)), bval, - 0xA1, read_buff, 2, HZ); + 0xA1, read_buff, 2, 1000); if (ret < 1) { DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); kfree(read_buff); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 6a000d77c568..e47ae40a865a 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -487,8 +487,8 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, for (i = 0; i < se->in_sync_count; i++) { struct drm_v3d_sem in; - ret = copy_from_user(&in, handle++, sizeof(in)); - if (ret) { + if (copy_from_user(&in, handle++, sizeof(in))) { + ret = -EFAULT; DRM_DEBUG("Failed to copy wait dep handle.\n"); goto fail_deps; } @@ -609,8 +609,8 @@ v3d_get_multisync_post_deps(struct drm_file *file_priv, for (i = 0; i < count; i++) { struct drm_v3d_sem out; - ret = copy_from_user(&out, post_deps++, sizeof(out)); - if (ret) { + if (copy_from_user(&out, post_deps++, sizeof(out))) { + ret = -EFAULT; DRM_DEBUG("Failed to copy post dep handles\n"); goto fail; } @@ -646,9 +646,8 @@ v3d_get_multisync_submit_deps(struct drm_file *file_priv, struct v3d_submit_ext *se = data; int ret; - ret = copy_from_user(&multisync, ext, sizeof(multisync)); - if (ret) - return ret; + if (copy_from_user(&multisync, ext, sizeof(multisync))) + return -EFAULT; if (multisync.pad) return -EINVAL; diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index fddaeb0b09c1..f642bd6e71ff 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -391,7 +391,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size) bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) - return ERR_PTR(-ENOMEM); + return NULL; bo->madv = VC4_MADV_WILLNEED; refcount_set(&bo->usecnt, 0); diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index a6caebd4a0dd..5b00310ac4cd 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -308,8 +308,10 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-EINVAL); virtio_gpu_fb = kzalloc(sizeof(*virtio_gpu_fb), GFP_KERNEL); - if (virtio_gpu_fb == NULL) + if (virtio_gpu_fb == NULL) { + drm_gem_object_put(obj); return ERR_PTR(-ENOMEM); + } ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj); if (ret) { diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 749db18dcfa2..d86e1ad4a972 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -163,10 +163,11 @@ static __poll_t virtio_gpu_poll(struct file *filp, struct drm_file *drm_file = filp->private_data; struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv; struct drm_device *dev = 
drm_file->minor->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; struct drm_pending_event *e = NULL; __poll_t mask = 0; - if (!vfpriv->ring_idx_mask) + if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask) return drm_poll(filp, wait); poll_wait(filp, &drm_file->event_wait, wait); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index a833751099b5..858aff99a3fe 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1550,10 +1550,6 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, pgoff_t start, pgoff_t end); vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf); vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, - enum page_entry_size pe_size); -#endif /* Transparent hugepage support - vmwgfx_thp.c */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index e5a9a5cbd01a..922317d1acc8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -477,7 +477,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) else prot = vm_get_page_prot(vma->vm_flags); - ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault, 1); + ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault); if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) return ret; @@ -486,73 +486,3 @@ out_unlock: return ret; } - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, - enum page_entry_size pe_size) -{ - struct vm_area_struct *vma = vmf->vma; - struct ttm_buffer_object *bo = (struct ttm_buffer_object *) - vma->vm_private_data; - struct vmw_buffer_object *vbo = - container_of(bo, struct vmw_buffer_object, base); - pgprot_t prot; - vm_fault_t ret; - pgoff_t fault_page_size; - bool write = vmf->flags & FAULT_FLAG_WRITE; - - switch (pe_size) { - case PE_SIZE_PMD: - fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT; - break; -#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD - case PE_SIZE_PUD: - fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT; - break; -#endif - default: - WARN_ON_ONCE(1); - return VM_FAULT_FALLBACK; - } - - /* Always do write dirty-tracking and COW on PTE level. */ - if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping(vma->vm_flags))) - return VM_FAULT_FALLBACK; - - ret = ttm_bo_vm_reserve(bo, vmf); - if (ret) - return ret; - - if (vbo->dirty) { - pgoff_t allowed_prefault; - unsigned long page_offset; - - page_offset = vmf->pgoff - - drm_vma_node_start(&bo->base.vma_node); - if (page_offset >= bo->resource->num_pages || - vmw_resources_clean(vbo, page_offset, - page_offset + PAGE_SIZE, - &allowed_prefault)) { - ret = VM_FAULT_SIGBUS; - goto out_unlock; - } - - /* - * Write protect, so we get a new fault on write, and can - * split. 
- */ - prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED); - } else { - prot = vm_get_page_prot(vma->vm_flags); - } - - ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size); - if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) - return ret; - -out_unlock: - dma_resv_unlock(bo->base.resv); - - return ret; -} -#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index e6b1f98ec99f..0a4c340252ec 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -61,9 +61,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) .fault = vmw_bo_vm_fault, .open = ttm_bo_vm_open, .close = ttm_bo_vm_close, -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - .huge_fault = vmw_bo_vm_huge_fault, -#endif }; struct drm_file *file_priv = filp->private_data; struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 9f14d99c763c..bc7605324db3 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -773,6 +773,7 @@ static struct xenbus_driver xen_driver = { .probe = xen_drv_probe, .remove = xen_drv_remove, .otherend_changed = displback_changed, + .not_essential = true, }; static int __init xen_drv_init(void) diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 5d57214d8dee..f3ecddc519ee 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -854,7 +854,7 @@ static int asus_input_mapping(struct hid_device *hdev, switch (usage->hid & HID_USAGE) { case 0x10: asus_map_key_clear(KEY_BRIGHTNESSDOWN); break; case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP); break; - case 0x35: asus_map_key_clear(KEY_SCREENLOCK); break; + case 0x35: asus_map_key_clear(KEY_DISPLAY_OFF); break; case 0x6c: asus_map_key_clear(KEY_SLEEP); break; case 0x7c: asus_map_key_clear(KEY_MICMUTE); break; case 0x82: asus_map_key_clear(KEY_CAMERA); break; diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c index 4ef1c3b8094e..8ee77f4afe9f 100644 --- a/drivers/hid/hid-ft260.c +++ b/drivers/hid/hid-ft260.c @@ -966,24 +966,23 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id) mutex_init(&dev->lock); init_completion(&dev->wait); + ret = ft260_xfer_status(dev); + if (ret) + ft260_i2c_reset(hdev); + + i2c_set_adapdata(&dev->adap, dev); ret = i2c_add_adapter(&dev->adap); if (ret) { hid_err(hdev, "failed to add i2c adapter\n"); goto err_hid_close; } - i2c_set_adapdata(&dev->adap, dev); - ret = sysfs_create_group(&hdev->dev.kobj, &ft260_attr_group); if (ret < 0) { hid_err(hdev, "failed to create sysfs attrs\n"); goto err_i2c_free; } - ret = ft260_xfer_status(dev); - if (ret) - ft260_i2c_reset(hdev); - return 0; err_i2c_free: diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 95037a3e2e6e..96a455921c67 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -397,6 +397,7 @@ #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401 #define USB_DEVICE_ID_HP_X2 0x074d #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755 +#define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05 #define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 2c72ce4147b1..217f2d1b91c5 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -160,6 +160,7 @@ static int hidinput_setkeycode(struct input_dev 
*dev, if (usage) { *old_keycode = usage->type == EV_KEY ? usage->code : KEY_RESERVED; + usage->type = EV_KEY; usage->code = ke->keycode; clear_bit(*old_keycode, dev->keybit); @@ -324,6 +325,8 @@ static const struct hid_device_id hid_battery_quirks[] = { HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN), HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15), + HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15), HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN), @@ -650,10 +653,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel code += KEY_MACRO1; else code += BTN_TRIGGER_HAPPY - 0x1e; - } else { - goto ignore; + break; } - break; + fallthrough; default: switch (field->physical) { case HID_GD_MOUSE: diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 686788ebf3e1..d7687ce70614 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -256,8 +256,11 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda unsigned long now = jiffies; int step_x = msc->touches[id].scroll_x - x; int step_y = msc->touches[id].scroll_y - y; - int step_hr = ((64 - (int)scroll_speed) * msc->scroll_accel) / - SCROLL_HR_STEPS; + int step_hr = + max_t(int, + ((64 - (int)scroll_speed) * msc->scroll_accel) / + SCROLL_HR_STEPS, + 1); int step_x_hr = msc->touches[id].scroll_x_hr - x; int step_y_hr = msc->touches[id].scroll_y_hr - y; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index e1afddb7b33d..082376a6cb3d 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1888,6 +1888,11 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) }, + /* eGalax devices (SAW) */ + { .driver_data = MT_CLS_EXPORT_ALL_INPUTS, + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER) }, + /* eGalax devices (resistive) */ { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index a1e0f6849875..b6a9a0f3966e 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -189,6 +189,7 @@ struct joycon_rumble_amp_data { u16 amp; }; +#if IS_ENABLED(CONFIG_NINTENDO_FF) /* * These tables are from * https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering/blob/master/rumble_data_table.md @@ -289,6 +290,10 @@ static const struct joycon_rumble_amp_data joycon_rumble_amplitudes[] = { { 0xc2, 0x8070, 940 }, { 0xc4, 0x0071, 960 }, { 0xc6, 0x8071, 981 }, { 0xc8, 0x0072, joycon_max_rumble_amp } }; +static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160; +static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320; +#endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */ +static const u16 JC_RUMBLE_PERIOD_MS = 50; /* States for controller state machine */ enum joycon_ctlr_state { @@ -397,9 +402,6 @@ struct joycon_input_report { #define JC_RUMBLE_DATA_SIZE 8 #define JC_RUMBLE_QUEUE_SIZE 8 -static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160; -static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320; -static const u16 JC_RUMBLE_PERIOD_MS = 50; static const unsigned short JC_RUMBLE_ZERO_AMP_PKT_CNT = 5; static const char * const joycon_player_led_names[] = { @@ -1850,8 +1852,10 @@ static int joycon_leds_create(struct joycon_ctlr *ctlr) d_name, "green", 
joycon_player_led_names[i]); - if (!name) + if (!name) { + mutex_unlock(&joycon_input_num_mutex); return -ENOMEM; + } led = &ctlr->leds[i]; led->name = name; @@ -1864,6 +1868,7 @@ static int joycon_leds_create(struct joycon_ctlr *ctlr) ret = devm_led_classdev_register(&hdev->dev, led); if (ret) { hid_err(hdev, "Failed registering %s LED\n", led->name); + mutex_unlock(&joycon_input_num_mutex); return ret; } } diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c index d44550aa8805..3a5333424aa3 100644 --- a/drivers/hid/hid-thrustmaster.c +++ b/drivers/hid/hid-thrustmaster.c @@ -205,7 +205,7 @@ static void thrustmaster_model_handler(struct urb *urb) struct tm_wheel *tm_wheel = hid_get_drvdata(hdev); uint16_t model = 0; int i, ret; - const struct tm_wheel_info *twi = 0; + const struct tm_wheel_info *twi = NULL; if (urb->status) { hid_err(hdev, "URB to get model id failed with error %d\n", urb->status); @@ -238,7 +238,7 @@ static void thrustmaster_model_handler(struct urb *urb) tm_wheel->usb_dev, usb_sndctrlpipe(tm_wheel->usb_dev, 0), (char *)tm_wheel->change_request, - 0, 0, // We do not expect any response from the wheel + NULL, 0, // We do not expect any response from the wheel thrustmaster_change_handler, hdev ); @@ -272,7 +272,7 @@ static void thrustmaster_remove(struct hid_device *hdev) static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret = 0; - struct tm_wheel *tm_wheel = 0; + struct tm_wheel *tm_wheel = NULL; ret = hid_parse(hdev); if (ret) { diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c index 1b486f262747..0e1183e96147 100644 --- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c +++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c @@ -76,9 +76,12 @@ enum ish_loader_commands { #define LOADER_XFER_MODE_ISHTP BIT(1) /* ISH Transport Loader client unique GUID */ -static const guid_t loader_ishtp_guid = - GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7, - 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc); +static const struct ishtp_device_id loader_ishtp_id_table[] = { + { .guid = GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7, + 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc) }, + { } +}; +MODULE_DEVICE_TABLE(ishtp, loader_ishtp_id_table); #define FILENAME_SIZE 256 @@ -880,7 +883,7 @@ static int loader_init(struct ishtp_cl *loader_ishtp_cl, int reset) fw_client = ishtp_fw_cl_get_client(ishtp_get_ishtp_device(loader_ishtp_cl), - &loader_ishtp_guid); + &loader_ishtp_id_table[0].guid); if (!fw_client) { dev_err(cl_data_to_dev(client_data), "ISH client uuid not found\n"); @@ -1057,7 +1060,7 @@ static int loader_ishtp_cl_reset(struct ishtp_cl_device *cl_device) static struct ishtp_cl_driver loader_ishtp_cl_driver = { .name = "ish-loader", - .guid = &loader_ishtp_guid, + .id = loader_ishtp_id_table, .probe = loader_ishtp_cl_probe, .remove = loader_ishtp_cl_remove, .reset = loader_ishtp_cl_reset, @@ -1083,4 +1086,3 @@ MODULE_DESCRIPTION("ISH ISH-TP Host firmware Loader Client Driver"); MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("ishtp:*"); diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c index 91bf4d01e91a..4338c9b68a43 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c @@ -12,9 +12,12 @@ #include "ishtp-hid.h" /* ISH Transport protocol (ISHTP in short) GUID */ -static const guid_t hid_ishtp_guid = - GUID_INIT(0x33AECD58, 0xB679, 
0x4E54, - 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26); +static const struct ishtp_device_id hid_ishtp_id_table[] = { + { .guid = GUID_INIT(0x33AECD58, 0xB679, 0x4E54, + 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26), }, + { } +}; +MODULE_DEVICE_TABLE(ishtp, hid_ishtp_id_table); /* Rx ring buffer pool size */ #define HID_CL_RX_RING_SIZE 32 @@ -662,7 +665,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset) ishtp_set_tx_ring_size(hid_ishtp_cl, HID_CL_TX_RING_SIZE); ishtp_set_rx_ring_size(hid_ishtp_cl, HID_CL_RX_RING_SIZE); - fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_guid); + fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_id_table[0].guid); if (!fw_client) { dev_err(cl_data_to_dev(client_data), "ish client uuid not found\n"); @@ -945,7 +948,7 @@ static const struct dev_pm_ops hid_ishtp_pm_ops = { static struct ishtp_cl_driver hid_ishtp_cl_driver = { .name = "ish-hid", - .guid = &hid_ishtp_guid, + .id = hid_ishtp_id_table, .probe = hid_ishtp_cl_probe, .remove = hid_ishtp_cl_remove, .reset = hid_ishtp_cl_reset, @@ -981,4 +984,3 @@ MODULE_AUTHOR("Daniel Drubin <daniel.drubin@intel.com>"); MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("ishtp:*"); diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c index 334eac611774..f68aba8794fe 100644 --- a/drivers/hid/intel-ish-hid/ishtp/bus.c +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c @@ -241,7 +241,7 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv) struct ishtp_cl_device *device = to_ishtp_cl_device(dev); struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv); - return guid_equal(driver->guid, + return guid_equal(&driver->id[0].guid, &device->fw_client->props.protocol_name); } @@ -350,7 +350,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, { int len; - len = snprintf(buf, PAGE_SIZE, "ishtp:%s\n", dev_name(dev)); + len = snprintf(buf, PAGE_SIZE, ISHTP_MODULE_PREFIX "%s\n", dev_name(dev)); return (len >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : len; } static DEVICE_ATTR_RO(modalias); @@ -363,7 +363,7 @@ ATTRIBUTE_GROUPS(ishtp_cl_dev); static int ishtp_cl_uevent(struct device *dev, struct kobj_uevent_env *env) { - if (add_uevent_var(env, "MODALIAS=ishtp:%s", dev_name(dev))) + if (add_uevent_var(env, "MODALIAS=" ISHTP_MODULE_PREFIX "%s", dev_name(dev))) return -ENOMEM; return 0; } diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 33a6908995b1..2a4cc39962e7 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2603,6 +2603,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev, return; switch (equivalent_usage) { + case HID_DG_CONFIDENCE: + wacom_wac->hid_data.confidence = value; + break; case HID_GD_X: wacom_wac->hid_data.x = value; break; @@ -2635,7 +2638,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev, } if (usage->usage_index + 1 == field->report_count) { - if (equivalent_usage == wacom_wac->hid_data.last_slot_field) + if (equivalent_usage == wacom_wac->hid_data.last_slot_field && + wacom_wac->hid_data.confidence) wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); } } @@ -2653,6 +2657,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev, wacom_wac->is_invalid_bt_frame = false; + hid_data->confidence = true; + for (i = 0; i < report->maxfield; i++) { struct hid_field *field = report->field[i]; int j; diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 8b2d4e5b2303..466b62cc16dc 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -301,6 +301,7 @@ struct hid_data { bool barrelswitch; bool barrelswitch2; bool serialhi; + bool confidence; int x; int y; int pressure; diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 7f11ea07d698..ca873a3b98db 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -480,7 +480,7 @@ module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR)); MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure"); static atomic_t trans_id = ATOMIC_INIT(0); -static int dm_ring_size = 20 * 1024; +static int dm_ring_size = VMBUS_RING_SIZE(16 * 1024); /* * Driver specific state. diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 05187457f88a..41446f9cc52d 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -191,6 +191,7 @@ #define SMBSLVSTS_HST_NTFY_STS BIT(0) /* Host Notify Command register bits */ +#define SMBSLVCMD_SMBALERT_DISABLE BIT(2) #define SMBSLVCMD_HST_NTFY_INTREN BIT(0) #define STATUS_ERROR_FLAGS (SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | \ @@ -259,6 +260,7 @@ struct i801_priv { struct i2c_adapter adapter; unsigned long smba; unsigned char original_hstcfg; + unsigned char original_hstcnt; unsigned char original_slvcmd; struct pci_dev *pci_dev; unsigned int features; @@ -641,12 +643,20 @@ static irqreturn_t i801_isr(int irq, void *dev_id) i801_isr_byte_done(priv); /* - * Clear irq sources and report transaction result. + * Clear remaining IRQ sources: Completion of last command, errors + * and the SMB_ALERT signal. SMB_ALERT status is set after signal + * assertion independently of the interrupt generation being blocked + * or not so clear it always when the status is set. + */ + status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS | SMBHSTSTS_SMBALERT_STS; + if (status) + outb_p(status, SMBHSTSTS(priv)); + status &= ~SMBHSTSTS_SMBALERT_STS; /* SMB_ALERT not reported */ + /* + * Report transaction result. * ->status must be cleared before the next transaction is started. 
*/ - status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS; if (status) { - outb_p(status, SMBHSTSTS(priv)); priv->status = status; complete(&priv->done); } @@ -974,9 +984,13 @@ static void i801_enable_host_notify(struct i2c_adapter *adapter) if (!(priv->features & FEATURE_HOST_NOTIFY)) return; - if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd)) - outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd, - SMBSLVCMD(priv)); + /* + * Enable host notify interrupt and block the generation of interrupt + * from the SMB_ALERT signal because the driver does not support + * SMBus Alert. + */ + outb_p(SMBSLVCMD_HST_NTFY_INTREN | SMBSLVCMD_SMBALERT_DISABLE | + priv->original_slvcmd, SMBSLVCMD(priv)); /* clear Host Notify bit to allow a new notification */ outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv)); @@ -1805,7 +1819,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) outb_p(inb_p(SMBAUXCTL(priv)) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv)); - /* Remember original Host Notify setting */ + /* Remember original Interrupt and Host Notify settings */ + priv->original_hstcnt = inb_p(SMBHSTCNT(priv)) & ~SMBHSTCNT_KILL; if (priv->features & FEATURE_HOST_NOTIFY) priv->original_slvcmd = inb_p(SMBSLVCMD(priv)); @@ -1869,6 +1884,7 @@ static void i801_remove(struct pci_dev *dev) { struct i801_priv *priv = pci_get_drvdata(dev); + outb_p(priv->original_hstcnt, SMBHSTCNT(priv)); i801_disable_host_notify(priv); i801_del_mux(priv); i2c_del_adapter(&priv->adapter); @@ -1892,6 +1908,7 @@ static void i801_shutdown(struct pci_dev *dev) struct i801_priv *priv = pci_get_drvdata(dev); /* Restore config registers to avoid hard hang on some systems */ + outb_p(priv->original_hstcnt, SMBHSTCNT(priv)); i801_disable_host_notify(priv); pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); } @@ -1901,6 +1918,7 @@ static int i801_suspend(struct device *dev) { struct i801_priv *priv = dev_get_drvdata(dev); + outb_p(priv->original_hstcnt, SMBHSTCNT(priv)); pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg); return 0; } diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c index 1ed4daa918a0..95378780da6d 100644 --- a/drivers/i2c/busses/i2c-virtio.c +++ b/drivers/i2c/busses/i2c-virtio.c @@ -104,11 +104,10 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq, static int virtio_i2c_complete_reqs(struct virtqueue *vq, struct virtio_i2c_req *reqs, - struct i2c_msg *msgs, int num, - bool timedout) + struct i2c_msg *msgs, int num) { struct virtio_i2c_req *req; - bool failed = timedout; + bool failed = false; unsigned int len; int i, j = 0; @@ -130,7 +129,7 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq, j++; } - return timedout ? 
-ETIMEDOUT : j; + return j; } static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, @@ -139,7 +138,6 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, struct virtio_i2c *vi = i2c_get_adapdata(adap); struct virtqueue *vq = vi->vq; struct virtio_i2c_req *reqs; - unsigned long time_left; int count; reqs = kcalloc(num, sizeof(*reqs), GFP_KERNEL); @@ -162,11 +160,9 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, reinit_completion(&vi->completion); virtqueue_kick(vq); - time_left = wait_for_completion_timeout(&vi->completion, adap->timeout); - if (!time_left) - dev_err(&adap->dev, "virtio i2c backend timeout.\n"); + wait_for_completion(&vi->completion); - count = virtio_i2c_complete_reqs(vq, reqs, msgs, count, !time_left); + count = virtio_i2c_complete_reqs(vq, reqs, msgs, count); err_free: kfree(reqs); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index fedc0fa6ebf9..f5aacaf7fb8e 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -1906,7 +1906,8 @@ static int nldev_stat_set_mode_doit(struct sk_buff *msg, int ret; /* Currently only counter for QP is supported */ - if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) + if (!tb[RDMA_NLDEV_ATTR_STAT_RES] || + nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) return -EINVAL; mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 692d5ff657df..c18634bec212 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1232,6 +1232,9 @@ static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd, INIT_LIST_HEAD(&qp->rdma_mrs); INIT_LIST_HEAD(&qp->sig_mrs); + qp->send_cq = attr->send_cq; + qp->recv_cq = attr->recv_cq; + rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP); WARN_ONCE(!udata && !caller, "Missing kernel QP owner"); rdma_restrack_set_name(&qp->res, udata ? 
NULL : caller); diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index ed9fa0d84e9e..dc9211f3a009 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1628,8 +1628,7 @@ static int init_cntr_names(const char *names_in, const size_t names_len, n++; names_out = - kmalloc((n + num_extra_names) * sizeof(struct rdma_stat_desc) + - names_len, + kzalloc((n + num_extra_names) * sizeof(*q) + names_len, GFP_KERNEL); if (!names_out) { *num_cntrs = 0; @@ -1637,7 +1636,7 @@ static int init_cntr_names(const char *names_in, const size_t names_len, return -ENOMEM; } - p = names_out + (n + num_extra_names) * sizeof(struct rdma_stat_desc); + p = names_out + (n + num_extra_names) * sizeof(*q); memcpy(p, names_in, names_len); q = (struct rdma_stat_desc *)names_out; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ceca05982f61..0d2fa3338784 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2215,6 +2215,11 @@ static const struct ib_device_ops mlx4_ib_hw_stats_ops = { .get_hw_stats = mlx4_ib_get_hw_stats, }; +static const struct ib_device_ops mlx4_ib_hw_stats_ops1 = { + .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats, + .get_hw_stats = mlx4_ib_get_hw_stats, +}; + static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) { struct mlx4_ib_diag_counters *diag = ibdev->diag_counters; @@ -2227,9 +2232,16 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) return 0; for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { - /* i == 1 means we are building port counters */ - if (i && !per_port) - continue; + /* + * i == 1 means we are building port counters, set a different + * stats ops without port stats callback. + */ + if (i && !per_port) { + ib_set_device_ops(&ibdev->ib_dev, + &mlx4_ib_hw_stats_ops1); + + return 0; + } ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs, &diag[i].offset, diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 882c3c8ba399..3088c5b829f0 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c @@ -19,6 +19,7 @@ #include <linux/input.h> #include <linux/gameport.h> #include <linux/jiffies.h> +#include <linux/seq_buf.h> #include <linux/timex.h> #include <linux/timekeeping.h> @@ -338,23 +339,24 @@ static void analog_calibrate_timer(struct analog_port *port) static void analog_name(struct analog *analog) { - snprintf(analog->name, sizeof(analog->name), "Analog %d-axis %d-button", + struct seq_buf s; + + seq_buf_init(&s, analog->name, sizeof(analog->name)); + seq_buf_printf(&s, "Analog %d-axis %d-button", hweight8(analog->mask & ANALOG_AXES_STD), hweight8(analog->mask & ANALOG_BTNS_STD) + !!(analog->mask & ANALOG_BTNS_CHF) * 2 + hweight16(analog->mask & ANALOG_BTNS_GAMEPAD) + !!(analog->mask & ANALOG_HBTN_CHF) * 4); if (analog->mask & ANALOG_HATS_ALL) - snprintf(analog->name, sizeof(analog->name), "%s %d-hat", - analog->name, hweight16(analog->mask & ANALOG_HATS_ALL)); + seq_buf_printf(&s, " %d-hat", + hweight16(analog->mask & ANALOG_HATS_ALL)); if (analog->mask & ANALOG_HAT_FCS) - strlcat(analog->name, " FCS", sizeof(analog->name)); + seq_buf_printf(&s, " FCS"); if (analog->mask & ANALOG_ANY_CHF) - strlcat(analog->name, (analog->mask & ANALOG_SAITEK) ? " Saitek" : " CHF", - sizeof(analog->name)); + seq_buf_printf(&s, (analog->mask & ANALOG_SAITEK) ? " Saitek" : " CHF"); - strlcat(analog->name, (analog->mask & ANALOG_GAMEPAD) ? 
" gamepad": " joystick", - sizeof(analog->name)); + seq_buf_printf(&s, (analog->mask & ANALOG_GAMEPAD) ? " gamepad" : " joystick"); } /* diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index 6c554c11a7ac..ea58805c480f 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c @@ -92,7 +92,7 @@ static int iforce_usb_get_id(struct iforce *iforce, u8 id, id, USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE, - 0, 0, buf, IFORCE_MAX_LENGTH, HZ); + 0, 0, buf, IFORCE_MAX_LENGTH, 1000); if (status < 0) { dev_err(&iforce_usb->intf->dev, "usb_submit_urb failed: %d\n", status); diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c index f89e9aa6d328..7416de84b955 100644 --- a/drivers/input/joystick/tmdc.c +++ b/drivers/input/joystick/tmdc.c @@ -83,7 +83,7 @@ static const struct tmdc_model { const signed char *axes; const short *buttons; } tmdc_models[] = { - { 1, "ThrustMaster Millenium 3D Inceptor", 6, 2, { 4, 2 }, { 4, 6 }, tmdc_abs, tmdc_btn_joy }, + { 1, "ThrustMaster Millennium 3D Inceptor", 6, 2, { 4, 2 }, { 4, 6 }, tmdc_abs, tmdc_btn_joy }, { 3, "ThrustMaster Rage 3D Gamepad", 2, 0, { 8, 2 }, { 0, 0 }, tmdc_abs, tmdc_btn_pad }, { 4, "ThrustMaster Attack Throttle", 5, 2, { 4, 6 }, { 4, 2 }, tmdc_abs_at, tmdc_btn_at }, { 8, "ThrustMaster FragMaster", 4, 0, { 8, 2 }, { 0, 0 }, tmdc_abs_fm, tmdc_btn_fm }, diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index e75650e98c9e..0c607da9ee10 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -788,4 +788,14 @@ config KEYBOARD_MTK_PMIC To compile this driver as a module, choose M here: the module will be called pmic-keys. +config KEYBOARD_CYPRESS_SF + tristate "Cypress StreetFighter touchkey support" + depends on I2C + help + Say Y here if you want to enable support for Cypress StreetFighter + touchkeys. + + To compile this driver as a module, choose M here: the + module will be called cypress-sf. 
+ endif diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 1d689fdd5c00..e3c8648f834e 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_KEYBOARD_BCM) += bcm-keypad.o obj-$(CONFIG_KEYBOARD_CAP11XX) += cap11xx.o obj-$(CONFIG_KEYBOARD_CLPS711X) += clps711x-keypad.o obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o +obj-$(CONFIG_KEYBOARD_CYPRESS_SF) += cypress-sf.o obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o obj-$(CONFIG_KEYBOARD_DLINK_DIR685) += dlink-dir685-touchkeys.o obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c index 688e2bef682e..7c85343cd32f 100644 --- a/drivers/input/keyboard/cap11xx.c +++ b/drivers/input/keyboard/cap11xx.c @@ -91,18 +91,21 @@ struct cap11xx_hw_model { u8 product_id; unsigned int num_channels; unsigned int num_leds; + bool no_gain; }; enum { CAP1106, CAP1126, CAP1188, + CAP1206, }; static const struct cap11xx_hw_model cap11xx_devices[] = { - [CAP1106] = { .product_id = 0x55, .num_channels = 6, .num_leds = 0 }, - [CAP1126] = { .product_id = 0x53, .num_channels = 6, .num_leds = 2 }, - [CAP1188] = { .product_id = 0x50, .num_channels = 8, .num_leds = 8 }, + [CAP1106] = { .product_id = 0x55, .num_channels = 6, .num_leds = 0, .no_gain = false }, + [CAP1126] = { .product_id = 0x53, .num_channels = 6, .num_leds = 2, .no_gain = false }, + [CAP1188] = { .product_id = 0x50, .num_channels = 8, .num_leds = 8, .no_gain = false }, + [CAP1206] = { .product_id = 0x67, .num_channels = 6, .num_leds = 0, .no_gain = true }, }; static const struct reg_default cap11xx_reg_defaults[] = { @@ -378,17 +381,24 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client, node = dev->of_node; if (!of_property_read_u32(node, "microchip,sensor-gain", &gain32)) { - if (is_power_of_2(gain32) && gain32 <= 8) + if (cap->no_gain) + dev_warn(dev, + "This version doesn't support sensor gain\n"); + else if (is_power_of_2(gain32) && gain32 <= 8) gain = ilog2(gain32); else dev_err(dev, "Invalid sensor-gain value %d\n", gain32); } - if (of_property_read_bool(node, "microchip,irq-active-high")) { - error = regmap_update_bits(priv->regmap, CAP11XX_REG_CONFIG2, - CAP11XX_REG_CONFIG2_ALT_POL, 0); - if (error) - return error; + if (id->driver_data != CAP1206) { + if (of_property_read_bool(node, "microchip,irq-active-high")) { + error = regmap_update_bits(priv->regmap, + CAP11XX_REG_CONFIG2, + CAP11XX_REG_CONFIG2_ALT_POL, + 0); + if (error) + return error; + } } /* Provide some useful defaults */ @@ -398,11 +408,14 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client, of_property_read_u32_array(node, "linux,keycodes", priv->keycodes, cap->num_channels); - error = regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL, - CAP11XX_REG_MAIN_CONTROL_GAIN_MASK, - gain << CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT); - if (error) - return error; + if (!cap->no_gain) { + error = regmap_update_bits(priv->regmap, + CAP11XX_REG_MAIN_CONTROL, + CAP11XX_REG_MAIN_CONTROL_GAIN_MASK, + gain << CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT); + if (error) + return error; + } /* Disable autorepeat. The Linux input system has its own handling. 
*/ error = regmap_write(priv->regmap, CAP11XX_REG_REPEAT_RATE, 0); @@ -470,6 +483,7 @@ static const struct of_device_id cap11xx_dt_ids[] = { { .compatible = "microchip,cap1106", }, { .compatible = "microchip,cap1126", }, { .compatible = "microchip,cap1188", }, + { .compatible = "microchip,cap1206", }, {} }; MODULE_DEVICE_TABLE(of, cap11xx_dt_ids); @@ -478,6 +492,7 @@ static const struct i2c_device_id cap11xx_i2c_ids[] = { { "cap1106", CAP1106 }, { "cap1126", CAP1126 }, { "cap1188", CAP1188 }, + { "cap1206", CAP1206 }, {} }; MODULE_DEVICE_TABLE(i2c, cap11xx_i2c_ids); diff --git a/drivers/input/keyboard/cypress-sf.c b/drivers/input/keyboard/cypress-sf.c new file mode 100644 index 000000000000..c28996028e80 --- /dev/null +++ b/drivers/input/keyboard/cypress-sf.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Cypress StreetFighter Touchkey Driver + * + * Copyright (c) 2021 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/i2c.h> +#include <linux/input.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/pm.h> +#include <linux/regulator/consumer.h> + +#define CYPRESS_SF_DEV_NAME "cypress-sf" + +#define CYPRESS_SF_REG_BUTTON_STATUS 0x4a + +struct cypress_sf_data { + struct i2c_client *client; + struct input_dev *input_dev; + struct regulator_bulk_data regulators[2]; + u32 *keycodes; + unsigned long keystates; + int num_keys; +}; + +static irqreturn_t cypress_sf_irq_handler(int irq, void *devid) +{ + struct cypress_sf_data *touchkey = devid; + unsigned long keystates, changed; + bool new_state; + int val, key; + + val = i2c_smbus_read_byte_data(touchkey->client, + CYPRESS_SF_REG_BUTTON_STATUS); + if (val < 0) { + dev_err(&touchkey->client->dev, + "Failed to read button status: %d", val); + return IRQ_NONE; + } + keystates = val; + + bitmap_xor(&changed, &keystates, &touchkey->keystates, + touchkey->num_keys); + + for_each_set_bit(key, &changed, touchkey->num_keys) { + new_state = keystates & BIT(key); + dev_dbg(&touchkey->client->dev, + "Key %d changed to %d", key, new_state); + input_report_key(touchkey->input_dev, + touchkey->keycodes[key], new_state); + } + + input_sync(touchkey->input_dev); + touchkey->keystates = keystates; + + return IRQ_HANDLED; +} + +static int cypress_sf_probe(struct i2c_client *client) +{ + struct cypress_sf_data *touchkey; + int key, error; + + touchkey = devm_kzalloc(&client->dev, sizeof(*touchkey), GFP_KERNEL); + if (!touchkey) + return -ENOMEM; + + touchkey->client = client; + i2c_set_clientdata(client, touchkey); + + touchkey->regulators[0].supply = "vdd"; + touchkey->regulators[1].supply = "avdd"; + + error = devm_regulator_bulk_get(&client->dev, + ARRAY_SIZE(touchkey->regulators), + touchkey->regulators); + if (error) { + dev_err(&client->dev, "Failed to get regulators: %d\n", error); + return error; + } + + touchkey->num_keys = device_property_read_u32_array(&client->dev, + "linux,keycodes", + NULL, 0); + if (touchkey->num_keys < 0) { + /* Default key count */ + touchkey->num_keys = 2; + } + + touchkey->keycodes = devm_kcalloc(&client->dev, + touchkey->num_keys, + sizeof(*touchkey->keycodes), + GFP_KERNEL); + if (!touchkey->keycodes) + return -ENOMEM; + + error = device_property_read_u32_array(&client->dev, "linux,keycodes", + touchkey->keycodes, + touchkey->num_keys); + + if (error) { + dev_warn(&client->dev, + "Failed to read keycodes: %d, using defaults\n", + error); + + /* Default keycodes */ + touchkey->keycodes[0] = 
KEY_BACK; + touchkey->keycodes[1] = KEY_MENU; + } + + error = regulator_bulk_enable(ARRAY_SIZE(touchkey->regulators), + touchkey->regulators); + if (error) { + dev_err(&client->dev, + "Failed to enable regulators: %d\n", error); + return error; + } + + touchkey->input_dev = devm_input_allocate_device(&client->dev); + if (!touchkey->input_dev) { + dev_err(&client->dev, "Failed to allocate input device\n"); + return -ENOMEM; + } + + touchkey->input_dev->name = CYPRESS_SF_DEV_NAME; + touchkey->input_dev->id.bustype = BUS_I2C; + + for (key = 0; key < touchkey->num_keys; ++key) + input_set_capability(touchkey->input_dev, + EV_KEY, touchkey->keycodes[key]); + + error = input_register_device(touchkey->input_dev); + if (error) { + dev_err(&client->dev, + "Failed to register input device: %d\n", error); + return error; + } + + error = devm_request_threaded_irq(&client->dev, client->irq, + NULL, cypress_sf_irq_handler, + IRQF_ONESHOT, + CYPRESS_SF_DEV_NAME, touchkey); + if (error) { + dev_err(&client->dev, + "Failed to register threaded irq: %d", error); + return error; + } + + return 0; +}; + +static int __maybe_unused cypress_sf_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct cypress_sf_data *touchkey = i2c_get_clientdata(client); + int error; + + disable_irq(client->irq); + + error = regulator_bulk_disable(ARRAY_SIZE(touchkey->regulators), + touchkey->regulators); + if (error) { + dev_err(dev, "Failed to disable regulators: %d", error); + enable_irq(client->irq); + return error; + } + + return 0; +} + +static int __maybe_unused cypress_sf_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct cypress_sf_data *touchkey = i2c_get_clientdata(client); + int error; + + error = regulator_bulk_enable(ARRAY_SIZE(touchkey->regulators), + touchkey->regulators); + if (error) { + dev_err(dev, "Failed to enable regulators: %d", error); + return error; + } + + enable_irq(client->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(cypress_sf_pm_ops, + cypress_sf_suspend, cypress_sf_resume); + +static struct i2c_device_id cypress_sf_id_table[] = { + { CYPRESS_SF_DEV_NAME, 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, cypress_sf_id_table); + +#ifdef CONFIG_OF +static const struct of_device_id cypress_sf_of_match[] = { + { .compatible = "cypress,sf3155", }, + { }, +}; +MODULE_DEVICE_TABLE(of, cypress_sf_of_match); +#endif + +static struct i2c_driver cypress_sf_driver = { + .driver = { + .name = CYPRESS_SF_DEV_NAME, + .pm = &cypress_sf_pm_ops, + .of_match_table = of_match_ptr(cypress_sf_of_match), + }, + .id_table = cypress_sf_id_table, + .probe_new = cypress_sf_probe, +}; +module_i2c_driver(cypress_sf_driver); + +MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>"); +MODULE_DESCRIPTION("Cypress StreetFighter Touchkey Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c index e0e931e796fa..272a4f1c6e81 100644 --- a/drivers/input/keyboard/ep93xx_keypad.c +++ b/drivers/input/keyboard/ep93xx_keypad.c @@ -17,6 +17,7 @@ * flag. 
*/ +#include <linux/bits.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> @@ -26,6 +27,7 @@ #include <linux/slab.h> #include <linux/soc/cirrus/ep93xx.h> #include <linux/platform_data/keypad-ep93xx.h> +#include <linux/pm_wakeirq.h> /* * Keypad Interface Register offsets @@ -35,28 +37,28 @@ #define KEY_REG 0x08 /* Key Value Capture register */ /* Key Scan Initialization Register bit defines */ -#define KEY_INIT_DBNC_MASK (0x00ff0000) -#define KEY_INIT_DBNC_SHIFT (16) -#define KEY_INIT_DIS3KY (1<<15) -#define KEY_INIT_DIAG (1<<14) -#define KEY_INIT_BACK (1<<13) -#define KEY_INIT_T2 (1<<12) -#define KEY_INIT_PRSCL_MASK (0x000003ff) -#define KEY_INIT_PRSCL_SHIFT (0) +#define KEY_INIT_DBNC_MASK GENMASK(23, 16) +#define KEY_INIT_DBNC_SHIFT 16 +#define KEY_INIT_DIS3KY BIT(15) +#define KEY_INIT_DIAG BIT(14) +#define KEY_INIT_BACK BIT(13) +#define KEY_INIT_T2 BIT(12) +#define KEY_INIT_PRSCL_MASK GENMASK(9, 0) +#define KEY_INIT_PRSCL_SHIFT 0 /* Key Scan Diagnostic Register bit defines */ -#define KEY_DIAG_MASK (0x0000003f) -#define KEY_DIAG_SHIFT (0) +#define KEY_DIAG_MASK GENMASK(5, 0) +#define KEY_DIAG_SHIFT 0 /* Key Value Capture Register bit defines */ -#define KEY_REG_K (1<<15) -#define KEY_REG_INT (1<<14) -#define KEY_REG_2KEYS (1<<13) -#define KEY_REG_1KEY (1<<12) -#define KEY_REG_KEY2_MASK (0x00000fc0) -#define KEY_REG_KEY2_SHIFT (6) -#define KEY_REG_KEY1_MASK (0x0000003f) -#define KEY_REG_KEY1_SHIFT (0) +#define KEY_REG_K BIT(15) +#define KEY_REG_INT BIT(14) +#define KEY_REG_2KEYS BIT(13) +#define KEY_REG_1KEY BIT(12) +#define KEY_REG_KEY2_MASK GENMASK(11, 6) +#define KEY_REG_KEY2_SHIFT 6 +#define KEY_REG_KEY1_MASK GENMASK(5, 0) +#define KEY_REG_KEY1_SHIFT 0 #define EP93XX_MATRIX_SIZE (EP93XX_MATRIX_ROWS * EP93XX_MATRIX_COLS) @@ -175,8 +177,7 @@ static void ep93xx_keypad_close(struct input_dev *pdev) } -#ifdef CONFIG_PM_SLEEP -static int ep93xx_keypad_suspend(struct device *dev) +static int __maybe_unused ep93xx_keypad_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct ep93xx_keypad *keypad = platform_get_drvdata(pdev); @@ -191,21 +192,15 @@ static int ep93xx_keypad_suspend(struct device *dev) mutex_unlock(&input_dev->mutex); - if (device_may_wakeup(&pdev->dev)) - enable_irq_wake(keypad->irq); - return 0; } -static int ep93xx_keypad_resume(struct device *dev) +static int __maybe_unused ep93xx_keypad_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct ep93xx_keypad *keypad = platform_get_drvdata(pdev); struct input_dev *input_dev = keypad->input_dev; - if (device_may_wakeup(&pdev->dev)) - disable_irq_wake(keypad->irq); - mutex_lock(&input_dev->mutex); if (input_device_enabled(input_dev)) { @@ -220,11 +215,17 @@ static int ep93xx_keypad_resume(struct device *dev) return 0; } -#endif static SIMPLE_DEV_PM_OPS(ep93xx_keypad_pm_ops, ep93xx_keypad_suspend, ep93xx_keypad_resume); +static void ep93xx_keypad_release_gpio_action(void *_pdev) +{ + struct platform_device *pdev = _pdev; + + ep93xx_keypad_release_gpio(pdev); +} + static int ep93xx_keypad_probe(struct platform_device *pdev) { struct ep93xx_keypad *keypad; @@ -233,61 +234,46 @@ static int ep93xx_keypad_probe(struct platform_device *pdev) struct resource *res; int err; - keypad = kzalloc(sizeof(struct ep93xx_keypad), GFP_KERNEL); + keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL); if (!keypad) return -ENOMEM; keypad->pdata = dev_get_platdata(&pdev->dev); - if (!keypad->pdata) { - err = -EINVAL; - goto 
failed_free; - } + if (!keypad->pdata) + return -EINVAL; keymap_data = keypad->pdata->keymap_data; - if (!keymap_data) { - err = -EINVAL; - goto failed_free; - } + if (!keymap_data) + return -EINVAL; keypad->irq = platform_get_irq(pdev, 0); - if (keypad->irq < 0) { - err = keypad->irq; - goto failed_free; - } + if (keypad->irq < 0) + return keypad->irq; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - err = -ENXIO; - goto failed_free; - } + if (!res) + return -ENXIO; - res = request_mem_region(res->start, resource_size(res), pdev->name); - if (!res) { - err = -EBUSY; - goto failed_free; - } - - keypad->mmio_base = ioremap(res->start, resource_size(res)); - if (keypad->mmio_base == NULL) { - err = -ENXIO; - goto failed_free_mem; - } + keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(keypad->mmio_base)) + return PTR_ERR(keypad->mmio_base); err = ep93xx_keypad_acquire_gpio(pdev); if (err) - goto failed_free_io; + return err; - keypad->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(keypad->clk)) { - err = PTR_ERR(keypad->clk); - goto failed_free_gpio; - } + err = devm_add_action_or_reset(&pdev->dev, + ep93xx_keypad_release_gpio_action, pdev); + if (err) + return err; - input_dev = input_allocate_device(); - if (!input_dev) { - err = -ENOMEM; - goto failed_put_clk; - } + keypad->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(keypad->clk)) + return PTR_ERR(keypad->clk); + + input_dev = devm_input_allocate_device(&pdev->dev); + if (!input_dev) + return -ENOMEM; keypad->input_dev = input_dev; @@ -295,70 +281,40 @@ static int ep93xx_keypad_probe(struct platform_device *pdev) input_dev->id.bustype = BUS_HOST; input_dev->open = ep93xx_keypad_open; input_dev->close = ep93xx_keypad_close; - input_dev->dev.parent = &pdev->dev; err = matrix_keypad_build_keymap(keymap_data, NULL, EP93XX_MATRIX_ROWS, EP93XX_MATRIX_COLS, keypad->keycodes, input_dev); if (err) - goto failed_free_dev; + return err; if (keypad->pdata->flags & EP93XX_KEYPAD_AUTOREPEAT) __set_bit(EV_REP, input_dev->evbit); input_set_drvdata(input_dev, keypad); - err = request_irq(keypad->irq, ep93xx_keypad_irq_handler, - 0, pdev->name, keypad); + err = devm_request_irq(&pdev->dev, keypad->irq, + ep93xx_keypad_irq_handler, + 0, pdev->name, keypad); if (err) - goto failed_free_dev; + return err; err = input_register_device(input_dev); if (err) - goto failed_free_irq; + return err; platform_set_drvdata(pdev, keypad); + device_init_wakeup(&pdev->dev, 1); + err = dev_pm_set_wake_irq(&pdev->dev, keypad->irq); + if (err) + dev_warn(&pdev->dev, "failed to set up wakeup irq: %d\n", err); return 0; - -failed_free_irq: - free_irq(keypad->irq, keypad); -failed_free_dev: - input_free_device(input_dev); -failed_put_clk: - clk_put(keypad->clk); -failed_free_gpio: - ep93xx_keypad_release_gpio(pdev); -failed_free_io: - iounmap(keypad->mmio_base); -failed_free_mem: - release_mem_region(res->start, resource_size(res)); -failed_free: - kfree(keypad); - return err; } static int ep93xx_keypad_remove(struct platform_device *pdev) { - struct ep93xx_keypad *keypad = platform_get_drvdata(pdev); - struct resource *res; - - free_irq(keypad->irq, keypad); - - if (keypad->enabled) - clk_disable(keypad->clk); - clk_put(keypad->clk); - - input_unregister_device(keypad->input_dev); - - ep93xx_keypad_release_gpio(pdev); - - iounmap(keypad->mmio_base); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - - kfree(keypad); + dev_pm_clear_wake_irq(&pdev->dev); return 0; } 
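
The ep93xx_keypad conversion above replaces the open-coded cleanup chain (free_irq(), clk_put(), iounmap(), release_mem_region(), kfree()) with device-managed devm_* allocations, and moves wakeup handling out of the suspend/resume callbacks: instead of enable_irq_wake()/disable_irq_wake() guarded by device_may_wakeup(), probe() registers the keypad interrupt as a dedicated wake IRQ with dev_pm_set_wake_irq(), and remove() only needs dev_pm_clear_wake_irq(). A minimal sketch of that probe/remove shape follows; the examplekp_* names are hypothetical and not part of the patch.

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>

static irqreturn_t examplekp_irq(int irq, void *dev_id)
{
	/* scan the key matrix and report events here */
	return IRQ_HANDLED;
}

static int examplekp_probe(struct platform_device *pdev)
{
	int irq, err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* devm-managed IRQ: no free_irq() in remove() or in error paths */
	err = devm_request_irq(&pdev->dev, irq, examplekp_irq, 0,
			       pdev->name, pdev);
	if (err)
		return err;

	/* let the PM core arm/disarm the wake IRQ around suspend/resume */
	device_init_wakeup(&pdev->dev, true);
	err = dev_pm_set_wake_irq(&pdev->dev, irq);
	if (err)
		dev_warn(&pdev->dev, "failed to set up wakeup irq: %d\n", err);

	return 0;
}

static int examplekp_remove(struct platform_device *pdev)
{
	dev_pm_clear_wake_irq(&pdev->dev);
	return 0;
}

static struct platform_driver examplekp_driver = {
	.driver	= { .name = "examplekp" },
	.probe	= examplekp_probe,
	.remove	= examplekp_remove,
};
module_platform_driver(examplekp_driver);
MODULE_LICENSE("GPL");

With this shape, the wake IRQ is armed only when userspace has enabled wakeup for the device, without the driver touching irq_wake state in its own PM callbacks.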
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c index 40d6e5087cde..230ab3d50b9e 100644 --- a/drivers/input/keyboard/mpr121_touchkey.c +++ b/drivers/input/keyboard/mpr121_touchkey.c @@ -107,9 +107,9 @@ static struct regulator *mpr121_vdd_supply_init(struct device *dev) return ERR_PTR(err); } - err = devm_add_action(dev, mpr121_vdd_supply_disable, vdd_supply); + err = devm_add_action_or_reset(dev, mpr121_vdd_supply_disable, + vdd_supply); if (err) { - regulator_disable(vdd_supply); dev_err(dev, "failed to add disable regulator action: %d\n", err); return ERR_PTR(err); diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c index dbe836c7ff47..eb3a687796e7 100644 --- a/drivers/input/keyboard/omap-keypad.c +++ b/drivers/input/keyboard/omap-keypad.c @@ -190,8 +190,7 @@ static int omap_kp_probe(struct platform_device *pdev) row_shift = get_count_order(pdata->cols); keycodemax = pdata->rows << row_shift; - omap_kp = kzalloc(sizeof(struct omap_kp) + - keycodemax * sizeof(unsigned short), GFP_KERNEL); + omap_kp = kzalloc(struct_size(omap_kp, keymap, keycodemax), GFP_KERNEL); input_dev = input_allocate_device(); if (!omap_kp || !input_dev) { kfree(omap_kp); diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c index 6218b1c682ef..632cd6c1c8d4 100644 --- a/drivers/input/keyboard/tm2-touchkey.c +++ b/drivers/input/keyboard/tm2-touchkey.c @@ -156,6 +156,8 @@ static irqreturn_t tm2_touchkey_irq_handler(int irq, void *devid) goto out; } + input_event(touchkey->input_dev, EV_MSC, MSC_SCAN, index); + if (data & TM2_TOUCHKEY_BIT_PRESS_EV) { for (i = 0; i < touchkey->num_keycodes; i++) input_report_key(touchkey->input_dev, @@ -250,6 +252,11 @@ static int tm2_touchkey_probe(struct i2c_client *client, touchkey->input_dev->name = TM2_TOUCHKEY_DEV_NAME; touchkey->input_dev->id.bustype = BUS_I2C; + touchkey->input_dev->keycode = touchkey->keycodes; + touchkey->input_dev->keycodemax = touchkey->num_keycodes; + touchkey->input_dev->keycodesize = sizeof(touchkey->keycodes[0]); + + input_set_capability(touchkey->input_dev, EV_MSC, MSC_SCAN); for (i = 0; i < touchkey->num_keycodes; i++) input_set_capability(touchkey->input_dev, EV_KEY, touchkey->keycodes[i]); diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c index e64368a63346..a3b5f88d2bd1 100644 --- a/drivers/input/misc/adxl34x-i2c.c +++ b/drivers/input/misc/adxl34x-i2c.c @@ -103,7 +103,9 @@ static int adxl34x_i2c_remove(struct i2c_client *client) { struct adxl34x *ac = i2c_get_clientdata(client); - return adxl34x_remove(ac); + adxl34x_remove(ac); + + return 0; } static int __maybe_unused adxl34x_i2c_suspend(struct device *dev) diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c index df6afa455e46..6e51c9bc619f 100644 --- a/drivers/input/misc/adxl34x-spi.c +++ b/drivers/input/misc/adxl34x-spi.c @@ -91,7 +91,9 @@ static int adxl34x_spi_remove(struct spi_device *spi) { struct adxl34x *ac = spi_get_drvdata(spi); - return adxl34x_remove(ac); + adxl34x_remove(ac); + + return 0; } static int __maybe_unused adxl34x_spi_suspend(struct device *dev) diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c index 4cc4e8ff42b3..a4af314392a9 100644 --- a/drivers/input/misc/adxl34x.c +++ b/drivers/input/misc/adxl34x.c @@ -237,7 +237,7 @@ static const struct adxl34x_platform_data adxl34x_default_init = { static void adxl34x_get_triple(struct adxl34x *ac, struct axis_triple *axis) { - short 
buf[3]; + __le16 buf[3]; ac->bops->read_block(ac->dev, DATAX0, DATAZ1 - DATAX0 + 1, buf); @@ -896,15 +896,13 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq, } EXPORT_SYMBOL_GPL(adxl34x_probe); -int adxl34x_remove(struct adxl34x *ac) +void adxl34x_remove(struct adxl34x *ac) { sysfs_remove_group(&ac->dev->kobj, &adxl34x_attr_group); free_irq(ac->irq, ac); input_unregister_device(ac->input); dev_dbg(ac->dev, "unregistered accelerometer\n"); kfree(ac); - - return 0; } EXPORT_SYMBOL_GPL(adxl34x_remove); diff --git a/drivers/input/misc/adxl34x.h b/drivers/input/misc/adxl34x.h index 83a0eeccf613..febf85270fff 100644 --- a/drivers/input/misc/adxl34x.h +++ b/drivers/input/misc/adxl34x.h @@ -25,6 +25,6 @@ void adxl34x_resume(struct adxl34x *ac); struct adxl34x *adxl34x_probe(struct device *dev, int irq, bool fifo_delay_default, const struct adxl34x_bus_ops *bops); -int adxl34x_remove(struct adxl34x *ac); +void adxl34x_remove(struct adxl34x *ac); #endif diff --git a/drivers/input/misc/ariel-pwrbutton.c b/drivers/input/misc/ariel-pwrbutton.c index 17bbaac8b80c..cdc80715b5fd 100644 --- a/drivers/input/misc/ariel-pwrbutton.c +++ b/drivers/input/misc/ariel-pwrbutton.c @@ -149,12 +149,19 @@ static const struct of_device_id ariel_pwrbutton_of_match[] = { }; MODULE_DEVICE_TABLE(of, ariel_pwrbutton_of_match); +static const struct spi_device_id ariel_pwrbutton_spi_ids[] = { + { .name = "wyse-ariel-ec-input" }, + { } +}; +MODULE_DEVICE_TABLE(spi, ariel_pwrbutton_spi_ids); + static struct spi_driver ariel_pwrbutton_driver = { .driver = { .name = "dell-wyse-ariel-ec-input", .of_match_table = ariel_pwrbutton_of_match, }, .probe = ariel_pwrbutton_probe, + .id_table = ariel_pwrbutton_spi_ids, }; module_spi_driver(ariel_pwrbutton_driver); diff --git a/drivers/input/misc/cpcap-pwrbutton.c b/drivers/input/misc/cpcap-pwrbutton.c index 0abef63217e2..879790bbf9fe 100644 --- a/drivers/input/misc/cpcap-pwrbutton.c +++ b/drivers/input/misc/cpcap-pwrbutton.c @@ -54,9 +54,13 @@ static irqreturn_t powerbutton_irq(int irq, void *_button) static int cpcap_power_button_probe(struct platform_device *pdev) { struct cpcap_power_button *button; - int irq = platform_get_irq(pdev, 0); + int irq; int err; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + button = devm_kmalloc(&pdev->dev, sizeof(*button), GFP_KERNEL); if (!button) return -ENOMEM; @@ -73,7 +77,6 @@ static int cpcap_power_button_probe(struct platform_device *pdev) button->idev->name = "cpcap-pwrbutton"; button->idev->phys = "cpcap-pwrbutton/input0"; - button->idev->dev.parent = button->dev; input_set_capability(button->idev, EV_KEY, KEY_POWER); err = devm_request_threaded_irq(&pdev->dev, irq, NULL, diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c index 0d09ffeafeea..4369d3c04d38 100644 --- a/drivers/input/misc/max77693-haptic.c +++ b/drivers/input/misc/max77693-haptic.c @@ -424,5 +424,4 @@ module_platform_driver(max77693_haptic_driver); MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>"); MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>"); MODULE_DESCRIPTION("MAXIM 77693/77843 Haptic driver"); -MODULE_ALIAS("platform:max77693-haptic"); MODULE_LICENSE("GPL"); diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c index ffab4a490c75..4770cb55631a 100644 --- a/drivers/input/misc/max8925_onkey.c +++ b/drivers/input/misc/max8925_onkey.c @@ -1,4 +1,4 @@ -/** +/* * MAX8925 ONKEY driver * * Copyright (C) 2009 Marvell International Ltd. 
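
The mpr121_touchkey change above (and the elants_i2c and ads7846 hunks further down) switches devm_add_action() to devm_add_action_or_reset(), which invokes the cleanup callback immediately when the action cannot be registered, so the caller no longer needs a manual error-path cleanup such as regulator_disable(). A minimal sketch of the pattern is shown below; the example_* names are hypothetical and not taken from these patches.

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static void example_regulator_disable(void *data)
{
	regulator_disable(data);
}

/*
 * Hypothetical helper: enable a regulator and guarantee it is disabled
 * again on driver detach, even if registering the action itself fails.
 */
static int example_enable_regulator(struct device *dev, struct regulator *reg)
{
	int err;

	err = regulator_enable(reg);
	if (err)
		return err;

	/*
	 * On registration failure, devm_add_action_or_reset() calls
	 * example_regulator_disable(reg) itself before returning the
	 * error, so no explicit regulator_disable() is needed here.
	 */
	return devm_add_action_or_reset(dev, example_regulator_disable, reg);
}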
diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c index 1e1baed63929..f9b05cf09ff5 100644 --- a/drivers/input/misc/palmas-pwrbutton.c +++ b/drivers/input/misc/palmas-pwrbutton.c @@ -210,6 +210,11 @@ static int palmas_pwron_probe(struct platform_device *pdev) INIT_DELAYED_WORK(&pwron->input_work, palmas_power_button_work); pwron->irq = platform_get_irq(pdev, 0); + if (pwron->irq < 0) { + error = pwron->irq; + goto err_free_input; + } + error = request_threaded_irq(pwron->irq, NULL, pwron_irq, IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c index 33609603245d..89af52498c96 100644 --- a/drivers/input/misc/pm8941-pwrkey.c +++ b/drivers/input/misc/pm8941-pwrkey.c @@ -29,6 +29,7 @@ #define PON_PS_HOLD_RST_CTL2 0x5b #define PON_PS_HOLD_ENABLE BIT(7) #define PON_PS_HOLD_TYPE_MASK 0x0f +#define PON_PS_HOLD_TYPE_WARM_RESET 1 #define PON_PS_HOLD_TYPE_SHUTDOWN 4 #define PON_PS_HOLD_TYPE_HARD_RESET 7 @@ -99,7 +100,10 @@ static int pm8941_reboot_notify(struct notifier_block *nb, break; case SYS_RESTART: default: - reset_type = PON_PS_HOLD_TYPE_HARD_RESET; + if (reboot_mode == REBOOT_WARM) + reset_type = PON_PS_HOLD_TYPE_WARM_RESET; + else + reset_type = PON_PS_HOLD_TYPE_HARD_RESET; break; } diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index 4ff5cd2a6d8d..3d17a0b3fe51 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -542,6 +542,7 @@ static struct xenbus_driver xenkbd_driver = { .remove = xenkbd_remove, .resume = xenkbd_resume, .otherend_changed = xenkbd_backend_changed, + .not_essential = true, }; static int __init xenkbd_init(void) diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 2d0bc029619f..956d9cd34796 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -517,6 +517,19 @@ static void elantech_report_trackpoint(struct psmouse *psmouse, case 0x16008020U: case 0x26800010U: case 0x36808000U: + + /* + * This firmware misreport coordinates for trackpoint + * occasionally. Discard packets outside of [-127, 127] range + * to prevent cursor jumps. 
+ */ + if (packet[4] == 0x80 || packet[5] == 0x80 || + packet[1] >> 7 == packet[4] >> 7 || + packet[2] >> 7 == packet[5] >> 7) { + elantech_debug("discarding packet [%6ph]\n", packet); + break; + + } x = packet[4] - (int)((packet[1]^0x80) << 1); y = (int)((packet[2]^0x80) << 1) - packet[5]; diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c index 24f31a5c0e04..50a0134b6901 100644 --- a/drivers/input/rmi4/rmi_bus.c +++ b/drivers/input/rmi4/rmi_bus.c @@ -90,6 +90,7 @@ int rmi_register_transport_device(struct rmi_transport_dev *xport) rmi_dev->dev.bus = &rmi_bus_type; rmi_dev->dev.type = &rmi_device_type; + rmi_dev->dev.parent = xport->dev; xport->rmi_dev = rmi_dev; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index a5a003553646..aedd05541044 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -273,6 +273,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { }, }, { + /* Fujitsu Lifebook T725 laptop */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"), + }, + }, + { /* Fujitsu Lifebook U745 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), @@ -841,6 +848,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { }, }, { + /* Fujitsu Lifebook T725 laptop */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"), + }, + }, + { /* Fujitsu U574 laptop */ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ .matches = { diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index d4e74738c5a8..2f6adfb7b938 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -425,6 +425,7 @@ config TOUCHSCREEN_HYCON_HY46XX config TOUCHSCREEN_ILI210X tristate "Ilitek ILI210X based touchscreen" depends on I2C + select CRC_CCITT help Say Y here if you have a ILI210X based touchscreen controller. This driver supports models ILI2102, diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 7d34100f7f22..39a8127cf6a5 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -6,6 +6,7 @@ # Each configuration option enables a list of files. 
wm97xx-ts-y := wm97xx-core.o +goodix_ts-y := goodix.o goodix_fwupload.o obj-$(CONFIG_TOUCHSCREEN_88PM860X) += 88pm860x-ts.o obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o @@ -44,7 +45,7 @@ obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o obj-$(CONFIG_TOUCHSCREEN_EGALAX_SERIAL) += egalax_ts_serial.o obj-$(CONFIG_TOUCHSCREEN_EXC3000) += exc3000.o obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o -obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix.o +obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix_ts.o obj-$(CONFIG_TOUCHSCREEN_HIDEEP) += hideep.o obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o obj-$(CONFIG_TOUCHSCREEN_ILITEK) += ilitek_ts_i2c.o diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index f113a27aeb1e..a25a77dd9a32 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -101,10 +101,6 @@ struct ads7846 { struct spi_device *spi; struct regulator *reg; -#if IS_ENABLED(CONFIG_HWMON) - struct device *hwmon; -#endif - u16 model; u16 vref_mv; u16 vref_delay_usecs; @@ -142,13 +138,18 @@ struct ads7846 { int (*filter)(void *data, int data_idx, int *val); void *filter_data; - void (*filter_cleanup)(void *data); int (*get_pendown_state)(void); int gpio_pendown; void (*wait_for_sync)(void); }; +enum ads7846_filter { + ADS7846_FILTER_OK, + ADS7846_FILTER_REPEAT, + ADS7846_FILTER_IGNORE, +}; + /* leave chip selected when we're done, for quicker re-select? */ #if 0 #define CS_CHANGE(xfer) ((xfer).cs_change = 1) @@ -549,6 +550,8 @@ __ATTRIBUTE_GROUPS(ads7846_attr); static int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts) { + struct device *hwmon; + /* hwmon sensors need a reference voltage */ switch (ts->model) { case 7846: @@ -569,17 +572,11 @@ static int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts) break; } - ts->hwmon = hwmon_device_register_with_groups(&spi->dev, spi->modalias, - ts, ads7846_attr_groups); + hwmon = devm_hwmon_device_register_with_groups(&spi->dev, + spi->modalias, ts, + ads7846_attr_groups); - return PTR_ERR_OR_ZERO(ts->hwmon); -} - -static void ads784x_hwmon_unregister(struct spi_device *spi, - struct ads7846 *ts) -{ - if (ts->hwmon) - hwmon_device_unregister(ts->hwmon); + return PTR_ERR_OR_ZERO(hwmon); } #else @@ -588,11 +585,6 @@ static inline int ads784x_hwmon_register(struct spi_device *spi, { return 0; } - -static inline void ads784x_hwmon_unregister(struct spi_device *spi, - struct ads7846 *ts) -{ -} #endif static ssize_t ads7846_pen_down_show(struct device *dev, @@ -1014,8 +1006,8 @@ static int ads7846_setup_pendown(struct spi_device *spi, ts->get_pendown_state = pdata->get_pendown_state; } else if (gpio_is_valid(pdata->gpio_pendown)) { - err = gpio_request_one(pdata->gpio_pendown, GPIOF_IN, - "ads7846_pendown"); + err = devm_gpio_request_one(&spi->dev, pdata->gpio_pendown, + GPIOF_IN, "ads7846_pendown"); if (err) { dev_err(&spi->dev, "failed to request/setup pendown GPIO%d: %d\n", @@ -1212,24 +1204,30 @@ static const struct ads7846_platform_data *ads7846_probe_dt(struct device *dev) } #endif +static void ads7846_regulator_disable(void *regulator) +{ + regulator_disable(regulator); +} + static int ads7846_probe(struct spi_device *spi) { const struct ads7846_platform_data *pdata; struct ads7846 *ts; + struct device *dev = &spi->dev; struct ads7846_packet *packet; struct input_dev *input_dev; unsigned long irq_flags; int err; if (!spi->irq) { - dev_dbg(&spi->dev, "no IRQ?\n"); + dev_dbg(dev, "no IRQ?\n"); return -EINVAL; } /* don't exceed max specified sample rate */ if 
(spi->max_speed_hz > (125000 * SAMPLE_BITS)) { - dev_err(&spi->dev, "f(sample) %d KHz?\n", - (spi->max_speed_hz/SAMPLE_BITS)/1000); + dev_err(dev, "f(sample) %d KHz?\n", + (spi->max_speed_hz/SAMPLE_BITS)/1000); return -EINVAL; } @@ -1245,13 +1243,17 @@ static int ads7846_probe(struct spi_device *spi) if (err < 0) return err; - ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL); - packet = kzalloc(sizeof(struct ads7846_packet), GFP_KERNEL); - input_dev = input_allocate_device(); - if (!ts || !packet || !input_dev) { - err = -ENOMEM; - goto err_free_mem; - } + ts = devm_kzalloc(dev, sizeof(struct ads7846), GFP_KERNEL); + if (!ts) + return -ENOMEM; + + packet = devm_kzalloc(dev, sizeof(struct ads7846_packet), GFP_KERNEL); + if (!packet) + return -ENOMEM; + + input_dev = devm_input_allocate_device(dev); + if (!input_dev) + return -ENOMEM; spi_set_drvdata(spi, ts); @@ -1262,13 +1264,11 @@ static int ads7846_probe(struct spi_device *spi) mutex_init(&ts->lock); init_waitqueue_head(&ts->wait); - pdata = dev_get_platdata(&spi->dev); + pdata = dev_get_platdata(dev); if (!pdata) { - pdata = ads7846_probe_dt(&spi->dev); - if (IS_ERR(pdata)) { - err = PTR_ERR(pdata); - goto err_free_mem; - } + pdata = ads7846_probe_dt(dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); } ts->model = pdata->model ? : 7846; @@ -1276,15 +1276,7 @@ static int ads7846_probe(struct spi_device *spi) ts->x_plate_ohms = pdata->x_plate_ohms ? : 400; ts->vref_mv = pdata->vref_mv; - if (pdata->filter != NULL) { - if (pdata->filter_init != NULL) { - err = pdata->filter_init(pdata, &ts->filter_data); - if (err < 0) - goto err_free_mem; - } - ts->filter = pdata->filter; - ts->filter_cleanup = pdata->filter_cleanup; - } else if (pdata->debounce_max) { + if (pdata->debounce_max) { ts->debounce_max = pdata->debounce_max; if (ts->debounce_max < 2) ts->debounce_max = 2; @@ -1298,7 +1290,7 @@ static int ads7846_probe(struct spi_device *spi) err = ads7846_setup_pendown(spi, ts, pdata); if (err) - goto err_cleanup_filter; + return err; if (pdata->penirq_recheck_delay_usecs) ts->penirq_recheck_delay_usecs = @@ -1306,15 +1298,16 @@ static int ads7846_probe(struct spi_device *spi) ts->wait_for_sync = pdata->wait_for_sync ? : null_wait_for_sync; - snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev)); + snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(dev)); snprintf(ts->name, sizeof(ts->name), "ADS%d Touchscreen", ts->model); input_dev->name = ts->name; input_dev->phys = ts->phys; - input_dev->dev.parent = &spi->dev; - input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); - input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); + input_dev->id.bustype = BUS_SPI; + input_dev->id.product = pdata->model; + + input_set_capability(input_dev, EV_KEY, BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, pdata->x_min ? : 0, pdata->x_max ? 
: MAX_12BIT, @@ -1345,125 +1338,84 @@ static int ads7846_probe(struct spi_device *spi) ads7846_setup_spi_msg(ts, pdata); - ts->reg = regulator_get(&spi->dev, "vcc"); + ts->reg = devm_regulator_get(dev, "vcc"); if (IS_ERR(ts->reg)) { err = PTR_ERR(ts->reg); - dev_err(&spi->dev, "unable to get regulator: %d\n", err); - goto err_free_gpio; + dev_err(dev, "unable to get regulator: %d\n", err); + return err; } err = regulator_enable(ts->reg); if (err) { - dev_err(&spi->dev, "unable to enable regulator: %d\n", err); - goto err_put_regulator; + dev_err(dev, "unable to enable regulator: %d\n", err); + return err; } + err = devm_add_action_or_reset(dev, ads7846_regulator_disable, ts->reg); + if (err) + return err; + irq_flags = pdata->irq_flags ? : IRQF_TRIGGER_FALLING; irq_flags |= IRQF_ONESHOT; - err = request_threaded_irq(spi->irq, ads7846_hard_irq, ads7846_irq, - irq_flags, spi->dev.driver->name, ts); - if (err && !pdata->irq_flags) { - dev_info(&spi->dev, + err = devm_request_threaded_irq(dev, spi->irq, + ads7846_hard_irq, ads7846_irq, + irq_flags, dev->driver->name, ts); + if (err && err != -EPROBE_DEFER && !pdata->irq_flags) { + dev_info(dev, "trying pin change workaround on irq %d\n", spi->irq); irq_flags |= IRQF_TRIGGER_RISING; - err = request_threaded_irq(spi->irq, - ads7846_hard_irq, ads7846_irq, - irq_flags, spi->dev.driver->name, ts); + err = devm_request_threaded_irq(dev, spi->irq, + ads7846_hard_irq, ads7846_irq, + irq_flags, dev->driver->name, + ts); } if (err) { - dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq); - goto err_disable_regulator; + dev_dbg(dev, "irq %d busy?\n", spi->irq); + return err; } err = ads784x_hwmon_register(spi, ts); if (err) - goto err_free_irq; + return err; - dev_info(&spi->dev, "touchscreen, irq %d\n", spi->irq); + dev_info(dev, "touchscreen, irq %d\n", spi->irq); /* * Take a first sample, leaving nPENIRQ active and vREF off; avoid * the touchscreen, in case it's not connected. */ if (ts->model == 7845) - ads7845_read12_ser(&spi->dev, PWRDOWN); + ads7845_read12_ser(dev, PWRDOWN); else - (void) ads7846_read12_ser(&spi->dev, READ_12BIT_SER(vaux)); + (void) ads7846_read12_ser(dev, READ_12BIT_SER(vaux)); - err = sysfs_create_group(&spi->dev.kobj, &ads784x_attr_group); + err = devm_device_add_group(dev, &ads784x_attr_group); if (err) - goto err_remove_hwmon; + return err; err = input_register_device(input_dev); if (err) - goto err_remove_attr_group; + return err; - device_init_wakeup(&spi->dev, pdata->wakeup); + device_init_wakeup(dev, pdata->wakeup); /* * If device does not carry platform data we must have allocated it * when parsing DT data. 
*/ - if (!dev_get_platdata(&spi->dev)) - devm_kfree(&spi->dev, (void *)pdata); + if (!dev_get_platdata(dev)) + devm_kfree(dev, (void *)pdata); return 0; - - err_remove_attr_group: - sysfs_remove_group(&spi->dev.kobj, &ads784x_attr_group); - err_remove_hwmon: - ads784x_hwmon_unregister(spi, ts); - err_free_irq: - free_irq(spi->irq, ts); - err_disable_regulator: - regulator_disable(ts->reg); - err_put_regulator: - regulator_put(ts->reg); - err_free_gpio: - if (!ts->get_pendown_state) - gpio_free(ts->gpio_pendown); - err_cleanup_filter: - if (ts->filter_cleanup) - ts->filter_cleanup(ts->filter_data); - err_free_mem: - input_free_device(input_dev); - kfree(packet); - kfree(ts); - return err; } static int ads7846_remove(struct spi_device *spi) { struct ads7846 *ts = spi_get_drvdata(spi); - sysfs_remove_group(&spi->dev.kobj, &ads784x_attr_group); - - ads7846_disable(ts); - free_irq(ts->spi->irq, ts); - - input_unregister_device(ts->input); - - ads784x_hwmon_unregister(spi, ts); - - regulator_put(ts->reg); - - if (!ts->get_pendown_state) { - /* - * If we are not using specialized pendown method we must - * have been relying on gpio we set up ourselves. - */ - gpio_free(ts->gpio_pendown); - } - - if (ts->filter_cleanup) - ts->filter_cleanup(ts->filter_data); - - kfree(ts->packet); - kfree(ts); - - dev_dbg(&spi->dev, "unregistered touchscreen\n"); + ads7846_stop(ts); return 0; } diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 68f542bb809f..7e13a66a8a95 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -1439,11 +1439,11 @@ static int elants_i2c_probe(struct i2c_client *client) if (error) return error; - error = devm_add_action(&client->dev, elants_i2c_power_off, ts); + error = devm_add_action_or_reset(&client->dev, + elants_i2c_power_off, ts); if (error) { dev_err(&client->dev, "failed to install power off action: %d\n", error); - elants_i2c_power_off(ts); return error; } diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 4f53d3c57e69..b5cc91788195 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -14,20 +14,15 @@ #include <linux/kernel.h> #include <linux/dmi.h> #include <linux/firmware.h> -#include <linux/gpio/consumer.h> -#include <linux/i2c.h> -#include <linux/input.h> -#include <linux/input/mt.h> -#include <linux/input/touchscreen.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/interrupt.h> -#include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/of.h> #include <asm/unaligned.h> +#include "goodix.h" #define GOODIX_GPIO_INT_NAME "irq" #define GOODIX_GPIO_RST_NAME "reset" @@ -38,22 +33,11 @@ #define GOODIX_CONTACT_SIZE 8 #define GOODIX_MAX_CONTACT_SIZE 9 #define GOODIX_MAX_CONTACTS 10 -#define GOODIX_MAX_KEYS 7 #define GOODIX_CONFIG_MIN_LENGTH 186 #define GOODIX_CONFIG_911_LENGTH 186 #define GOODIX_CONFIG_967_LENGTH 228 #define GOODIX_CONFIG_GT9X_LENGTH 240 -#define GOODIX_CONFIG_MAX_LENGTH 240 - -/* Register defines */ -#define GOODIX_REG_COMMAND 0x8040 -#define GOODIX_CMD_SCREEN_OFF 0x05 - -#define GOODIX_READ_COOR_ADDR 0x814E -#define GOODIX_GT1X_REG_CONFIG_DATA 0x8050 -#define GOODIX_GT9X_REG_CONFIG_DATA 0x8047 -#define GOODIX_REG_ID 0x8140 #define GOODIX_BUFFER_STATUS_READY BIT(7) #define GOODIX_HAVE_KEY BIT(4) @@ -68,55 +52,11 @@ #define ACPI_GPIO_SUPPORT #endif -struct goodix_ts_data; - -enum goodix_irq_pin_access_method 
{ - IRQ_PIN_ACCESS_NONE, - IRQ_PIN_ACCESS_GPIO, - IRQ_PIN_ACCESS_ACPI_GPIO, - IRQ_PIN_ACCESS_ACPI_METHOD, -}; - -struct goodix_chip_data { - u16 config_addr; - int config_len; - int (*check_config)(struct goodix_ts_data *ts, const u8 *cfg, int len); - void (*calc_config_checksum)(struct goodix_ts_data *ts); -}; - struct goodix_chip_id { const char *id; const struct goodix_chip_data *data; }; -#define GOODIX_ID_MAX_LEN 4 - -struct goodix_ts_data { - struct i2c_client *client; - struct input_dev *input_dev; - const struct goodix_chip_data *chip; - struct touchscreen_properties prop; - unsigned int max_touch_num; - unsigned int int_trigger_type; - struct regulator *avdd28; - struct regulator *vddio; - struct gpio_desc *gpiod_int; - struct gpio_desc *gpiod_rst; - int gpio_count; - int gpio_int_idx; - char id[GOODIX_ID_MAX_LEN + 1]; - u16 version; - const char *cfg_name; - bool reset_controller_at_probe; - bool load_cfg_from_disk; - struct completion firmware_loading_complete; - unsigned long irq_flags; - enum goodix_irq_pin_access_method irq_pin_access_method; - unsigned int contact_size; - u8 config[GOODIX_CONFIG_MAX_LENGTH]; - unsigned short keymap[GOODIX_MAX_KEYS]; -}; - static int goodix_check_cfg_8(struct goodix_ts_data *ts, const u8 *cfg, int len); static int goodix_check_cfg_16(struct goodix_ts_data *ts, @@ -215,8 +155,7 @@ static const struct dmi_system_id inverted_x_screen[] = { * @buf: raw write data buffer. * @len: length of the buffer to write */ -static int goodix_i2c_read(struct i2c_client *client, - u16 reg, u8 *buf, int len) +int goodix_i2c_read(struct i2c_client *client, u16 reg, u8 *buf, int len) { struct i2c_msg msgs[2]; __be16 wbuf = cpu_to_be16(reg); @@ -233,7 +172,13 @@ static int goodix_i2c_read(struct i2c_client *client, msgs[1].buf = buf; ret = i2c_transfer(client->adapter, msgs, 2); - return ret < 0 ? ret : (ret != ARRAY_SIZE(msgs) ? -EIO : 0); + if (ret >= 0) + ret = (ret == ARRAY_SIZE(msgs) ? 0 : -EIO); + + if (ret) + dev_err(&client->dev, "Error reading %d bytes from 0x%04x: %d\n", + len, reg, ret); + return ret; } /** @@ -244,8 +189,7 @@ static int goodix_i2c_read(struct i2c_client *client, * @buf: raw data buffer to write. * @len: length of the buffer to write */ -static int goodix_i2c_write(struct i2c_client *client, u16 reg, const u8 *buf, - unsigned len) +int goodix_i2c_write(struct i2c_client *client, u16 reg, const u8 *buf, int len) { u8 *addr_buf; struct i2c_msg msg; @@ -265,11 +209,18 @@ static int goodix_i2c_write(struct i2c_client *client, u16 reg, const u8 *buf, msg.len = len + 2; ret = i2c_transfer(client->adapter, &msg, 1); + if (ret >= 0) + ret = (ret == 1 ? 0 : -EIO); + kfree(addr_buf); - return ret < 0 ? ret : (ret != 1 ? 
-EIO : 0); + + if (ret) + dev_err(&client->dev, "Error writing %d bytes to 0x%04x: %d\n", + len, reg, ret); + return ret; } -static int goodix_i2c_write_u8(struct i2c_client *client, u16 reg, u8 value) +int goodix_i2c_write_u8(struct i2c_client *client, u16 reg, u8 value) { return goodix_i2c_write(client, reg, &value, sizeof(value)); } @@ -308,11 +259,8 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) do { error = goodix_i2c_read(ts->client, addr, data, header_contact_keycode_size); - if (error) { - dev_err(&ts->client->dev, "I2C transfer error: %d\n", - error); + if (error) return error; - } if (data[0] & GOODIX_BUFFER_STATUS_READY) { touch_num = data[0] & 0x0f; @@ -333,6 +281,11 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) return touch_num; } + if (data[0] == 0 && ts->firmware_name) { + if (goodix_handle_fw_request(ts)) + return 0; + } + usleep_range(1000, 2000); /* Poll every 1 - 2 ms */ } while (time_before(jiffies, max_timeout)); @@ -435,9 +388,7 @@ static irqreturn_t goodix_ts_irq_handler(int irq, void *dev_id) struct goodix_ts_data *ts = dev_id; goodix_process_events(ts); - - if (goodix_i2c_write_u8(ts->client, GOODIX_READ_COOR_ADDR, 0) < 0) - dev_err(&ts->client->dev, "I2C write end_cmd error\n"); + goodix_i2c_write_u8(ts->client, GOODIX_READ_COOR_ADDR, 0); return IRQ_HANDLED; } @@ -553,7 +504,7 @@ static int goodix_check_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len) * @cfg: config firmware to write to device * @len: config data length */ -static int goodix_send_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len) +int goodix_send_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len) { int error; @@ -562,11 +513,9 @@ static int goodix_send_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len) return error; error = goodix_i2c_write(ts->client, ts->chip->config_addr, cfg, len); - if (error) { - dev_err(&ts->client->dev, "Failed to write config data: %d", - error); + if (error) return error; - } + dev_dbg(&ts->client->dev, "Config sent successfully."); /* Let the firmware reconfigure itself, so sleep for 10ms */ @@ -651,62 +600,82 @@ static int goodix_irq_direction_input(struct goodix_ts_data *ts) return -EINVAL; /* Never reached */ } -static int goodix_int_sync(struct goodix_ts_data *ts) +int goodix_int_sync(struct goodix_ts_data *ts) { int error; error = goodix_irq_direction_output(ts, 0); if (error) - return error; + goto error; msleep(50); /* T5: 50ms */ error = goodix_irq_direction_input(ts); if (error) - return error; + goto error; return 0; + +error: + dev_err(&ts->client->dev, "Controller irq sync failed.\n"); + return error; } /** - * goodix_reset - Reset device during power on + * goodix_reset_no_int_sync - Reset device, leaving interrupt line in output mode * * @ts: goodix_ts_data pointer */ -static int goodix_reset(struct goodix_ts_data *ts) +int goodix_reset_no_int_sync(struct goodix_ts_data *ts) { int error; /* begin select I2C slave addr */ error = gpiod_direction_output(ts->gpiod_rst, 0); if (error) - return error; + goto error; msleep(20); /* T2: > 10ms */ /* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */ error = goodix_irq_direction_output(ts, ts->client->addr == 0x14); if (error) - return error; + goto error; usleep_range(100, 2000); /* T3: > 100us */ error = gpiod_direction_output(ts->gpiod_rst, 1); if (error) - return error; + goto error; usleep_range(6000, 10000); /* T4: > 5ms */ /* end select I2C slave addr */ error = gpiod_direction_input(ts->gpiod_rst); if (error) - return error; + goto error; - 
error = goodix_int_sync(ts); + return 0; + +error: + dev_err(&ts->client->dev, "Controller reset failed.\n"); + return error; +} + +/** + * goodix_reset - Reset device during power on + * + * @ts: goodix_ts_data pointer + */ +static int goodix_reset(struct goodix_ts_data *ts) +{ + int error; + + error = goodix_reset_no_int_sync(ts); if (error) return error; - return 0; + return goodix_int_sync(ts); } #ifdef ACPI_GPIO_SUPPORT @@ -931,14 +900,19 @@ static void goodix_read_config(struct goodix_ts_data *ts) int x_max, y_max; int error; - error = goodix_i2c_read(ts->client, ts->chip->config_addr, - ts->config, ts->chip->config_len); - if (error) { - dev_warn(&ts->client->dev, "Error reading config: %d\n", - error); - ts->int_trigger_type = GOODIX_INT_TRIGGER; - ts->max_touch_num = GOODIX_MAX_CONTACTS; - return; + /* + * On controllers where we need to upload the firmware + * (controllers without flash) ts->config already has the config + * at this point and the controller itself does not have it yet! + */ + if (!ts->firmware_name) { + error = goodix_i2c_read(ts->client, ts->chip->config_addr, + ts->config, ts->chip->config_len); + if (error) { + ts->int_trigger_type = GOODIX_INT_TRIGGER; + ts->max_touch_num = GOODIX_MAX_CONTACTS; + return; + } } ts->int_trigger_type = ts->config[TRIGGER_LOC] & 0x03; @@ -966,10 +940,8 @@ static int goodix_read_version(struct goodix_ts_data *ts) char id_str[GOODIX_ID_MAX_LEN + 1]; error = goodix_i2c_read(ts->client, GOODIX_REG_ID, buf, sizeof(buf)); - if (error) { - dev_err(&ts->client->dev, "read version failed: %d\n", error); + if (error) return error; - } memcpy(id_str, buf, GOODIX_ID_MAX_LEN); id_str[GOODIX_ID_MAX_LEN] = 0; @@ -995,13 +967,10 @@ static int goodix_i2c_test(struct i2c_client *client) u8 test; while (retry++ < 2) { - error = goodix_i2c_read(client, GOODIX_REG_ID, - &test, 1); + error = goodix_i2c_read(client, GOODIX_REG_ID, &test, 1); if (!error) return 0; - dev_err(&client->dev, "i2c test failed attempt %d: %d\n", - retry, error); msleep(20); } @@ -1130,7 +1099,16 @@ static void goodix_config_cb(const struct firmware *cfg, void *ctx) struct goodix_ts_data *ts = ctx; int error; - if (cfg) { + if (ts->firmware_name) { + if (!cfg) + goto err_release_cfg; + + error = goodix_check_cfg(ts, cfg->data, cfg->size); + if (error) + goto err_release_cfg; + + memcpy(ts->config, cfg->data, cfg->size); + } else if (cfg) { /* send device configuration to the firmware */ error = goodix_send_cfg(ts, cfg->data, cfg->size); if (error) @@ -1156,6 +1134,7 @@ static int goodix_ts_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct goodix_ts_data *ts; + const char *cfg_name; int error; dev_dbg(&client->dev, "I2C Address: 0x%02x\n", client->addr); @@ -1205,10 +1184,8 @@ reset: if (ts->reset_controller_at_probe) { /* reset the controller */ error = goodix_reset(ts); - if (error) { - dev_err(&client->dev, "Controller reset failed.\n"); + if (error) return error; - } } error = goodix_i2c_test(client); @@ -1223,20 +1200,27 @@ reset: return error; } + error = goodix_firmware_check(ts); + if (error) + return error; + error = goodix_read_version(ts); - if (error) { - dev_err(&client->dev, "Read version failed.\n"); + if (error) return error; - } ts->chip = goodix_get_chip_data(ts->id); if (ts->load_cfg_from_disk) { /* update device config */ - ts->cfg_name = devm_kasprintf(&client->dev, GFP_KERNEL, - "goodix_%s_cfg.bin", ts->id); - if (!ts->cfg_name) - return -ENOMEM; + error = device_property_read_string(&client->dev, + "goodix,config-name", + 
&cfg_name); + if (!error) + snprintf(ts->cfg_name, sizeof(ts->cfg_name), + "goodix/%s", cfg_name); + else + snprintf(ts->cfg_name, sizeof(ts->cfg_name), + "goodix_%s_cfg.bin", ts->id); error = request_firmware_nowait(THIS_MODULE, true, ts->cfg_name, &client->dev, GFP_KERNEL, ts, @@ -1286,6 +1270,9 @@ static int __maybe_unused goodix_suspend(struct device *dev) /* Free IRQ as IRQ pin is used as output in the suspend sequence */ goodix_free_irq(ts); + /* Save reference (calibration) info if necessary */ + goodix_save_bak_ref(ts); + /* Output LOW on the INT pin for 5 ms */ error = goodix_irq_direction_output(ts, 0); if (error) { @@ -1298,7 +1285,6 @@ static int __maybe_unused goodix_suspend(struct device *dev) error = goodix_i2c_write_u8(ts->client, GOODIX_REG_COMMAND, GOODIX_CMD_SCREEN_OFF); if (error) { - dev_err(&ts->client->dev, "Screen off command failed\n"); goodix_irq_direction_input(ts); goodix_request_irq(ts); return -EAGAIN; @@ -1341,19 +1327,14 @@ static int __maybe_unused goodix_resume(struct device *dev) error = goodix_i2c_read(ts->client, ts->chip->config_addr, &config_ver, 1); - if (error) - dev_warn(dev, "Error reading config version: %d, resetting controller\n", - error); - else if (config_ver != ts->config[0]) + if (!error && config_ver != ts->config[0]) dev_info(dev, "Config version mismatch %d != %d, resetting controller\n", config_ver, ts->config[0]); if (error != 0 || config_ver != ts->config[0]) { error = goodix_reset(ts); - if (error) { - dev_err(dev, "Controller reset failed.\n"); + if (error) return error; - } error = goodix_send_cfg(ts, ts->config, ts->chip->config_len); if (error) diff --git a/drivers/input/touchscreen/goodix.h b/drivers/input/touchscreen/goodix.h new file mode 100644 index 000000000000..62138f930d1a --- /dev/null +++ b/drivers/input/touchscreen/goodix.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __GOODIX_H__ +#define __GOODIX_H__ + +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/input.h> +#include <linux/input/mt.h> +#include <linux/input/touchscreen.h> +#include <linux/regulator/consumer.h> + +/* Register defines */ +#define GOODIX_REG_MISCTL_DSP_CTL 0x4010 +#define GOODIX_REG_MISCTL_SRAM_BANK 0x4048 +#define GOODIX_REG_MISCTL_MEM_CD_EN 0x4049 +#define GOODIX_REG_MISCTL_CACHE_EN 0x404B +#define GOODIX_REG_MISCTL_TMR0_EN 0x40B0 +#define GOODIX_REG_MISCTL_SWRST 0x4180 +#define GOODIX_REG_MISCTL_CPU_SWRST_PULSE 0x4184 +#define GOODIX_REG_MISCTL_BOOTCTL 0x4190 +#define GOODIX_REG_MISCTL_BOOT_OPT 0x4218 +#define GOODIX_REG_MISCTL_BOOT_CTL 0x5094 + +#define GOODIX_REG_FW_SIG 0x8000 +#define GOODIX_FW_SIG_LEN 10 + +#define GOODIX_REG_MAIN_CLK 0x8020 +#define GOODIX_MAIN_CLK_LEN 6 + +#define GOODIX_REG_COMMAND 0x8040 +#define GOODIX_CMD_SCREEN_OFF 0x05 + +#define GOODIX_REG_SW_WDT 0x8041 + +#define GOODIX_REG_REQUEST 0x8043 +#define GOODIX_RQST_RESPONDED 0x00 +#define GOODIX_RQST_CONFIG 0x01 +#define GOODIX_RQST_BAK_REF 0x02 +#define GOODIX_RQST_RESET 0x03 +#define GOODIX_RQST_MAIN_CLOCK 0x04 +/* + * Unknown request which gets sent by the controller approx. + * every 34 seconds once it is up and running. 
+ */ +#define GOODIX_RQST_UNKNOWN 0x06 +#define GOODIX_RQST_IDLE 0xFF + +#define GOODIX_REG_STATUS 0x8044 + +#define GOODIX_GT1X_REG_CONFIG_DATA 0x8050 +#define GOODIX_GT9X_REG_CONFIG_DATA 0x8047 +#define GOODIX_REG_ID 0x8140 +#define GOODIX_READ_COOR_ADDR 0x814E +#define GOODIX_REG_BAK_REF 0x99D0 + +#define GOODIX_ID_MAX_LEN 4 +#define GOODIX_CONFIG_MAX_LENGTH 240 +#define GOODIX_MAX_KEYS 7 + +enum goodix_irq_pin_access_method { + IRQ_PIN_ACCESS_NONE, + IRQ_PIN_ACCESS_GPIO, + IRQ_PIN_ACCESS_ACPI_GPIO, + IRQ_PIN_ACCESS_ACPI_METHOD, +}; + +struct goodix_ts_data; + +struct goodix_chip_data { + u16 config_addr; + int config_len; + int (*check_config)(struct goodix_ts_data *ts, const u8 *cfg, int len); + void (*calc_config_checksum)(struct goodix_ts_data *ts); +}; + +struct goodix_ts_data { + struct i2c_client *client; + struct input_dev *input_dev; + const struct goodix_chip_data *chip; + const char *firmware_name; + struct touchscreen_properties prop; + unsigned int max_touch_num; + unsigned int int_trigger_type; + struct regulator *avdd28; + struct regulator *vddio; + struct gpio_desc *gpiod_int; + struct gpio_desc *gpiod_rst; + int gpio_count; + int gpio_int_idx; + char id[GOODIX_ID_MAX_LEN + 1]; + char cfg_name[64]; + u16 version; + bool reset_controller_at_probe; + bool load_cfg_from_disk; + struct completion firmware_loading_complete; + unsigned long irq_flags; + enum goodix_irq_pin_access_method irq_pin_access_method; + unsigned int contact_size; + u8 config[GOODIX_CONFIG_MAX_LENGTH]; + unsigned short keymap[GOODIX_MAX_KEYS]; + u8 main_clk[GOODIX_MAIN_CLK_LEN]; + int bak_ref_len; + u8 *bak_ref; +}; + +int goodix_i2c_read(struct i2c_client *client, u16 reg, u8 *buf, int len); +int goodix_i2c_write(struct i2c_client *client, u16 reg, const u8 *buf, int len); +int goodix_i2c_write_u8(struct i2c_client *client, u16 reg, u8 value); +int goodix_send_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len); +int goodix_int_sync(struct goodix_ts_data *ts); +int goodix_reset_no_int_sync(struct goodix_ts_data *ts); + +int goodix_firmware_check(struct goodix_ts_data *ts); +bool goodix_handle_fw_request(struct goodix_ts_data *ts); +void goodix_save_bak_ref(struct goodix_ts_data *ts); + +#endif diff --git a/drivers/input/touchscreen/goodix_fwupload.c b/drivers/input/touchscreen/goodix_fwupload.c new file mode 100644 index 000000000000..c1e7a2413078 --- /dev/null +++ b/drivers/input/touchscreen/goodix_fwupload.c @@ -0,0 +1,427 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Goodix Touchscreen firmware upload support + * + * Copyright (c) 2021 Hans de Goede <hdegoede@redhat.com> + * + * This is a rewrite of gt9xx_update.c from the Allwinner H3 BSP which is: + * Copyright (c) 2010 - 2012 Goodix Technology. 
+ * Author: andrew@goodix.com + */ + +#include <linux/device.h> +#include <linux/firmware.h> +#include <linux/i2c.h> +#include "goodix.h" + +#define GOODIX_FW_HEADER_LENGTH sizeof(struct goodix_fw_header) +#define GOODIX_FW_SECTION_LENGTH 0x2000 +#define GOODIX_FW_DSP_LENGTH 0x1000 +#define GOODIX_FW_UPLOAD_ADDRESS 0xc000 + +#define GOODIX_CFG_LOC_HAVE_KEY 7 +#define GOODIX_CFG_LOC_DRVA_NUM 27 +#define GOODIX_CFG_LOC_DRVB_NUM 28 +#define GOODIX_CFG_LOC_SENS_NUM 29 + +struct goodix_fw_header { + u8 hw_info[4]; + u8 pid[8]; + u8 vid[2]; +} __packed; + +static u16 goodix_firmware_checksum(const u8 *data, int size) +{ + u16 checksum = 0; + int i; + + for (i = 0; i < size; i += 2) + checksum += (data[i] << 8) + data[i + 1]; + + return checksum; +} + +static int goodix_firmware_verify(struct device *dev, const struct firmware *fw) +{ + const struct goodix_fw_header *fw_header; + size_t expected_size; + const u8 *data; + u16 checksum; + char buf[9]; + + expected_size = GOODIX_FW_HEADER_LENGTH + 4 * GOODIX_FW_SECTION_LENGTH + + GOODIX_FW_DSP_LENGTH; + if (fw->size != expected_size) { + dev_err(dev, "Firmware has wrong size, expected %zu got %zu\n", + expected_size, fw->size); + return -EINVAL; + } + + data = fw->data + GOODIX_FW_HEADER_LENGTH; + checksum = goodix_firmware_checksum(data, 4 * GOODIX_FW_SECTION_LENGTH); + if (checksum) { + dev_err(dev, "Main firmware checksum error\n"); + return -EINVAL; + } + + data += 4 * GOODIX_FW_SECTION_LENGTH; + checksum = goodix_firmware_checksum(data, GOODIX_FW_DSP_LENGTH); + if (checksum) { + dev_err(dev, "DSP firmware checksum error\n"); + return -EINVAL; + } + + fw_header = (const struct goodix_fw_header *)fw->data; + dev_info(dev, "Firmware hardware info %02x%02x%02x%02x\n", + fw_header->hw_info[0], fw_header->hw_info[1], + fw_header->hw_info[2], fw_header->hw_info[3]); + /* pid is a 8 byte buffer containing a string, weird I know */ + memcpy(buf, fw_header->pid, 8); + buf[8] = 0; + dev_info(dev, "Firmware PID: %s VID: %02x%02x\n", buf, + fw_header->vid[0], fw_header->vid[1]); + return 0; +} + +static int goodix_enter_upload_mode(struct i2c_client *client) +{ + int tries, error; + u8 val; + + tries = 200; + do { + error = goodix_i2c_write_u8(client, + GOODIX_REG_MISCTL_SWRST, 0x0c); + if (error) + return error; + + error = goodix_i2c_read(client, + GOODIX_REG_MISCTL_SWRST, &val, 1); + if (error) + return error; + + if (val == 0x0c) + break; + } while (--tries); + + if (!tries) { + dev_err(&client->dev, "Error could not hold ss51 & dsp\n"); + return -EIO; + } + + /* DSP_CK and DSP_ALU_CK PowerOn */ + error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_DSP_CTL, 0x00); + if (error) + return error; + + /* Disable watchdog */ + error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_TMR0_EN, 0x00); + if (error) + return error; + + /* Clear cache enable */ + error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_CACHE_EN, 0x00); + if (error) + return error; + + /* Set boot from SRAM */ + error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_BOOTCTL, 0x02); + if (error) + return error; + + /* Software reboot */ + error = goodix_i2c_write_u8(client, + GOODIX_REG_MISCTL_CPU_SWRST_PULSE, 0x01); + if (error) + return error; + + /* Clear control flag */ + error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_BOOTCTL, 0x00); + if (error) + return error; + + /* Set scramble */ + error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_BOOT_OPT, 0x00); + if (error) + return error; + + /* Enable accessing code */ + error = goodix_i2c_write_u8(client, 
GOODIX_REG_MISCTL_MEM_CD_EN, 0x01); + if (error) + return error; + + return 0; +} + +static int goodix_start_firmware(struct i2c_client *client) +{ + int error; + u8 val; + + /* Init software watchdog */ + error = goodix_i2c_write_u8(client, GOODIX_REG_SW_WDT, 0xaa); + if (error) + return error; + + /* Release SS51 & DSP */ + error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_SWRST, 0x00); + if (error) + return error; + + error = goodix_i2c_read(client, GOODIX_REG_SW_WDT, &val, 1); + if (error) + return error; + + /* The value we've written to SW_WDT should have been cleared now */ + if (val == 0xaa) { + dev_err(&client->dev, "Error SW_WDT reg not cleared on fw startup\n"); + return -EIO; + } + + /* Re-init software watchdog */ + error = goodix_i2c_write_u8(client, GOODIX_REG_SW_WDT, 0xaa); + if (error) + return error; + + return 0; +} + +static int goodix_firmware_upload(struct goodix_ts_data *ts) +{ + const struct firmware *fw; + char fw_name[64]; + const u8 *data; + int error; + + snprintf(fw_name, sizeof(fw_name), "goodix/%s", ts->firmware_name); + + error = request_firmware(&fw, fw_name, &ts->client->dev); + if (error) { + dev_err(&ts->client->dev, "Firmware request error %d\n", error); + return error; + } + + error = goodix_firmware_verify(&ts->client->dev, fw); + if (error) + goto release; + + error = goodix_reset_no_int_sync(ts); + if (error) + return error; + + error = goodix_enter_upload_mode(ts->client); + if (error) + goto release; + + /* Select SRAM bank 0 and upload section 1 & 2 */ + error = goodix_i2c_write_u8(ts->client, + GOODIX_REG_MISCTL_SRAM_BANK, 0x00); + if (error) + goto release; + + data = fw->data + GOODIX_FW_HEADER_LENGTH; + error = goodix_i2c_write(ts->client, GOODIX_FW_UPLOAD_ADDRESS, + data, 2 * GOODIX_FW_SECTION_LENGTH); + if (error) + goto release; + + /* Select SRAM bank 1 and upload section 3 & 4 */ + error = goodix_i2c_write_u8(ts->client, + GOODIX_REG_MISCTL_SRAM_BANK, 0x01); + if (error) + goto release; + + data += 2 * GOODIX_FW_SECTION_LENGTH; + error = goodix_i2c_write(ts->client, GOODIX_FW_UPLOAD_ADDRESS, + data, 2 * GOODIX_FW_SECTION_LENGTH); + if (error) + goto release; + + /* Select SRAM bank 2 and upload the DSP firmware */ + error = goodix_i2c_write_u8(ts->client, + GOODIX_REG_MISCTL_SRAM_BANK, 0x02); + if (error) + goto release; + + data += 2 * GOODIX_FW_SECTION_LENGTH; + error = goodix_i2c_write(ts->client, GOODIX_FW_UPLOAD_ADDRESS, + data, GOODIX_FW_DSP_LENGTH); + if (error) + goto release; + + error = goodix_start_firmware(ts->client); + if (error) + goto release; + + error = goodix_int_sync(ts); +release: + release_firmware(fw); + return error; +} + +static int goodix_prepare_bak_ref(struct goodix_ts_data *ts) +{ + u8 have_key, driver_num, sensor_num; + + if (ts->bak_ref) + return 0; /* Already done */ + + have_key = (ts->config[GOODIX_CFG_LOC_HAVE_KEY] & 0x01); + + driver_num = (ts->config[GOODIX_CFG_LOC_DRVA_NUM] & 0x1f) + + (ts->config[GOODIX_CFG_LOC_DRVB_NUM] & 0x1f); + if (have_key) + driver_num--; + + sensor_num = (ts->config[GOODIX_CFG_LOC_SENS_NUM] & 0x0f) + + ((ts->config[GOODIX_CFG_LOC_SENS_NUM] >> 4) & 0x0f); + + dev_dbg(&ts->client->dev, "Drv %d Sen %d Key %d\n", + driver_num, sensor_num, have_key); + + ts->bak_ref_len = (driver_num * (sensor_num - 2) + 2) * 2; + + ts->bak_ref = devm_kzalloc(&ts->client->dev, + ts->bak_ref_len, GFP_KERNEL); + if (!ts->bak_ref) + return -ENOMEM; + + /* + * The bak_ref array contains the backup of an array of (self/auto) + * calibration related values which the Android version of the driver + * 
stores on the filesystem so that it can be restored after reboot. + * The mainline kernel never writes directly to the filesystem like + * this, we always start with all the values which give a correction + * factor in approx. the -20 - +20 range (in 2s complement) set to 0. + * + * Note the touchscreen works fine without restoring the reference + * values after a reboot / power-cycle. + * + * The last 2 bytes are a 16 bits unsigned checksum which is expected + * to make the addition of all 16 bit unsigned values in the array add + * up to 1 (rather than the usual 0), so we must set the last byte to 1. + */ + ts->bak_ref[ts->bak_ref_len - 1] = 1; + + return 0; +} + +static int goodix_send_main_clock(struct goodix_ts_data *ts) +{ + u32 main_clk = 54; /* Default main clock */ + u8 checksum = 0; + int i; + + device_property_read_u32(&ts->client->dev, + "goodix,main-clk", &main_clk); + + for (i = 0; i < (GOODIX_MAIN_CLK_LEN - 1); i++) { + ts->main_clk[i] = main_clk; + checksum += main_clk; + } + + /* The value of all bytes combined must be 0 */ + ts->main_clk[GOODIX_MAIN_CLK_LEN - 1] = 256 - checksum; + + return goodix_i2c_write(ts->client, GOODIX_REG_MAIN_CLK, + ts->main_clk, GOODIX_MAIN_CLK_LEN); +} + +int goodix_firmware_check(struct goodix_ts_data *ts) +{ + device_property_read_string(&ts->client->dev, + "firmware-name", &ts->firmware_name); + if (!ts->firmware_name) + return 0; + + if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_NONE) { + dev_err(&ts->client->dev, "Error no IRQ-pin access method, cannot upload fw.\n"); + return -EINVAL; + } + + dev_info(&ts->client->dev, "Touchscreen controller needs fw-upload\n"); + ts->load_cfg_from_disk = true; + + return goodix_firmware_upload(ts); +} + +bool goodix_handle_fw_request(struct goodix_ts_data *ts) +{ + int error; + u8 val; + + error = goodix_i2c_read(ts->client, GOODIX_REG_REQUEST, &val, 1); + if (error) + return false; + + switch (val) { + case GOODIX_RQST_RESPONDED: + /* + * If we read back our own last ack the IRQ was not for + * a request. 
+ */ + return false; + case GOODIX_RQST_CONFIG: + error = goodix_send_cfg(ts, ts->config, ts->chip->config_len); + if (error) + return false; + + break; + case GOODIX_RQST_BAK_REF: + error = goodix_prepare_bak_ref(ts); + if (error) + return false; + + error = goodix_i2c_write(ts->client, GOODIX_REG_BAK_REF, + ts->bak_ref, ts->bak_ref_len); + if (error) + return false; + + break; + case GOODIX_RQST_RESET: + error = goodix_firmware_upload(ts); + if (error) + return false; + + break; + case GOODIX_RQST_MAIN_CLOCK: + error = goodix_send_main_clock(ts); + if (error) + return false; + + break; + case GOODIX_RQST_UNKNOWN: + case GOODIX_RQST_IDLE: + break; + default: + dev_err_ratelimited(&ts->client->dev, "Unknown Request: 0x%02x\n", val); + } + + /* Ack the request */ + goodix_i2c_write_u8(ts->client, + GOODIX_REG_REQUEST, GOODIX_RQST_RESPONDED); + return true; +} + +void goodix_save_bak_ref(struct goodix_ts_data *ts) +{ + int error; + u8 val; + + if (!ts->firmware_name) + return; + + error = goodix_i2c_read(ts->client, GOODIX_REG_STATUS, &val, 1); + if (error) + return; + + if (!(val & 0x80)) + return; + + error = goodix_i2c_read(ts->client, GOODIX_REG_BAK_REF, + ts->bak_ref, ts->bak_ref_len); + if (error) { + memset(ts->bak_ref, 0, ts->bak_ref_len); + ts->bak_ref[ts->bak_ref_len - 1] = 1; + } +} diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c index 30576a5f2f04..2bd407d86bae 100644 --- a/drivers/input/touchscreen/ili210x.c +++ b/drivers/input/touchscreen/ili210x.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/crc-ccitt.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> +#include <linux/ihex.h> #include <linux/input.h> #include <linux/input/mt.h> #include <linux/input/touchscreen.h> @@ -12,7 +14,7 @@ #include <linux/slab.h> #include <asm/unaligned.h> -#define ILI2XXX_POLL_PERIOD 20 +#define ILI2XXX_POLL_PERIOD 15 #define ILI210X_DATA_SIZE 64 #define ILI211X_DATA_SIZE 43 @@ -22,8 +24,23 @@ /* Touchscreen commands */ #define REG_TOUCHDATA 0x10 #define REG_PANEL_INFO 0x20 +#define REG_FIRMWARE_VERSION 0x40 +#define REG_PROTOCOL_VERSION 0x42 +#define REG_KERNEL_VERSION 0x61 +#define REG_IC_BUSY 0x80 +#define REG_IC_BUSY_NOT_BUSY 0x50 +#define REG_GET_MODE 0xc0 +#define REG_GET_MODE_AP 0x5a +#define REG_GET_MODE_BL 0x55 +#define REG_SET_MODE_AP 0xc1 +#define REG_SET_MODE_BL 0xc2 +#define REG_WRITE_DATA 0xc3 +#define REG_WRITE_ENABLE 0xc4 +#define REG_READ_DATA_CRC 0xc7 #define REG_CALIBRATE 0xcc +#define ILI251X_FW_FILENAME "ilitek/ili251x.bin" + struct ili2xxx_chip { int (*read_reg)(struct i2c_client *client, u8 reg, void *buf, size_t len); @@ -35,6 +52,7 @@ struct ili2xxx_chip { unsigned int max_touches; unsigned int resolution; bool has_calibrate_reg; + bool has_firmware_proto; bool has_pressure_reg; }; @@ -44,6 +62,10 @@ struct ili210x { struct gpio_desc *reset_gpio; struct touchscreen_properties prop; const struct ili2xxx_chip *chip; + u8 version_firmware[8]; + u8 version_kernel[5]; + u8 version_proto[2]; + u8 ic_mode[2]; bool stop; }; @@ -202,15 +224,17 @@ static const struct ili2xxx_chip ili212x_chip = { .has_calibrate_reg = true, }; -static int ili251x_read_reg(struct i2c_client *client, - u8 reg, void *buf, size_t len) +static int ili251x_read_reg_common(struct i2c_client *client, + u8 reg, void *buf, size_t len, + unsigned int delay) { int error; int ret; ret = i2c_master_send(client, ®, 1); if (ret == 1) { - usleep_range(5000, 5500); + if (delay) + usleep_range(delay, delay + 500); ret = 
i2c_master_recv(client, buf, len); if (ret == len) @@ -222,12 +246,18 @@ static int ili251x_read_reg(struct i2c_client *client, return ret; } +static int ili251x_read_reg(struct i2c_client *client, + u8 reg, void *buf, size_t len) +{ + return ili251x_read_reg_common(client, reg, buf, len, 5000); +} + static int ili251x_read_touch_data(struct i2c_client *client, u8 *data) { int error; - error = ili251x_read_reg(client, REG_TOUCHDATA, - data, ILI251X_DATA_SIZE1); + error = ili251x_read_reg_common(client, REG_TOUCHDATA, + data, ILI251X_DATA_SIZE1, 0); if (!error && data[0] == 2) { error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1, ILI251X_DATA_SIZE2); @@ -268,6 +298,7 @@ static const struct ili2xxx_chip ili251x_chip = { .continue_polling = ili251x_check_continue_polling, .max_touches = 10, .has_calibrate_reg = true, + .has_firmware_proto = true, .has_pressure_reg = true, }; @@ -303,10 +334,13 @@ static irqreturn_t ili210x_irq(int irq, void *irq_data) const struct ili2xxx_chip *chip = priv->chip; u8 touchdata[ILI210X_DATA_SIZE] = { 0 }; bool keep_polling; + ktime_t time_next; + s64 time_delta; bool touch; int error; do { + time_next = ktime_add_ms(ktime_get(), ILI2XXX_POLL_PERIOD); error = chip->get_touch_data(client, touchdata); if (error) { dev_err(&client->dev, @@ -316,13 +350,201 @@ static irqreturn_t ili210x_irq(int irq, void *irq_data) touch = ili210x_report_events(priv, touchdata); keep_polling = chip->continue_polling(touchdata, touch); - if (keep_polling) - msleep(ILI2XXX_POLL_PERIOD); + if (keep_polling) { + time_delta = ktime_us_delta(time_next, ktime_get()); + if (time_delta > 0) + usleep_range(time_delta, time_delta + 1000); + } } while (!priv->stop && keep_polling); return IRQ_HANDLED; } +static int ili251x_firmware_update_resolution(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ili210x *priv = i2c_get_clientdata(client); + u16 resx, resy; + u8 rs[10]; + int error; + + /* The firmware update blob might have changed the resolution. */ + error = priv->chip->read_reg(client, REG_PANEL_INFO, &rs, sizeof(rs)); + if (error) + return error; + + resx = le16_to_cpup((__le16 *)rs); + resy = le16_to_cpup((__le16 *)(rs + 2)); + + /* The value reported by the firmware is invalid. 
*/ + if (!resx || resx == 0xffff || !resy || resy == 0xffff) + return -EINVAL; + + input_abs_set_max(priv->input, ABS_X, resx - 1); + input_abs_set_max(priv->input, ABS_Y, resy - 1); + input_abs_set_max(priv->input, ABS_MT_POSITION_X, resx - 1); + input_abs_set_max(priv->input, ABS_MT_POSITION_Y, resy - 1); + + return 0; +} + +static ssize_t ili251x_firmware_update_firmware_version(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ili210x *priv = i2c_get_clientdata(client); + int error; + u8 fw[8]; + + /* Get firmware version */ + error = priv->chip->read_reg(client, REG_FIRMWARE_VERSION, + &fw, sizeof(fw)); + if (!error) + memcpy(priv->version_firmware, fw, sizeof(fw)); + + return error; +} + +static ssize_t ili251x_firmware_update_kernel_version(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ili210x *priv = i2c_get_clientdata(client); + int error; + u8 kv[5]; + + /* Get kernel version */ + error = priv->chip->read_reg(client, REG_KERNEL_VERSION, + &kv, sizeof(kv)); + if (!error) + memcpy(priv->version_kernel, kv, sizeof(kv)); + + return error; +} + +static ssize_t ili251x_firmware_update_protocol_version(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ili210x *priv = i2c_get_clientdata(client); + int error; + u8 pv[2]; + + /* Get protocol version */ + error = priv->chip->read_reg(client, REG_PROTOCOL_VERSION, + &pv, sizeof(pv)); + if (!error) + memcpy(priv->version_proto, pv, sizeof(pv)); + + return error; +} + +static ssize_t ili251x_firmware_update_ic_mode(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ili210x *priv = i2c_get_clientdata(client); + int error; + u8 md[2]; + + /* Get chip boot mode */ + error = priv->chip->read_reg(client, REG_GET_MODE, &md, sizeof(md)); + if (!error) + memcpy(priv->ic_mode, md, sizeof(md)); + + return error; +} + +static int ili251x_firmware_update_cached_state(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ili210x *priv = i2c_get_clientdata(client); + int error; + + if (!priv->chip->has_firmware_proto) + return 0; + + /* Wait for firmware to boot and stabilize itself. */ + msleep(200); + + /* Firmware does report valid information. 
*/ + error = ili251x_firmware_update_resolution(dev); + if (error) + return error; + + error = ili251x_firmware_update_firmware_version(dev); + if (error) + return error; + + error = ili251x_firmware_update_kernel_version(dev); + if (error) + return error; + + error = ili251x_firmware_update_protocol_version(dev); + if (error) + return error; + + error = ili251x_firmware_update_ic_mode(dev); + if (error) + return error; + + return 0; +} + +static ssize_t ili251x_firmware_version_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ili210x *priv = i2c_get_clientdata(client); + u8 *fw = priv->version_firmware; + + return sysfs_emit(buf, "%02x%02x.%02x%02x.%02x%02x.%02x%02x\n", + fw[0], fw[1], fw[2], fw[3], + fw[4], fw[5], fw[6], fw[7]); +} +static DEVICE_ATTR(firmware_version, 0444, ili251x_firmware_version_show, NULL); + +static ssize_t ili251x_kernel_version_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ili210x *priv = i2c_get_clientdata(client); + u8 *kv = priv->version_kernel; + + return sysfs_emit(buf, "%02x.%02x.%02x.%02x.%02x\n", + kv[0], kv[1], kv[2], kv[3], kv[4]); +} +static DEVICE_ATTR(kernel_version, 0444, ili251x_kernel_version_show, NULL); + +static ssize_t ili251x_protocol_version_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ili210x *priv = i2c_get_clientdata(client); + u8 *pv = priv->version_proto; + + return sysfs_emit(buf, "%02x.%02x\n", pv[0], pv[1]); +} +static DEVICE_ATTR(protocol_version, 0444, ili251x_protocol_version_show, NULL); + +static ssize_t ili251x_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ili210x *priv = i2c_get_clientdata(client); + u8 *md = priv->ic_mode; + char *mode = "AP"; + + if (md[0] == REG_GET_MODE_AP) /* Application Mode */ + mode = "AP"; + else if (md[0] == REG_GET_MODE_BL) /* BootLoader Mode */ + mode = "BL"; + else /* Unknown Mode */ + mode = "??"; + + return sysfs_emit(buf, "%02x.%02x:%s\n", md[0], md[1], mode); +} +static DEVICE_ATTR(mode, 0444, ili251x_mode_show, NULL); + static ssize_t ili210x_calibrate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -349,24 +571,333 @@ static ssize_t ili210x_calibrate(struct device *dev, } static DEVICE_ATTR(calibrate, S_IWUSR, NULL, ili210x_calibrate); +static int ili251x_firmware_to_buffer(const struct firmware *fw, + u8 **buf, u16 *ac_end, u16 *df_end) +{ + const struct ihex_binrec *rec; + u32 fw_addr, fw_last_addr = 0; + u16 fw_len; + u8 *fw_buf; + int error; + + /* + * The firmware ihex blob can never be bigger than 64 kiB, so make this + * simple -- allocate a 64 kiB buffer, iterate over the ihex blob records + * once, copy them all into this buffer at the right locations, and then + * do all operations on this linear buffer. 
+ */ + fw_buf = kzalloc(SZ_64K, GFP_KERNEL); + if (!fw_buf) + return -ENOMEM; + + rec = (const struct ihex_binrec *)fw->data; + while (rec) { + fw_addr = be32_to_cpu(rec->addr); + fw_len = be16_to_cpu(rec->len); + + /* The last 32 Byte firmware block can be 0xffe0 */ + if (fw_addr + fw_len > SZ_64K || fw_addr > SZ_64K - 32) { + error = -EFBIG; + goto err_big; + } + + /* Find the last address before DF start address, that is AC end */ + if (fw_addr == 0xf000) + *ac_end = fw_last_addr; + fw_last_addr = fw_addr + fw_len; + + memcpy(fw_buf + fw_addr, rec->data, fw_len); + rec = ihex_next_binrec(rec); + } + + /* DF end address is the last address in the firmware blob */ + *df_end = fw_addr + fw_len; + *buf = fw_buf; + return 0; + +err_big: + kfree(fw_buf); + return error; +} + +/* Switch mode between Application and BootLoader */ +static int ili251x_switch_ic_mode(struct i2c_client *client, u8 cmd_mode) +{ + struct ili210x *priv = i2c_get_clientdata(client); + u8 cmd_wren[3] = { REG_WRITE_ENABLE, 0x5a, 0xa5 }; + u8 md[2]; + int error; + + error = priv->chip->read_reg(client, REG_GET_MODE, md, sizeof(md)); + if (error) + return error; + /* Mode already set */ + if ((cmd_mode == REG_SET_MODE_AP && md[0] == REG_GET_MODE_AP) || + (cmd_mode == REG_SET_MODE_BL && md[0] == REG_GET_MODE_BL)) + return 0; + + /* Unlock writes */ + error = i2c_master_send(client, cmd_wren, sizeof(cmd_wren)); + if (error != sizeof(cmd_wren)) + return -EINVAL; + + mdelay(20); + + /* Select mode (BootLoader or Application) */ + error = i2c_master_send(client, &cmd_mode, 1); + if (error != 1) + return -EINVAL; + + mdelay(200); /* Reboot into bootloader takes a lot of time ... */ + + /* Read back mode */ + error = priv->chip->read_reg(client, REG_GET_MODE, md, sizeof(md)); + if (error) + return error; + /* Check if mode is correct now. */ + if ((cmd_mode == REG_SET_MODE_AP && md[0] == REG_GET_MODE_AP) || + (cmd_mode == REG_SET_MODE_BL && md[0] == REG_GET_MODE_BL)) + return 0; + + return -EINVAL; +} + +static int ili251x_firmware_busy(struct i2c_client *client) +{ + struct ili210x *priv = i2c_get_clientdata(client); + int error, i = 0; + u8 data; + + do { + /* The read_reg already contains suitable delay */ + error = priv->chip->read_reg(client, REG_IC_BUSY, &data, 1); + if (error) + return error; + if (i++ == 100000) + return -ETIMEDOUT; + } while (data != REG_IC_BUSY_NOT_BUSY); + + return 0; +} + +static int ili251x_firmware_write_to_ic(struct device *dev, u8 *fwbuf, + u16 start, u16 end, u8 dataflash) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ili210x *priv = i2c_get_clientdata(client); + u8 cmd_crc = REG_READ_DATA_CRC; + u8 crcrb[4] = { 0 }; + u8 fw_data[33]; + u16 fw_addr; + int error; + + /* + * The DF (dataflash) needs 2 bytes offset for unknown reasons, + * the AC (application) has 2 bytes CRC16-CCITT at the end. + */ + u16 crc = crc_ccitt(0, fwbuf + start + (dataflash ? 
2 : 0), + end - start - 2); + + /* Unlock write to either AC (application) or DF (dataflash) area */ + u8 cmd_wr[10] = { + REG_WRITE_ENABLE, 0x5a, 0xa5, dataflash, + (end >> 16) & 0xff, (end >> 8) & 0xff, end & 0xff, + (crc >> 16) & 0xff, (crc >> 8) & 0xff, crc & 0xff + }; + + error = i2c_master_send(client, cmd_wr, sizeof(cmd_wr)); + if (error != sizeof(cmd_wr)) + return -EINVAL; + + error = ili251x_firmware_busy(client); + if (error) + return error; + + for (fw_addr = start; fw_addr < end; fw_addr += 32) { + fw_data[0] = REG_WRITE_DATA; + memcpy(&(fw_data[1]), fwbuf + fw_addr, 32); + error = i2c_master_send(client, fw_data, 33); + if (error != sizeof(fw_data)) + return error; + error = ili251x_firmware_busy(client); + if (error) + return error; + } + + error = i2c_master_send(client, &cmd_crc, 1); + if (error != 1) + return -EINVAL; + + error = ili251x_firmware_busy(client); + if (error) + return error; + + error = priv->chip->read_reg(client, REG_READ_DATA_CRC, + &crcrb, sizeof(crcrb)); + if (error) + return error; + + /* Check CRC readback */ + if ((crcrb[0] != (crc & 0xff)) || crcrb[1] != ((crc >> 8) & 0xff)) + return -EINVAL; + + return 0; +} + +static int ili251x_firmware_reset(struct i2c_client *client) +{ + u8 cmd_reset[2] = { 0xf2, 0x01 }; + int error; + + error = i2c_master_send(client, cmd_reset, sizeof(cmd_reset)); + if (error != sizeof(cmd_reset)) + return -EINVAL; + + return ili251x_firmware_busy(client); +} + +static void ili251x_hardware_reset(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ili210x *priv = i2c_get_clientdata(client); + + /* Reset the controller */ + gpiod_set_value_cansleep(priv->reset_gpio, 1); + usleep_range(10000, 15000); + gpiod_set_value_cansleep(priv->reset_gpio, 0); + msleep(300); +} + +static ssize_t ili210x_firmware_update_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + const char *fwname = ILI251X_FW_FILENAME; + const struct firmware *fw; + u16 ac_end, df_end; + u8 *fwbuf; + int error; + int i; + + error = request_ihex_firmware(&fw, fwname, dev); + if (error) { + dev_err(dev, "Failed to request firmware %s, error=%d\n", + fwname, error); + return error; + } + + error = ili251x_firmware_to_buffer(fw, &fwbuf, &ac_end, &df_end); + release_firmware(fw); + if (error) + return error; + + /* + * Disable touchscreen IRQ, so that we would not get spurious touch + * interrupt during firmware update, and so that the IRQ handler won't + * trigger and interfere with the firmware update. There is no bit in + * the touch controller to disable the IRQs during update, so we have + * to do it this way here. + */ + disable_irq(client->irq); + + dev_dbg(dev, "Firmware update started, firmware=%s\n", fwname); + + ili251x_hardware_reset(dev); + + error = ili251x_firmware_reset(client); + if (error) + goto exit; + + /* This may not succeed on first try, so re-try a few times. */ + for (i = 0; i < 5; i++) { + error = ili251x_switch_ic_mode(client, REG_SET_MODE_BL); + if (!error) + break; + } + + if (error) + goto exit; + + dev_dbg(dev, "IC is now in BootLoader mode\n"); + + msleep(200); /* The bootloader seems to need some time too. 
*/ + + error = ili251x_firmware_write_to_ic(dev, fwbuf, 0xf000, df_end, 1); + if (error) { + dev_err(dev, "DF firmware update failed, error=%d\n", error); + goto exit; + } + + dev_dbg(dev, "DataFlash firmware written\n"); + + error = ili251x_firmware_write_to_ic(dev, fwbuf, 0x2000, ac_end, 0); + if (error) { + dev_err(dev, "AC firmware update failed, error=%d\n", error); + goto exit; + } + + dev_dbg(dev, "Application firmware written\n"); + + /* This may not succeed on first try, so re-try a few times. */ + for (i = 0; i < 5; i++) { + error = ili251x_switch_ic_mode(client, REG_SET_MODE_AP); + if (!error) + break; + } + + if (error) + goto exit; + + dev_dbg(dev, "IC is now in Application mode\n"); + + error = ili251x_firmware_update_cached_state(dev); + if (error) + goto exit; + + error = count; + +exit: + ili251x_hardware_reset(dev); + dev_dbg(dev, "Firmware update ended, error=%i\n", error); + enable_irq(client->irq); + kfree(fwbuf); + return error; +} + +static DEVICE_ATTR(firmware_update, 0200, NULL, ili210x_firmware_update_store); + static struct attribute *ili210x_attributes[] = { &dev_attr_calibrate.attr, + &dev_attr_firmware_update.attr, + &dev_attr_firmware_version.attr, + &dev_attr_kernel_version.attr, + &dev_attr_protocol_version.attr, + &dev_attr_mode.attr, NULL, }; -static umode_t ili210x_calibrate_visible(struct kobject *kobj, +static umode_t ili210x_attributes_visible(struct kobject *kobj, struct attribute *attr, int index) { struct device *dev = kobj_to_dev(kobj); struct i2c_client *client = to_i2c_client(dev); struct ili210x *priv = i2c_get_clientdata(client); - return priv->chip->has_calibrate_reg ? attr->mode : 0; + /* Calibrate is present on all ILI2xxx which have calibrate register */ + if (attr == &dev_attr_calibrate.attr) + return priv->chip->has_calibrate_reg ? 
attr->mode : 0; + + /* Firmware/Kernel/Protocol/BootMode is implemented only for ILI251x */ + if (!priv->chip->has_firmware_proto) + return 0; + + return attr->mode; } static const struct attribute_group ili210x_attr_group = { .attrs = ili210x_attributes, - .is_visible = ili210x_calibrate_visible, + .is_visible = ili210x_attributes_visible, }; static void ili210x_power_down(void *data) @@ -449,6 +980,12 @@ static int ili210x_i2c_probe(struct i2c_client *client, input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0); if (priv->chip->has_pressure_reg) input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0); + error = ili251x_firmware_update_cached_state(dev); + if (error) { + dev_err(dev, "Unable to cache firmware information, err: %d\n", + error); + return error; + } touchscreen_parse_properties(input, true, &priv->prop); error = input_mt_init_slots(input, priv->chip->max_touches, diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c index 4d2d22a86977..3a4952935366 100644 --- a/drivers/input/touchscreen/raydium_i2c_ts.c +++ b/drivers/input/touchscreen/raydium_i2c_ts.c @@ -37,6 +37,7 @@ #define RM_CMD_BOOT_READ 0x44 /* send wait bl data ready*/ #define RM_BOOT_RDY 0xFF /* bl data ready */ +#define RM_BOOT_CMD_READHWID 0x0E /* read hwid */ /* I2C main commands */ #define RM_CMD_QUERY_BANK 0x2B @@ -290,6 +291,44 @@ static int raydium_i2c_sw_reset(struct i2c_client *client) return 0; } +static int raydium_i2c_query_ts_bootloader_info(struct raydium_data *ts) +{ + struct i2c_client *client = ts->client; + static const u8 get_hwid[] = { RM_BOOT_CMD_READHWID, + 0x10, 0xc0, 0x01, 0x00, 0x04, 0x00 }; + u8 rbuf[5] = { 0 }; + u32 hw_ver; + int error; + + error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, + get_hwid, sizeof(get_hwid)); + if (error) { + dev_err(&client->dev, "WRT HWID command failed: %d\n", error); + return error; + } + + error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, rbuf, 1); + if (error) { + dev_err(&client->dev, "Ack HWID command failed: %d\n", error); + return error; + } + + error = raydium_i2c_read(client, RM_CMD_BOOT_CHK, rbuf, sizeof(rbuf)); + if (error) { + dev_err(&client->dev, "Read HWID command failed: %d (%4ph)\n", + error, rbuf + 1); + hw_ver = 0xffffffffUL; + } else { + hw_ver = get_unaligned_be32(rbuf + 1); + } + + ts->info.hw_ver = cpu_to_le32(hw_ver); + ts->info.main_ver = 0xff; + ts->info.sub_ver = 0xff; + + return error; +} + static int raydium_i2c_query_ts_info(struct raydium_data *ts) { struct i2c_client *client = ts->client; @@ -388,13 +427,10 @@ static int raydium_i2c_initialize(struct raydium_data *ts) if (error) ts->boot_mode = RAYDIUM_TS_BLDR; - if (ts->boot_mode == RAYDIUM_TS_BLDR) { - ts->info.hw_ver = cpu_to_le32(0xffffffffUL); - ts->info.main_ver = 0xff; - ts->info.sub_ver = 0xff; - } else { + if (ts->boot_mode == RAYDIUM_TS_BLDR) + raydium_i2c_query_ts_bootloader_info(ts); + else raydium_i2c_query_ts_info(ts); - } return error; } @@ -1082,11 +1118,11 @@ static int raydium_i2c_probe(struct i2c_client *client, if (error) return error; - error = devm_add_action(&client->dev, raydium_i2c_power_off, ts); + error = devm_add_action_or_reset(&client->dev, + raydium_i2c_power_off, ts); if (error) { dev_err(&client->dev, "failed to install power off action: %d\n", error); - raydium_i2c_power_off(ts); return error; } @@ -1218,7 +1254,7 @@ static SIMPLE_DEV_PM_OPS(raydium_i2c_pm_ops, raydium_i2c_suspend, raydium_i2c_resume); static const struct i2c_device_id raydium_i2c_id[] = { - { "raydium_i2c" , 0 }, + 
{ "raydium_i2c", 0 }, { "rm32380", 0 }, { /* sentinel */ } }; diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c index 6abae665ca71..e38ba3e4f183 100644 --- a/drivers/input/touchscreen/st1232.c +++ b/drivers/input/touchscreen/st1232.c @@ -92,7 +92,7 @@ static int st1232_ts_wait_ready(struct st1232_ts_data *ts) unsigned int retries; int error; - for (retries = 10; retries; retries--) { + for (retries = 100; retries; retries--) { error = st1232_ts_read_data(ts, REG_STATUS, 1); if (!error) { switch (ts->read_buf[0]) { @@ -389,6 +389,7 @@ static struct i2c_driver st1232_ts_driver = { .driver = { .name = ST1232_TS_NAME, .of_match_table = st1232_ts_dt_ids, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, .pm = &st1232_ts_pm_ops, }, }; diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c index 0272cedcc726..9fdd870c4c0b 100644 --- a/drivers/input/touchscreen/tsc2004.c +++ b/drivers/input/touchscreen/tsc2004.c @@ -45,7 +45,9 @@ static int tsc2004_probe(struct i2c_client *i2c, static int tsc2004_remove(struct i2c_client *i2c) { - return tsc200x_remove(&i2c->dev); + tsc200x_remove(&i2c->dev); + + return 0; } static const struct i2c_device_id tsc2004_idtable[] = { diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c index 923496bbb368..a2f55920b9b2 100644 --- a/drivers/input/touchscreen/tsc2005.c +++ b/drivers/input/touchscreen/tsc2005.c @@ -66,7 +66,9 @@ static int tsc2005_probe(struct spi_device *spi) static int tsc2005_remove(struct spi_device *spi) { - return tsc200x_remove(&spi->dev); + tsc200x_remove(&spi->dev); + + return 0; } #ifdef CONFIG_OF diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c index b8d720d52013..27810f6c69f6 100644 --- a/drivers/input/touchscreen/tsc200x-core.c +++ b/drivers/input/touchscreen/tsc200x-core.c @@ -577,15 +577,13 @@ disable_regulator: } EXPORT_SYMBOL_GPL(tsc200x_probe); -int tsc200x_remove(struct device *dev) +void tsc200x_remove(struct device *dev) { struct tsc200x *ts = dev_get_drvdata(dev); sysfs_remove_group(&dev->kobj, &tsc200x_attr_group); regulator_disable(ts->vio); - - return 0; } EXPORT_SYMBOL_GPL(tsc200x_remove); diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h index a43c08ccfd3d..4ded34425b21 100644 --- a/drivers/input/touchscreen/tsc200x-core.h +++ b/drivers/input/touchscreen/tsc200x-core.h @@ -74,6 +74,6 @@ extern const struct dev_pm_ops tsc200x_pm_ops; int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id, struct regmap *regmap, int (*tsc200x_cmd)(struct device *dev, u8 cmd)); -int tsc200x_remove(struct device *dev); +void tsc200x_remove(struct device *dev); #endif diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c index 22826c387da5..fe4ea6204a4e 100644 --- a/drivers/input/touchscreen/wacom_i2c.c +++ b/drivers/input/touchscreen/wacom_i2c.c @@ -6,6 +6,7 @@ * <tobita.tatsunosuke@wacom.co.jp> */ +#include <linux/bits.h> #include <linux/module.h> #include <linux/input.h> #include <linux/i2c.h> @@ -14,6 +15,15 @@ #include <linux/interrupt.h> #include <asm/unaligned.h> +/* Bitmasks (for data[3]) */ +#define WACOM_TIP_SWITCH BIT(0) +#define WACOM_BARREL_SWITCH BIT(1) +#define WACOM_ERASER BIT(2) +#define WACOM_INVERT BIT(3) +#define WACOM_BARREL_SWITCH_2 BIT(4) +#define WACOM_IN_PROXIMITY BIT(5) + +/* Registers */ #define WACOM_CMD_QUERY0 0x04 #define WACOM_CMD_QUERY1 0x00 #define WACOM_CMD_QUERY2 0x33 @@ 
-99,19 +109,19 @@ static irqreturn_t wacom_i2c_irq(int irq, void *dev_id) if (error < 0) goto out; - tsw = data[3] & 0x01; - ers = data[3] & 0x04; - f1 = data[3] & 0x02; - f2 = data[3] & 0x10; + tsw = data[3] & WACOM_TIP_SWITCH; + ers = data[3] & WACOM_ERASER; + f1 = data[3] & WACOM_BARREL_SWITCH; + f2 = data[3] & WACOM_BARREL_SWITCH_2; x = le16_to_cpup((__le16 *)&data[4]); y = le16_to_cpup((__le16 *)&data[6]); pressure = le16_to_cpup((__le16 *)&data[8]); if (!wac_i2c->prox) - wac_i2c->tool = (data[3] & 0x0c) ? + wac_i2c->tool = (data[3] & (WACOM_ERASER | WACOM_INVERT)) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; - wac_i2c->prox = data[3] & 0x20; + wac_i2c->prox = data[3] & WACOM_IN_PROXIMITY; input_report_key(input, BTN_TOUCH, tsw || ers); input_report_key(input, wac_i2c->tool, wac_i2c->prox); diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index 13cbeb997cc1..58da08cc3d01 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -929,10 +929,8 @@ static int __init amd_iommu_v2_init(void) { int ret; - pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n"); - if (!amd_iommu_v2_supported()) { - pr_info("AMD IOMMUv2 functionality not available on this system\n"); + pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n"); /* * Load anyway to provide the symbols to other modules * which may use AMD IOMMUv2 optionally. @@ -947,6 +945,8 @@ static int __init amd_iommu_v2_init(void) amd_iommu_register_ppr_notifier(&ppr_nb); + pr_info("AMD IOMMUv2 loaded and initialized\n"); + return 0; out: diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c index b39d223926a4..71596fc62822 100644 --- a/drivers/iommu/intel/cap_audit.c +++ b/drivers/iommu/intel/cap_audit.c @@ -144,6 +144,7 @@ static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type) { struct dmar_drhd_unit *d; struct intel_iommu *i; + int rc = 0; rcu_read_lock(); if (list_empty(&dmar_drhd_units)) @@ -169,11 +170,11 @@ static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type) */ if (intel_cap_smts_sanity() && !intel_cap_flts_sanity() && !intel_cap_slts_sanity()) - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; out: rcu_read_unlock(); - return 0; + return rc; } int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 0bde0c8b4126..b6a8f3282411 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1339,13 +1339,11 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, pte = &pte[pfn_level_offset(pfn, level)]; do { - unsigned long level_pfn; + unsigned long level_pfn = pfn & level_mask(level); if (!dma_pte_present(pte)) goto next; - level_pfn = pfn & level_mask(level); - /* If range covers entire pagetable, free it */ if (start_pfn <= level_pfn && last_pfn >= level_pfn + level_size(level) - 1) { @@ -1366,7 +1364,7 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, freelist); } next: - pfn += level_size(level); + pfn = level_pfn + level_size(level); } while (!first_pte_in_page(++pte) && pfn <= last_pfn); if (first_pte) diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 5cb260820eda..7f23ad61c094 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -200,8 +200,8 @@ static inline phys_addr_t rk_dte_pt_address(u32 dte) #define DTE_HI_MASK2 GENMASK(7, 4) #define DTE_HI_SHIFT1 24 
/* shift bit 8 to bit 32 */ #define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */ -#define PAGE_DESC_HI_MASK1 GENMASK_ULL(39, 36) -#define PAGE_DESC_HI_MASK2 GENMASK_ULL(35, 32) +#define PAGE_DESC_HI_MASK1 GENMASK_ULL(35, 32) +#define PAGE_DESC_HI_MASK2 GENMASK_ULL(39, 36) static inline phys_addr_t rk_dte_pt_address_v2(u32 dte) { diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c index cb403c960ac0..4aebd67d4f8f 100644 --- a/drivers/irqchip/irq-csky-mpintc.c +++ b/drivers/irqchip/irq-csky-mpintc.c @@ -78,7 +78,7 @@ static void csky_mpintc_handler(struct pt_regs *regs) readl_relaxed(reg_base + INTCL_RDYIR)); } -static void csky_mpintc_enable(struct irq_data *d) +static void csky_mpintc_unmask(struct irq_data *d) { void __iomem *reg_base = this_cpu_read(intcl_reg); @@ -87,7 +87,7 @@ static void csky_mpintc_enable(struct irq_data *d) writel_relaxed(d->hwirq, reg_base + INTCL_SENR); } -static void csky_mpintc_disable(struct irq_data *d) +static void csky_mpintc_mask(struct irq_data *d) { void __iomem *reg_base = this_cpu_read(intcl_reg); @@ -164,8 +164,8 @@ static int csky_irq_set_affinity(struct irq_data *d, static struct irq_chip csky_irq_chip = { .name = "C-SKY SMP Intc", .irq_eoi = csky_mpintc_eoi, - .irq_enable = csky_mpintc_enable, - .irq_disable = csky_mpintc_disable, + .irq_unmask = csky_mpintc_unmask, + .irq_mask = csky_mpintc_mask, .irq_set_type = csky_mpintc_set_type, #ifdef CONFIG_SMP .irq_set_affinity = csky_irq_set_affinity, diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index cf74cfa82045..259065d271ef 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -163,7 +163,13 @@ static void plic_irq_eoi(struct irq_data *d) { struct plic_handler *handler = this_cpu_ptr(&plic_handlers); - writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); + if (irqd_irq_masked(d)) { + plic_irq_unmask(d); + writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); + plic_irq_mask(d); + } else { + writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); + } } static struct irq_chip plic_chip = { diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c index 79fa36de8a04..cd9cb354dc2c 100644 --- a/drivers/media/cec/core/cec-adap.c +++ b/drivers/media/cec/core/cec-adap.c @@ -1199,6 +1199,7 @@ void cec_received_msg_ts(struct cec_adapter *adap, if (abort) dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT; msg->flags = dst->flags; + msg->sequence = dst->sequence; /* Remove it from the wait_queue */ list_del_init(&data->list); diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c index 1094575abf95..90acafd9a290 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c @@ -241,6 +241,7 @@ static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev, buf->offset = vaddr & ~PAGE_MASK; buf->size = size; buf->dma_sgt = &buf->sg_table; + buf->vb = vb; vec = vb2_create_framevec(vaddr, size); if (IS_ERR(vec)) goto userptr_fail_pfnvec; @@ -642,6 +643,7 @@ static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev, buf->dma_dir = vb->vb2_queue->dma_dir; buf->size = size; buf->db_attach = dba; + buf->vb = vb; return buf; } diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c index 822ce3021fde..48909faeced4 100644 --- a/drivers/media/i2c/hi846.c +++ b/drivers/media/i2c/hi846.c @@ -7,9 +7,9 @@ #include <linux/gpio/consumer.h> 
#include <linux/i2c.h> #include <linux/module.h> -#include <linux/of_graph.h> #include <linux/pm_runtime.h> #include <linux/pm.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> @@ -2176,7 +2176,7 @@ static struct i2c_driver hi846_i2c_driver = { .driver = { .name = "hi846", .pm = &hi846_pm_ops, - .of_match_table = of_match_ptr(hi846_of_match), + .of_match_table = hi846_of_match, }, .probe_new = hi846_probe, .remove = hi846_remove, diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 8176769a89fa..0f3d6b5667b0 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -751,10 +751,6 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *p64, /* * x86 is the only compat architecture with different struct alignment * between 32-bit and 64-bit tasks. - * - * On all other architectures, v4l2_event32 and v4l2_event32_time32 are - * the same as v4l2_event and v4l2_event_time32, so we can use the native - * handlers, converting v4l2_event to v4l2_event_time32 if necessary. */ struct v4l2_event32 { __u32 type; @@ -772,21 +768,6 @@ struct v4l2_event32 { __u32 reserved[8]; }; -#ifdef CONFIG_COMPAT_32BIT_TIME -struct v4l2_event32_time32 { - __u32 type; - union { - compat_s64 value64; - __u8 data[64]; - } u; - __u32 pending; - __u32 sequence; - struct old_timespec32 timestamp; - __u32 id; - __u32 reserved[8]; -}; -#endif - static int put_v4l2_event32(struct v4l2_event *p64, struct v4l2_event32 __user *p32) { @@ -802,7 +783,22 @@ static int put_v4l2_event32(struct v4l2_event *p64, return 0; } +#endif + #ifdef CONFIG_COMPAT_32BIT_TIME +struct v4l2_event32_time32 { + __u32 type; + union { + compat_s64 value64; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct old_timespec32 timestamp; + __u32 id; + __u32 reserved[8]; +}; + static int put_v4l2_event32_time32(struct v4l2_event *p64, struct v4l2_event32_time32 __user *p32) { @@ -818,7 +814,6 @@ static int put_v4l2_event32_time32(struct v4l2_event *p64, return 0; } #endif -#endif struct v4l2_edid32 { __u32 pad; @@ -880,9 +875,7 @@ static int put_v4l2_edid32(struct v4l2_edid *p64, #define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32) #define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32) #define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32) -#ifdef CONFIG_X86_64 #define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32) -#endif #define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32) #endif @@ -936,11 +929,11 @@ unsigned int v4l2_compat_translate_cmd(unsigned int cmd) #ifdef CONFIG_X86_64 case VIDIOC_DQEVENT32: return VIDIOC_DQEVENT; +#endif #ifdef CONFIG_COMPAT_32BIT_TIME case VIDIOC_DQEVENT32_TIME32: return VIDIOC_DQEVENT; #endif -#endif } return cmd; } @@ -1032,11 +1025,11 @@ int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd) #ifdef CONFIG_X86_64 case VIDIOC_DQEVENT32: return put_v4l2_event32(parg, arg); +#endif #ifdef CONFIG_COMPAT_32BIT_TIME case VIDIOC_DQEVENT32_TIME32: return put_v4l2_event32_time32(parg, arg); #endif -#endif } return 0; } diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index b883dcc0bbfa..e201e5976f34 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -241,7 +241,7 @@ static void mtk_smi_larb_config_port_gen2_general(struct device *dev) { struct mtk_smi_larb 
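
As a side note on the v4l2_event32_time32 layout moved above, a userspace sketch of why a separate 32-bit-time event layout exists at all: a 32-bit timespec is half the size of a 64-bit one, so the structures differ. The timespec definitions here are simplified stand-ins, not the kernel's own types.

#include <stdint.h>
#include <stdio.h>

struct old_timespec32 { int32_t tv_sec; int32_t tv_nsec; };
struct timespec64     { int64_t tv_sec; int64_t tv_nsec; };

struct v4l2_event32_time32 {	/* mirrors the struct in the hunk above */
	uint32_t type;
	union { int64_t value64; uint8_t data[64]; } u;
	uint32_t pending;
	uint32_t sequence;
	struct old_timespec32 timestamp;
	uint32_t id;
	uint32_t reserved[8];
};

int main(void)
{
	printf("old_timespec32: %zu bytes, timespec64: %zu bytes\n",
	       sizeof(struct old_timespec32), sizeof(struct timespec64));
	printf("v4l2_event32_time32: %zu bytes\n",
	       sizeof(struct v4l2_event32_time32));
	return 0;
}
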
*larb = dev_get_drvdata(dev); u32 reg, flags_general = larb->larb_gen->flags_general; - const u8 *larbostd = larb->larb_gen->ostd[larb->larbid]; + const u8 *larbostd = larb->larb_gen->ostd ? larb->larb_gen->ostd[larb->larbid] : NULL; int i; if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask) diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index f4c8e1a61f53..b431cdd27353 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1514,6 +1514,12 @@ static int mmc_spi_remove(struct spi_device *spi) return 0; } +static const struct spi_device_id mmc_spi_dev_ids[] = { + { "mmc-spi-slot"}, + { }, +}; +MODULE_DEVICE_TABLE(spi, mmc_spi_dev_ids); + static const struct of_device_id mmc_spi_of_match_table[] = { { .compatible = "mmc-spi-slot", }, {}, @@ -1525,6 +1531,7 @@ static struct spi_driver mmc_spi_driver = { .name = "mmc_spi", .of_match_table = mmc_spi_of_match_table, }, + .id_table = mmc_spi_dev_ids, .probe = mmc_spi_probe, .remove = mmc_spi_remove, }; diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index afaf33707d46..764ee1b761d9 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -310,7 +310,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES - | ESDHC_FLAG_CQHCI | ESDHC_FLAG_STATE_LOST_IN_LPMODE | ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME, }; @@ -319,7 +318,6 @@ static struct esdhc_soc_data usdhc_imx8mm_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES - | ESDHC_FLAG_CQHCI | ESDHC_FLAG_STATE_LOST_IN_LPMODE, }; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 269c86569402..07c6da1f2f0f 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -771,7 +771,19 @@ static void sdhci_adma_table_pre(struct sdhci_host *host, len -= offset; } - BUG_ON(len > 65536); + /* + * The block layer forces a minimum segment size of PAGE_SIZE, + * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write + * multiple descriptors, noting that the ADMA table is sized + * for 4KiB chunks anyway, so it will be big enough. + */ + while (len > host->max_adma) { + int n = 32 * 1024; /* 32KiB*/ + + __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID); + addr += n; + len -= n; + } /* tran, valid */ if (len) @@ -3968,6 +3980,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, * descriptor for each segment, plus 1 for a nop end descriptor. */ host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; + host->max_adma = 65536; host->max_timeout_count = 0xE; @@ -4633,10 +4646,12 @@ int sdhci_setup_host(struct sdhci_host *host) * be larger than 64 KiB though. */ if (host->flags & SDHCI_USE_ADMA) { - if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) + if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) { + host->max_adma = 65532; /* 32-bit alignment */ mmc->max_seg_size = 65535; - else + } else { mmc->max_seg_size = 65536; + } } else { mmc->max_seg_size = mmc->max_req_size; } diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index bb883553d3b4..d7929d725730 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -340,7 +340,8 @@ struct sdhci_adma2_64_desc { /* * Maximum segments assuming a 512KiB maximum requisition size and a minimum - * 4KiB page size. + * 4KiB page size. 
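
For illustration, a userspace sketch of the sdhci descriptor-splitting loop added above: a segment longer than the per-descriptor limit is emitted as several 32 KiB chunks plus a remainder. write_desc() is a stand-in for __sdhci_adma_write_desc(), and the segment length is made up.

#include <stdint.h>
#include <stdio.h>

static void write_desc(uint64_t addr, unsigned int len)
{
	printf("desc: addr=%#llx len=%u\n", (unsigned long long)addr, len);
}

int main(void)
{
	uint64_t addr = 0x100000;
	unsigned int len = 150 * 1024;		/* one oversized segment */
	const unsigned int max_adma = 65536;	/* per-descriptor limit */

	while (len > max_adma) {
		unsigned int n = 32 * 1024;	/* 32 KiB chunks */

		write_desc(addr, n);
		addr += n;
		len -= n;
	}
	if (len)
		write_desc(addr, len);		/* final (or only) descriptor */
	return 0;
}
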
Note this also allows enough for multiple descriptors in + * case of PAGE_SIZE >= 64KiB. */ #define SDHCI_MAX_SEGS 128 @@ -543,6 +544,7 @@ struct sdhci_host { unsigned int blocks; /* remaining PIO blocks */ int sg_count; /* Mapped sg entries */ + int max_adma; /* Max. length in ADMA descriptor */ void *adma_table; /* ADMA descriptor table */ void *align_buffer; /* Bounce buffer */ diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 10506a4b66ef..6cccc3dc00bc 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -567,9 +567,7 @@ config XEN_NETDEV_BACKEND config VMXNET3 tristate "VMware VMXNET3 ethernet driver" depends on PCI && INET - depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \ - IA64_PAGE_SIZE_64KB || PARISC_PAGE_SIZE_64KB || \ - PPC_64K_PAGES) + depends on PAGE_SIZE_LESS_THAN_64KB help This driver supports VMware's vmxnet3 virtual ethernet NIC. To compile this driver as a module, choose M here: the diff --git a/drivers/net/amt.c b/drivers/net/amt.c index 47a04c330885..b732ee9a50ef 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -3286,7 +3286,7 @@ static void __exit amt_fini(void) { rtnl_link_unregister(&amt_link_ops); unregister_netdevice_notifier(&amt_notifier_block); - cancel_delayed_work(&source_gc_wq); + cancel_delayed_work_sync(&source_gc_wq); __amt_source_gc_work(); destroy_workqueue(amt_wq); } diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 43fc3087aeb3..013e9c02be71 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1002,57 +1002,32 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member) data &= ~PORT_VLAN_MEMBERSHIP; data |= (member & dev->port_mask); ksz_pwrite8(dev, port, P_MIRROR_CTRL, data); - dev->ports[port].member = member; } static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { struct ksz_device *dev = ds->priv; - int forward = dev->member; struct ksz_port *p; - int member = -1; u8 data; - p = &dev->ports[port]; - ksz_pread8(dev, port, P_STP_CTRL, &data); data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); switch (state) { case BR_STATE_DISABLED: data |= PORT_LEARN_DISABLE; - if (port < dev->phy_port_cnt) - member = 0; break; case BR_STATE_LISTENING: data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); - if (port < dev->phy_port_cnt && - p->stp_state == BR_STATE_DISABLED) - member = dev->host_mask | p->vid_member; break; case BR_STATE_LEARNING: data |= PORT_RX_ENABLE; break; case BR_STATE_FORWARDING: data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); - - /* This function is also used internally. */ - if (port == dev->cpu_port) - break; - - /* Port is a member of a bridge. */ - if (dev->br_member & BIT(port)) { - dev->member |= BIT(port); - member = dev->member; - } else { - member = dev->host_mask | p->vid_member; - } break; case BR_STATE_BLOCKING: data |= PORT_LEARN_DISABLE; - if (port < dev->phy_port_cnt && - p->stp_state == BR_STATE_DISABLED) - member = dev->host_mask | p->vid_member; break; default: dev_err(ds->dev, "invalid STP state: %d\n", state); @@ -1060,22 +1035,11 @@ static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) } ksz_pwrite8(dev, port, P_STP_CTRL, data); + + p = &dev->ports[port]; p->stp_state = state; - /* Port membership may share register with STP state. */ - if (member >= 0 && member != p->member) - ksz8_cfg_port_member(dev, port, (u8)member); - - /* Check if forwarding needs to be updated. 
*/ - if (state != BR_STATE_FORWARDING) { - if (dev->br_member & BIT(port)) - dev->member &= ~BIT(port); - } - /* When topology has changed the function ksz_update_port_member - * should be called to modify port forwarding behavior. - */ - if (forward != dev->member) - ksz_update_port_member(dev, port); + ksz_update_port_member(dev, port); } static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) @@ -1341,7 +1305,7 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port) static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) { - struct ksz_port *p = &dev->ports[port]; + struct dsa_switch *ds = dev->ds; struct ksz8 *ksz8 = dev->priv; const u32 *masks; u8 member; @@ -1368,10 +1332,11 @@ static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) if (!ksz_is_ksz88x3(dev)) ksz8795_cpu_interface_select(dev, port); - member = dev->port_mask; + member = dsa_user_ports(ds); } else { - member = dev->host_mask | p->vid_member; + member = BIT(dsa_upstream_port(ds, port)); } + ksz8_cfg_port_member(dev, port, member); } @@ -1392,20 +1357,13 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds) ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true); p = &dev->ports[dev->cpu_port]; - p->vid_member = dev->port_mask; p->on = 1; ksz8_port_setup(dev, dev->cpu_port, true); - dev->member = dev->host_mask; for (i = 0; i < dev->phy_port_cnt; i++) { p = &dev->ports[i]; - /* Initialize to non-zero so that ksz_cfg_port_member() will - * be called. - */ - p->vid_member = BIT(i); - p->member = dev->port_mask; ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED); /* Last port may be disabled. */ diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 854e25f43fa7..353b5f981740 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -391,7 +391,6 @@ static void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member) { ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member); - dev->ports[port].member = member; } static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, @@ -400,8 +399,6 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, struct ksz_device *dev = ds->priv; struct ksz_port *p = &dev->ports[port]; u8 data; - int member = -1; - int forward = dev->member; ksz_pread8(dev, port, P_STP_CTRL, &data); data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); @@ -409,40 +406,18 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, switch (state) { case BR_STATE_DISABLED: data |= PORT_LEARN_DISABLE; - if (port != dev->cpu_port) - member = 0; break; case BR_STATE_LISTENING: data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); - if (port != dev->cpu_port && - p->stp_state == BR_STATE_DISABLED) - member = dev->host_mask | p->vid_member; break; case BR_STATE_LEARNING: data |= PORT_RX_ENABLE; break; case BR_STATE_FORWARDING: data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); - - /* This function is also used internally. */ - if (port == dev->cpu_port) - break; - - member = dev->host_mask | p->vid_member; - mutex_lock(&dev->dev_mutex); - - /* Port is a member of a bridge. 
*/ - if (dev->br_member & (1 << port)) { - dev->member |= (1 << port); - member = dev->member; - } - mutex_unlock(&dev->dev_mutex); break; case BR_STATE_BLOCKING: data |= PORT_LEARN_DISABLE; - if (port != dev->cpu_port && - p->stp_state == BR_STATE_DISABLED) - member = dev->host_mask | p->vid_member; break; default: dev_err(ds->dev, "invalid STP state: %d\n", state); @@ -451,23 +426,8 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, ksz_pwrite8(dev, port, P_STP_CTRL, data); p->stp_state = state; - mutex_lock(&dev->dev_mutex); - /* Port membership may share register with STP state. */ - if (member >= 0 && member != p->member) - ksz9477_cfg_port_member(dev, port, (u8)member); - - /* Check if forwarding needs to be updated. */ - if (state != BR_STATE_FORWARDING) { - if (dev->br_member & (1 << port)) - dev->member &= ~(1 << port); - } - /* When topology has changed the function ksz_update_port_member - * should be called to modify port forwarding behavior. - */ - if (forward != dev->member) - ksz_update_port_member(dev, port); - mutex_unlock(&dev->dev_mutex); + ksz_update_port_member(dev, port); } static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port) @@ -1168,10 +1128,10 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port) static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) { - u8 data8; - u8 member; - u16 data16; struct ksz_port *p = &dev->ports[port]; + struct dsa_switch *ds = dev->ds; + u8 data8, member; + u16 data16; /* enable tag tail for host port */ if (cpu_port) @@ -1250,12 +1210,12 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8); p->phydev.duplex = 1; } - mutex_lock(&dev->dev_mutex); + if (cpu_port) - member = dev->port_mask; + member = dsa_user_ports(ds); else - member = dev->host_mask | p->vid_member; - mutex_unlock(&dev->dev_mutex); + member = BIT(dsa_upstream_port(ds, port)); + ksz9477_cfg_port_member(dev, port, member); /* clear pending interrupts */ @@ -1276,8 +1236,6 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds) const char *prev_mode; dev->cpu_port = i; - dev->host_mask = (1 << dev->cpu_port); - dev->port_mask |= dev->host_mask; p = &dev->ports[i]; /* Read from XMII register to determine host port @@ -1312,23 +1270,15 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds) /* enable cpu port */ ksz9477_port_setup(dev, i, true); - p->vid_member = dev->port_mask; p->on = 1; } } - dev->member = dev->host_mask; - for (i = 0; i < dev->port_cnt; i++) { if (i == dev->cpu_port) continue; p = &dev->ports[i]; - /* Initialize to non-zero so that ksz_cfg_port_member() will - * be called. 
- */ - p->vid_member = (1 << i); - p->member = dev->port_mask; ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED); p->on = 1; if (i < dev->phy_port_cnt) diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 7c2968a639eb..8a04302018dc 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -22,21 +22,40 @@ void ksz_update_port_member(struct ksz_device *dev, int port) { - struct ksz_port *p; + struct ksz_port *p = &dev->ports[port]; + struct dsa_switch *ds = dev->ds; + u8 port_member = 0, cpu_port; + const struct dsa_port *dp; int i; - for (i = 0; i < dev->port_cnt; i++) { - if (i == port || i == dev->cpu_port) + if (!dsa_is_user_port(ds, port)) + return; + + dp = dsa_to_port(ds, port); + cpu_port = BIT(dsa_upstream_port(ds, port)); + + for (i = 0; i < ds->num_ports; i++) { + const struct dsa_port *other_dp = dsa_to_port(ds, i); + struct ksz_port *other_p = &dev->ports[i]; + u8 val = 0; + + if (!dsa_is_user_port(ds, i)) continue; - p = &dev->ports[i]; - if (!(dev->member & (1 << i))) + if (port == i) + continue; + if (!dp->bridge_dev || dp->bridge_dev != other_dp->bridge_dev) continue; - /* Port is a member of the bridge and is forwarding. */ - if (p->stp_state == BR_STATE_FORWARDING && - p->member != dev->member) - dev->dev_ops->cfg_port_member(dev, i, dev->member); + if (other_p->stp_state == BR_STATE_FORWARDING && + p->stp_state == BR_STATE_FORWARDING) { + val |= BIT(port); + port_member |= BIT(i); + } + + dev->dev_ops->cfg_port_member(dev, i, val | cpu_port); } + + dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port); } EXPORT_SYMBOL_GPL(ksz_update_port_member); @@ -175,12 +194,6 @@ EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats); int ksz_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) { - struct ksz_device *dev = ds->priv; - - mutex_lock(&dev->dev_mutex); - dev->br_member |= (1 << port); - mutex_unlock(&dev->dev_mutex); - /* port_stp_state_set() will be called after to put the port in * appropriate state so there is no need to do anything. */ @@ -192,13 +205,6 @@ EXPORT_SYMBOL_GPL(ksz_port_bridge_join); void ksz_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) { - struct ksz_device *dev = ds->priv; - - mutex_lock(&dev->dev_mutex); - dev->br_member &= ~(1 << port); - dev->member &= ~(1 << port); - mutex_unlock(&dev->dev_mutex); - /* port_stp_state_set() will be called after to put the port in * forwarding state so there is no need to do anything. */ diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 1597c63988b4..54b456bc8972 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -25,8 +25,6 @@ struct ksz_port_mib { }; struct ksz_port { - u16 member; - u16 vid_member; bool remove_tag; /* Remove Tag flag set, for ksz8795 only */ int stp_state; struct phy_device phydev; @@ -83,8 +81,6 @@ struct ksz_device { struct ksz_port *ports; struct delayed_work mib_read; unsigned long mib_read_interval; - u16 br_member; - u16 member; u16 mirror_rx; u16 mirror_tx; u32 features; /* chip specific features */ diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index a429c9750add..147ca39531a3 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -1256,8 +1256,12 @@ qca8k_setup(struct dsa_switch *ds) /* Set initial MTU for every port. * We have only have a general MTU setting. So track * every port and set the max across all port. 
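
For illustration, a userspace sketch of the bitmap logic in the new ksz_update_port_member() above: two user ports forward to each other only when they share a bridge and both are in the forwarding STP state, and every user port always reaches the upstream (CPU) port. Port count, bridge assignment and states below are made up.

#include <stdio.h>

#define BIT(n)		(1U << (n))
#define NUM_PORTS	5
#define CPU_PORT	4

enum { DISABLED, FORWARDING };

static const int is_user[NUM_PORTS] = { 1, 1, 1, 1, 0 };
static const int bridge[NUM_PORTS]  = { 1, 1, 2, 0, 0 };	/* 0 = none */
static const int stp[NUM_PORTS]     = { FORWARDING, FORWARDING,
					FORWARDING, DISABLED, FORWARDING };

int main(void)
{
	unsigned int cpu_mask = BIT(CPU_PORT);
	int port, i;

	for (port = 0; port < NUM_PORTS; port++) {
		unsigned int member = 0;

		if (!is_user[port])
			continue;
		for (i = 0; i < NUM_PORTS; i++) {
			if (!is_user[i] || i == port)
				continue;
			if (!bridge[port] || bridge[port] != bridge[i])
				continue;
			if (stp[port] == FORWARDING && stp[i] == FORWARDING)
				member |= BIT(i);
		}
		printf("port %d membership: %#x\n", port, member | cpu_mask);
	}
	return 0;
}
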
+ * Set per port MTU to 1500 as the MTU change function + * will add the overhead and if its set to 1518 then it + * will apply the overhead again and we will end up with + * MTU of 1536 instead of 1518 */ - priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; + priv->port_mtu[i] = ETH_DATA_LEN; } /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ @@ -1433,6 +1437,12 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); + /* From original code is reported port instability as SGMII also + * require delay set. Apply advised values here or take them from DT. + */ + if (state->interface == PHY_INTERFACE_MODE_SGMII) + qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); + /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and * falling edge is set writing in the PORT0 PAD reg */ @@ -1455,12 +1465,6 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, val); - /* From original code is reported port instability as SGMII also - * require delay set. Apply advised values here or take them from DT. - */ - if (state->interface == PHY_INTERFACE_MODE_SGMII) - qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); - break; default: dev_err(ds->dev, "xMII mode %s not supported for port %d\n", diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 24122ccda614..81b3756417ec 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -298,13 +298,14 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) } } - if (unlikely(buff->is_eop)) { + if (unlikely(buff->is_eop && buff->skb)) { u64_stats_update_begin(&self->stats.tx.syncp); ++self->stats.tx.packets; self->stats.tx.bytes += buff->skb->len; u64_stats_update_end(&self->stats.tx.syncp); dev_kfree_skb_any(buff->skb); + buff->skb = NULL; } buff->pa = 0U; buff->eop_index = 0xffffU; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index fc0e66006644..3f1704cbe1cb 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -559,6 +559,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, goto err_exit; if (fw.len == 0xFFFFU) { + if (sw.len > sizeof(self->rpc)) { + printk(KERN_INFO "Invalid sw len: %x\n", sw.len); + err = -EINVAL; + goto err_exit; + } err = hw_atl_utils_fw_rpc_call(self, sw.len); if (err < 0) goto err_exit; @@ -567,6 +572,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, if (rpc) { if (fw.len) { + if (fw.len > sizeof(self->rpc)) { + printk(KERN_INFO "Invalid fw len: %x\n", fw.len); + err = -EINVAL; + goto err_exit; + } err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr, diff --git a/drivers/net/ethernet/asix/ax88796c_main.h b/drivers/net/ethernet/asix/ax88796c_main.h index 80263c3cef75..4a83c991dcbe 100644 --- a/drivers/net/ethernet/asix/ax88796c_main.h +++ b/drivers/net/ethernet/asix/ax88796c_main.h @@ -127,9 +127,9 @@ struct ax88796c_device { #define AX_PRIV_FLAGS_MASK (AX_CAP_COMP) unsigned long flags; - #define EVENT_INTR BIT(0) - #define EVENT_TX BIT(1) - #define EVENT_SET_MULTI BIT(2) + #define EVENT_INTR 0 + #define EVENT_TX 1 + #define EVENT_SET_MULTI 2 }; diff --git a/drivers/net/ethernet/asix/ax88796c_spi.c b/drivers/net/ethernet/asix/ax88796c_spi.c index 
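
For illustration, a userspace sketch of why the EVENT_* definitions above change from BIT(n) masks to plain bit numbers: set_bit()-style helpers take a bit number, so passing BIT(1) == 2 addresses bit 2 instead of bit 1. The helpers below are simplified stand-ins for the kernel's atomic bitops.

#include <stdio.h>

#define BIT(n)	(1UL << (n))

static void set_bit_nr(int nr, unsigned long *word)  { *word |= 1UL << nr; }
static int  test_bit_nr(int nr, const unsigned long *word)
{
	return (*word >> nr) & 1UL;
}

#define EVENT_TX	1		/* bit number, as in the fix */
#define EVENT_TX_MASK	BIT(1)		/* what the old define expanded to */

int main(void)
{
	unsigned long flags = 0;

	set_bit_nr(EVENT_TX, &flags);		/* sets bit 1 */
	printf("correct: flags=%#lx test(EVENT_TX)=%d\n",
	       flags, test_bit_nr(EVENT_TX, &flags));

	flags = 0;
	set_bit_nr(EVENT_TX_MASK, &flags);	/* sets bit 2, the old bug */
	printf("buggy:   flags=%#lx test(EVENT_TX)=%d\n",
	       flags, test_bit_nr(EVENT_TX, &flags));
	return 0;
}
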
94df4f96d2be..0710e716d682 100644 --- a/drivers/net/ethernet/asix/ax88796c_spi.c +++ b/drivers/net/ethernet/asix/ax88796c_spi.c @@ -34,7 +34,7 @@ int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status) /* OP */ ax_spi->cmd_buf[0] = AX_SPICMD_READ_STATUS; - ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)&status, 3); + ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)status, 3); if (ret) dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret); else diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h index 1835d2e451c0..fc7fce642666 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h @@ -635,11 +635,13 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, { int i, rc; struct bnx2x_ilt *ilt = BP_ILT(bp); - struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + struct ilt_client_info *ilt_cli; if (!ilt || !ilt->lines) return -1; + ilt_cli = &ilt->clients[cli_num]; + if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM)) return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index d0d5da9b78f8..4c9507d82fd0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2258,6 +2258,16 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db, } } +/* Must hold rtnl_lock */ +static inline bool bnxt_sriov_cfg(struct bnxt *bp) +{ +#if defined(CONFIG_BNXT_SRIOV) + return BNXT_PF(bp) && (bp->pf.active_vfs || bp->sriov_cfg); +#else + return false; +#endif +} + extern const u16 bnxt_lhint_arr[]; int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 5c464ea73576..951c4c569a9b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -360,7 +360,7 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack, NL_SET_ERR_MSG_MOD(extack, "Live patch already applied"); break; default: - netdev_err(bp->dev, "Unexpected live patch error: %hhd\n", err); + netdev_err(bp->dev, "Unexpected live patch error: %d\n", err); NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch"); break; } @@ -441,12 +441,13 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: { - if (BNXT_PF(bp) && bp->pf.active_vfs) { + rtnl_lock(); + if (bnxt_sriov_cfg(bp)) { NL_SET_ERR_MSG_MOD(extack, - "reload is unsupported when VFs are allocated"); + "reload is unsupported while VFs are allocated or being configured"); + rtnl_unlock(); return -EOPNOTSUPP; } - rtnl_lock(); if (bp->dev->reg_state == NETREG_UNREGISTERED) { rtnl_unlock(); return -ENODEV; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index e6a4a768b10b..1471b6130a2b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1868,7 +1868,7 @@ static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type, struct flow_cls_offload *flower = type_data; struct bnxt *bp = priv->bp; - if (flower->common.chain_index) + if (!tc_cls_can_offload_and_chain0(bp->dev, type_data)) return -EOPNOTSUPP; switch (type) { diff --git 
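
For illustration, a userspace sketch of the &status versus status bug fixed in axspi_read_status() above: when the parameter is already a pointer, taking its address writes over the local pointer variable instead of into the caller's structure. The read_into() helper is a stand-in for the SPI transfer.

#include <stdio.h>
#include <string.h>

struct spi_status { unsigned char b[3]; };

static void read_into(void *dst, size_t len)
{
	memset(dst, 0xAB, len);		/* stand-in for the SPI read */
}

static void read_status_buggy(struct spi_status *status)
{
	read_into((unsigned char *)&status, 3);	/* clobbers the pointer */
}

static void read_status_fixed(struct spi_status *status)
{
	read_into((unsigned char *)status, 3);	/* fills the caller's struct */
}

int main(void)
{
	struct spi_status s = { { 0, 0, 0 } };

	read_status_buggy(&s);
	printf("buggy: %02x %02x %02x\n", s.b[0], s.b[1], s.b[2]);
	read_status_fixed(&s);
	printf("fixed: %02x %02x %02x\n", s.b[0], s.b[1], s.b[2]);
	return 0;
}
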
a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 64479c464b4e..ae9cca768d74 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -3196,6 +3196,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, } if (adapter->registered_device_map == 0) { dev_err(&pdev->dev, "could not register any net devices\n"); + err = -EINVAL; goto err_disable_interrupts; } diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 13121c4dcfe6..71730ef4cd57 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -4709,6 +4709,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p) lp->ibn = 3; lp->active = *p++; if (MOTO_SROM_BUG) lp->active = 0; + /* if (MOTO_SROM_BUG) statement indicates lp->active could + * be 8 (i.e. the size of array lp->phy) */ + if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy))) + return -EINVAL; lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1); lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; @@ -5000,19 +5004,23 @@ mii_get_phy(struct net_device *dev) } if ((j == limit) && (i < DE4X5_MAX_MII)) { for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); - lp->phy[k].addr = i; - lp->phy[k].id = id; - lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ - lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ - lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ - lp->mii_cnt++; - lp->active++; - printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); - j = de4x5_debug; - de4x5_debug |= DEBUG_MII; - de4x5_dbg_mii(dev, k); - de4x5_debug = j; - printk("\n"); + if (k < DE4X5_MAX_PHY) { + lp->phy[k].addr = i; + lp->phy[k].id = id; + lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ + lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ + lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ + lp->mii_cnt++; + lp->active++; + printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); + j = de4x5_debug; + de4x5_debug |= DEBUG_MII; + de4x5_dbg_mii(dev, k); + de4x5_debug = j; + printk("\n"); + } else { + goto purgatory; + } } } purgatory: diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 714e961e7a77..6451c8383639 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4550,10 +4550,10 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) fsl_mc_portal_free(priv->mc_io); - free_netdev(net_dev); - dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); + free_netdev(net_dev); + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 23d9cbf262c3..740850b64aff 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -400,6 +400,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, return; if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { + /* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8. 
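
For illustration, a userspace sketch of the bounds checks added to de4x5 above: validate an index against ARRAY_SIZE() before indexing a fixed-size table. The table size and index value here are illustrative.

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct phy_entry { int addr; int id; };

int main(void)
{
	struct phy_entry phy[8] = { { 0, 0 } };
	unsigned int active = 8;	/* one past the end of the table */

	if (active >= ARRAY_SIZE(phy)) {
		fprintf(stderr, "active=%u out of range (max %zu)\n",
			active, ARRAY_SIZE(phy) - 1);
		return 1;
	}
	phy[active].addr = 1;
	return 0;
}
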
+ We need check to prevent array overflow */ + if (port >= DSAF_MAX_PORT_NUM) + return; reg_val_1 = 0x1 << port; port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off; /* there is difference between V1 and V2 in register.*/ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 67364ab63a1f..081295bff765 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -1081,7 +1081,8 @@ static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring, u32 j = 0; sprintf(result[j++], "%u", index); - sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt); + sprintf(result[j++], "%u", + READ_ONCE(ring->page_pool->pages_state_hold_cnt)); sprintf(result[j++], "%u", atomic_read(&ring->page_pool->pages_state_release_cnt)); sprintf(result[j++], "%u", ring->page_pool->p.pool_size); @@ -1106,6 +1107,11 @@ hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len) return -EFAULT; } + if (!priv->ring[h->kinfo.num_tqps].page_pool) { + dev_err(&h->pdev->dev, "page pool is not initialized\n"); + return -EFAULT; + } + for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++) result[i] = &data_str[i][0]; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index c8442b86df94..c9b4568d7a8d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -987,6 +987,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); const struct hnae3_ae_ops *ops = h->ae_algo->ops; const struct hns3_reset_type_map *rst_type_map; + enum ethtool_reset_flags rst_flags; u32 i, size; if (ops->ae_dev_resetting && ops->ae_dev_resetting(h)) @@ -1006,6 +1007,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags) for (i = 0; i < size; i++) { if (rst_type_map[i].rst_flags == *flags) { rst_type = rst_type_map[i].rst_type; + rst_flags = rst_type_map[i].rst_flags; break; } } @@ -1021,6 +1023,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags) ops->reset_event(h->pdev, h); + *flags &= ~rst_flags; + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 25c419d40066..41afaeea881b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -703,9 +703,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) roundup_size = ilog2(roundup_size); for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { - tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); + tc_valid[i] = 1; tc_size[i] = roundup_size; - tc_offset[i] = rss_size * i; + tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? 
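
For illustration, a userspace sketch of the flag handling added to hns3_set_reset() above: once a requested reset type has been handled, its bit is cleared from the caller's flags so only unhandled requests remain. The flag values are made up.

#include <stdio.h>

#define RESET_TYPE_A	0x1
#define RESET_TYPE_B	0x2

static void handle_reset(unsigned int *flags, unsigned int handled)
{
	/* ... trigger the reset for 'handled' here ... */
	*flags &= ~handled;
}

int main(void)
{
	unsigned int flags = RESET_TYPE_A | RESET_TYPE_B;

	handle_reset(&flags, RESET_TYPE_A);
	printf("remaining flags: %#x\n", flags);	/* prints 0x2 */
	return 0;
}
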
rss_size * i : 0; } hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 5039a2536951..0bf3d47bb90d 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -3003,9 +3003,10 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) struct net_device *netdev = pci_get_drvdata(pdev); struct nic *nic = netdev_priv(netdev); + netif_device_detach(netdev); + if (netif_running(netdev)) e100_down(nic); - netif_device_detach(netdev); if ((nic->flags & wol_magic) | e100_asf(nic)) { /* enable reverse auto-negotiation */ @@ -3022,7 +3023,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) *enable_wake = false; } - pci_clear_master(pdev); + pci_disable_device(pdev); } static int __e100_power_off(struct pci_dev *pdev, bool wake) @@ -3042,8 +3043,6 @@ static int __maybe_unused e100_suspend(struct device *dev_d) __e100_shutdown(to_pci_dev(dev_d), &wake); - device_wakeup_disable(dev_d); - return 0; } @@ -3051,6 +3050,14 @@ static int __maybe_unused e100_resume(struct device *dev_d) { struct net_device *netdev = dev_get_drvdata(dev_d); struct nic *nic = netdev_priv(netdev); + int err; + + err = pci_enable_device(to_pci_dev(dev_d)); + if (err) { + netdev_err(netdev, "Resume cannot enable PCI device, aborting\n"); + return err; + } + pci_set_master(to_pci_dev(dev_d)); /* disable reverse auto-negotiation */ if (nic->phy == phy_82552_v) { @@ -3062,10 +3069,11 @@ static int __maybe_unused e100_resume(struct device *dev_d) smartspeed & ~(E100_82552_REV_ANEG)); } - netif_device_attach(netdev); if (netif_running(netdev)) e100_up(nic); + netif_device_attach(netdev); + return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 3d528fba754b..4d939af0a626 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -161,6 +161,7 @@ enum i40e_vsi_state_t { __I40E_VSI_OVERFLOW_PROMISC, __I40E_VSI_REINIT_REQUESTED, __I40E_VSI_DOWN_REQUESTED, + __I40E_VSI_RELEASING, /* This must be last as it determines the size of the BITMAP */ __I40E_VSI_STATE_SIZE__, }; @@ -1247,6 +1248,7 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf); void i40e_ptp_init(struct i40e_pf *pf); void i40e_ptp_stop(struct i40e_pf *pf); int i40e_ptp_alloc_pins(struct i40e_pf *pf); +int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset); int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf); i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ba862131b9bd..e118cf9265c7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1790,6 +1790,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, bool is_add) { struct i40e_pf *pf = vsi->back; + u16 num_tc_qps = 0; u16 sections = 0; u8 netdev_tc = 0; u16 numtc = 1; @@ -1797,13 +1798,33 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, u8 offset; u16 qmap; int i; - u16 num_tc_qps = 0; sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; offset = 0; + /* zero out queue mapping, it will get updated on the end of the function */ + memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping)); + + if (vsi->type == I40E_VSI_MAIN) { + /* This code helps add more queue to the VSI if we have 
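
For illustration, a userspace sketch of the hclgevf_set_rss_tc_mode() change above: every TC slot is marked valid with the rounded-up size (encoded as log2 of the queue count), and only TCs present in the hardware TC map get a non-zero offset. The rss_size and TC map are made up.

#include <stdio.h>

#define BIT(n)		(1U << (n))
#define MAX_TC_NUM	8

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static unsigned int ilog2(unsigned int n)
{
	unsigned int l = 0;

	while (n > 1) {
		n >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	unsigned int rss_size = 6, hw_tc_map = BIT(0) | BIT(3);
	unsigned int roundup_size = ilog2(roundup_pow_of_two(rss_size));
	unsigned int i;

	for (i = 0; i < MAX_TC_NUM; i++) {
		unsigned int tc_valid = 1;
		unsigned int tc_size = roundup_size;	/* log2 of queue count */
		unsigned int tc_offset = (hw_tc_map & BIT(i)) ? rss_size * i : 0;

		printf("tc%u: valid=%u size=%u offset=%u\n",
		       i, tc_valid, tc_size, tc_offset);
	}
	return 0;
}
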
+ * more cores than RSS can support, the higher cores will + * be served by ATR or other filters. Furthermore, the + * non-zero req_queue_pairs says that user requested a new + * queue count via ethtool's set_channels, so use this + * value for queues distribution across traffic classes + */ + if (vsi->req_queue_pairs > 0) + vsi->num_queue_pairs = vsi->req_queue_pairs; + else if (pf->flags & I40E_FLAG_MSIX_ENABLED) + vsi->num_queue_pairs = pf->num_lan_msix; + } /* Number of queues per enabled TC */ - num_tc_qps = vsi->alloc_queue_pairs; + if (vsi->type == I40E_VSI_MAIN || + (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0)) + num_tc_qps = vsi->num_queue_pairs; + else + num_tc_qps = vsi->alloc_queue_pairs; + if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -1881,15 +1902,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, } ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); } - - /* Set actual Tx/Rx queue pairs */ - vsi->num_queue_pairs = offset; - if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { - if (vsi->req_queue_pairs > 0) - vsi->num_queue_pairs = vsi->req_queue_pairs; - else if (pf->flags & I40E_FLAG_MSIX_ENABLED) - vsi->num_queue_pairs = pf->num_lan_msix; - } + /* Do not change previously set num_queue_pairs for PFs and VFs*/ + if ((vsi->type == I40E_VSI_MAIN && numtc != 1) || + (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) || + (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV)) + vsi->num_queue_pairs = offset; /* Scheduler section valid can only be set for ADD VSI */ if (is_add) { @@ -2623,7 +2640,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && - (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { + (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) && + !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) { int ret = i40e_sync_vsi_filters(pf->vsi[v]); if (ret) { @@ -5427,6 +5445,58 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, } /** + * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI + * @vsi: the VSI being reconfigured + * @vsi_offset: offset from main VF VSI + */ +int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset) +{ + struct i40e_vsi_context ctxt = {}; + struct i40e_pf *pf; + struct i40e_hw *hw; + int ret; + + if (!vsi) + return I40E_ERR_PARAM; + pf = vsi->back; + hw = &pf->hw; + + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + ctxt.flags = I40E_AQ_VSI_TYPE_VF; + ctxt.info = vsi->info; + + i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc, + false); + if (vsi->reconfig_rss) { + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + ret = i40e_vsi_config_rss(vsi); + if (ret) { + dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n"); + return ret; + } + vsi->reconfig_rss = false; + } + + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + /* update the local VSI info with updated queue map */ + i40e_vsi_update_queue_map(vsi, &ctxt); + vsi->info.valid_sections = 0; + + return ret; +} + +/** * i40e_vsi_config_tc - 
Configure VSI Tx Scheduler for given TC map * @vsi: VSI to be configured * @enabled_tc: TC bitmap @@ -5717,24 +5787,6 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) } /** - * i40e_is_any_channel - channel exist or not - * @vsi: ptr to VSI to which channels are associated with - * - * Returns true or false if channel(s) exist for associated VSI or not - **/ -static bool i40e_is_any_channel(struct i40e_vsi *vsi) -{ - struct i40e_channel *ch, *ch_tmp; - - list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { - if (ch->initialized) - return true; - } - - return false; -} - -/** * i40e_get_max_queues_for_channel * @vsi: ptr to VSI to which channels are associated with * @@ -6240,26 +6292,15 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, /* By default we are in VEPA mode, if this is the first VF/VMDq * VSI to be added switch to VEB mode. */ - if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) || - (!i40e_is_any_channel(vsi))) { - if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) { - dev_dbg(&pf->pdev->dev, - "Failed to create channel. Override queues (%u) not power of 2\n", - vsi->tc_config.tc_info[0].qcount); - return -EINVAL; - } - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; - if (vsi->type == I40E_VSI_MAIN) { - if (pf->flags & I40E_FLAG_TC_MQPRIO) - i40e_do_reset(pf, I40E_PF_RESET_FLAG, - true); - else - i40e_do_reset_safe(pf, - I40E_PF_RESET_FLAG); - } + if (vsi->type == I40E_VSI_MAIN) { + if (pf->flags & I40E_FLAG_TC_MQPRIO) + i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); + else + i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } /* now onwards for main VSI, number of queues will be value * of TC0's queue count @@ -7912,12 +7953,20 @@ config_tc: vsi->seid); need_reset = true; goto exit; - } else { - dev_info(&vsi->back->pdev->dev, - "Setup channel (id:%u) utilizing num_queues %d\n", - vsi->seid, vsi->tc_config.tc_info[0].qcount); + } else if (enabled_tc && + (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) { + netdev_info(netdev, + "Failed to create channel. 
Override queues (%u) not power of 2\n", + vsi->tc_config.tc_info[0].qcount); + ret = -EINVAL; + need_reset = true; + goto exit; } + dev_info(&vsi->back->pdev->dev, + "Setup channel (id:%u) utilizing num_queues %d\n", + vsi->seid, vsi->tc_config.tc_info[0].qcount); + if (pf->flags & I40E_FLAG_TC_MQPRIO) { if (vsi->mqprio_qopt.max_rate[0]) { u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; @@ -8482,9 +8531,8 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, err = i40e_add_del_cloud_filter(vsi, filter, true); if (err) { - dev_err(&pf->pdev->dev, - "Failed to add cloud filter, err %s\n", - i40e_stat_str(&pf->hw, err)); + dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n", + err); goto err; } @@ -13771,7 +13819,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } - + set_bit(__I40E_VSI_RELEASING, vsi->state); uplink_seid = vsi->uplink_seid; if (vsi->type != I40E_VSI_SRIOV) { if (vsi->netdev_registered) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 472f56b360b8..80ae264c99ba 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -183,17 +183,18 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) /***********************misc routines*****************************/ /** - * i40e_vc_disable_vf + * i40e_vc_reset_vf * @vf: pointer to the VF info - * - * Disable the VF through a SW reset. + * @notify_vf: notify vf about reset or not + * Reset VF handler. **/ -static inline void i40e_vc_disable_vf(struct i40e_vf *vf) +static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf) { struct i40e_pf *pf = vf->pf; int i; - i40e_vc_notify_vf_reset(vf); + if (notify_vf) + i40e_vc_notify_vf_reset(vf); /* We want to ensure that an actual reset occurs initiated after this * function was called. 
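
For illustration, a userspace sketch of the power-of-two check behind the "Override queues (%u) not power of 2" rejection above; is_power_of_2() is reimplemented here for the example.

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long counts[] = { 1, 2, 6, 8, 12, 16 };
	size_t i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		printf("qcount=%lu %s\n", counts[i],
		       is_power_of_2(counts[i]) ? "ok" : "rejected");
	return 0;
}
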
However, we do not want to wait forever, so @@ -211,9 +212,14 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf) usleep_range(10000, 20000); } - dev_warn(&vf->pf->pdev->dev, - "Failed to initiate reset for VF %d after 200 milliseconds\n", - vf->vf_id); + if (notify_vf) + dev_warn(&vf->pf->pdev->dev, + "Failed to initiate reset for VF %d after 200 milliseconds\n", + vf->vf_id); + else + dev_dbg(&vf->pf->pdev->dev, + "Failed to initiate reset for VF %d after 200 milliseconds\n", + vf->vf_id); } /** @@ -674,14 +680,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, struct virtchnl_rxq_info *info) { + u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; struct i40e_hw *hw = &pf->hw; struct i40e_hmc_obj_rxq rx_ctx; - u16 pf_queue_id; int ret = 0; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); - /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); @@ -719,6 +724,10 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, } rx_ctx.rxmax = info->max_pkt_size; + /* if port VLAN is configured increase the max packet size */ + if (vsi->info.pvid) + rx_ctx.rxmax += VLAN_HLEN; + /* enable 32bytes desc always */ rx_ctx.dsize = 1; @@ -2106,20 +2115,6 @@ err: } /** - * i40e_vc_reset_vf_msg - * @vf: pointer to the VF info - * - * called from the VF to reset itself, - * unlike other virtchnl messages, PF driver - * doesn't send the response back to the VF - **/ -static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) -{ - if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) - i40e_reset_vf(vf, false); -} - -/** * i40e_vc_config_promiscuous_mode_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2217,11 +2212,12 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; - struct i40e_pf *pf = vf->pf; u16 vsi_id, vsi_queue_id = 0; - u16 num_qps_all = 0; + struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; int i, j = 0, idx = 0; + struct i40e_vsi *vsi; + u16 num_qps_all = 0; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; @@ -2310,9 +2306,15 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs; } else { - for (i = 0; i < vf->num_tc; i++) - pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs = - vf->ch[i].num_qps; + for (i = 0; i < vf->num_tc; i++) { + vsi = pf->vsi[vf->ch[i].vsi_idx]; + vsi->num_queue_pairs = vf->ch[i].num_qps; + + if (i40e_update_adq_vsi_queues(vsi, i)) { + aq_ret = I40E_ERR_CONFIG; + goto error_param; + } + } } error_param: @@ -2607,8 +2609,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) } else { /* successful request */ vf->num_req_queues = req_pairs; - i40e_vc_notify_vf_reset(vf); - i40e_reset_vf(vf, false); + i40e_vc_reset_vf(vf, true); return 0; } @@ -3803,8 +3804,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) vf->num_req_queues = 0; /* reset the VF in order to allocate resources */ - i40e_vc_notify_vf_reset(vf); - i40e_reset_vf(vf, false); + i40e_vc_reset_vf(vf, true); return I40E_SUCCESS; @@ -3844,8 +3844,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) } /* reset the VF in order to allocate resources */ - i40e_vc_notify_vf_reset(vf); - i40e_reset_vf(vf, 
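
For illustration, a userspace sketch of the Rx maximum-size adjustment above: when a port VLAN is configured the hardware inserts a 4-byte tag, so the limit grows by VLAN_HLEN. Deriving max_pkt_size from the MTU here is an assumption for the example.

#include <stdio.h>

#define ETH_HLEN	14	/* dst + src + ethertype */
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int max_pkt_size = mtu + ETH_HLEN + ETH_FCS_LEN;
	unsigned int rxmax = max_pkt_size;
	int has_port_vlan = 1;

	if (has_port_vlan)
		rxmax += VLAN_HLEN;

	printf("max_pkt_size=%u rxmax=%u\n", max_pkt_size, rxmax);
	return 0;
}
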
false); + i40e_vc_reset_vf(vf, true); return I40E_SUCCESS; @@ -3907,7 +3906,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, i40e_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: - i40e_vc_reset_vf_msg(vf); + i40e_vc_reset_vf(vf, false); ret = 0; break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: @@ -4161,7 +4160,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* Force the VF interface down so it has to bring up with new MAC * address */ - i40e_vc_disable_vf(vf); + i40e_vc_reset_vf(vf, true); dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); error_param: @@ -4170,34 +4169,6 @@ error_param: } /** - * i40e_vsi_has_vlans - True if VSI has configured VLANs - * @vsi: pointer to the vsi - * - * Check if a VSI has configured any VLANs. False if we have a port VLAN or if - * we have no configured VLANs. Do not call while holding the - * mac_filter_hash_lock. - */ -static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi) -{ - bool have_vlans; - - /* If we have a port VLAN, then the VSI cannot have any VLANs - * configured, as all MAC/VLAN filters will be assigned to the PVID. - */ - if (vsi->info.pvid) - return false; - - /* Since we don't have a PVID, we know that if the device is in VLAN - * mode it must be because of a VLAN filter configured on this VSI. - */ - spin_lock_bh(&vsi->mac_filter_hash_lock); - have_vlans = i40e_is_vsi_in_vlan(vsi); - spin_unlock_bh(&vsi->mac_filter_hash_lock); - - return have_vlans; -} - -/** * i40e_ndo_set_vf_port_vlan * @netdev: network interface device structure * @vf_id: VF identifier @@ -4253,19 +4224,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, /* duplicate request, so just return success */ goto error_pvid; - if (i40e_vsi_has_vlans(vsi)) { - dev_err(&pf->pdev->dev, - "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", - vf_id); - /* Administrator Error - knock the VF offline until he does - * the right thing by reconfiguring his network correctly - * and then reloading the VF driver. - */ - i40e_vc_disable_vf(vf); - /* During reset the VF got a new VSI, so refresh the pointer. */ - vsi = pf->vsi[vf->lan_vsi_idx]; - } - + i40e_vc_reset_vf(vf, true); + /* During reset the VF got a new VSI, so refresh a pointer. */ + vsi = pf->vsi[vf->lan_vsi_idx]; /* Locked once because multiple functions below iterate list */ spin_lock_bh(&vsi->mac_filter_hash_lock); @@ -4641,7 +4602,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) goto out; vf->trusted = setting; - i40e_vc_disable_vf(vf); + i40e_vc_reset_vf(vf, true); dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", vf_id, setting ? 
"" : "un"); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index e6e7c1da47fb..3789269ce741 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -39,6 +39,7 @@ #include "iavf_txrx.h" #include "iavf_fdir.h" #include "iavf_adv_rss.h" +#include <linux/bitmap.h> #define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define PFX "iavf: " @@ -304,6 +305,7 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26) #define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27) #define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28) +#define IAVF_FLAG_AQ_REQUEST_STATS BIT(29) /* OS defined structs */ struct net_device *netdev; @@ -443,6 +445,7 @@ int iavf_up(struct iavf_adapter *adapter); void iavf_down(struct iavf_adapter *adapter); int iavf_process_config(struct iavf_adapter *adapter); void iavf_schedule_reset(struct iavf_adapter *adapter); +void iavf_schedule_request_stats(struct iavf_adapter *adapter); void iavf_reset(struct iavf_adapter *adapter); void iavf_set_ethtool_ops(struct net_device *netdev); void iavf_update_stats(struct iavf_adapter *adapter); @@ -500,4 +503,5 @@ void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter); void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter); struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, const u8 *macaddr); +int iavf_lock_timeout(struct mutex *lock, unsigned int msecs); #endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 5a359a0a20ec..0cecaff38d04 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -354,6 +354,9 @@ static void iavf_get_ethtool_stats(struct net_device *netdev, struct iavf_adapter *adapter = netdev_priv(netdev); unsigned int i; + /* Explicitly request stats refresh */ + iavf_schedule_request_stats(adapter); + iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats); rcu_read_lock(); @@ -723,12 +726,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, * * Change the ITR settings for a specific queue. **/ -static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, - struct ethtool_coalesce *ec, int queue) +static int iavf_set_itr_per_queue(struct iavf_adapter *adapter, + struct ethtool_coalesce *ec, int queue) { struct iavf_ring *rx_ring = &adapter->rx_rings[queue]; struct iavf_ring *tx_ring = &adapter->tx_rings[queue]; struct iavf_q_vector *q_vector; + u16 itr_setting; + + itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + if (ec->rx_coalesce_usecs != itr_setting && + ec->use_adaptive_rx_coalesce) { + netif_info(adapter, drv, adapter->netdev, + "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); + return -EINVAL; + } + + itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + if (ec->tx_coalesce_usecs != itr_setting && + ec->use_adaptive_tx_coalesce) { + netif_info(adapter, drv, adapter->netdev, + "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); + return -EINVAL; + } rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); @@ -751,6 +773,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, * the Tx and Rx ITR values based on the values we have entered * into the q_vector, no need to write the values now. 
*/ + return 0; } /** @@ -792,9 +815,11 @@ static int __iavf_set_coalesce(struct net_device *netdev, */ if (queue < 0) { for (i = 0; i < adapter->num_active_queues; i++) - iavf_set_itr_per_queue(adapter, ec, i); + if (iavf_set_itr_per_queue(adapter, ec, i)) + return -EINVAL; } else if (queue < adapter->num_active_queues) { - iavf_set_itr_per_queue(adapter, ec, queue); + if (iavf_set_itr_per_queue(adapter, ec, queue)) + return -EINVAL; } else { netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", adapter->num_active_queues - 1); @@ -1776,6 +1801,7 @@ static int iavf_set_channels(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); u32 num_req = ch->combined_count; + int i; if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && adapter->num_tc) { @@ -1786,7 +1812,7 @@ static int iavf_set_channels(struct net_device *netdev, /* All of these should have already been checked by ethtool before this * even gets to us, but just to be sure. */ - if (num_req > adapter->vsi_res->num_queue_pairs) + if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs) return -EINVAL; if (num_req == adapter->num_active_queues) @@ -1798,6 +1824,20 @@ static int iavf_set_channels(struct net_device *netdev, adapter->num_req_queues = num_req; adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; iavf_schedule_reset(adapter); + + /* wait for the reset is done */ + for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { + msleep(IAVF_RESET_WAIT_MS); + if (adapter->flags & IAVF_FLAG_RESET_PENDING) + continue; + break; + } + if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->num_active_queues = num_req; + return -EOPNOTSUPP; + } + return 0; } @@ -1844,14 +1884,13 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (hfunc) *hfunc = ETH_RSS_HASH_TOP; - if (!indir) - return 0; - - memcpy(key, adapter->rss_key, adapter->rss_key_size); + if (key) + memcpy(key, adapter->rss_key, adapter->rss_key_size); - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - indir[i] = (u32)adapter->rss_lut[i]; + if (indir) + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + indir[i] = (u32)adapter->rss_lut[i]; return 0; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 847d67e32a54..14934a7a13ef 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -147,7 +147,7 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, * * Returns 0 on success, negative on failure **/ -static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) +int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) { unsigned int wait, delay = 10; @@ -175,6 +175,19 @@ void iavf_schedule_reset(struct iavf_adapter *adapter) } /** + * iavf_schedule_request_stats - Set the flags and schedule statistics request + * @adapter: board private structure + * + * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly + * request and refresh ethtool stats + **/ +void iavf_schedule_request_stats(struct iavf_adapter *adapter) +{ + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +} + +/** * iavf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: queue number that is timing out 
@@ -697,6 +710,21 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) } /** + * iavf_restore_filters + * @adapter: board private structure + * + * Restore existing non MAC filters when VF netdev comes back up + **/ +static void iavf_restore_filters(struct iavf_adapter *adapter) +{ + u16 vid; + + /* re-add all VLAN filters */ + for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID) + iavf_add_vlan(adapter, vid); +} + +/** * iavf_vlan_rx_add_vid - Add a VLAN filter to a device * @netdev: network device struct * @proto: unused protocol data @@ -709,8 +737,11 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev, if (!VLAN_ALLOWED(adapter)) return -EIO; + if (iavf_add_vlan(adapter, vid) == NULL) return -ENOMEM; + + set_bit(vid, adapter->vsi.active_vlans); return 0; } @@ -725,11 +756,10 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (VLAN_ALLOWED(adapter)) { - iavf_del_vlan(adapter, vid); - return 0; - } - return -EIO; + iavf_del_vlan(adapter, vid); + clear_bit(vid, adapter->vsi.active_vlans); + + return 0; } /** @@ -1639,8 +1669,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); return 0; } - - if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) && + if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) || (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { iavf_set_promiscuous(adapter, 0); return 0; @@ -1688,6 +1717,11 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_del_adv_rss_cfg(adapter); return 0; } + if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) { + iavf_request_stats(adapter); + return 0; + } + return -EAGAIN; } @@ -2123,8 +2157,8 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); - iavf_free_queues(adapter); iavf_free_q_vectors(adapter); + iavf_free_queues(adapter); memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; @@ -2152,7 +2186,6 @@ static void iavf_reset_task(struct work_struct *work) struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; struct iavf_mac_filter *f, *ftmp; - struct iavf_vlan_filter *vlf; struct iavf_cloud_filter *cf; u32 reg_val; int i = 0, err; @@ -2233,6 +2266,7 @@ continue_reset: (adapter->state == __IAVF_RESETTING)); if (running) { + netdev->flags &= ~IFF_UP; netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; @@ -2292,11 +2326,6 @@ continue_reset: list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; } - /* re-add all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->add = true; - } - spin_unlock_bh(&adapter->mac_vlan_list_lock); /* check if TCs are running and re-add all cloud filters */ @@ -2310,7 +2339,6 @@ continue_reset: spin_unlock_bh(&adapter->cloud_filter_list_lock); adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; iavf_misc_irq_enable(adapter); @@ -2344,7 +2372,7 @@ continue_reset: * to __IAVF_RUNNING */ iavf_up_complete(adapter); - + netdev->flags |= IFF_UP; iavf_irq_enable(adapter, true); } else { iavf_change_state(adapter, __IAVF_DOWN); @@ -2357,8 +2385,10 @@ continue_reset: reset_err: mutex_unlock(&adapter->client_lock); 
mutex_unlock(&adapter->crit_lock); - if (running) + if (running) { iavf_change_state(adapter, __IAVF_RUNNING); + netdev->flags |= IFF_UP; + } dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); iavf_close(netdev); } @@ -2410,7 +2440,7 @@ static void iavf_adminq_task(struct work_struct *work) /* check for error indications */ val = rd32(hw, hw->aq.arq.len); - if (val == 0xdeadbeef) /* indicates device in reset */ + if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ goto freedom; oldval = val; if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { @@ -3095,8 +3125,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter, return -ENOMEM; while (!mutex_trylock(&adapter->crit_lock)) { - if (--count == 0) - goto err; + if (--count == 0) { + kfree(filter); + return err; + } udelay(1); } @@ -3107,11 +3139,11 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter, /* start out with flow type and eth type IPv4 to begin with */ filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; err = iavf_parse_cls_flower(adapter, cls_flower, filter); - if (err < 0) + if (err) goto err; err = iavf_handle_tclass(adapter, tc, filter); - if (err < 0) + if (err) goto err; /* add filter to the list */ @@ -3308,6 +3340,9 @@ static int iavf_open(struct net_device *netdev) spin_unlock_bh(&adapter->mac_vlan_list_lock); + /* Restore VLAN filters that were removed with IFF_DOWN */ + iavf_restore_filters(adapter); + iavf_configure(adapter); iavf_up_complete(adapter); @@ -3415,11 +3450,16 @@ static int iavf_set_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - /* Don't allow changing VLAN_RX flag when adapter is not capable - * of VLAN offload + /* Don't allow enabling VLAN features when adapter is not capable + * of VLAN offload/filtering */ if (!VLAN_ALLOWED(adapter)) { - if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) + netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER); + if (features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER)) return -EINVAL; } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) @@ -3503,7 +3543,8 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) + if (adapter->vf_res && + !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) features &= ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 8c3f0f77cb57..d60bf7c21200 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -607,7 +607,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter) if (f->add) count++; } - if (!count) { + if (!count || !VLAN_ALLOWED(adapter)) { adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; @@ -673,9 +673,19 @@ void iavf_del_vlans(struct iavf_adapter *adapter) spin_lock_bh(&adapter->mac_vlan_list_lock); - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->remove) + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + /* since VLAN capabilities are not allowed, we dont want to send + * a VLAN delete request because it will most 
likely fail and + * create unnecessary errors/noise, so just free the VLAN + * filters marked for removal to enable bailing out before + * sending a virtchnl message + */ + if (f->remove && !VLAN_ALLOWED(adapter)) { + list_del(&f->list); + kfree(f); + } else if (f->remove) { count++; + } } if (!count) { adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; @@ -784,6 +794,8 @@ void iavf_request_stats(struct iavf_adapter *adapter) /* no error message, this isn't crucial */ return; } + + adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS; adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ @@ -1722,8 +1734,37 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } spin_lock_bh(&adapter->mac_vlan_list_lock); iavf_add_filter(adapter, adapter->hw.mac.addr); + + if (VLAN_ALLOWED(adapter)) { + if (!list_empty(&adapter->vlan_filter_list)) { + struct iavf_vlan_filter *vlf; + + /* re-add all VLAN filters over virtchnl */ + list_for_each_entry(vlf, + &adapter->vlan_filter_list, + list) + vlf->add = true; + + adapter->aq_required |= + IAVF_FLAG_AQ_ADD_VLAN_FILTER; + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_process_config(adapter); + + /* unlock crit_lock before acquiring rtnl_lock as other + * processes holding rtnl_lock could be waiting for the same + * crit_lock + */ + mutex_unlock(&adapter->crit_lock); + rtnl_lock(); + netdev_update_features(adapter->netdev); + rtnl_unlock(); + if (iavf_lock_timeout(&adapter->crit_lock, 10000)) + dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", + __FUNCTION__); + } break; case VIRTCHNL_OP_ENABLE_QUEUES: diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 40562600a8cf..09a3297cd63c 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -89,8 +89,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->rx_rings) goto err_rings; - /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */ - vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq), + /* txq_map needs to have enough space to track both Tx (stack) rings + * and XDP rings; at this point vsi->num_xdp_txq might not be set, + * so use num_possible_cpus() as we want to always provide XDP ring + * per CPU, regardless of queue count settings from user that might + * have come from ethtool's set_channels() callback; + */ + vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), sizeof(*vsi->txq_map), GFP_KERNEL); if (!vsi->txq_map) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f099797f35e3..4d1fc48c9744 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2609,7 +2609,18 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) ice_stat_str(status)); goto clear_xdp_rings; } - ice_vsi_assign_bpf_prog(vsi, prog); + + /* assign the prog only when it's not already present on VSI; + * this flow is a subject of both ethtool -L and ndo_bpf flows; + * VSI rebuild that happens under ethtool -L can expose us to + * the bpf_prog refcount issues as we would be swapping same + * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put + * on it as it would be treated as an 'old_prog'; for ndo_bpf + * this is not harmful as dev_xdp_install bumps the refcount + * before calling the op exposed by the driver; + */ 
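The refcount hazard described in the ice comment above can be illustrated with a minimal sketch; the helper name and body below are hypothetical and are not the driver's actual ice_vsi_assign_bpf_prog(), they only mirror the swap-and-put pattern the comment describes.

#include <linux/bpf.h>

/* Hypothetical assign helper: install the new program and drop the
 * reference held on whatever was installed before ("old_prog").
 */
static void xdp_slot_assign(struct bpf_prog **slot, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog = *slot;

	*slot = prog;
	if (old_prog)
		bpf_prog_put(old_prog);
}

/* ndo_bpf path:    dev_xdp_install() has already bumped @prog's refcount,
 *                  so putting the displaced pointer is balanced.
 * ethtool -L path: no extra reference was taken; if @prog is already
 *                  installed (*slot == prog), the put above drops a
 *                  reference the VSI still relies on -- hence the
 *                  ice_is_xdp_ena_vsi() guard in the hunk that follows.
 */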
+ if (!ice_is_xdp_ena_vsi(vsi)) + ice_vsi_assign_bpf_prog(vsi, prog); return 0; clear_xdp_rings: @@ -2785,6 +2796,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); } else { + /* safe to call even when prog == vsi->xdp_prog as + * dev_xdp_install in net/core/dev.c incremented prog's + * refcount so corresponding bpf_prog_put won't cause + * underflow + */ ice_vsi_assign_bpf_prog(vsi, prog); } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 836be0d3b291..fd54d3ef890b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -8026,7 +8026,7 @@ static int igb_poll(struct napi_struct *napi, int budget) if (likely(napi_complete_done(napi, work_done))) igb_ring_irq_enable(q_vector); - return min(work_done, budget - 1); + return work_done; } /** diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6433c909c6b2..072391c494ce 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -25,6 +25,7 @@ #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/module.h> +#include <linux/property.h> #include <asm/checksum.h> @@ -239,6 +240,7 @@ ltq_etop_hw_init(struct net_device *dev) { struct ltq_etop_priv *priv = netdev_priv(dev); int i; + int err; ltq_pmu_enable(PMU_PPE); @@ -273,7 +275,13 @@ ltq_etop_hw_init(struct net_device *dev) if (IS_TX(i)) { ltq_dma_alloc_tx(&ch->dma); - request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv); + err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv); + if (err) { + netdev_err(dev, + "Unable to get Tx DMA IRQ %d\n", + irq); + return err; + } } else if (IS_RX(i)) { ltq_dma_alloc_rx(&ch->dma); for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; @@ -281,7 +289,13 @@ ltq_etop_hw_init(struct net_device *dev) if (ltq_etop_alloc_skb(ch)) return -ENOMEM; ch->dma.desc = 0; - request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv); + err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv); + if (err) { + netdev_err(dev, + "Unable to get Rx DMA IRQ %d\n", + irq); + return err; + } } ch->dma.irq = irq; } @@ -726,7 +740,7 @@ static struct platform_driver ltq_mii_driver = { }, }; -int __init +static int __init init_ltq_etop(void) { int ret = platform_driver_probe(<q_mii_driver, ltq_etop_probe); diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 62a97c46fba0..ef878973b859 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -429,12 +429,14 @@ static const struct of_device_id orion_mdio_match[] = { }; MODULE_DEVICE_TABLE(of, orion_mdio_match); +#ifdef CONFIG_ACPI static const struct acpi_device_id orion_mdio_acpi_match[] = { { "MRVL0100", BUS_TYPE_SMI }, { "MRVL0101", BUS_TYPE_XSMI }, { }, }; MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match); +#endif static struct platform_driver orion_mdio_driver = { .probe = orion_mdio_probe, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 2b18d89d9756..ce486e16489c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5017,11 +5017,13 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu) mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); } + if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) { + netdev_err(dev, "Illegal MTU value %d 
(> %d) for XDP mode\n", + mtu, (int)MVPP2_MAX_RX_BUF_SIZE); + return -EINVAL; + } + if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { - if (port->xdp_prog) { - netdev_err(dev, "Jumbo frames are not supported with XDP\n"); - return -EINVAL; - } if (priv->percpu_pools) { netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); mvpp2_bm_switch_buffers(priv, false); @@ -5307,8 +5309,8 @@ static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) bool running = netif_running(port->dev); bool reset = !prog != !port->xdp_prog; - if (port->dev->mtu > ETH_DATA_LEN) { - NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled"); + if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) { + NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index c7fd466a0efd..a09a507369ac 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -236,10 +236,11 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, u64 lmt_addr, val, tbl_base; int pf, vf, num_vfs, hw_vfs; void __iomem *lmt_map_base; - int index = 0, off = 0; - int bytes_not_copied; int buf_size = 10240; + size_t off = 0; + int index = 0; char *buf; + int ret; /* don't allow partial reads */ if (*ppos != 0) @@ -303,15 +304,17 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, } off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); - bytes_not_copied = copy_to_user(buffer, buf, off); + ret = min(off, count); + if (copy_to_user(buffer, buf, ret)) + ret = -EFAULT; kfree(buf); iounmap(lmt_map_base); - if (bytes_not_copied) - return -EFAULT; + if (ret < 0) + return ret; - *ppos = off; - return off; + *ppos = ret; + return ret; } RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c index 3ce6ccd0f539..b4599fe4ca8d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c @@ -497,8 +497,8 @@ int prestera_bridge_port_join(struct net_device *br_dev, br_port = prestera_bridge_port_add(bridge, port->dev); if (IS_ERR(br_port)) { - err = PTR_ERR(br_port); - goto err_brport_create; + prestera_bridge_put(bridge); + return PTR_ERR(br_port); } err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL, @@ -519,8 +519,6 @@ err_port_join: switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL); err_switchdev_offload: prestera_bridge_port_put(br_port); -err_brport_create: - prestera_bridge_put(bridge); return err; } @@ -1124,7 +1122,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused, prestera_port_obj_attr_set); break; default: - err = -EOPNOTSUPP; + return NOTIFY_DONE; } return notifier_from_errno(err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index f71ec4d9d68e..8eaa24d865c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -339,6 +339,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_PAGE_FAULT_RESUME: case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS: case MLX5_CMD_OP_DEALLOC_SF: + case MLX5_CMD_OP_DESTROY_UCTX: + case 
MLX5_CMD_OP_DESTROY_UMEM: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -464,9 +466,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: case MLX5_CMD_OP_CREATE_UCTX: - case MLX5_CMD_OP_DESTROY_UCTX: case MLX5_CMD_OP_CREATE_UMEM: - case MLX5_CMD_OP_DESTROY_UMEM: case MLX5_CMD_OP_ALLOC_MEMIC: case MLX5_CMD_OP_MODIFY_XRQ: case MLX5_CMD_OP_RELEASE_XRQ_ERROR: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 02e77ffe5c3e..5371ad0a12eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -164,13 +164,14 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); MLX5_SET(destroy_cq_in, in, uid, cq->uid); err = mlx5_cmd_exec_in(dev, destroy_cq, in); + if (err) + return err; synchronize_irq(cq->irqn); - mlx5_cq_put(cq); wait_for_completion(&cq->free); - return err; + return 0; } EXPORT_SYMBOL(mlx5_core_destroy_cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 07c8d9811bc8..10d195042ab5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -507,6 +507,8 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) if (!mlx5_debugfs_root) return; - if (cq->dbg) + if (cq->dbg) { rem_res_tree(cq->dbg); + cq->dbg = NULL; + } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index c1c6e74c79c4..2445e2ae3324 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1356,9 +1356,13 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv, int mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_acts, const struct flow_action_entry *act, struct netlink_ext_ack *extack) { + bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR; + int err; + if (!priv) { NL_SET_ERR_MSG_MOD(extack, "offload of ct action isn't available"); @@ -1369,6 +1373,17 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, attr->ct_attr.ct_action = act->ct.action; attr->ct_attr.nf_ft = act->ct.flow_table; + if (!clear_action) + goto out; + + err = mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set registers for ct clear"); + return err; + } + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + +out: return 0; } @@ -1898,23 +1913,16 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv, memcpy(pre_ct_attr, attr, attr_sz); - err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0); - if (err) { - ct_dbg("Failed to set register for ct clear"); - goto err_set_registers; - } - mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type, mod_acts->num_actions, mod_acts->actions); if (IS_ERR(mod_hdr)) { err = PTR_ERR(mod_hdr); ct_dbg("Failed to add create ct clear mod hdr"); - goto err_set_registers; + goto err_mod_hdr; } pre_ct_attr->modify_hdr = mod_hdr; - pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr); if (IS_ERR(rule)) { @@ -1930,7 +1938,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv, err_insert: 
mlx5_modify_header_dealloc(priv->mdev, mod_hdr); -err_set_registers: +err_mod_hdr: netdev_warn(priv->netdev, "Failed to offload ct clear flow, err %d\n", err); kfree(pre_ct_attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 363329f4aac6..99662af1e41a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -110,6 +110,7 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec); int mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_acts, const struct flow_action_entry *act, struct netlink_ext_ack *extack); @@ -172,6 +173,7 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec) static inline int mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_acts, const struct flow_action_entry *act, struct netlink_ext_ack *extack) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index 8f64f2c8895a..b689701ac7d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -102,6 +102,7 @@ struct mlx5e_tc_flow { refcount_t refcnt; struct rcu_head rcu_head; struct completion init_done; + struct completion del_hw_done; int tunnel_id; /* the mapped tunnel id of this flow */ struct mlx5_flow_attr *attr; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 660cca73c36c..042b1abe1437 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -245,8 +245,14 @@ static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow, struct list_head *flow_list, int index) { - if (IS_ERR(mlx5e_flow_get(flow))) + if (IS_ERR(mlx5e_flow_get(flow))) { + /* Flow is being deleted concurrently. Wait for it to be + * unoffloaded from hardware, otherwise deleting encap will + * fail. 
+ */ + wait_for_completion(&flow->del_hw_done); return; + } wait_for_completion(&flow->init_done); flow->tmp_entry_index = index; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 62abce008c7b..a2a9f68579dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -55,6 +55,7 @@ struct mlx5e_ktls_offload_context_rx { DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS); /* resync */ + spinlock_t lock; /* protects resync fields */ struct mlx5e_ktls_rx_resync_ctx resync; struct list_head list; }; @@ -386,14 +387,18 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r struct mlx5e_icosq *sq; bool trigger_poll; - memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq)); - sq = &c->async_icosq; ktls_resync = sq->ktls_resync; + trigger_poll = false; spin_lock_bh(&ktls_resync->lock); - list_add_tail(&priv_rx->list, &ktls_resync->list); - trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); + spin_lock_bh(&priv_rx->lock); + memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq)); + if (list_empty(&priv_rx->list)) { + list_add_tail(&priv_rx->list, &ktls_resync->list); + trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); + } + spin_unlock_bh(&priv_rx->lock); spin_unlock_bh(&ktls_resync->lock); if (!trigger_poll) @@ -617,6 +622,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, if (err) goto err_create_key; + INIT_LIST_HEAD(&priv_rx->list); + spin_lock_init(&priv_rx->lock); priv_rx->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; @@ -730,10 +737,14 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget) priv_rx = list_first_entry(&local_list, struct mlx5e_ktls_offload_context_rx, list); + spin_lock(&priv_rx->lock); cseg = post_static_params(sq, priv_rx); - if (IS_ERR(cseg)) + if (IS_ERR(cseg)) { + spin_unlock(&priv_rx->lock); break; - list_del(&priv_rx->list); + } + list_del_init(&priv_rx->list); + spin_unlock(&priv_rx->lock); db_cseg = cseg; } if (db_cseg) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 835caa1c7b74..3d45f4ae80c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1600,6 +1600,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, else mlx5e_tc_unoffload_fdb_rules(esw, flow, attr); } + complete_all(&flow->del_hw_done); if (mlx5_flow_has_geneve_opt(flow)) mlx5_geneve_tlv_option_del(priv->mdev->geneve); @@ -3607,7 +3608,9 @@ parse_tc_nic_actions(struct mlx5e_priv *priv, attr->dest_chain = act->chain_index; break; case FLOW_ACTION_CT: - err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack); + err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, + &parse_attr->mod_hdr_acts, + act, extack); if (err) return err; @@ -4276,7 +4279,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported"); return -EOPNOTSUPP; } - err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack); + err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, + &parse_attr->mod_hdr_acts, + act, extack); if (err) return err; @@ -4465,6 +4470,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, 
INIT_LIST_HEAD(&flow->l3_to_l2_reformat); refcount_set(&flow->refcnt, 1); init_completion(&flow->init_done); + init_completion(&flow->del_hw_done); *__flow = flow; *__parse_attr = parse_attr; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index ec136b499204..51a8cecc4a7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1305,12 +1305,17 @@ abort: */ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { + bool toggle_lag; int ret; if (!mlx5_esw_allowed(esw)) return 0; - mlx5_lag_disable_change(esw->dev); + toggle_lag = esw->mode == MLX5_ESWITCH_NONE; + + if (toggle_lag) + mlx5_lag_disable_change(esw->dev); + down_write(&esw->mode_lock); if (esw->mode == MLX5_ESWITCH_NONE) { ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); @@ -1324,7 +1329,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) esw->esw_funcs.num_vfs = num_vfs; } up_write(&esw->mode_lock); - mlx5_lag_enable_change(esw->dev); + + if (toggle_lag) + mlx5_lag_enable_change(esw->dev); + return ret; } @@ -1572,6 +1580,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->enabled_vports = 0; esw->mode = MLX5_ESWITCH_NONE; esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; + if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) && + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; + else + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; dev->priv.eswitch = esw; BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); @@ -1934,7 +1947,7 @@ free_out: return err; } -u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) +u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev) { struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -1948,7 +1961,7 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) struct mlx5_eswitch *esw; esw = dev->priv.eswitch; - return mlx5_esw_allowed(esw) ? esw->offloads.encap : + return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? 
esw->offloads.encap : DEVLINK_ESWITCH_ENCAP_MODE_NONE; } EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f4eaa5893886..a46455694f7a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -3183,12 +3183,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) u64 mapping_id; int err; - if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) && - MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap)) - esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; - else - esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; - mutex_init(&esw->offloads.termtbl_mutex); mlx5_rdma_enable_roce(esw->dev); @@ -3286,7 +3280,6 @@ void esw_offloads_disable(struct mlx5_eswitch *esw) esw_offloads_metadata_uninit(esw); mlx5_rdma_disable_roce(esw->dev); mutex_destroy(&esw->offloads.termtbl_mutex); - esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; } static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) @@ -3630,7 +3623,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, *encap = esw->offloads.encap; unlock: up_write(&esw->mode_lock); - return 0; + return err; } static bool diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 31c99d53faf7..7e0e04cf26f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -40,7 +40,7 @@ #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) /* Max number of counters to query in bulk read is 32K */ #define MLX5_SW_MAX_COUNTERS_BULK BIT(15) -#define MLX5_SF_NUM_COUNTERS_BULK 6 +#define MLX5_SF_NUM_COUNTERS_BULK 8 #define MLX5_FC_POOL_MAX_THRESHOLD BIT(18) #define MLX5_FC_POOL_USED_BUFF_RATIO 10 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 48d2ea690d7a..4ddf6b330a44 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -615,6 +615,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, bool is_bonded, is_in_lag, mode_supported; int bond_status = 0; int num_slaves = 0; + int changed = 0; int idx; if (!netif_is_lag_master(upper)) @@ -653,27 +654,27 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, */ is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3; - if (!mlx5_lag_is_ready(ldev) && is_in_lag) { - NL_SET_ERR_MSG_MOD(info->info.extack, - "Can't activate LAG offload, PF is configured with more than 64 VFs"); - return 0; - } - /* Lag mode must be activebackup or hash. 
*/ mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP || tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH; - if (is_in_lag && !mode_supported) - NL_SET_ERR_MSG_MOD(info->info.extack, - "Can't activate LAG offload, TX type isn't supported"); - is_bonded = is_in_lag && mode_supported; if (tracker->is_bonded != is_bonded) { tracker->is_bonded = is_bonded; - return 1; + changed = 1; } - return 0; + if (!is_in_lag) + return changed; + + if (!mlx5_lag_is_ready(ldev)) + NL_SET_ERR_MSG_MOD(info->info.extack, + "Can't activate LAG offload, PF is configured with more than 64 VFs"); + else if (!mode_supported) + NL_SET_ERR_MSG_MOD(info->info.extack, + "Can't activate LAG offload, TX type isn't supported"); + + return changed; } static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev, @@ -716,9 +717,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this, ldev = container_of(this, struct mlx5_lag, nb); - if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE) - return NOTIFY_DONE; - tracker = ldev->tracker; switch (event) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 49089cbe897c..8cbd36c82b3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -135,25 +135,14 @@ static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn, static int dr_domain_query_vport(struct mlx5dr_domain *dmn, u16 vport_number, + bool other_vport, struct mlx5dr_cmd_vport_cap *vport_caps) { - u16 cmd_vport = vport_number; - bool other_vport = true; int ret; - if (vport_number == MLX5_VPORT_UPLINK) { - dr_domain_fill_uplink_caps(dmn, vport_caps); - return 0; - } - - if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) { - other_vport = false; - cmd_vport = 0; - } - ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev, other_vport, - cmd_vport, + vport_number, &vport_caps->icm_address_rx, &vport_caps->icm_address_tx); if (ret) @@ -161,7 +150,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn, ret = mlx5dr_cmd_query_gvmi(dmn->mdev, other_vport, - cmd_vport, + vport_number, &vport_caps->vport_gvmi); if (ret) return ret; @@ -176,9 +165,15 @@ static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn) { return dr_domain_query_vport(dmn, dmn->info.caps.is_ecpf ? 
MLX5_VPORT_ECPF : 0, + false, &dmn->info.caps.vports.esw_manager_caps); } +static void dr_domain_query_uplink(struct mlx5dr_domain *dmn) +{ + dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps); +} + static struct mlx5dr_cmd_vport_cap * dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) { @@ -190,7 +185,7 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) if (!vport_caps) return NULL; - ret = dr_domain_query_vport(dmn, vport, vport_caps); + ret = dr_domain_query_vport(dmn, vport, true, vport_caps); if (ret) { kvfree(vport_caps); return NULL; @@ -207,16 +202,26 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) return vport_caps; } +static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport) +{ + struct mlx5dr_cmd_caps *caps = &dmn->info.caps; + + return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) || + (!caps->is_ecpf && vport == 0); +} + struct mlx5dr_cmd_vport_cap * mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport) { struct mlx5dr_cmd_caps *caps = &dmn->info.caps; struct mlx5dr_cmd_vport_cap *vport_caps; - if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) || - (!caps->is_ecpf && vport == 0)) + if (dr_domain_is_esw_mgr_vport(dmn, vport)) return &caps->vports.esw_manager_caps; + if (vport == MLX5_VPORT_UPLINK) + return &caps->vports.uplink_caps; + vport_load: vport_caps = xa_load(&caps->vports.vports_caps_xa, vport); if (vport_caps) @@ -241,17 +246,6 @@ static void dr_domain_clear_vports(struct mlx5dr_domain *dmn) } } -static int dr_domain_query_uplink(struct mlx5dr_domain *dmn) -{ - struct mlx5dr_cmd_vport_cap *vport_caps; - - vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK); - if (!vport_caps) - return -EINVAL; - - return 0; -} - static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev, struct mlx5dr_domain *dmn) { @@ -281,11 +275,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev, goto free_vports_caps_xa; } - ret = dr_domain_query_uplink(dmn); - if (ret) { - mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret); - goto free_vports_caps_xa; - } + dr_domain_query_uplink(dmn); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 75c775bee351..793365242e85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -924,11 +924,12 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher, /* Check that all mask data was consumed */ for (i = 0; i < consumed_mask.match_sz; i++) { - if (consumed_mask.match_buf[i]) { - mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n"); - ret = -EOPNOTSUPP; - goto free_consumed_mask; - } + if (!((u8 *)consumed_mask.match_buf)[i]) + continue; + + mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n"); + ret = -EOPNOTSUPP; + goto free_consumed_mask; } ret = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 3028b776da00..2333c2439c28 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -764,6 +764,7 @@ struct mlx5dr_roce_cap { struct mlx5dr_vports { struct mlx5dr_cmd_vport_cap esw_manager_caps; + struct mlx5dr_cmd_vport_cap uplink_caps; struct xarray vports_caps_xa; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 5925db386b1b..03e5bad4e405 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2153,7 +2153,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, max_ports = mlxsw_core_max_ports(mlxsw_sp->core); local_port = mlxsw_reg_pude_local_port_get(pude_pl); - if (WARN_ON_ONCE(local_port >= max_ports)) + if (WARN_ON_ONCE(!local_port || local_port >= max_ports)) return; mlxsw_sp_port = mlxsw_sp->ports[local_port]; if (!mlxsw_sp_port) @@ -3290,10 +3290,10 @@ mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core) u8 max_rif_mac_profiles; if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES)) - return -EIO; - - max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, - MAX_RIF_MAC_PROFILES); + max_rif_mac_profiles = 1; + else + max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, + MAX_RIF_MAC_PROFILES); devlink_resource_size_params_init(&size_params, max_rif_mac_profiles, max_rif_mac_profiles, 1, DEVLINK_RESOURCE_UNIT_ENTRY); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 4fc97823bc84..7d7647481f70 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -914,8 +914,7 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter) } static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter, - u8 duplex, u16 local_adv, - u16 remote_adv) + u16 local_adv, u16 remote_adv) { struct lan743x_phy *phy = &adapter->phy; u8 cap; @@ -943,7 +942,6 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) phy_print_status(phydev); if (phydev->state == PHY_RUNNING) { - struct ethtool_link_ksettings ksettings; int remote_advertisement = 0; int local_advertisement = 0; @@ -980,18 +978,14 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) } lan743x_csr_write(adapter, MAC_CR, data); - memset(&ksettings, 0, sizeof(ksettings)); - phy_ethtool_get_link_ksettings(netdev, &ksettings); local_advertisement = linkmode_adv_to_mii_adv_t(phydev->advertising); remote_advertisement = linkmode_adv_to_mii_adv_t(phydev->lp_advertising); - lan743x_phy_update_flowcontrol(adapter, - ksettings.base.duplex, - local_advertisement, + lan743x_phy_update_flowcontrol(adapter, local_advertisement, remote_advertisement); - lan743x_ptp_update_latency(adapter, ksettings.base.speed); + lan743x_ptp_update_latency(adapter, phydev->speed); } } diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index e6c18b598d5c..409cde1e59c6 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1278,6 +1278,225 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_fdb_dump); +static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_ETYPE; + *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588); + *(__be16 *)trap->key.etype.etype.mask = htons(0xffff); +} + +static void +ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV4; + trap->key.ipv4.dport.value = PTP_EV_PORT; + trap->key.ipv4.dport.mask = 0xffff; +} + +static void +ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV6; + trap->key.ipv6.dport.value = PTP_EV_PORT; + trap->key.ipv6.dport.mask = 
0xffff; +} + +static void +ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV4; + trap->key.ipv4.dport.value = PTP_GEN_PORT; + trap->key.ipv4.dport.mask = 0xffff; +} + +static void +ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV6; + trap->key.ipv6.dport.value = PTP_GEN_PORT; + trap->key.ipv6.dport.mask = 0xffff; +} + +static int ocelot_trap_add(struct ocelot *ocelot, int port, + unsigned long cookie, + void (*populate)(struct ocelot_vcap_filter *f)) +{ + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *trap; + bool new = false; + int err; + + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + + trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, + false); + if (!trap) { + trap = kzalloc(sizeof(*trap), GFP_KERNEL); + if (!trap) + return -ENOMEM; + + populate(trap); + trap->prio = 1; + trap->id.cookie = cookie; + trap->id.tc_offload = false; + trap->block_id = VCAP_IS2; + trap->type = OCELOT_VCAP_FILTER_OFFLOAD; + trap->lookup = 0; + trap->action.cpu_copy_ena = true; + trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + trap->action.port_mask = 0; + new = true; + } + + trap->ingress_port_mask |= BIT(port); + + if (new) + err = ocelot_vcap_filter_add(ocelot, trap, NULL); + else + err = ocelot_vcap_filter_replace(ocelot, trap); + if (err) { + trap->ingress_port_mask &= ~BIT(port); + if (!trap->ingress_port_mask) + kfree(trap); + return err; + } + + return 0; +} + +static int ocelot_trap_del(struct ocelot *ocelot, int port, + unsigned long cookie) +{ + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *trap; + + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + + trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, + false); + if (!trap) + return 0; + + trap->ingress_port_mask &= ~BIT(port); + if (!trap->ingress_port_mask) + return ocelot_vcap_filter_del(ocelot, trap); + + return ocelot_vcap_filter_replace(ocelot, trap); +} + +static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long l2_cookie = ocelot->num_phys_ports + 1; + + return ocelot_trap_add(ocelot, port, l2_cookie, + ocelot_populate_l2_ptp_trap_key); +} + +static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long l2_cookie = ocelot->num_phys_ports + 1; + + return ocelot_trap_del(ocelot, port, l2_cookie); +} + +static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; + unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; + int err; + + err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, + ocelot_populate_ipv4_ptp_event_trap_key); + if (err) + return err; + + err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, + ocelot_populate_ipv4_ptp_general_trap_key); + if (err) + ocelot_trap_del(ocelot, port, ipv4_ev_cookie); + + return err; +} + +static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; + unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; + int err; + + err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie); + err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie); + return err; +} + +static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; + unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; + int err; + + err = 
ocelot_trap_add(ocelot, port, ipv6_ev_cookie, + ocelot_populate_ipv6_ptp_event_trap_key); + if (err) + return err; + + err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, + ocelot_populate_ipv6_ptp_general_trap_key); + if (err) + ocelot_trap_del(ocelot, port, ipv6_ev_cookie); + + return err; +} + +static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; + unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; + int err; + + err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie); + err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie); + return err; +} + +static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port, + bool l2, bool l4) +{ + int err; + + if (l2) + err = ocelot_l2_ptp_trap_add(ocelot, port); + else + err = ocelot_l2_ptp_trap_del(ocelot, port); + if (err) + return err; + + if (l4) { + err = ocelot_ipv4_ptp_trap_add(ocelot, port); + if (err) + goto err_ipv4; + + err = ocelot_ipv6_ptp_trap_add(ocelot, port); + if (err) + goto err_ipv6; + } else { + err = ocelot_ipv4_ptp_trap_del(ocelot, port); + + err |= ocelot_ipv6_ptp_trap_del(ocelot, port); + } + if (err) + return err; + + return 0; + +err_ipv6: + ocelot_ipv4_ptp_trap_del(ocelot, port); +err_ipv4: + if (l2) + ocelot_l2_ptp_trap_del(ocelot, port); + return err; +} + int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr) { return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config, @@ -1288,7 +1507,9 @@ EXPORT_SYMBOL(ocelot_hwstamp_get); int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + bool l2 = false, l4 = false; struct hwtstamp_config cfg; + int err; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; @@ -1320,28 +1541,40 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_SOME: - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + l4 = true; + break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + l2 = true; + break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + l2 = true; + l4 = true; break; default: mutex_unlock(&ocelot->ptp_lock); return -ERANGE; } + err = ocelot_setup_ptp_traps(ocelot, port, l2, l4); + if (err) + return err; + + if (l2 && l4) + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + else if (l2) + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + else if (l4) + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + else + cfg.rx_filter = HWTSTAMP_FILTER_NONE; + /* Commit back the result & save it */ memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg)); mutex_unlock(&ocelot->ptp_lock); @@ -1444,7 +1677,10 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port, SOF_TIMESTAMPING_RAW_HARDWARE; info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | BIT(HWTSTAMP_TX_ONESTEP_SYNC); - info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT); return 0; } diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 99d7376a70a7..337cd08b1a54 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -1217,6 +1217,22 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot, } EXPORT_SYMBOL(ocelot_vcap_filter_del); +int ocelot_vcap_filter_replace(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) +{ + struct ocelot_vcap_block *block = &ocelot->block[filter->block_id]; + int index; + + index = ocelot_vcap_block_get_filter_index(block, filter); + if (index < 0) + return index; + + vcap_entry_set(ocelot, index, filter); + + return 0; +} +EXPORT_SYMBOL(ocelot_vcap_filter_replace); + int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, struct ocelot_vcap_filter *filter) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index df203738511b..0b1865e9f0b5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -565,7 +565,6 @@ struct nfp_net_dp { * @exn_name: Name for Exception interrupt * @shared_handler: Handler for shared interrupts * @shared_name: Name for shared interrupt - * @me_freq_mhz: ME clock_freq (MHz) * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active, * @reconfig_sync_present and HW reconfiguration request * regs/machinery from async requests (sync must take @@ -650,8 +649,6 @@ struct nfp_net { irq_handler_t shared_handler; char shared_name[IFNAMSIZ + 8]; - u32 me_freq_mhz; - bool link_up; spinlock_t link_status_lock; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 1de076f55740..cf7882933993 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1344,7 +1344,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev, * ME timestamp ticks. There are 16 ME clock cycles for each timestamp * count. 
*/ - factor = nn->me_freq_mhz / 16; + factor = nn->tlv_caps.me_freq_mhz / 16; /* Each pair of (usecs, max_frames) fields specifies that interrupts * should be coalesced until diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index cfeb7620ae20..07a00dd9cfe0 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1209,7 +1209,7 @@ static void *nixge_get_nvmem_address(struct device *dev) cell = nvmem_cell_get(dev, "address"); if (IS_ERR(cell)) - return NULL; + return cell; mac = nvmem_cell_read(cell, &cell_size); nvmem_cell_put(cell); @@ -1282,7 +1282,7 @@ static int nixge_probe(struct platform_device *pdev) ndev->max_mtu = NIXGE_JUMBO_MTU; mac_addr = nixge_get_nvmem_address(&pdev->dev); - if (mac_addr && is_valid_ether_addr(mac_addr)) { + if (!IS_ERR(mac_addr) && is_valid_ether_addr(mac_addr)) { eth_hw_addr_set(ndev, mac_addr); kfree(mac_addr); } else { diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index a97f691839e0..6958adeca86d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1045,7 +1045,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, if (!parities) continue; - for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) { struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; if (qed_int_is_parity_flag(p_hwfn, p_bit) && @@ -1083,7 +1083,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, * to current group, making them responsible for the * previous assertion. */ - for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) { long unsigned int bitmask; u8 bit, bit_len; @@ -1382,7 +1382,7 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); for (i = 0; i < NUM_ATTN_REGS; i++) { /* j is array index, k is bit index */ - for (j = 0, k = 0; k < 32; j++) { + for (j = 0, k = 0; k < 32 && j < 32; j++) { struct aeu_invert_reg_bit *p_aeu; p_aeu = &aeu_descs[i].bits[j]; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index bbe21db20417..86c44bc5f73f 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5217,8 +5217,8 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp) static void rtl_init_mac_address(struct rtl8169_private *tp) { + u8 mac_addr[ETH_ALEN] __aligned(2) = {}; struct net_device *dev = tp->dev; - u8 mac_addr[ETH_ALEN]; int rc; rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); @@ -5233,7 +5233,8 @@ static void rtl_init_mac_address(struct rtl8169_private *tp) if (is_valid_ether_addr(mac_addr)) goto done; - eth_hw_addr_random(dev); + eth_random_addr(mac_addr); + dev->addr_assign_type = NET_ADDR_RANDOM; dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); done: eth_hw_addr_set(dev, mac_addr); diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index cc2d907c4c4b..23a336c5096e 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -392,7 +392,7 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev, /* get MAC address from EEPROM */ for (i = 0; i < 3; i++) addr[i] = read_eeprom(ioaddr, i + EEPROMMACAddr); - eth_hw_addr_set(net_dev, (u8 *)addr); + eth_hw_addr_set(net_dev, (u8 *)addr); rc = 1; break; diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 85208128f135..b7c2579c963b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -485,8 +485,28 @@ static int socfpga_dwmac_resume(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ -static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend, - socfpga_dwmac_resume); +static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + stmmac_bus_clks_config(priv, false); + + return 0; +} + +static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + return stmmac_bus_clks_config(priv, true); +} + +static const struct dev_pm_ops socfpga_dwmac_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume) + SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL) +}; static const struct socfpga_dwmac_ops socfpga_gen5_ops = { .set_phy_mode = socfpga_gen5_set_phy_mode, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 43eead726886..5f129733aabd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -314,6 +314,7 @@ int stmmac_mdio_reset(struct mii_bus *mii); int stmmac_xpcs_setup(struct mii_bus *mii); void stmmac_set_ethtool_ops(struct net_device *netdev); +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags); void stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); int stmmac_open(struct net_device *dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d3f350c25b9b..748195697e5a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -50,6 +50,13 @@ #include "dwxgmac2.h" #include "hwif.h" +/* As long as the interface is active, we keep the timestamping counter enabled + * with fine resolution and binary rollover. This avoid non-monotonic behavior + * (clock jumps) when changing timestamping settings at runtime. 
+ */ +#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ + PTP_TCR_TSCTRLSSR) + #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) @@ -511,6 +518,14 @@ bool stmmac_eee_init(struct stmmac_priv *priv) return true; } +static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv) +{ + /* Correct the clk domain crossing(CDC) error */ + if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) + return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate; + return 0; +} + /* stmmac_get_tx_hwtstamp - get HW TX timestamps * @priv: driver private structure * @p : descriptor pointer @@ -524,7 +539,6 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, { struct skb_shared_hwtstamps shhwtstamp; bool found = false; - s64 adjust = 0; u64 ns = 0; if (!priv->hwts_tx_en) @@ -543,12 +557,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, } if (found) { - /* Correct the clk domain crossing(CDC) error */ - if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { - adjust += -(2 * (NSEC_PER_SEC / - priv->plat->clk_ptp_rate)); - ns += adjust; - } + ns -= stmmac_cdc_adjust(priv); memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns); @@ -573,7 +582,6 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, { struct skb_shared_hwtstamps *shhwtstamp = NULL; struct dma_desc *desc = p; - u64 adjust = 0; u64 ns = 0; if (!priv->hwts_rx_en) @@ -586,11 +594,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); - /* Correct the clk domain crossing(CDC) error */ - if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { - adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate); - ns -= adjust; - } + ns -= stmmac_cdc_adjust(priv); netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); @@ -616,8 +620,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) { struct stmmac_priv *priv = netdev_priv(dev); struct hwtstamp_config config; - struct timespec64 now; - u64 temp = 0; u32 ptp_v2 = 0; u32 tstamp_all = 0; u32 ptp_over_ipv4_udp = 0; @@ -626,11 +628,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) u32 snap_type_sel = 0; u32 ts_master_en = 0; u32 ts_event_en = 0; - u32 sec_inc = 0; - u32 value = 0; - bool xmac; - - xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { netdev_alert(priv->dev, "No support for HW time stamping\n"); @@ -792,42 +789,17 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 
0 : 1); priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; - if (!priv->hwts_tx_en && !priv->hwts_rx_en) - stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); - else { - value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | - tstamp_all | ptp_v2 | ptp_over_ethernet | - ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | - ts_master_en | snap_type_sel); - stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); - - /* program Sub Second Increment reg */ - stmmac_config_sub_second_increment(priv, - priv->ptpaddr, priv->plat->clk_ptp_rate, - xmac, &sec_inc); - temp = div_u64(1000000000ULL, sec_inc); - - /* Store sub second increment and flags for later use */ - priv->sub_second_inc = sec_inc; - priv->systime_flags = value; - - /* calculate default added value: - * formula is : - * addend = (2^32)/freq_div_ratio; - * where, freq_div_ratio = 1e9ns/sec_inc - */ - temp = (u64)(temp << 32); - priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); - stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); + priv->systime_flags = STMMAC_HWTS_ACTIVE; - /* initialize system time */ - ktime_get_real_ts64(&now); - - /* lower 32 bits of tv_sec are safe until y2106 */ - stmmac_init_systime(priv, priv->ptpaddr, - (u32)now.tv_sec, now.tv_nsec); + if (priv->hwts_tx_en || priv->hwts_rx_en) { + priv->systime_flags |= tstamp_all | ptp_v2 | + ptp_over_ethernet | ptp_over_ipv6_udp | + ptp_over_ipv4_udp | ts_event_en | + ts_master_en | snap_type_sel; } + stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); + memcpy(&priv->tstamp_config, &config, sizeof(config)); return copy_to_user(ifr->ifr_data, &config, @@ -856,6 +828,66 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) } /** + * stmmac_init_tstamp_counter - init hardware timestamping counter + * @priv: driver private structure + * @systime_flags: timestamping flags + * Description: + * Initialize hardware counter for packet timestamping. + * This is valid as long as the interface is open and not suspended. + * Will be rerun after resuming from suspend, case in which the timestamping + * flags updated by stmmac_hwtstamp_set() also need to be restored. 
+ */ +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) +{ + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + struct timespec64 now; + u32 sec_inc = 0; + u64 temp = 0; + int ret; + + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) + return -EOPNOTSUPP; + + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) { + netdev_warn(priv->dev, + "failed to enable PTP reference clock: %pe\n", + ERR_PTR(ret)); + return ret; + } + + stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); + priv->systime_flags = systime_flags; + + /* program Sub Second Increment reg */ + stmmac_config_sub_second_increment(priv, priv->ptpaddr, + priv->plat->clk_ptp_rate, + xmac, &sec_inc); + temp = div_u64(1000000000ULL, sec_inc); + + /* Store sub second increment for later use */ + priv->sub_second_inc = sec_inc; + + /* calculate default added value: + * formula is : + * addend = (2^32)/freq_div_ratio; + * where, freq_div_ratio = 1e9ns/sec_inc + */ + temp = (u64)(temp << 32); + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); + + /* initialize system time */ + ktime_get_real_ts64(&now); + + /* lower 32 bits of tv_sec are safe until y2106 */ + stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); + + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter); + +/** * stmmac_init_ptp - init PTP * @priv: driver private structure * Description: this is to verify if the HW supports the PTPv1 or PTPv2. @@ -865,9 +897,11 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) static int stmmac_init_ptp(struct stmmac_priv *priv) { bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + int ret; - if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) - return -EOPNOTSUPP; + ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); + if (ret) + return ret; priv->adv_ts = 0; /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ @@ -3275,10 +3309,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) stmmac_mmc_setup(priv); if (init_ptp) { - ret = clk_prepare_enable(priv->plat->clk_ptp_ref); - if (ret < 0) - netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); - ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) netdev_warn(priv->dev, "PTP not supported by HW\n"); @@ -3772,6 +3802,8 @@ int stmmac_release(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); u32 chan; + netif_tx_disable(dev); + if (device_may_wakeup(priv->device)) phylink_speed_down(priv->phylink, false); /* Stop and disconnect the PHY */ @@ -5164,12 +5196,13 @@ read_again: if (likely(!(status & rx_not_ls)) && (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || unlikely(status != llc_snap))) { - if (buf2_len) + if (buf2_len) { buf2_len -= ETH_FCS_LEN; - else + len -= ETH_FCS_LEN; + } else if (buf1_len) { buf1_len -= ETH_FCS_LEN; - - len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } } if (!skb) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 232ac98943cd..5d29f336315b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -816,7 +816,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) if (ret) return ret; - clk_prepare_enable(priv->plat->clk_ptp_ref); + stmmac_init_tstamp_counter(priv, priv->systime_flags); } return 0; 
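The stmmac_init_tstamp_counter() helper introduced above folds the sub-second-increment and addend programming into one place. As a quick sanity check of the addend arithmetic it carries over from the old hwtstamp path (temp = 1e9 / sec_inc, then default_addend = (temp << 32) / clk_ptp_rate), here is a minimal standalone sketch with hypothetical input values; the real driver takes clk_ptp_rate from platform data and sec_inc from stmmac_config_sub_second_increment(), neither of which is reproduced here.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical inputs: a 125 MHz PTP reference clock and a 16 ns
	 * sub-second increment.  Real values come from the hardware setup. */
	uint64_t clk_ptp_rate = 125000000ULL;
	uint64_t sec_inc = 16;
	uint64_t temp;
	uint32_t default_addend;

	/* Mirrors: temp = div_u64(1000000000ULL, sec_inc); */
	temp = 1000000000ULL / sec_inc;

	/* Mirrors: default_addend = div_u64(temp << 32, clk_ptp_rate); */
	default_addend = (uint32_t)((temp << 32) / clk_ptp_rate);

	printf("default_addend = 0x%08x\n", (unsigned int)default_addend);
	return 0;
}

With these inputs the sketch prints 0x80000000; changing clk_ptp_rate or sec_inc and re-running it is an easy way to see how the value programmed by stmmac_config_addend() shifts.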
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index bfdf89e54752..8a19a06b505d 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -306,7 +306,6 @@ static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. */ dev->netdev_ops = &sp_netdev_ops; - dev->needs_free_netdev = true; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &ax25_header_ops; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index e2b332b54f06..7da2bb8a443c 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -31,6 +31,8 @@ #define AX_MTU 236 +/* some arch define END as assembly function ending, just undef it */ +#undef END /* SLIP/KISS protocol characters. */ #define END 0300 /* indicates end of frame */ #define ESC 0333 /* indicates byte stuffing */ diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index cff51731195a..d57472ea077f 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -661,22 +661,6 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa) wait_for_completion(&ipa->completion); } -void ipa_cmd_pipeline_clear(struct ipa *ipa) -{ - u32 count = ipa_cmd_pipeline_clear_count(); - struct gsi_trans *trans; - - trans = ipa_cmd_trans_alloc(ipa, count); - if (trans) { - ipa_cmd_pipeline_clear_add(trans); - gsi_trans_commit_wait(trans); - ipa_cmd_pipeline_clear_wait(ipa); - } else { - dev_err(&ipa->pdev->dev, - "error allocating %u entry tag transaction\n", count); - } -} - static struct ipa_cmd_info * ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count) { diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index 69cd085d427d..05ed7e42e184 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -164,12 +164,6 @@ u32 ipa_cmd_pipeline_clear_count(void); void ipa_cmd_pipeline_clear_wait(struct ipa *ipa); /** - * ipa_cmd_pipeline_clear() - Clear the hardware pipeline - * @ipa: - IPA pointer - */ -void ipa_cmd_pipeline_clear(struct ipa *ipa); - -/** * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint * @ipa: IPA pointer * @tre_count: Number of elements in the transaction diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 5528d97110d5..03a170993420 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -853,6 +853,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, u32 offset; u32 val; + /* This should only be changed when HOL_BLOCK_EN is disabled */ offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id); val = hol_block_timer_val(ipa, microseconds); iowrite32(val, ipa->reg_virt + offset); @@ -868,6 +869,9 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable) val = enable ? 
HOL_BLOCK_EN_FMASK : 0; offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id); iowrite32(val, endpoint->ipa->reg_virt + offset); + /* When enabling, the register must be written twice for IPA v4.5+ */ + if (enable && endpoint->ipa->version >= IPA_VERSION_4_5) + iowrite32(val, endpoint->ipa->reg_virt + offset); } void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) @@ -880,6 +884,7 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) continue; + ipa_endpoint_init_hol_block_enable(endpoint, false); ipa_endpoint_init_hol_block_timer(endpoint, 0); ipa_endpoint_init_hol_block_enable(endpoint, true); } @@ -1631,8 +1636,6 @@ void ipa_endpoint_suspend(struct ipa *ipa) if (ipa->modem_netdev) ipa_modem_suspend(ipa->modem_netdev); - ipa_cmd_pipeline_clear(ipa); - ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); } diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index cdfa98a76e1f..a448ec198bee 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -28,6 +28,7 @@ #include "ipa_reg.h" #include "ipa_mem.h" #include "ipa_table.h" +#include "ipa_smp2p.h" #include "ipa_modem.h" #include "ipa_uc.h" #include "ipa_interrupt.h" @@ -801,6 +802,11 @@ static int ipa_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; int ret; + /* Prevent the modem from triggering a call to ipa_setup(). This + * also ensures a modem-initiated setup that's underway completes. + */ + ipa_smp2p_irq_disable_setup(ipa); + ret = pm_runtime_get_sync(dev); if (WARN_ON(ret < 0)) goto out_power_put; diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index ad116bcc0580..d0ab4d70c303 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -339,9 +339,6 @@ int ipa_modem_stop(struct ipa *ipa) if (state != IPA_MODEM_STATE_RUNNING) return -EBUSY; - /* Prevent the modem from triggering a call to ipa_setup() */ - ipa_smp2p_disable(ipa); - /* Clean up the netdev and endpoints if it was started */ if (netdev) { struct ipa_priv *priv = netdev_priv(netdev); @@ -369,6 +366,9 @@ static void ipa_modem_crashed(struct ipa *ipa) struct device *dev = &ipa->pdev->dev; int ret; + /* Prevent the modem from triggering a call to ipa_setup() */ + ipa_smp2p_irq_disable_setup(ipa); + ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "error %d getting power to handle crash\n", ret); diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c index e3da95d69409..06cec7199382 100644 --- a/drivers/net/ipa/ipa_resource.c +++ b/drivers/net/ipa/ipa_resource.c @@ -52,7 +52,7 @@ static bool ipa_resource_limits_valid(struct ipa *ipa, return false; } - group_count = data->rsrc_group_src_count; + group_count = data->rsrc_group_dst_count; if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX) return false; diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c index df7639c39d71..211233612039 100644 --- a/drivers/net/ipa/ipa_smp2p.c +++ b/drivers/net/ipa/ipa_smp2p.c @@ -53,7 +53,7 @@ * @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready * @power_on: Whether IPA power is on * @notified: Whether modem has been notified of power state - * @disabled: Whether setup ready interrupt handling is disabled + * @setup_disabled: Whether setup ready interrupt handler is disabled * @mutex: Mutex protecting ready-interrupt/shutdown interlock * @panic_notifier: 
Panic notifier structure */ @@ -67,7 +67,7 @@ struct ipa_smp2p { u32 setup_ready_irq; bool power_on; bool notified; - bool disabled; + bool setup_disabled; struct mutex mutex; struct notifier_block panic_notifier; }; @@ -155,11 +155,9 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id) struct device *dev; int ret; - mutex_lock(&smp2p->mutex); - - if (smp2p->disabled) - goto out_mutex_unlock; - smp2p->disabled = true; /* If any others arrive, ignore them */ + /* Ignore any (spurious) interrupts received after the first */ + if (smp2p->ipa->setup_complete) + return IRQ_HANDLED; /* Power needs to be active for setup */ dev = &smp2p->ipa->pdev->dev; @@ -176,8 +174,6 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id) out_power_put: pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); -out_mutex_unlock: - mutex_unlock(&smp2p->mutex); return IRQ_HANDLED; } @@ -313,7 +309,7 @@ void ipa_smp2p_exit(struct ipa *ipa) kfree(smp2p); } -void ipa_smp2p_disable(struct ipa *ipa) +void ipa_smp2p_irq_disable_setup(struct ipa *ipa) { struct ipa_smp2p *smp2p = ipa->smp2p; @@ -322,7 +318,10 @@ void ipa_smp2p_disable(struct ipa *ipa) mutex_lock(&smp2p->mutex); - smp2p->disabled = true; + if (!smp2p->setup_disabled) { + disable_irq(smp2p->setup_ready_irq); + smp2p->setup_disabled = true; + } mutex_unlock(&smp2p->mutex); } diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h index 99a956789638..59cee31a7383 100644 --- a/drivers/net/ipa/ipa_smp2p.h +++ b/drivers/net/ipa/ipa_smp2p.h @@ -27,13 +27,12 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init); void ipa_smp2p_exit(struct ipa *ipa); /** - * ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling + * ipa_smp2p_irq_disable_setup() - Disable the "setup ready" interrupt * @ipa: IPA pointer * - * Prevent handling of the "setup ready" interrupt from the modem. - * This is used before initiating shutdown of the driver. + * Disable the "ipa-setup-ready" interrupt from the modem. 
*/ -void ipa_smp2p_disable(struct ipa *ipa); +void ipa_smp2p_irq_disable_setup(struct ipa *ipa); /** * ipa_smp2p_notify_reset() - Reset modem notification state diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index cad820568f75..966c3b4ad59d 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, + !(ctrl & ASPEED_MDIO_CTRL_FIRE), + ASPEED_MDIO_INTERVAL_US, + ASPEED_MDIO_TIMEOUT_US); + if (rc < 0) + return rc; + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data, data & ASPEED_MDIO_DATA_IDLE, ASPEED_MDIO_INTERVAL_US, diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 3ad7397b8119..5904546acae6 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -710,6 +710,7 @@ static void phylink_resolve(struct work_struct *w) struct phylink_link_state link_state; struct net_device *ndev = pl->netdev; bool mac_config = false; + bool retrigger = false; bool cur_link_state; mutex_lock(&pl->state_mutex); @@ -723,6 +724,7 @@ static void phylink_resolve(struct work_struct *w) link_state.link = false; } else if (pl->mac_link_dropped) { link_state.link = false; + retrigger = true; } else { switch (pl->cur_link_an_mode) { case MLO_AN_PHY: @@ -739,6 +741,19 @@ static void phylink_resolve(struct work_struct *w) case MLO_AN_INBAND: phylink_mac_pcs_get_state(pl, &link_state); + /* The PCS may have a latching link-fail indicator. + * If the link was up, bring the link down and + * re-trigger the resolve. Otherwise, re-read the + * PCS state to get the current status of the link. + */ + if (!link_state.link) { + if (cur_link_state) + retrigger = true; + else + phylink_mac_pcs_get_state(pl, + &link_state); + } + /* If we have a phy, the "up" state is the union of * both the PHY and the MAC */ @@ -747,6 +762,15 @@ static void phylink_resolve(struct work_struct *w) /* Only update if the PHY link is up */ if (pl->phydev && pl->phy_state.link) { + /* If the interface has changed, force a + * link down event if the link isn't already + * down, and re-resolve. + */ + if (link_state.interface != + pl->phy_state.interface) { + retrigger = true; + link_state.link = false; + } link_state.interface = pl->phy_state.interface; /* If we have a PHY, we need to update with @@ -789,7 +813,7 @@ static void phylink_resolve(struct work_struct *w) else phylink_link_up(pl, link_state); } - if (!link_state.link && pl->mac_link_dropped) { + if (!link_state.link && retrigger) { pl->mac_link_dropped = false; queue_work(system_power_efficient_wq, &pl->resolve); } diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h index c420e5948522..3d7f88b330c1 100644 --- a/drivers/net/slip/slip.h +++ b/drivers/net/slip/slip.h @@ -40,6 +40,8 @@ insmod -oslip_maxdev=nnn */ #define SL_MTU 296 /* 296; I am used to 600- FvK */ +/* some arch define END as assembly function ending, just undef it */ +#undef END /* SLIP protocol characters. 
*/ #define END 0300 /* indicates end of frame */ #define ESC 0333 /* indicates byte stuffing */ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index fecc9a1d293a..1572878c3403 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1010,6 +1010,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); int txq = skb->queue_mapping; + struct netdev_queue *queue; struct tun_file *tfile; int len = skb->len; @@ -1054,6 +1055,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) if (ptr_ring_produce(&tfile->tx_ring, skb)) goto drop; + /* NETIF_F_LLTX requires to do our own update of trans_start */ + queue = netdev_get_tx_queue(dev, txq); + queue->trans_start = jiffies; + /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 4a02f33f0643..f9877a3e83ac 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -9603,12 +9603,9 @@ static int rtl8152_probe(struct usb_interface *intf, netdev->hw_features &= ~NETIF_F_RXCSUM; } - if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) { - switch (le16_to_cpu(udev->descriptor.idProduct)) { - case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2: - case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2: - tp->lenovo_macpassthru = 1; - } + if (udev->parent && + le16_to_cpu(udev->parent->descriptor.idVendor) == VENDOR_ID_LENOVO) { + tp->lenovo_macpassthru = 1; } if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial && diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 20fe4cd8f784..abe0149ed917 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1050,6 +1050,14 @@ static const struct net_device_ops smsc95xx_netdev_ops = { .ndo_set_features = smsc95xx_set_features, }; +static void smsc95xx_handle_link_change(struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + phy_print_status(net->phydev); + usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); +} + static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata; @@ -1154,6 +1162,17 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->min_mtu = ETH_MIN_MTU; dev->net->max_mtu = ETH_DATA_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + + ret = phy_connect_direct(dev->net, pdata->phydev, + &smsc95xx_handle_link_change, + PHY_INTERFACE_MODE_MII); + if (ret) { + netdev_err(dev->net, "can't attach PHY to %s\n", pdata->mdiobus->id); + goto unregister_mdio; + } + + phy_attached_info(dev->net->phydev); + return 0; unregister_mdio: @@ -1171,47 +1190,25 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata = dev->driver_priv; + phy_disconnect(dev->net->phydev); mdiobus_unregister(pdata->mdiobus); mdiobus_free(pdata->mdiobus); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); } -static void smsc95xx_handle_link_change(struct net_device *net) -{ - struct usbnet *dev = netdev_priv(net); - - phy_print_status(net->phydev); - usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); -} - static int smsc95xx_start_phy(struct usbnet *dev) { - struct smsc95xx_priv *pdata = dev->driver_priv; - struct net_device *net = dev->net; - int ret; + phy_start(dev->net->phydev); - ret = smsc95xx_reset(dev); - if (ret < 0) - return ret; - - ret = phy_connect_direct(net, pdata->phydev, - 
&smsc95xx_handle_link_change, - PHY_INTERFACE_MODE_MII); - if (ret) { - netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id); - return ret; - } - - phy_attached_info(net->phydev); - phy_start(net->phydev); return 0; } -static int smsc95xx_disconnect_phy(struct usbnet *dev) +static int smsc95xx_stop(struct usbnet *dev) { - phy_stop(dev->net->phydev); - phy_disconnect(dev->net->phydev); + if (dev->net->phydev) + phy_stop(dev->net->phydev); + return 0; } @@ -1966,7 +1963,7 @@ static const struct driver_info smsc95xx_info = { .unbind = smsc95xx_unbind, .link_reset = smsc95xx_link_reset, .reset = smsc95xx_start_phy, - .stop = smsc95xx_disconnect_phy, + .stop = smsc95xx_stop, .rx_fixup = smsc95xx_rx_fixup, .tx_fixup = smsc95xx_tx_fixup, .status = smsc95xx_status, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 1771d6e5224f..55db6a336f7e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3423,7 +3423,6 @@ static struct virtio_driver virtio_net_driver = { .feature_table_size = ARRAY_SIZE(features), .feature_table_legacy = features_legacy, .feature_table_size_legacy = ARRAY_SIZE(features_legacy), - .suppress_used_validation = true, .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c index 221fa3bb8705..f577449e4935 100644 --- a/drivers/nfc/virtual_ncidev.c +++ b/drivers/nfc/virtual_ncidev.c @@ -202,7 +202,7 @@ static int __init virtual_ncidev_init(void) miscdev.minor = MISC_DYNAMIC_MINOR; miscdev.name = "virtual_nci"; miscdev.fops = &virtual_ncidev_fops; - miscdev.mode = S_IALLUGO; + miscdev.mode = 0600; return misc_register(&miscdev); } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 4b5de8f5435a..4c63564adeaa 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -895,10 +895,19 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); cmnd->write_zeroes.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); - if (nvme_ns_has_pi(ns)) + + if (nvme_ns_has_pi(ns)) { cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT); - else - cmnd->write_zeroes.control = 0; + + switch (ns->pi_type) { + case NVME_NS_DPS_PI_TYPE1: + case NVME_NS_DPS_PI_TYPE2: + cmnd->write_zeroes.reftag = + cpu_to_le32(t10_pi_ref_tag(req)); + break; + } + } + return BLK_STS_OK; } @@ -2469,6 +2478,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = { .vid = 0x14a4, .fr = "22301111", .quirks = NVME_QUIRK_SIMPLE_SUSPEND, + }, + { + /* + * This Kioxia CD6-V Series / HPE PE8030 device times out and + * aborts I/O during any load, but more easily reproducible + * with discards (fstrim). + * + * The device is left in a state where it is also not possible + * to use "nvme set-feature" to disable APST, but booting with + * nvme_core.default_ps_max_latency=0 works. 
+ */ + .vid = 0x1e0f, + .mn = "KCD6XVUL6T40", + .quirks = NVME_QUIRK_NO_APST, } }; diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index c5a2b71c5268..282d54117e0a 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -698,6 +698,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, if (token >= 0) pr_warn("I/O fail on reconnect controller after %d sec\n", token); + else + token = -1; + opts->fast_io_fail_tmo = token; break; case NVMF_OPT_HOSTNQN: diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 33bc83d8d992..4ceb28675fdf 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -572,7 +572,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, return ret; } -static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, +static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, struct nvme_tcp_r2t_pdu *pdu) { struct nvme_tcp_data_pdu *data = req->pdu; @@ -581,32 +581,11 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, u8 hdgst = nvme_tcp_hdgst_len(queue); u8 ddgst = nvme_tcp_ddgst_len(queue); + req->state = NVME_TCP_SEND_H2C_PDU; + req->offset = 0; req->pdu_len = le32_to_cpu(pdu->r2t_length); req->pdu_sent = 0; - if (unlikely(!req->pdu_len)) { - dev_err(queue->ctrl->ctrl.device, - "req %d r2t len is %u, probably a bug...\n", - rq->tag, req->pdu_len); - return -EPROTO; - } - - if (unlikely(req->data_sent + req->pdu_len > req->data_len)) { - dev_err(queue->ctrl->ctrl.device, - "req %d r2t len %u exceeded data len %u (%zu sent)\n", - rq->tag, req->pdu_len, req->data_len, - req->data_sent); - return -EPROTO; - } - - if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { - dev_err(queue->ctrl->ctrl.device, - "req %d unexpected r2t offset %u (expected %zu)\n", - rq->tag, le32_to_cpu(pdu->r2t_offset), - req->data_sent); - return -EPROTO; - } - memset(data, 0, sizeof(*data)); data->hdr.type = nvme_tcp_h2c_data; data->hdr.flags = NVME_TCP_F_DATA_LAST; @@ -622,7 +601,6 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, data->command_id = nvme_cid(rq); data->data_offset = pdu->r2t_offset; data->data_length = cpu_to_le32(req->pdu_len); - return 0; } static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, @@ -630,7 +608,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, { struct nvme_tcp_request *req; struct request *rq; - int ret; + u32 r2t_length = le32_to_cpu(pdu->r2t_length); rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); if (!rq) { @@ -641,13 +619,28 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, } req = blk_mq_rq_to_pdu(rq); - ret = nvme_tcp_setup_h2c_data_pdu(req, pdu); - if (unlikely(ret)) - return ret; + if (unlikely(!r2t_length)) { + dev_err(queue->ctrl->ctrl.device, + "req %d r2t len is %u, probably a bug...\n", + rq->tag, r2t_length); + return -EPROTO; + } - req->state = NVME_TCP_SEND_H2C_PDU; - req->offset = 0; + if (unlikely(req->data_sent + r2t_length > req->data_len)) { + dev_err(queue->ctrl->ctrl.device, + "req %d r2t len %u exceeded data len %u (%zu sent)\n", + rq->tag, r2t_length, req->data_len, req->data_sent); + return -EPROTO; + } + if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { + dev_err(queue->ctrl->ctrl.device, + "req %d unexpected r2t offset %u (expected %zu)\n", + rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent); + return -EPROTO; + } + + nvme_tcp_setup_h2c_data_pdu(req, pdu); nvme_tcp_queue_request(req, false, true); return 0; @@ 
-1232,6 +1225,7 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) { + struct page *page; struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; @@ -1241,6 +1235,11 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) if (queue->hdr_digest || queue->data_digest) nvme_tcp_free_crypto(queue); + if (queue->pf_cache.va) { + page = virt_to_head_page(queue->pf_cache.va); + __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); + queue->pf_cache.va = NULL; + } sock_release(queue->sock); kfree(queue->pdu); mutex_destroy(&queue->send_mutex); diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 6aa30f30b572..6be6e59d273b 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -8,6 +8,7 @@ #include <linux/uio.h> #include <linux/falloc.h> #include <linux/file.h> +#include <linux/fs.h> #include "nvmet.h" #define NVMET_MAX_MPOOL_BVEC 16 @@ -266,7 +267,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req) if (req->ns->buffered_io) { if (likely(!req->f.mpool_alloc) && - nvmet_file_execute_io(req, IOCB_NOWAIT)) + (req->ns->file->f_mode & FMODE_NOWAIT) && + nvmet_file_execute_io(req, IOCB_NOWAIT)) return; nvmet_file_submit_buffered_io(req); } else diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 84c387e4bf43..cb6a473c3eaf 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -166,6 +166,8 @@ static struct workqueue_struct *nvmet_tcp_wq; static const struct nvmet_fabrics_ops nvmet_tcp_ops; static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c); static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd); +static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd); +static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd); static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *cmd) @@ -297,6 +299,16 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu) return 0; } +static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd) +{ + WARN_ON(unlikely(cmd->nr_mapped > 0)); + + kfree(cmd->iov); + sgl_free(cmd->req.sg); + cmd->iov = NULL; + cmd->req.sg = NULL; +} + static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd) { struct scatterlist *sg; @@ -306,6 +318,8 @@ static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd) for (i = 0; i < cmd->nr_mapped; i++) kunmap(sg_page(&sg[i])); + + cmd->nr_mapped = 0; } static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) @@ -387,7 +401,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) return 0; err: - sgl_free(cmd->req.sg); + nvmet_tcp_free_cmd_buffers(cmd); return NVME_SC_INTERNAL; } @@ -632,10 +646,8 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) } } - if (queue->nvme_sq.sqhd_disabled) { - kfree(cmd->iov); - sgl_free(cmd->req.sg); - } + if (queue->nvme_sq.sqhd_disabled) + nvmet_tcp_free_cmd_buffers(cmd); return 1; @@ -664,8 +676,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, if (left) return -EAGAIN; - kfree(cmd->iov); - sgl_free(cmd->req.sg); + nvmet_tcp_free_cmd_buffers(cmd); cmd->queue->snd_cmd = NULL; nvmet_tcp_put_cmd(cmd); return 1; @@ -700,10 +711,11 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) { struct nvmet_tcp_queue 
*queue = cmd->queue; + int left = NVME_TCP_DIGEST_LENGTH - cmd->offset; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct kvec iov = { .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset, - .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset + .iov_len = left }; int ret; @@ -717,6 +729,10 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) return ret; cmd->offset += ret; + left -= ret; + + if (left) + return -EAGAIN; if (queue->nvme_sq.sqhd_disabled) { cmd->queue->snd_cmd = NULL; @@ -1406,8 +1422,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd) { nvmet_req_uninit(&cmd->req); nvmet_tcp_unmap_pdu_iovec(cmd); - kfree(cmd->iov); - sgl_free(cmd->req.sg); + nvmet_tcp_free_cmd_buffers(cmd); } static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) @@ -1417,7 +1432,10 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) for (i = 0; i < queue->nr_cmds; i++, cmd++) { if (nvmet_tcp_need_data_in(cmd)) - nvmet_tcp_finish_cmd(cmd); + nvmet_req_uninit(&cmd->req); + + nvmet_tcp_unmap_pdu_iovec(cmd); + nvmet_tcp_free_cmd_buffers(cmd); } if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) { @@ -1437,7 +1455,9 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w) mutex_unlock(&nvmet_tcp_queue_mutex); nvmet_tcp_restore_socket_callbacks(queue); - flush_work(&queue->io_work); + cancel_work_sync(&queue->io_work); + /* stop accepting incoming data */ + queue->rcv_state = NVMET_TCP_RECV_ERR; nvmet_tcp_uninit_data_in_cmds(queue); nvmet_sq_destroy(&queue->nvme_sq); diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 32be5a03951f..b10f015b2e37 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -161,9 +161,10 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) * if it is then we are done, unless there is an * interrupt-map which takes precedence. */ + bool intc = of_property_read_bool(ipar, "interrupt-controller"); + imap = of_get_property(ipar, "interrupt-map", &imaplen); - if (imap == NULL && - of_property_read_bool(ipar, "interrupt-controller")) { + if (imap == NULL && intc) { pr_debug(" -> got it !\n"); return 0; } @@ -244,8 +245,20 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) pr_debug(" -> imaplen=%d\n", imaplen); } - if (!match) + if (!match) { + if (intc) { + /* + * The PASEMI Nemo is a known offender, so + * let's only warn for anyone else. + */ + WARN(!IS_ENABLED(CONFIG_PPC_PASEMI), + "%pOF interrupt-map failed, using interrupt-controller\n", + ipar); + return 0; + } + goto fail; + } /* * Successfully parsed an interrrupt-map translation; copy new diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 07813fb1ef37..b3faf89744aa 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -76,6 +76,7 @@ static void of_device_make_bus_id(struct device *dev) struct device_node *node = dev->of_node; const __be32 *reg; u64 addr; + u32 mask; /* Construct the name, using parent nodes if necessary to ensure uniqueness */ while (node->parent) { @@ -85,8 +86,13 @@ static void of_device_make_bus_id(struct device *dev) */ reg = of_get_property(node, "reg", NULL); if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) { - dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn", - addr, node, dev_name(dev)); + if (!of_property_read_u32(node, "mask", &mask)) + dev_set_name(dev, dev_name(dev) ? 
"%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn", + addr, ffs(mask) - 1, node, dev_name(dev)); + + else + dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn", + addr, node, dev_name(dev)); return; } diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index e917bb3652bb..93b141110537 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -270,7 +270,8 @@ config VMD config PCIE_BRCMSTB tristate "Broadcom Brcmstb PCIe host controller" - depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCM4908 || COMPILE_TEST + depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCM4908 || \ + BMIPS_GENERIC || COMPILE_TEST depends on OF depends on PCI_MSI_IRQ_DOMAIN default ARCH_BRCMSTB diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index dcefdb42ac46..a89b7de72dcf 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -57,6 +57,29 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) return zpci_deconfigure_device(zdev); } +static int reset_slot(struct hotplug_slot *hotplug_slot, bool probe) +{ + struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, + hotplug_slot); + + if (zdev->state != ZPCI_FN_STATE_CONFIGURED) + return -EIO; + /* + * We can't take the zdev->lock as reset_slot may be called during + * probing and/or device removal which already happens under the + * zdev->lock. Instead the user should use the higher level + * pci_reset_function() or pci_bus_reset() which hold the PCI device + * lock preventing concurrent removal. If not using these functions + * holding the PCI device lock is required. + */ + + /* As long as the function is configured we can reset */ + if (probe) + return 0; + + return zpci_hot_reset_device(zdev); +} + static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, @@ -76,6 +99,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) static const struct hotplug_slot_ops s390_hotplug_slot_ops = { .enable_slot = enable_slot, .disable_slot = disable_slot, + .reset_slot = reset_slot, .get_power_status = get_power_status, .get_adapter_status = get_adapter_status, }; diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 12e296d634eb..48e3f4e47b29 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -148,6 +148,9 @@ static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 s raw_spinlock_t *lock = &desc->dev->msi_lock; unsigned long flags; + if (!desc->msi_attrib.can_mask) + return; + raw_spin_lock_irqsave(lock, flags); desc->msi_mask &= ~clear; desc->msi_mask |= set; @@ -181,7 +184,8 @@ static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl) { void __iomem *desc_addr = pci_msix_desc_addr(desc); - writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL); + if (desc->msi_attrib.can_mask) + writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL); } static inline void pci_msix_mask(struct msi_desc *desc) @@ -200,23 +204,17 @@ static inline void pci_msix_unmask(struct msi_desc *desc) static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask) { - if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual) - return; - if (desc->msi_attrib.is_msix) pci_msix_mask(desc); - else if (desc->msi_attrib.maskbit) + else pci_msi_mask(desc, mask); } static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask) { - if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual) - return; - if 
(desc->msi_attrib.is_msix) pci_msix_unmask(desc); - else if (desc->msi_attrib.maskbit) + else pci_msi_unmask(desc, mask); } @@ -370,6 +368,11 @@ static void free_msi_irqs(struct pci_dev *dev) for (i = 0; i < entry->nvec_used; i++) BUG_ON(irq_has_action(entry->irq + i)); + if (dev->msi_irq_groups) { + msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups); + dev->msi_irq_groups = NULL; + } + pci_msi_teardown_msi_irqs(dev); list_for_each_entry_safe(entry, tmp, msi_list, list) { @@ -381,11 +384,6 @@ static void free_msi_irqs(struct pci_dev *dev) list_del(&entry->list); free_msi_entry(entry); } - - if (dev->msi_irq_groups) { - msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups); - dev->msi_irq_groups = NULL; - } } static void pci_intx_for_msi(struct pci_dev *dev, int enable) @@ -479,12 +477,16 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd) goto out; pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); + /* Lies, damned lies, and MSIs */ + if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING) + control |= PCI_MSI_FLAGS_MASKBIT; entry->msi_attrib.is_msix = 0; entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT); entry->msi_attrib.is_virtual = 0; entry->msi_attrib.entry_nr = 0; - entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); + entry->msi_attrib.can_mask = !pci_msi_ignore_mask && + !!(control & PCI_MSI_FLAGS_MASKBIT); entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); @@ -495,7 +497,7 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd) entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32; /* Save the initial mask status */ - if (entry->msi_attrib.maskbit) + if (entry->msi_attrib.can_mask) pci_read_config_dword(dev, entry->mask_pos, &entry->msi_mask); out: @@ -639,10 +641,13 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, entry->msi_attrib.is_virtual = entry->msi_attrib.entry_nr >= vec_count; + entry->msi_attrib.can_mask = !pci_msi_ignore_mask && + !entry->msi_attrib.is_virtual; + entry->msi_attrib.default_irq = dev->irq; entry->mask_base = base; - if (!entry->msi_attrib.is_virtual) { + if (entry->msi_attrib.can_mask) { addr = pci_msix_desc_addr(entry); entry->msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 1579a3724eb4..3d2fb394986a 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5106,12 +5106,13 @@ static int pci_reset_bus_function(struct pci_dev *dev, bool probe) return pci_parent_bus_reset(dev, probe); } -static void pci_dev_lock(struct pci_dev *dev) +void pci_dev_lock(struct pci_dev *dev) { pci_cfg_access_lock(dev); /* block PM suspend, driver probe, etc. 
*/ device_lock(&dev->dev); } +EXPORT_SYMBOL_GPL(pci_dev_lock); /* Return 1 on successful lock, 0 on contention */ int pci_dev_trylock(struct pci_dev *dev) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index aedb78c86ddc..003950c738d2 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5851,3 +5851,9 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2303, pci_fixup_pericom_acs_store_forward); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2303, pci_fixup_pericom_acs_store_forward); + +static void nvidia_ion_ahci_fixup(struct pci_dev *pdev) +{ + pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0ab8, nvidia_ion_ahci_fixup); diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index bae9d429b813..ecab9064a845 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -598,14 +598,14 @@ static struct irq_chip amd_gpio_irqchip = { #define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF)) -static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) +static bool do_amd_gpio_irq_handler(int irq, void *dev_id) { struct amd_gpio *gpio_dev = dev_id; struct gpio_chip *gc = &gpio_dev->gc; - irqreturn_t ret = IRQ_NONE; unsigned int i, irqnr; unsigned long flags; u32 __iomem *regs; + bool ret = false; u32 regval; u64 status, mask; @@ -627,6 +627,14 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) /* Each status bit covers four pins */ for (i = 0; i < 4; i++) { regval = readl(regs + i); + /* caused wake on resume context for shared IRQ */ + if (irq < 0 && (regval & BIT(WAKE_STS_OFF))) { + dev_dbg(&gpio_dev->pdev->dev, + "Waking due to GPIO %d: 0x%x", + irqnr + i, regval); + return true; + } + if (!(regval & PIN_IRQ_PENDING) || !(regval & BIT(INTERRUPT_MASK_OFF))) continue; @@ -650,9 +658,12 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) } writel(regval, regs + i); raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); - ret = IRQ_HANDLED; + ret = true; } } + /* did not cause wake on resume context for shared IRQ */ + if (irq < 0) + return false; /* Signal EOI to the GPIO unit */ raw_spin_lock_irqsave(&gpio_dev->lock, flags); @@ -664,6 +675,16 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) return ret; } +static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) +{ + return IRQ_RETVAL(do_amd_gpio_irq_handler(irq, dev_id)); +} + +static bool __maybe_unused amd_gpio_check_wake(void *dev_id) +{ + return do_amd_gpio_irq_handler(-1, dev_id); +} + static int amd_get_groups_count(struct pinctrl_dev *pctldev) { struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctldev); @@ -1033,6 +1054,7 @@ static int amd_gpio_probe(struct platform_device *pdev) goto out2; platform_set_drvdata(pdev, gpio_dev); + acpi_register_wakeup_handler(gpio_dev->irq, amd_gpio_check_wake, gpio_dev); dev_dbg(&pdev->dev, "amd gpio driver loaded\n"); return ret; @@ -1050,6 +1072,7 @@ static int amd_gpio_remove(struct platform_device *pdev) gpio_dev = platform_get_drvdata(pdev); gpiochip_remove(&gpio_dev->gc); + acpi_unregister_wakeup_handler(amd_gpio_check_wake, gpio_dev); return 0; } diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c index 0cc346bfc4c3..a7861079a650 100644 --- a/drivers/pinctrl/pinctrl-apple-gpio.c +++ b/drivers/pinctrl/pinctrl-apple-gpio.c @@ -258,7 +258,7 @@ static void apple_gpio_irq_ack(struct irq_data *data) pctl->base + REG_IRQ(irqgrp, data->hwirq)); } -static int 
apple_gpio_irq_type(unsigned int type) +static unsigned int apple_gpio_irq_type(unsigned int type) { switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_RISING: @@ -272,7 +272,7 @@ static int apple_gpio_irq_type(unsigned int type) case IRQ_TYPE_LEVEL_LOW: return REG_GPIOx_IN_IRQ_LO; default: - return -EINVAL; + return REG_GPIOx_IN_IRQ_OFF; } } @@ -288,7 +288,7 @@ static void apple_gpio_irq_unmask(struct irq_data *data) { struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data)); - int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data)); + unsigned int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data)); apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE, FIELD_PREP(REG_GPIOx_MODE, irqtype)); @@ -313,10 +313,10 @@ static int apple_gpio_irq_set_type(struct irq_data *data, { struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data)); - int irqtype = apple_gpio_irq_type(type); + unsigned int irqtype = apple_gpio_irq_type(type); - if (irqtype < 0) - return irqtype; + if (irqtype == REG_GPIOx_IN_IRQ_OFF) + return -EINVAL; apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE, FIELD_PREP(REG_GPIOx_MODE, irqtype)); diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index b9191f1abb1c..3e0c00766f59 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -197,6 +197,7 @@ config PINCTRL_QCOM_SPMI_PMIC select PINMUX select PINCONF select GENERIC_PINCONF + select GPIOLIB select GPIOLIB_IRQCHIP select IRQ_DOMAIN_HIERARCHY help @@ -211,6 +212,7 @@ config PINCTRL_QCOM_SSBI_PMIC select PINMUX select PINCONF select GENERIC_PINCONF + select GPIOLIB select GPIOLIB_IRQCHIP select IRQ_DOMAIN_HIERARCHY help diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c index c51793f6546f..fdfd7b8f3a76 100644 --- a/drivers/pinctrl/qcom/pinctrl-sdm845.c +++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c @@ -1310,6 +1310,7 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = { .ngpios = 151, .wakeirq_map = sdm845_pdc_map, .nwakeirq_map = ARRAY_SIZE(sdm845_pdc_map), + .wakeirq_dual_edge_errata = true, }; static const struct msm_pinctrl_soc_data sdm845_acpi_pinctrl = { diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c index 4d8f8636c2b3..1c042d39380c 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8350.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c @@ -1597,10 +1597,10 @@ static const struct msm_pingroup sm8350_groups[] = { [200] = PINGROUP(200, qdss_gpio, _, _, _, _, _, _, _, _), [201] = PINGROUP(201, _, _, _, _, _, _, _, _, _), [202] = PINGROUP(202, _, _, _, _, _, _, _, _, _), - [203] = UFS_RESET(ufs_reset, 0x1d8000), - [204] = SDC_PINGROUP(sdc2_clk, 0x1cf000, 14, 6), - [205] = SDC_PINGROUP(sdc2_cmd, 0x1cf000, 11, 3), - [206] = SDC_PINGROUP(sdc2_data, 0x1cf000, 9, 0), + [203] = UFS_RESET(ufs_reset, 0xd8000), + [204] = SDC_PINGROUP(sdc2_clk, 0xcf000, 14, 6), + [205] = SDC_PINGROUP(sdc2_cmd, 0xcf000, 11, 3), + [206] = SDC_PINGROUP(sdc2_data, 0xcf000, 9, 0), }; static const struct msm_gpio_wakeirq_map sm8350_pdc_map[] = { diff --git a/drivers/pinctrl/ralink/pinctrl-mt7620.c b/drivers/pinctrl/ralink/pinctrl-mt7620.c index 425d55a2ee19..6853b5b8b0fe 100644 --- a/drivers/pinctrl/ralink/pinctrl-mt7620.c +++ b/drivers/pinctrl/ralink/pinctrl-mt7620.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <asm/mach-ralink/ralink_regs.h> #include <asm/mach-ralink/mt7620.h> #include <linux/module.h> #include 
<linux/platform_device.h> diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index 8d734bfc33d2..50bd26a30ac0 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -275,7 +275,7 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev, return 0; } -static struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev, +static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev, unsigned int offset) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); @@ -289,7 +289,7 @@ static struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctlde continue; for (j = 0; j < num_pins; j++) { if (offset == pins[j]) - return (struct tegra_pingroup *)&pmx->soc->groups[group]; + return &pmx->soc->groups[group]; } } diff --git a/drivers/pinctrl/tegra/pinctrl-tegra194.c b/drivers/pinctrl/tegra/pinctrl-tegra194.c index b4fef9185d88..5c1dfcb46749 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra194.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra194.c @@ -1387,7 +1387,6 @@ static struct tegra_function tegra194_functions[] = { .schmitt_bit = schmitt_b, \ .drvtype_bit = 13, \ .lpdr_bit = e_lpdr, \ - .drv_reg = -1, \ #define drive_touch_clk_pcc4 DRV_PINGROUP_ENTRY_Y(0x2004, 12, 5, 20, 5, -1, -1, -1, -1, 1) #define drive_uart3_rx_pcc6 DRV_PINGROUP_ENTRY_Y(0x200c, 12, 5, 20, 5, -1, -1, -1, -1, 1) diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c index 9d1e7e03628e..4020b8354bae 100644 --- a/drivers/platform/chrome/cros_ec_ishtp.c +++ b/drivers/platform/chrome/cros_ec_ishtp.c @@ -41,9 +41,12 @@ enum cros_ec_ish_channel { #define ISHTP_SEND_TIMEOUT (3 * HZ) /* ISH Transport CrOS EC ISH client unique GUID */ -static const guid_t cros_ish_guid = - GUID_INIT(0x7b7154d0, 0x56f4, 0x4bdc, - 0xb0, 0xd8, 0x9e, 0x7c, 0xda, 0xe0, 0xd6, 0xa0); +static const struct ishtp_device_id cros_ec_ishtp_id_table[] = { + { .guid = GUID_INIT(0x7b7154d0, 0x56f4, 0x4bdc, + 0xb0, 0xd8, 0x9e, 0x7c, 0xda, 0xe0, 0xd6, 0xa0), }, + { } +}; +MODULE_DEVICE_TABLE(ishtp, cros_ec_ishtp_id_table); struct header { u8 channel; @@ -389,7 +392,7 @@ static int cros_ish_init(struct ishtp_cl *cros_ish_cl) ishtp_set_tx_ring_size(cros_ish_cl, CROS_ISH_CL_TX_RING_SIZE); ishtp_set_rx_ring_size(cros_ish_cl, CROS_ISH_CL_RX_RING_SIZE); - fw_client = ishtp_fw_cl_get_client(dev, &cros_ish_guid); + fw_client = ishtp_fw_cl_get_client(dev, &cros_ec_ishtp_id_table[0].guid); if (!fw_client) { dev_err(cl_data_to_dev(client_data), "ish client uuid not found\n"); @@ -765,7 +768,7 @@ static SIMPLE_DEV_PM_OPS(cros_ec_ishtp_pm_ops, cros_ec_ishtp_suspend, static struct ishtp_cl_driver cros_ec_ishtp_driver = { .name = "cros_ec_ishtp", - .guid = &cros_ish_guid, + .id = cros_ec_ishtp_id_table, .probe = cros_ec_ishtp_probe, .remove = cros_ec_ishtp_remove, .reset = cros_ec_ishtp_reset, @@ -791,4 +794,3 @@ MODULE_DESCRIPTION("ChromeOS EC ISHTP Client Driver"); MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("ishtp:*"); diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c index 0b7f58feb701..c897a2f15840 100644 --- a/drivers/platform/mellanox/mlxreg-lc.c +++ b/drivers/platform/mellanox/mlxreg-lc.c @@ -413,7 +413,7 @@ mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotpl int size) { struct mlxreg_hotplug_device *dev = devs; - int i; + int i, ret; /* Create static I2C device 
feeding by auxiliary or main power. */ for (i = 0; i < size; i++, dev++) { @@ -423,6 +423,7 @@ mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotpl dev->brdinfo->type, dev->nr, dev->brdinfo->addr); dev->adapter = NULL; + ret = PTR_ERR(dev->client); goto fail_create_static_devices; } } @@ -435,7 +436,7 @@ fail_create_static_devices: i2c_unregister_device(dev->client); dev->client = NULL; } - return IS_ERR(dev->client); + return ret; } static void diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index d4c079f4afc6..7400bc5da5be 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -185,7 +185,7 @@ config ACER_WMI config AMD_PMC tristate "AMD SoC PMC driver" - depends on ACPI && PCI + depends on ACPI && PCI && RTC_CLASS help The driver provides support for AMD Power Management Controller primarily responsible for S2Idle transactions that are driven from diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig index 2fffa57e596e..fe224a54f24c 100644 --- a/drivers/platform/x86/dell/Kconfig +++ b/drivers/platform/x86/dell/Kconfig @@ -187,7 +187,7 @@ config DELL_WMI_AIO config DELL_WMI_DESCRIPTOR tristate - default m + default n depends on ACPI_WMI config DELL_WMI_LED diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index b183967ecfb7..435a91fe2568 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -331,9 +331,11 @@ static int lis3lv02d_probe(struct platform_device *device) INIT_WORK(&hpled_led.work, delayed_set_status_worker); ret = led_classdev_register(NULL, &hpled_led.led_classdev); if (ret) { + i8042_remove_filter(hp_accel_i8042_filter); lis3lv02d_joystick_disable(&lis3_dev); lis3lv02d_poweroff(&lis3_dev); flush_work(&hpled_led.work); + lis3lv02d_remove_fs(&lis3_dev); return ret; } diff --git a/drivers/platform/x86/intel/ishtp_eclite.c b/drivers/platform/x86/intel/ishtp_eclite.c index 12fc98a48657..93ac8b2dbf38 100644 --- a/drivers/platform/x86/intel/ishtp_eclite.c +++ b/drivers/platform/x86/intel/ishtp_eclite.c @@ -93,9 +93,12 @@ struct ishtp_opregion_dev { }; /* eclite ishtp client UUID: 6a19cc4b-d760-4de3-b14d-f25ebd0fbcd9 */ -static const guid_t ecl_ishtp_guid = - GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3, - 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9); +static const struct ishtp_device_id ecl_ishtp_id_table[] = { + { .guid = GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3, + 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9), }, + { } +}; +MODULE_DEVICE_TABLE(ishtp, ecl_ishtp_id_table); /* ACPI DSM UUID: 91d936a7-1f01-49c6-a6b4-72f00ad8d8a5 */ static const guid_t ecl_acpi_guid = @@ -462,7 +465,7 @@ static int ecl_ishtp_cl_init(struct ishtp_cl *ecl_ishtp_cl) ishtp_set_tx_ring_size(ecl_ishtp_cl, ECL_CL_TX_RING_SIZE); ishtp_set_rx_ring_size(ecl_ishtp_cl, ECL_CL_RX_RING_SIZE); - fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_guid); + fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_id_table[0].guid); if (!fw_client) { dev_err(cl_data_to_dev(opr_dev), "fw client not found\n"); return -ENOENT; @@ -674,7 +677,7 @@ static const struct dev_pm_ops ecl_ishtp_pm_ops = { static struct ishtp_cl_driver ecl_ishtp_cl_driver = { .name = "ishtp-eclite", - .guid = &ecl_ishtp_guid, + .id = ecl_ishtp_id_table, .probe = ecl_ishtp_cl_probe, .remove = ecl_ishtp_cl_remove, .reset = ecl_ishtp_cl_reset, @@ -698,4 +701,3 @@ MODULE_DESCRIPTION("ISH ISHTP eclite client opregion driver"); MODULE_AUTHOR("K Naduvalath, Sumesh <sumesh.k.naduvalath@intel.com>"); 
MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("ishtp:*"); diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 7ee010aa740a..c1d9ed9b7b67 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -152,7 +152,7 @@ struct sabi_config { static const struct sabi_config sabi_configs[] = { { - /* I don't know if it is really 2, but it it is + /* I don't know if it is really 2, but it is * less than 3 anyway */ .sabi_version = 2, diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c index 9472aae72df2..c4d9c45350f7 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/think-lmi.c @@ -888,8 +888,10 @@ static int tlmi_analyze(void) break; if (!item) break; - if (!*item) + if (!*item) { + kfree(item); continue; + } /* It is not allowed to have '/' for file name. Convert it into '\'. */ strreplace(item, '/', '\\'); @@ -902,6 +904,7 @@ static int tlmi_analyze(void) setting = kzalloc(sizeof(*setting), GFP_KERNEL); if (!setting) { ret = -ENOMEM; + kfree(item); goto fail_clear_attr; } setting->index = i; @@ -916,7 +919,6 @@ static int tlmi_analyze(void) } kobject_init(&setting->kobj, &tlmi_attr_setting_ktype); tlmi_priv.setting[i] = setting; - tlmi_priv.settings_count++; kfree(item); } @@ -983,7 +985,12 @@ static void tlmi_remove(struct wmi_device *wdev) static int tlmi_probe(struct wmi_device *wdev, const void *context) { - tlmi_analyze(); + int ret; + + ret = tlmi_analyze(); + if (ret) + return ret; + return tlmi_sysfs_init(); } diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h index f8e26823075f..2ce5086a5af2 100644 --- a/drivers/platform/x86/think-lmi.h +++ b/drivers/platform/x86/think-lmi.h @@ -55,7 +55,6 @@ struct tlmi_attr_setting { struct think_lmi { struct wmi_device *wmi_device; - int settings_count; bool can_set_bios_settings; bool can_get_bios_selections; bool can_set_bios_password; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 9c632df734bb..b3ac9c3f3b7c 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -1105,15 +1105,6 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk) return status; } -/* Query FW and update rfkill sw state for all rfkill switches */ -static void tpacpi_rfk_update_swstate_all(void) -{ - unsigned int i; - - for (i = 0; i < TPACPI_RFK_SW_MAX; i++) - tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]); -} - /* * Sync the HW-blocking state of all rfkill switches, * do notice it causes the rfkill core to schedule uevents @@ -3074,9 +3065,6 @@ static void tpacpi_send_radiosw_update(void) if (wlsw == TPACPI_RFK_RADIO_OFF) tpacpi_rfk_update_hwblock_state(true); - /* Sync sw blocking state */ - tpacpi_rfk_update_swstate_all(); - /* Sync hw blocking state last if it is hw-unblocked */ if (wlsw == TPACPI_RFK_RADIO_ON) tpacpi_rfk_update_hwblock_state(false); @@ -8766,6 +8754,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */ TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */ TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3nd gen) */ + TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (4nd gen) */ TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */ TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */ }; 
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c index 44faa3a74db6..b740866b228d 100644 --- a/drivers/powercap/dtpm_cpu.c +++ b/drivers/powercap/dtpm_cpu.c @@ -166,16 +166,13 @@ static struct dtpm_ops dtpm_ops = { static int cpuhp_dtpm_cpu_offline(unsigned int cpu) { - struct em_perf_domain *pd; struct dtpm_cpu *dtpm_cpu; - pd = em_cpu_get(cpu); - if (!pd) - return -EINVAL; - dtpm_cpu = per_cpu(dtpm_per_cpu, cpu); + if (dtpm_cpu) + dtpm_update_power(&dtpm_cpu->dtpm); - return dtpm_update_power(&dtpm_cpu->dtpm); + return 0; } static int cpuhp_dtpm_cpu_online(unsigned int cpu) diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c index 6bc5791a7ec5..08e429a06922 100644 --- a/drivers/ptp/ptp_clockmatrix.c +++ b/drivers/ptp/ptp_clockmatrix.c @@ -1699,12 +1699,9 @@ static int initialize_dco_operating_mode(struct idtcm_channel *channel) /* PTP Hardware Clock interface */ -/** +/* * Maximum absolute value for write phase offset in picoseconds * - * @channel: channel - * @delta_ns: delta in nanoseconds - * * Destination signed register is 32-bit register in resolution of 50ps * * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350 diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 34f943c8c9fd..0f1b5a7d2a89 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -1304,10 +1304,11 @@ ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r) if (!ext) return -ENOMEM; - err = -EINVAL; ext->mem = ptp_ocp_get_mem(bp, r); - if (!ext->mem) + if (IS_ERR(ext->mem)) { + err = PTR_ERR(ext->mem); goto out; + } ext->bp = bp; ext->info = r->extra; @@ -1371,8 +1372,8 @@ ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r) void __iomem *mem; mem = ptp_ocp_get_mem(bp, r); - if (!mem) - return -EINVAL; + if (IS_ERR(mem)) + return PTR_ERR(mem); bp_assign_entry(bp, r, mem); diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index aa29841bbb79..21e3b05a5153 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -476,7 +476,9 @@ config PWM_SAMSUNG depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST depends on HAS_IOMEM help - Generic PWM framework driver for Samsung. + Generic PWM framework driver for Samsung S3C24xx, S3C64xx, S5Pv210 + and Exynos SoCs. + Choose Y here only if you build for such Samsung SoC. To compile this driver as a module, choose M here: the module will be called pwm-samsung. diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 4527f09a5c50..fb04a439462c 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -532,6 +532,15 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state) struct pwm_chip *chip; int err; + /* + * Some lowlevel driver's implementations of .apply() make use of + * mutexes, also with some drivers only returning when the new + * configuration is active calling pwm_apply_state() from atomic context + * is a bad idea. So make it explicit that calling this function might + * sleep. 
+ */ + might_sleep(); + if (!pwm || !state || !state->period || state->duty_cycle > state->period) return -EINVAL; diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c index e748604403cc..98b34ea9f38e 100644 --- a/drivers/pwm/pwm-atmel.c +++ b/drivers/pwm/pwm-atmel.c @@ -24,7 +24,6 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/mutex.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c index dd94c4312a0c..0a4ff55fad04 100644 --- a/drivers/pwm/pwm-samsung.c +++ b/drivers/pwm/pwm-samsung.c @@ -117,6 +117,20 @@ static inline unsigned int to_tcon_channel(unsigned int channel) return (channel == 0) ? 0 : (channel + 1); } +static void __pwm_samsung_manual_update(struct samsung_pwm_chip *chip, + struct pwm_device *pwm) +{ + unsigned int tcon_chan = to_tcon_channel(pwm->hwpwm); + u32 tcon; + + tcon = readl(chip->base + REG_TCON); + tcon |= TCON_MANUALUPDATE(tcon_chan); + writel(tcon, chip->base + REG_TCON); + + tcon &= ~TCON_MANUALUPDATE(tcon_chan); + writel(tcon, chip->base + REG_TCON); +} + static void pwm_samsung_set_divisor(struct samsung_pwm_chip *pwm, unsigned int channel, u8 divisor) { @@ -276,6 +290,13 @@ static void pwm_samsung_disable(struct pwm_chip *chip, struct pwm_device *pwm) tcon &= ~TCON_AUTORELOAD(tcon_chan); writel(tcon, our_chip->base + REG_TCON); + /* + * In case the PWM is at 100% duty cycle, force a manual + * update to prevent the signal from staying high. + */ + if (readl(our_chip->base + REG_TCMPB(pwm->hwpwm)) == (u32)-1U) + __pwm_samsung_manual_update(our_chip, pwm); + our_chip->disabled_mask |= BIT(pwm->hwpwm); spin_unlock_irqrestore(&samsung_pwm_lock, flags); @@ -284,18 +305,11 @@ static void pwm_samsung_disable(struct pwm_chip *chip, struct pwm_device *pwm) static void pwm_samsung_manual_update(struct samsung_pwm_chip *chip, struct pwm_device *pwm) { - unsigned int tcon_chan = to_tcon_channel(pwm->hwpwm); - u32 tcon; unsigned long flags; spin_lock_irqsave(&samsung_pwm_lock, flags); - tcon = readl(chip->base + REG_TCON); - tcon |= TCON_MANUALUPDATE(tcon_chan); - writel(tcon, chip->base + REG_TCON); - - tcon &= ~TCON_MANUALUPDATE(tcon_chan); - writel(tcon, chip->base + REG_TCON); + __pwm_samsung_manual_update(chip, pwm); spin_unlock_irqrestore(&samsung_pwm_lock, flags); } diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c index af4e37d3e3a6..927c4cbb1daf 100644 --- a/drivers/pwm/pwm-visconti.c +++ b/drivers/pwm/pwm-visconti.c @@ -144,28 +144,17 @@ static int visconti_pwm_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - platform_set_drvdata(pdev, priv); - priv->chip.dev = dev; priv->chip.ops = &visconti_pwm_ops; priv->chip.npwm = 4; - ret = pwmchip_add(&priv->chip); + ret = devm_pwmchip_add(&pdev->dev, &priv->chip); if (ret < 0) return dev_err_probe(&pdev->dev, ret, "Cannot register visconti PWM\n"); return 0; } -static int visconti_pwm_remove(struct platform_device *pdev) -{ - struct visconti_pwm_chip *priv = platform_get_drvdata(pdev); - - pwmchip_remove(&priv->chip); - - return 0; -} - static const struct of_device_id visconti_pwm_of_match[] = { { .compatible = "toshiba,visconti-pwm", }, { } @@ -178,7 +167,6 @@ static struct platform_driver visconti_pwm_driver = { .of_match_table = visconti_pwm_of_match, }, .probe = visconti_pwm_probe, - .remove = visconti_pwm_remove, }; module_platform_driver(visconti_pwm_driver); diff --git a/drivers/pwm/pwm-vt8500.c 
b/drivers/pwm/pwm-vt8500.c index ea2aa151080a..480bfc29782f 100644 --- a/drivers/pwm/pwm-vt8500.c +++ b/drivers/pwm/pwm-vt8500.c @@ -56,7 +56,7 @@ struct vt8500_chip { #define to_vt8500_chip(chip) container_of(chip, struct vt8500_chip, chip) #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) -static inline void pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 bitmask) +static inline void vt8500_pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 bitmask) { int loops = msecs_to_loops(10); u32 mask = bitmask << (nr << 8); @@ -106,18 +106,18 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, dc = c; writel(prescale, vt8500->base + REG_SCALAR(pwm->hwpwm)); - pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_SCALAR_UPDATE); + vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_SCALAR_UPDATE); writel(pv, vt8500->base + REG_PERIOD(pwm->hwpwm)); - pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_PERIOD_UPDATE); + vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_PERIOD_UPDATE); writel(dc, vt8500->base + REG_DUTY(pwm->hwpwm)); - pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_DUTY_UPDATE); + vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_DUTY_UPDATE); val = readl(vt8500->base + REG_CTRL(pwm->hwpwm)); val |= CTRL_AUTOLOAD; writel(val, vt8500->base + REG_CTRL(pwm->hwpwm)); - pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE); + vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE); clk_disable(vt8500->clk); return 0; @@ -138,7 +138,7 @@ static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) val = readl(vt8500->base + REG_CTRL(pwm->hwpwm)); val |= CTRL_ENABLE; writel(val, vt8500->base + REG_CTRL(pwm->hwpwm)); - pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE); + vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE); return 0; } @@ -151,7 +151,7 @@ static void vt8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) val = readl(vt8500->base + REG_CTRL(pwm->hwpwm)); val &= ~CTRL_ENABLE; writel(val, vt8500->base + REG_CTRL(pwm->hwpwm)); - pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE); + vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE); clk_disable(vt8500->clk); } @@ -171,7 +171,7 @@ static int vt8500_pwm_set_polarity(struct pwm_chip *chip, val &= ~CTRL_INVERT; writel(val, vt8500->base + REG_CTRL(pwm->hwpwm)); - pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE); + vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE); return 0; } diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 7208eeb8459a..058e56a10ab8 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -441,6 +441,7 @@ config RTC_DRV_X1205 config RTC_DRV_PCF8523 tristate "NXP PCF8523" + select REGMAP_I2C help If you say yes here you get support for the NXP PCF8523 RTC chips. @@ -582,14 +583,6 @@ config RTC_DRV_TPS65910 This driver can also be built as a module. If so, the module will be called rtc-tps65910. -config RTC_DRV_TPS80031 - tristate "TI TPS80031/TPS80032 RTC driver" - depends on MFD_TPS80031 - help - TI Power Management IC TPS80031 supports RTC functionality - along with alarm. This driver supports the RTC driver for - the TPS80031 RTC module. - config RTC_DRV_RC5T583 tristate "RICOH 5T583 RTC driver" depends on MFD_RC5T583 @@ -1929,4 +1922,14 @@ config RTC_DRV_WILCO_EC This can also be built as a module. If so, the module will be named "rtc_wilco_ec". 
+config RTC_DRV_MSC313 + tristate "MStar MSC313 RTC" + depends on ARCH_MSTARV7 || COMPILE_TEST + help + If you say yes here you get support for the Mstar MSC313e On-Chip + Real Time Clock. + + This driver can also be built as a module, if so, the module + will be called "rtc-msc313". + endif # RTC_CLASS diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 5ceeafe4d5b2..678a8ef4abae 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -103,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_MCP795) += rtc-mcp795.o obj-$(CONFIG_RTC_DRV_MESON) += rtc-meson.o obj-$(CONFIG_RTC_DRV_MOXART) += rtc-moxart.o obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o +obj-$(CONFIG_RTC_DRV_MSC313) += rtc-msc313.o obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o obj-$(CONFIG_RTC_DRV_MT2712) += rtc-mt2712.o obj-$(CONFIG_RTC_DRV_MT6397) += rtc-mt6397.o @@ -169,7 +170,6 @@ obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o -obj-$(CONFIG_RTC_DRV_TPS80031) += rtc-tps80031.o obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index f77bc089eb6b..4b460c61f1d8 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -232,6 +232,7 @@ static struct rtc_device *rtc_allocate_device(void) rtc->pie_enabled = 0; set_bit(RTC_FEATURE_ALARM, rtc->features); + set_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features); return rtc; } @@ -334,7 +335,8 @@ static void devm_rtc_unregister_device(void *data) * letting any rtc_class_open() users access it again */ rtc_proc_del_device(rtc); - cdev_device_del(&rtc->char_dev, &rtc->dev); + if (!test_bit(RTC_NO_CDEV, &rtc->flags)) + cdev_device_del(&rtc->char_dev, &rtc->dev); rtc->ops = NULL; mutex_unlock(&rtc->ops_lock); } @@ -363,7 +365,9 @@ struct rtc_device *devm_rtc_allocate_device(struct device *dev) rtc->id = id; rtc->dev.parent = dev; - dev_set_name(&rtc->dev, "rtc%d", id); + err = dev_set_name(&rtc->dev, "rtc%d", id); + if (err) + return ERR_PTR(err); err = devm_add_action_or_reset(dev, devm_rtc_release_device, rtc); if (err) @@ -386,6 +390,12 @@ int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc) if (!rtc->ops->set_alarm) clear_bit(RTC_FEATURE_ALARM, rtc->features); + if (rtc->uie_unsupported) + clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features); + + if (rtc->ops->set_offset) + set_bit(RTC_FEATURE_CORRECTION, rtc->features); + rtc->owner = owner; rtc_device_get_offset(rtc); @@ -397,12 +407,14 @@ int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc) rtc_dev_prepare(rtc); err = cdev_device_add(&rtc->char_dev, &rtc->dev); - if (err) + if (err) { + set_bit(RTC_NO_CDEV, &rtc->flags); dev_warn(rtc->dev.parent, "failed to add char device %d:%d\n", MAJOR(rtc->dev.devt), rtc->id); - else + } else { dev_dbg(rtc->dev.parent, "char device (%d:%d)\n", MAJOR(rtc->dev.devt), rtc->id); + } rtc_proc_add_device(rtc); diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c index 5b8ebe86124a..e104972a28fd 100644 --- a/drivers/rtc/dev.c +++ b/drivers/rtc/dev.c @@ -208,6 +208,7 @@ static long rtc_dev_ioctl(struct file *file, const struct rtc_class_ops *ops = rtc->ops; struct rtc_time tm; struct rtc_wkalrm alarm; + struct rtc_param param; void __user *uarg = (void __user *)arg; err = mutex_lock_interruptible(&rtc->ops_lock); @@ -221,6 +222,7 @@ static long rtc_dev_ioctl(struct file *file, switch 
(cmd) { case RTC_EPOCH_SET: case RTC_SET_TIME: + case RTC_PARAM_SET: if (!capable(CAP_SYS_TIME)) err = -EACCES; break; @@ -382,6 +384,69 @@ static long rtc_dev_ioctl(struct file *file, err = -EFAULT; return err; + case RTC_PARAM_GET: + if (copy_from_user(&param, uarg, sizeof(param))) { + mutex_unlock(&rtc->ops_lock); + return -EFAULT; + } + + switch(param.param) { + long offset; + case RTC_PARAM_FEATURES: + if (param.index != 0) + err = -EINVAL; + param.uvalue = rtc->features[0]; + break; + + case RTC_PARAM_CORRECTION: + mutex_unlock(&rtc->ops_lock); + if (param.index != 0) + return -EINVAL; + err = rtc_read_offset(rtc, &offset); + mutex_lock(&rtc->ops_lock); + if (err == 0) + param.svalue = offset; + break; + + default: + if (rtc->ops->param_get) + err = rtc->ops->param_get(rtc->dev.parent, &param); + else + err = -EINVAL; + } + + if (!err) + if (copy_to_user(uarg, &param, sizeof(param))) + err = -EFAULT; + + break; + + case RTC_PARAM_SET: + if (copy_from_user(&param, uarg, sizeof(param))) { + mutex_unlock(&rtc->ops_lock); + return -EFAULT; + } + + switch(param.param) { + case RTC_PARAM_FEATURES: + err = -EINVAL; + break; + + case RTC_PARAM_CORRECTION: + mutex_unlock(&rtc->ops_lock); + if (param.index != 0) + return -EINVAL; + return rtc_set_offset(rtc, param.svalue); + + default: + if (rtc->ops->param_set) + err = rtc->ops->param_set(rtc->dev.parent, &param); + else + err = -EINVAL; + } + + break; + default: /* Finally try the driver's ioctl interface */ if (ops->ioctl) { diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 9a2bd4947007..d8e835798153 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -423,6 +423,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) if (err) return err; now = rtc_tm_to_time64(&tm); + if (scheduled <= now) return -ETIME; /* @@ -447,6 +448,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { + ktime_t alarm_time; int err; if (!rtc->ops) @@ -468,7 +470,15 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) if (rtc->aie_timer.enabled) rtc_timer_remove(rtc, &rtc->aie_timer); - rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); + alarm_time = rtc_tm_to_ktime(alarm->time); + /* + * Round down so we never miss a deadline, checking for past deadline is + * done in __rtc_set_alarm + */ + if (test_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features)) + alarm_time = ktime_sub_ns(alarm_time, (u64)alarm->time.tm_sec * NSEC_PER_SEC); + + rtc->aie_timer.node.expires = alarm_time; rtc->aie_timer.period = 0; if (alarm->enabled) err = rtc_timer_enqueue(rtc, &rtc->aie_timer); @@ -561,7 +571,8 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) if (rtc->uie_rtctimer.enabled == enabled) goto out; - if (rtc->uie_unsupported || !test_bit(RTC_FEATURE_ALARM, rtc->features)) { + if (!test_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features) || + !test_bit(RTC_FEATURE_ALARM, rtc->features)) { mutex_unlock(&rtc->ops_lock); #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL return rtc_dev_update_irq_enable_emul(rtc, enabled); diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c index a9b355510cd4..e188ab517f1e 100644 --- a/drivers/rtc/rtc-ab-eoz9.c +++ b/drivers/rtc/rtc-ab-eoz9.c @@ -534,7 +534,6 @@ static int abeoz9_probe(struct i2c_client *client, data->rtc->ops = &rtc_ops; data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; data->rtc->range_max = RTC_TIMESTAMP_END_2099; - data->rtc->uie_unsupported = 1; 
clear_bit(RTC_FEATURE_ALARM, data->rtc->features); if (client->irq > 0) { @@ -546,6 +545,8 @@ static int abeoz9_probe(struct i2c_client *client, dev_err(dev, "failed to request alarm irq\n"); return ret; } + } else { + clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, data->rtc->features); } if (client->irq > 0 || device_property_read_bool(dev, "wakeup-source")) { diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c index b40048871295..ea33e149d545 100644 --- a/drivers/rtc/rtc-ab8500.c +++ b/drivers/rtc/rtc-ab8500.c @@ -184,25 +184,9 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { int retval, i; unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)]; - unsigned long mins, secs = 0, cursec = 0; - struct rtc_time curtm; + unsigned long mins; - /* Get the number of seconds since 1970 */ - secs = rtc_tm_to_time64(&alarm->time); - - /* - * Check whether alarm is set less than 1min. - * Since our RTC doesn't support alarm resolution less than 1min, - * return -EINVAL, so UIE EMUL can take it up, incase of UIE_ON - */ - ab8500_rtc_read_time(dev, &curtm); /* Read current time */ - cursec = rtc_tm_to_time64(&curtm); - if ((secs - cursec) < 59) { - dev_dbg(dev, "Alarm less than 1 minute not supported\r\n"); - return -EINVAL; - } - - mins = secs / 60; + mins = (unsigned long)rtc_tm_to_time64(&alarm->time) / 60; buf[2] = mins & 0xFF; buf[1] = (mins >> 8) & 0xFF; @@ -394,7 +378,8 @@ static int ab8500_rtc_probe(struct platform_device *pdev) dev_pm_set_wake_irq(&pdev->dev, irq); platform_set_drvdata(pdev, rtc); - rtc->uie_unsupported = 1; + set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features); + clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features); rtc->range_max = (1ULL << 24) * 60 - 1; // 24-bit minutes + 59 secs rtc->start_secs = RTC_TIMESTAMP_BEGIN_2000; diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c index b3de6d2e680a..2f83adef966e 100644 --- a/drivers/rtc/rtc-ds1302.c +++ b/drivers/rtc/rtc-ds1302.c @@ -199,11 +199,18 @@ static const struct of_device_id ds1302_dt_ids[] = { MODULE_DEVICE_TABLE(of, ds1302_dt_ids); #endif +static const struct spi_device_id ds1302_spi_ids[] = { + { .name = "ds1302", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(spi, ds1302_spi_ids); + static struct spi_driver ds1302_driver = { .driver.name = "rtc-ds1302", .driver.of_match_table = of_match_ptr(ds1302_dt_ids), .probe = ds1302_probe, .remove = ds1302_remove, + .id_table = ds1302_spi_ids, }; module_spi_driver(ds1302_driver); diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c index 66fc8617d07e..93ce72b9ae59 100644 --- a/drivers/rtc/rtc-ds1390.c +++ b/drivers/rtc/rtc-ds1390.c @@ -219,12 +219,19 @@ static const struct of_device_id ds1390_of_match[] = { }; MODULE_DEVICE_TABLE(of, ds1390_of_match); +static const struct spi_device_id ds1390_spi_ids[] = { + { .name = "ds1390" }, + {} +}; +MODULE_DEVICE_TABLE(spi, ds1390_spi_ids); + static struct spi_driver ds1390_driver = { .driver = { .name = "rtc-ds1390", .of_match_table = of_match_ptr(ds1390_of_match), }, .probe = ds1390_probe, + .id_table = ds1390_spi_ids, }; module_spi_driver(ds1390_driver); diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index f736f8c22e96..6d383b629d20 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -557,7 +557,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80) * registered automatically when being referenced. 
*/ of_node_put(fixed_clock); - return 0; + return NULL; } /* First disable the clock */ diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c index bad7792b6ca5..0d515b3df571 100644 --- a/drivers/rtc/rtc-mcp795.c +++ b/drivers/rtc/rtc-mcp795.c @@ -430,12 +430,19 @@ static const struct of_device_id mcp795_of_match[] = { MODULE_DEVICE_TABLE(of, mcp795_of_match); #endif +static const struct spi_device_id mcp795_spi_ids[] = { + { .name = "mcp795" }, + { } +}; +MODULE_DEVICE_TABLE(spi, mcp795_spi_ids); + static struct spi_driver mcp795_driver = { .driver = { .name = "rtc-mcp795", .of_match_table = of_match_ptr(mcp795_of_match), }, .probe = mcp795_probe, + .id_table = mcp795_spi_ids, }; module_spi_driver(mcp795_driver); diff --git a/drivers/rtc/rtc-msc313.c b/drivers/rtc/rtc-msc313.c new file mode 100644 index 000000000000..f3fde013c4b8 --- /dev/null +++ b/drivers/rtc/rtc-msc313.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Real time clocks driver for MStar/SigmaStar ARMv7 SoCs. + * Based on "Real Time Clock driver for msb252x." that was contained + * in various MStar kernels. + * + * (C) 2019 Daniel Palmer + * (C) 2021 Romain Perier + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> + +/* Registers */ +#define REG_RTC_CTRL 0x00 +#define REG_RTC_FREQ_CW_L 0x04 +#define REG_RTC_FREQ_CW_H 0x08 +#define REG_RTC_LOAD_VAL_L 0x0C +#define REG_RTC_LOAD_VAL_H 0x10 +#define REG_RTC_MATCH_VAL_L 0x14 +#define REG_RTC_MATCH_VAL_H 0x18 +#define REG_RTC_STATUS_INT 0x1C +#define REG_RTC_CNT_VAL_L 0x20 +#define REG_RTC_CNT_VAL_H 0x24 + +/* Control bits for REG_RTC_CTRL */ +#define SOFT_RSTZ_BIT BIT(0) +#define CNT_EN_BIT BIT(1) +#define WRAP_EN_BIT BIT(2) +#define LOAD_EN_BIT BIT(3) +#define READ_EN_BIT BIT(4) +#define INT_MASK_BIT BIT(5) +#define INT_FORCE_BIT BIT(6) +#define INT_CLEAR_BIT BIT(7) + +/* Control bits for REG_RTC_STATUS_INT */ +#define RAW_INT_BIT BIT(0) +#define ALM_INT_BIT BIT(1) + +struct msc313_rtc { + struct rtc_device *rtc_dev; + void __iomem *rtc_base; +}; + +static int msc313_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct msc313_rtc *priv = dev_get_drvdata(dev); + unsigned long seconds; + + seconds = readw(priv->rtc_base + REG_RTC_MATCH_VAL_L) + | ((unsigned long)readw(priv->rtc_base + REG_RTC_MATCH_VAL_H) << 16); + + rtc_time64_to_tm(seconds, &alarm->time); + + if (!(readw(priv->rtc_base + REG_RTC_CTRL) & INT_MASK_BIT)) + alarm->enabled = 1; + + return 0; +} + +static int msc313_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct msc313_rtc *priv = dev_get_drvdata(dev); + u16 reg; + + reg = readw(priv->rtc_base + REG_RTC_CTRL); + if (enabled) + reg &= ~INT_MASK_BIT; + else + reg |= INT_MASK_BIT; + writew(reg, priv->rtc_base + REG_RTC_CTRL); + return 0; +} + +static int msc313_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct msc313_rtc *priv = dev_get_drvdata(dev); + unsigned long seconds; + + seconds = rtc_tm_to_time64(&alarm->time); + writew((seconds & 0xFFFF), priv->rtc_base + REG_RTC_MATCH_VAL_L); + writew((seconds >> 16) & 0xFFFF, priv->rtc_base + REG_RTC_MATCH_VAL_H); + + msc313_rtc_alarm_irq_enable(dev, alarm->enabled); + + return 0; +} + +static bool msc313_rtc_get_enabled(struct msc313_rtc *priv) +{ + return readw(priv->rtc_base + REG_RTC_CTRL) & CNT_EN_BIT; +} + +static void msc313_rtc_set_enabled(struct msc313_rtc *priv) +{ 
+ u16 reg; + + reg = readw(priv->rtc_base + REG_RTC_CTRL); + reg |= CNT_EN_BIT; + writew(reg, priv->rtc_base + REG_RTC_CTRL); +} + +static int msc313_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct msc313_rtc *priv = dev_get_drvdata(dev); + u32 seconds; + u16 reg; + + if (!msc313_rtc_get_enabled(priv)) + return -EINVAL; + + reg = readw(priv->rtc_base + REG_RTC_CTRL); + writew(reg | READ_EN_BIT, priv->rtc_base + REG_RTC_CTRL); + + /* Wait for HW latch done */ + while (readw(priv->rtc_base + REG_RTC_CTRL) & READ_EN_BIT) + udelay(1); + + seconds = readw(priv->rtc_base + REG_RTC_CNT_VAL_L) + | ((unsigned long)readw(priv->rtc_base + REG_RTC_CNT_VAL_H) << 16); + + rtc_time64_to_tm(seconds, tm); + + return 0; +} + +static int msc313_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct msc313_rtc *priv = dev_get_drvdata(dev); + unsigned long seconds; + u16 reg; + + seconds = rtc_tm_to_time64(tm); + writew(seconds & 0xFFFF, priv->rtc_base + REG_RTC_LOAD_VAL_L); + writew((seconds >> 16) & 0xFFFF, priv->rtc_base + REG_RTC_LOAD_VAL_H); + + /* Enable load for loading value into internal RTC counter */ + reg = readw(priv->rtc_base + REG_RTC_CTRL); + writew(reg | LOAD_EN_BIT, priv->rtc_base + REG_RTC_CTRL); + + /* Wait for HW latch done */ + while (readw(priv->rtc_base + REG_RTC_CTRL) & LOAD_EN_BIT) + udelay(1); + msc313_rtc_set_enabled(priv); + return 0; +} + +static const struct rtc_class_ops msc313_rtc_ops = { + .read_time = msc313_rtc_read_time, + .set_time = msc313_rtc_set_time, + .read_alarm = msc313_rtc_read_alarm, + .set_alarm = msc313_rtc_set_alarm, + .alarm_irq_enable = msc313_rtc_alarm_irq_enable, +}; + +static irqreturn_t msc313_rtc_interrupt(s32 irq, void *dev_id) +{ + struct msc313_rtc *priv = dev_get_drvdata(dev_id); + u16 reg; + + reg = readw(priv->rtc_base + REG_RTC_STATUS_INT); + if (!(reg & ALM_INT_BIT)) + return IRQ_NONE; + + reg = readw(priv->rtc_base + REG_RTC_CTRL); + reg |= INT_CLEAR_BIT; + reg &= ~INT_FORCE_BIT; + writew(reg, priv->rtc_base + REG_RTC_CTRL); + + rtc_update_irq(priv->rtc_dev, 1, RTC_IRQF | RTC_AF); + + return IRQ_HANDLED; +} + +static int msc313_rtc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct msc313_rtc *priv; + unsigned long rate; + struct clk *clk; + int ret; + int irq; + + priv = devm_kzalloc(&pdev->dev, sizeof(struct msc313_rtc), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->rtc_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->rtc_base)) + return PTR_ERR(priv->rtc_base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + priv->rtc_dev = devm_rtc_allocate_device(dev); + if (IS_ERR(priv->rtc_dev)) + return PTR_ERR(priv->rtc_dev); + + priv->rtc_dev->ops = &msc313_rtc_ops; + priv->rtc_dev->range_max = U32_MAX; + + ret = devm_request_irq(dev, irq, msc313_rtc_interrupt, IRQF_SHARED, + dev_name(&pdev->dev), &pdev->dev); + if (ret) { + dev_err(dev, "Could not request IRQ\n"); + return ret; + } + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + dev_err(dev, "No input reference clock\n"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(dev, "Failed to enable the reference clock, %d\n", ret); + return ret; + } + + ret = devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk); + if (ret) + return ret; + + rate = clk_get_rate(clk); + writew(rate & 0xFFFF, priv->rtc_base + REG_RTC_FREQ_CW_L); + writew((rate >> 16) & 0xFFFF, priv->rtc_base + REG_RTC_FREQ_CW_H); + + platform_set_drvdata(pdev, 
priv); + + return devm_rtc_register_device(priv->rtc_dev); +} + +static const struct of_device_id msc313_rtc_of_match_table[] = { + { .compatible = "mstar,msc313-rtc" }, + { } +}; +MODULE_DEVICE_TABLE(of, msc313_rtc_of_match_table); + +static struct platform_driver msc313_rtc_driver = { + .probe = msc313_rtc_probe, + .driver = { + .name = "msc313-rtc", + .of_match_table = msc313_rtc_of_match_table, + }, +}; + +module_platform_driver(msc313_rtc_driver); + +MODULE_AUTHOR("Daniel Palmer <daniel@thingy.jp>"); +MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>"); +MODULE_DESCRIPTION("MStar RTC Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index d46e0f0cc502..4d4f3b1a7309 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -1029,6 +1029,5 @@ static struct platform_driver omap_rtc_driver = { module_platform_driver(omap_rtc_driver); -MODULE_ALIAS("platform:omap_rtc"); MODULE_AUTHOR("George G. Davis (and others)"); MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c index 0f58cac81d8c..7473e6c8a183 100644 --- a/drivers/rtc/rtc-pcf2123.c +++ b/drivers/rtc/rtc-pcf2123.c @@ -451,12 +451,21 @@ static const struct of_device_id pcf2123_dt_ids[] = { MODULE_DEVICE_TABLE(of, pcf2123_dt_ids); #endif +static const struct spi_device_id pcf2123_spi_ids[] = { + { .name = "pcf2123", }, + { .name = "rv2123", }, + { .name = "rtc-pcf2123", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(spi, pcf2123_spi_ids); + static struct spi_driver pcf2123_driver = { .driver = { .name = "rtc-pcf2123", .of_match_table = of_match_ptr(pcf2123_dt_ids), }, .probe = pcf2123_probe, + .id_table = pcf2123_spi_ids, }; module_spi_driver(pcf2123_driver); diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index 14da4ab30104..15e50bb10cf0 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -34,6 +34,7 @@ #define PCF85063_REG_CTRL1 0x00 /* status */ #define PCF85063_REG_CTRL1_CAP_SEL BIT(0) #define PCF85063_REG_CTRL1_STOP BIT(5) +#define PCF85063_REG_CTRL1_EXT_TEST BIT(7) #define PCF85063_REG_CTRL2 0x01 #define PCF85063_CTRL2_AF BIT(6) @@ -117,6 +118,7 @@ static int pcf85063_rtc_set_time(struct device *dev, struct rtc_time *tm) * reset state until all time/date registers are written */ rc = regmap_update_bits(pcf85063->regmap, PCF85063_REG_CTRL1, + PCF85063_REG_CTRL1_EXT_TEST | PCF85063_REG_CTRL1_STOP, PCF85063_REG_CTRL1_STOP); if (rc) @@ -297,7 +299,7 @@ static int pcf85063_ioctl(struct device *dev, unsigned int cmd, if (ret < 0) return ret; - status = status & PCF85063_REG_SC_OS ? RTC_VL_DATA_INVALID : 0; + status = (status & PCF85063_REG_SC_OS) ? RTC_VL_DATA_INVALID : 0; return put_user(status, (unsigned int __user *)arg); @@ -479,6 +481,18 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063) struct clk *clk; struct clk_init_data init; struct device_node *node = pcf85063->rtc->dev.parent->of_node; + struct device_node *fixed_clock; + + fixed_clock = of_get_child_by_name(node, "clock"); + if (fixed_clock) { + /* + * skip registering square wave clock when a fixed + * clock has been registered. The fixed clock is + * registered automatically when being referenced. 
+ */ + of_node_put(fixed_clock); + return NULL; + } init.name = "pcf85063-clkout"; init.ops = &pcf85063_clkout_ops; diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c index 8b6fb20774bf..c93acade7205 100644 --- a/drivers/rtc/rtc-pcf8523.c +++ b/drivers/rtc/rtc-pcf8523.c @@ -4,8 +4,10 @@ */ #include <linux/bcd.h> +#include <linux/bitfield.h> #include <linux/i2c.h> #include <linux/module.h> +#include <linux/regmap.h> #include <linux/rtc.h> #include <linux/of.h> #include <linux/pm_wakeirq.h> @@ -19,11 +21,10 @@ #define PCF8523_CONTROL2_AF BIT(3) #define PCF8523_REG_CONTROL3 0x02 -#define PCF8523_CONTROL3_PM_BLD BIT(7) /* battery low detection disabled */ -#define PCF8523_CONTROL3_PM_VDD BIT(6) /* switch-over disabled */ -#define PCF8523_CONTROL3_PM_DSM BIT(5) /* direct switching mode */ -#define PCF8523_CONTROL3_PM_MASK 0xe0 +#define PCF8523_CONTROL3_PM GENMASK(7,5) +#define PCF8523_PM_STANDBY 0x7 #define PCF8523_CONTROL3_BLF BIT(2) /* battery low bit, read-only */ +#define PCF8523_CONTROL3_BSF BIT(3) #define PCF8523_REG_SECONDS 0x03 #define PCF8523_SECONDS_OS BIT(7) @@ -48,127 +49,45 @@ struct pcf8523 { struct rtc_device *rtc; - struct i2c_client *client; + struct regmap *regmap; }; -static int pcf8523_read(struct i2c_client *client, u8 reg, u8 *valuep) +static int pcf8523_load_capacitance(struct pcf8523 *pcf8523, struct device_node *node) { - struct i2c_msg msgs[2]; - u8 value = 0; - int err; - - msgs[0].addr = client->addr; - msgs[0].flags = 0; - msgs[0].len = sizeof(reg); - msgs[0].buf = &reg; - - msgs[1].addr = client->addr; - msgs[1].flags = I2C_M_RD; - msgs[1].len = sizeof(value); - msgs[1].buf = &value; - - err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); - if (err < 0) - return err; - - *valuep = value; - - return 0; -} - -static int pcf8523_write(struct i2c_client *client, u8 reg, u8 value) -{ - u8 buffer[2] = { reg, value }; - struct i2c_msg msg; - int err; - - msg.addr = client->addr; - msg.flags = 0; - msg.len = sizeof(buffer); - msg.buf = buffer; - - err = i2c_transfer(client->adapter, &msg, 1); - if (err < 0) - return err; - - return 0; -} - -static int pcf8523_voltage_low(struct i2c_client *client) -{ - u8 value; - int err; - - err = pcf8523_read(client, PCF8523_REG_CONTROL3, &value); - if (err < 0) - return err; - - return !!(value & PCF8523_CONTROL3_BLF); -} - -static int pcf8523_load_capacitance(struct i2c_client *client) -{ - u32 load; - u8 value; - int err; - - err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value); - if (err < 0) - return err; + u32 load, value = 0; load = 12500; - of_property_read_u32(client->dev.of_node, "quartz-load-femtofarads", - &load); + of_property_read_u32(node, "quartz-load-femtofarads", &load); switch (load) { default: - dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500", + dev_warn(&pcf8523->rtc->dev, "Unknown quartz-load-femtofarads value: %d. 
Assuming 12500", load); fallthrough; case 12500: value |= PCF8523_CONTROL1_CAP_SEL; break; case 7000: - value &= ~PCF8523_CONTROL1_CAP_SEL; break; } - err = pcf8523_write(client, PCF8523_REG_CONTROL1, value); - - return err; -} - -static int pcf8523_set_pm(struct i2c_client *client, u8 pm) -{ - u8 value; - int err; - - err = pcf8523_read(client, PCF8523_REG_CONTROL3, &value); - if (err < 0) - return err; - - value = (value & ~PCF8523_CONTROL3_PM_MASK) | pm; - - err = pcf8523_write(client, PCF8523_REG_CONTROL3, value); - if (err < 0) - return err; - - return 0; + return regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1, + PCF8523_CONTROL1_CAP_SEL, value); } static irqreturn_t pcf8523_irq(int irq, void *dev_id) { - struct pcf8523 *pcf8523 = i2c_get_clientdata(dev_id); - u8 value; + struct pcf8523 *pcf8523 = dev_id; + u32 value; int err; - err = pcf8523_read(pcf8523->client, PCF8523_REG_CONTROL2, &value); + err = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL2, &value); if (err < 0) return IRQ_HANDLED; if (value & PCF8523_CONTROL2_AF) { value &= ~PCF8523_CONTROL2_AF; - pcf8523_write(pcf8523->client, PCF8523_REG_CONTROL2, value); + regmap_write(pcf8523->regmap, PCF8523_REG_CONTROL2, value); rtc_update_irq(pcf8523->rtc, 1, RTC_IRQF | RTC_AF); return IRQ_HANDLED; @@ -177,68 +96,14 @@ static irqreturn_t pcf8523_irq(int irq, void *dev_id) return IRQ_NONE; } -static int pcf8523_stop_rtc(struct i2c_client *client) -{ - u8 value; - int err; - - err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value); - if (err < 0) - return err; - - value |= PCF8523_CONTROL1_STOP; - - err = pcf8523_write(client, PCF8523_REG_CONTROL1, value); - if (err < 0) - return err; - - return 0; -} - -static int pcf8523_start_rtc(struct i2c_client *client) -{ - u8 value; - int err; - - err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value); - if (err < 0) - return err; - - value &= ~PCF8523_CONTROL1_STOP; - - err = pcf8523_write(client, PCF8523_REG_CONTROL1, value); - if (err < 0) - return err; - - return 0; -} - static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm) { - struct i2c_client *client = to_i2c_client(dev); - u8 start = PCF8523_REG_SECONDS, regs[7]; - struct i2c_msg msgs[2]; + struct pcf8523 *pcf8523 = dev_get_drvdata(dev); + u8 regs[7]; int err; - err = pcf8523_voltage_low(client); - if (err < 0) { - return err; - } else if (err > 0) { - dev_err(dev, "low voltage detected, time is unreliable\n"); - return -EINVAL; - } - - msgs[0].addr = client->addr; - msgs[0].flags = 0; - msgs[0].len = 1; - msgs[0].buf = &start; - - msgs[1].addr = client->addr; - msgs[1].flags = I2C_M_RD; - msgs[1].len = sizeof(regs); - msgs[1].buf = regs; - - err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + err = regmap_bulk_read(pcf8523->regmap, PCF8523_REG_SECONDS, regs, + sizeof(regs)); if (err < 0) return err; @@ -258,63 +123,50 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm) static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm) { - struct i2c_client *client = to_i2c_client(dev); - struct i2c_msg msg; - u8 regs[8]; + struct pcf8523 *pcf8523 = dev_get_drvdata(dev); + u8 regs[7]; int err; - err = pcf8523_stop_rtc(client); + err = regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1, + PCF8523_CONTROL1_STOP, PCF8523_CONTROL1_STOP); if (err < 0) return err; - regs[0] = PCF8523_REG_SECONDS; /* This will purposely overwrite PCF8523_SECONDS_OS */ - regs[1] = bin2bcd(tm->tm_sec); - regs[2] = bin2bcd(tm->tm_min); - regs[3] = bin2bcd(tm->tm_hour); - regs[4] = 
bin2bcd(tm->tm_mday); - regs[5] = tm->tm_wday; - regs[6] = bin2bcd(tm->tm_mon + 1); - regs[7] = bin2bcd(tm->tm_year - 100); - - msg.addr = client->addr; - msg.flags = 0; - msg.len = sizeof(regs); - msg.buf = regs; - - err = i2c_transfer(client->adapter, &msg, 1); + regs[0] = bin2bcd(tm->tm_sec); + regs[1] = bin2bcd(tm->tm_min); + regs[2] = bin2bcd(tm->tm_hour); + regs[3] = bin2bcd(tm->tm_mday); + regs[4] = tm->tm_wday; + regs[5] = bin2bcd(tm->tm_mon + 1); + regs[6] = bin2bcd(tm->tm_year - 100); + + err = regmap_bulk_write(pcf8523->regmap, PCF8523_REG_SECONDS, regs, + sizeof(regs)); if (err < 0) { /* * If the time cannot be set, restart the RTC anyway. Note * that errors are ignored if the RTC cannot be started so * that we have a chance to propagate the original error. */ - pcf8523_start_rtc(client); + regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1, + PCF8523_CONTROL1_STOP, 0); return err; } - return pcf8523_start_rtc(client); + return regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1, + PCF8523_CONTROL1_STOP, 0); } static int pcf8523_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm) { - struct i2c_client *client = to_i2c_client(dev); - u8 start = PCF8523_REG_MINUTE_ALARM, regs[4]; - struct i2c_msg msgs[2]; - u8 value; + struct pcf8523 *pcf8523 = dev_get_drvdata(dev); + u8 regs[4]; + u32 value; int err; - msgs[0].addr = client->addr; - msgs[0].flags = 0; - msgs[0].len = 1; - msgs[0].buf = &start; - - msgs[1].addr = client->addr; - msgs[1].flags = I2C_M_RD; - msgs[1].len = sizeof(regs); - msgs[1].buf = regs; - - err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + err = regmap_bulk_read(pcf8523->regmap, PCF8523_REG_MINUTE_ALARM, regs, + sizeof(regs)); if (err < 0) return err; @@ -324,12 +176,12 @@ static int pcf8523_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm) tm->time.tm_mday = bcd2bin(regs[2] & 0x3F); tm->time.tm_wday = bcd2bin(regs[3] & 0x7); - err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value); + err = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL1, &value); if (err < 0) return err; tm->enabled = !!(value & PCF8523_CONTROL1_AIE); - err = pcf8523_read(client, PCF8523_REG_CONTROL2, &value); + err = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL2, &value); if (err < 0) return err; tm->pending = !!(value & PCF8523_CONTROL2_AF); @@ -339,30 +191,16 @@ static int pcf8523_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm) static int pcf8523_irq_enable(struct device *dev, unsigned int enabled) { - struct i2c_client *client = to_i2c_client(dev); - u8 value; - int err; - - err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value); - if (err < 0) - return err; - - value &= PCF8523_CONTROL1_AIE; + struct pcf8523 *pcf8523 = dev_get_drvdata(dev); - if (enabled) - value |= PCF8523_CONTROL1_AIE; - - err = pcf8523_write(client, PCF8523_REG_CONTROL1, value); - if (err < 0) - return err; - - return 0; + return regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1, + PCF8523_CONTROL1_AIE, enabled ? 
+ PCF8523_CONTROL1_AIE : 0); } static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm) { - struct i2c_client *client = to_i2c_client(dev); - struct i2c_msg msg; + struct pcf8523 *pcf8523 = dev_get_drvdata(dev); u8 regs[5]; int err; @@ -370,7 +208,7 @@ static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm) if (err) return err; - err = pcf8523_write(client, PCF8523_REG_CONTROL2, 0); + err = regmap_write(pcf8523->regmap, PCF8523_REG_CONTROL2, 0); if (err < 0) return err; @@ -382,16 +220,13 @@ static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm) rtc_time64_to_tm(alarm_time, &tm->time); } - regs[0] = PCF8523_REG_MINUTE_ALARM; - regs[1] = bin2bcd(tm->time.tm_min); - regs[2] = bin2bcd(tm->time.tm_hour); - regs[3] = bin2bcd(tm->time.tm_mday); - regs[4] = ALARM_DIS; - msg.addr = client->addr; - msg.flags = 0; - msg.len = sizeof(regs); - msg.buf = regs; - err = i2c_transfer(client->adapter, &msg, 1); + regs[0] = bin2bcd(tm->time.tm_min); + regs[1] = bin2bcd(tm->time.tm_hour); + regs[2] = bin2bcd(tm->time.tm_mday); + regs[3] = ALARM_DIS; + + err = regmap_bulk_write(pcf8523->regmap, PCF8523_REG_MINUTE_ALARM, regs, + sizeof(regs)); if (err < 0) return err; @@ -401,24 +236,101 @@ static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm) return 0; } -#ifdef CONFIG_RTC_INTF_DEV +static int pcf8523_param_get(struct device *dev, struct rtc_param *param) +{ + struct pcf8523 *pcf8523 = dev_get_drvdata(dev); + int ret; + + switch(param->param) { + u32 value; + + case RTC_PARAM_BACKUP_SWITCH_MODE: + ret = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value); + if (ret < 0) + return ret; + + value = FIELD_GET(PCF8523_CONTROL3_PM, value); + + switch(value) { + case 0x0: + case 0x4: + param->uvalue = RTC_BSM_LEVEL; + break; + case 0x1: + case 0x5: + param->uvalue = RTC_BSM_DIRECT; + break; + case PCF8523_PM_STANDBY: + param->uvalue = RTC_BSM_STANDBY; + break; + default: + param->uvalue = RTC_BSM_DISABLED; + } + + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int pcf8523_param_set(struct device *dev, struct rtc_param *param) +{ + struct pcf8523 *pcf8523 = dev_get_drvdata(dev); + + switch(param->param) { + u8 mode; + case RTC_PARAM_BACKUP_SWITCH_MODE: + switch (param->uvalue) { + case RTC_BSM_DISABLED: + mode = 0x2; + break; + case RTC_BSM_DIRECT: + mode = 0x1; + break; + case RTC_BSM_LEVEL: + mode = 0x0; + break; + case RTC_BSM_STANDBY: + mode = PCF8523_PM_STANDBY; + break; + default: + return -EINVAL; + } + + return regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL3, + PCF8523_CONTROL3_PM, + FIELD_PREP(PCF8523_CONTROL3_PM, mode)); + + break; + + default: + return -EINVAL; + } + + return 0; +} + static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { - struct i2c_client *client = to_i2c_client(dev); + struct pcf8523 *pcf8523 = dev_get_drvdata(dev); unsigned int flags = 0; - u8 value; + u32 value; int ret; switch (cmd) { case RTC_VL_READ: - ret = pcf8523_voltage_low(client); + ret = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value); if (ret < 0) return ret; - if (ret) + + if (value & PCF8523_CONTROL3_BLF) flags |= RTC_VL_BACKUP_LOW; - ret = pcf8523_read(client, PCF8523_REG_SECONDS, &value); + ret = regmap_read(pcf8523->regmap, PCF8523_REG_SECONDS, &value); if (ret < 0) return ret; @@ -431,18 +343,15 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd, return -ENOIOCTLCMD; } } -#else -#define pcf8523_rtc_ioctl NULL -#endif static int 
pcf8523_rtc_read_offset(struct device *dev, long *offset) { - struct i2c_client *client = to_i2c_client(dev); + struct pcf8523 *pcf8523 = dev_get_drvdata(dev); int err; - u8 value; + u32 value; s8 val; - err = pcf8523_read(client, PCF8523_REG_OFFSET, &value); + err = regmap_read(pcf8523->regmap, PCF8523_REG_OFFSET, &value); if (err < 0) return err; @@ -455,9 +364,9 @@ static int pcf8523_rtc_read_offset(struct device *dev, long *offset) static int pcf8523_rtc_set_offset(struct device *dev, long offset) { - struct i2c_client *client = to_i2c_client(dev); + struct pcf8523 *pcf8523 = dev_get_drvdata(dev); long reg_m0, reg_m1; - u8 value; + u32 value; reg_m0 = clamp(DIV_ROUND_CLOSEST(offset, 4340), -64L, 63L); reg_m1 = clamp(DIV_ROUND_CLOSEST(offset, 4069), -64L, 63L); @@ -467,7 +376,7 @@ static int pcf8523_rtc_set_offset(struct device *dev, long offset) else value = (reg_m1 & 0x7f) | PCF8523_OFFSET_MODE; - return pcf8523_write(client, PCF8523_REG_OFFSET, value); + return regmap_write(pcf8523->regmap, PCF8523_REG_OFFSET, value); } static const struct rtc_class_ops pcf8523_rtc_ops = { @@ -479,6 +388,14 @@ static const struct rtc_class_ops pcf8523_rtc_ops = { .ioctl = pcf8523_rtc_ioctl, .read_offset = pcf8523_rtc_read_offset, .set_offset = pcf8523_rtc_set_offset, + .param_get = pcf8523_param_get, + .param_set = pcf8523_param_set, +}; + +static const struct regmap_config regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x13, }; static int pcf8523_probe(struct i2c_client *client, @@ -487,6 +404,7 @@ static int pcf8523_probe(struct i2c_client *client, struct pcf8523 *pcf8523; struct rtc_device *rtc; bool wakeup_source = false; + u32 value; int err; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) @@ -496,46 +414,60 @@ static int pcf8523_probe(struct i2c_client *client, if (!pcf8523) return -ENOMEM; + pcf8523->regmap = devm_regmap_init_i2c(client, &regmap_config); + if (IS_ERR(pcf8523->regmap)) + return PTR_ERR(pcf8523->regmap); + i2c_set_clientdata(client, pcf8523); - pcf8523->client = client; - err = pcf8523_load_capacitance(client); + rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + pcf8523->rtc = rtc; + + err = pcf8523_load_capacitance(pcf8523, client->dev.of_node); if (err < 0) dev_warn(&client->dev, "failed to set xtal load capacitance: %d", err); - err = pcf8523_set_pm(client, 0); + err = regmap_read(pcf8523->regmap, PCF8523_REG_SECONDS, &value); if (err < 0) return err; - rtc = devm_rtc_allocate_device(&client->dev); - if (IS_ERR(rtc)) - return PTR_ERR(rtc); + if (value & PCF8523_SECONDS_OS) { + err = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value); + if (err < 0) + return err; + + if (FIELD_GET(PCF8523_CONTROL3_PM, value) == PCF8523_PM_STANDBY) { + err = regmap_write(pcf8523->regmap, PCF8523_REG_CONTROL3, + value & ~PCF8523_CONTROL3_PM); + if (err < 0) + return err; + } + } - pcf8523->rtc = rtc; rtc->ops = &pcf8523_rtc_ops; rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; rtc->range_max = RTC_TIMESTAMP_END_2099; rtc->uie_unsupported = 1; if (client->irq > 0) { - err = pcf8523_write(client, PCF8523_TMR_CLKOUT_CTRL, 0x38); + err = regmap_write(pcf8523->regmap, PCF8523_TMR_CLKOUT_CTRL, 0x38); if (err < 0) return err; err = devm_request_threaded_irq(&client->dev, client->irq, NULL, pcf8523_irq, IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW, - dev_name(&rtc->dev), client); + dev_name(&rtc->dev), pcf8523); if (err) return err; dev_pm_set_wake_irq(&client->dev, client->irq); } -#ifdef CONFIG_OF wakeup_source = 
of_property_read_bool(client->dev.of_node, "wakeup-source"); -#endif if (client->irq > 0 || wakeup_source) device_init_wakeup(&client->dev, true); @@ -548,19 +480,17 @@ static const struct i2c_device_id pcf8523_id[] = { }; MODULE_DEVICE_TABLE(i2c, pcf8523_id); -#ifdef CONFIG_OF static const struct of_device_id pcf8523_of_match[] = { { .compatible = "nxp,pcf8523" }, { .compatible = "microcrystal,rv8523" }, { } }; MODULE_DEVICE_TABLE(of, pcf8523_of_match); -#endif static struct i2c_driver pcf8523_driver = { .driver = { .name = "rtc-pcf8523", - .of_match_table = of_match_ptr(pcf8523_of_match), + .of_match_table = pcf8523_of_match, }, .probe = pcf8523_probe, .id_table = pcf8523_id, diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c index 12c807306893..cdc623b3e365 100644 --- a/drivers/rtc/rtc-rv3028.c +++ b/drivers/rtc/rtc-rv3028.c @@ -10,6 +10,7 @@ #include <linux/clk-provider.h> #include <linux/bcd.h> +#include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/i2c.h> #include <linux/interrupt.h> @@ -80,6 +81,10 @@ #define RV3028_BACKUP_TCE BIT(5) #define RV3028_BACKUP_TCR_MASK GENMASK(1,0) +#define RV3028_BACKUP_BSM GENMASK(3,2) + +#define RV3028_BACKUP_BSM_DSM 0x1 +#define RV3028_BACKUP_BSM_LSM 0x3 #define OFFSET_STEP_PPT 953674 @@ -512,6 +517,71 @@ exit_eerd: } +static int rv3028_param_get(struct device *dev, struct rtc_param *param) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev); + int ret; + + switch(param->param) { + u32 value; + + case RTC_PARAM_BACKUP_SWITCH_MODE: + ret = regmap_read(rv3028->regmap, RV3028_BACKUP, &value); + if (ret < 0) + return ret; + + value = FIELD_GET(RV3028_BACKUP_BSM, value); + + switch(value) { + case RV3028_BACKUP_BSM_DSM: + param->uvalue = RTC_BSM_DIRECT; + break; + case RV3028_BACKUP_BSM_LSM: + param->uvalue = RTC_BSM_LEVEL; + break; + default: + param->uvalue = RTC_BSM_DISABLED; + } + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int rv3028_param_set(struct device *dev, struct rtc_param *param) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev); + + switch(param->param) { + u8 mode; + case RTC_PARAM_BACKUP_SWITCH_MODE: + switch (param->uvalue) { + case RTC_BSM_DISABLED: + mode = 0; + break; + case RTC_BSM_DIRECT: + mode = RV3028_BACKUP_BSM_DSM; + break; + case RTC_BSM_LEVEL: + mode = RV3028_BACKUP_BSM_LSM; + break; + default: + return -EINVAL; + } + + return rv3028_update_cfg(rv3028, RV3028_BACKUP, RV3028_BACKUP_BSM, + FIELD_PREP(RV3028_BACKUP_BSM, mode)); + + default: + return -EINVAL; + } + + return 0; +} + static int rv3028_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { struct rv3028_data *rv3028 = dev_get_drvdata(dev); @@ -776,6 +846,8 @@ static const struct rtc_class_ops rv3028_rtc_ops = { .read_offset = rv3028_read_offset, .set_offset = rv3028_set_offset, .ioctl = rv3028_ioctl, + .param_get = rv3028_param_get, + .param_set = rv3028_param_set, }; static const struct regmap_config regmap_config = { @@ -878,6 +950,8 @@ static int rv3028_probe(struct i2c_client *client) if (ret) return ret; + set_bit(RTC_FEATURE_BACKUP_SWITCH_MODE, rv3028->rtc->features); + rv3028->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; rv3028->rtc->range_max = RTC_TIMESTAMP_END_2099; rv3028->rtc->ops = &rv3028_rtc_ops; diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c index d63102d5cb1e..c3bee305eacc 100644 --- a/drivers/rtc/rtc-rv3032.c +++ b/drivers/rtc/rtc-rv3032.c @@ -106,6 +106,7 @@ struct rv3032_data { struct regmap *regmap; struct rtc_device *rtc; + bool trickle_charger_set; #ifdef 
CONFIG_COMMON_CLK struct clk_hw clkout_hw; #endif @@ -310,14 +311,6 @@ static int rv3032_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) u8 ctrl = 0; int ret; - /* The alarm has no seconds, round up to nearest minute */ - if (alrm->time.tm_sec) { - time64_t alarm_time = rtc_tm_to_time64(&alrm->time); - - alarm_time += 60 - alrm->time.tm_sec; - rtc_time64_to_tm(alarm_time, &alrm->time); - } - ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL2, RV3032_CTRL2_AIE | RV3032_CTRL2_UIE, 0); if (ret) @@ -402,6 +395,75 @@ static int rv3032_set_offset(struct device *dev, long offset) FIELD_PREP(RV3032_OFFSET_MSK, offset)); } +static int rv3032_param_get(struct device *dev, struct rtc_param *param) +{ + struct rv3032_data *rv3032 = dev_get_drvdata(dev); + int ret; + + switch(param->param) { + u32 value; + + case RTC_PARAM_BACKUP_SWITCH_MODE: + ret = regmap_read(rv3032->regmap, RV3032_PMU, &value); + if (ret < 0) + return ret; + + value = FIELD_GET(RV3032_PMU_BSM, value); + + switch(value) { + case RV3032_PMU_BSM_DSM: + param->uvalue = RTC_BSM_DIRECT; + break; + case RV3032_PMU_BSM_LSM: + param->uvalue = RTC_BSM_LEVEL; + break; + default: + param->uvalue = RTC_BSM_DISABLED; + } + + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int rv3032_param_set(struct device *dev, struct rtc_param *param) +{ + struct rv3032_data *rv3032 = dev_get_drvdata(dev); + + switch(param->param) { + u8 mode; + case RTC_PARAM_BACKUP_SWITCH_MODE: + if (rv3032->trickle_charger_set) + return -EINVAL; + + switch (param->uvalue) { + case RTC_BSM_DISABLED: + mode = 0; + break; + case RTC_BSM_DIRECT: + mode = RV3032_PMU_BSM_DSM; + break; + case RTC_BSM_LEVEL: + mode = RV3032_PMU_BSM_LSM; + break; + default: + return -EINVAL; + } + + return rv3032_update_cfg(rv3032, RV3032_PMU, RV3032_PMU_BSM, + FIELD_PREP(RV3032_PMU_BSM, mode)); + + default: + return -EINVAL; + } + + return 0; +} + static int rv3032_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { struct rv3032_data *rv3032 = dev_get_drvdata(dev); @@ -541,6 +603,8 @@ static int rv3032_trickle_charger_setup(struct device *dev, struct rv3032_data * return 0; } + rv3032->trickle_charger_set = true; + return rv3032_update_cfg(rv3032, RV3032_PMU, RV3032_PMU_TCR | RV3032_PMU_TCM | RV3032_PMU_BSM, val | FIELD_PREP(RV3032_PMU_TCR, i)); @@ -617,11 +681,11 @@ static int rv3032_clkout_set_rate(struct clk_hw *hw, unsigned long rate, ret = rv3032_enter_eerd(rv3032, &eerd); if (ret) - goto exit_eerd; + return ret; ret = regmap_write(rv3032->regmap, RV3032_CLKOUT1, hfd & 0xff); if (ret) - return ret; + goto exit_eerd; ret = regmap_write(rv3032->regmap, RV3032_CLKOUT2, RV3032_CLKOUT2_OS | FIELD_PREP(RV3032_CLKOUT2_HFD_MSK, hfd >> 8)); @@ -813,6 +877,8 @@ static const struct rtc_class_ops rv3032_rtc_ops = { .read_alarm = rv3032_get_alarm, .set_alarm = rv3032_set_alarm, .alarm_irq_enable = rv3032_alarm_irq_enable, + .param_get = rv3032_param_get, + .param_set = rv3032_param_set, }; static const struct regmap_config regmap_config = { @@ -883,6 +949,9 @@ static int rv3032_probe(struct i2c_client *client) rv3032_trickle_charger_setup(&client->dev, rv3032); + set_bit(RTC_FEATURE_BACKUP_SWITCH_MODE, rv3032->rtc->features); + set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rv3032->rtc->features); + rv3032->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; rv3032->rtc->range_max = RTC_TIMESTAMP_END_2099; rv3032->rtc->ops = &rv3032_rtc_ops; diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index 72adef5a5ebe..0d5ed38bf60c 100644 --- a/drivers/rtc/rtc-rv8803.c +++ 
b/drivers/rtc/rtc-rv8803.c @@ -340,8 +340,8 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) } } - ctrl[1] &= ~RV8803_FLAG_AF; - err = rv8803_write_reg(rv8803->client, RV8803_FLAG, ctrl[1]); + ctrl[0] &= ~RV8803_FLAG_AF; + err = rv8803_write_reg(rv8803->client, RV8803_FLAG, ctrl[0]); mutex_unlock(&rv8803->flags_lock); if (err) return err; diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c index f4d425002f7f..758fd6e11a15 100644 --- a/drivers/rtc/rtc-rx6110.c +++ b/drivers/rtc/rtc-rx6110.c @@ -422,7 +422,7 @@ static struct regmap_config regmap_i2c_config = { static int rx6110_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { - struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct i2c_adapter *adapter = client->adapter; struct rx6110_data *rx6110; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c index d38aaf08108c..5bfdd34a72ff 100644 --- a/drivers/rtc/rtc-rx8025.c +++ b/drivers/rtc/rtc-rx8025.c @@ -248,9 +248,6 @@ static int rx8025_set_time(struct device *dev, struct rtc_time *dt) u8 date[7]; int ret; - if ((dt->tm_year < 100) || (dt->tm_year > 199)) - return -EINVAL; - /* * Here the read-only bits are written as "0". I'm not sure if that * is sound. @@ -318,9 +315,6 @@ static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t) u8 ald[2]; int ctrl2, err; - if (client->irq <= 0) - return -EINVAL; - err = rx8025_read_regs(client, RX8025_REG_ALDMIN, 2, ald); if (err) return err; @@ -355,20 +349,6 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t) u8 ald[2]; int err; - if (client->irq <= 0) - return -EINVAL; - - /* - * Hardware alarm precision is 1 minute! - * round up to nearest minute - */ - if (t->time.tm_sec) { - time64_t alarm_time = rtc_tm_to_time64(&t->time); - - alarm_time += 60 - t->time.tm_sec; - rtc_time64_to_tm(alarm_time, &t->time); - } - ald[0] = bin2bcd(t->time.tm_min); if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224) ald[1] = bin2bcd(t->time.tm_hour); @@ -423,17 +403,7 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled) return 0; } -static const struct rtc_class_ops rx8025_rtc_ops = { - .read_time = rx8025_get_time, - .set_time = rx8025_set_time, - .read_alarm = rx8025_read_alarm, - .set_alarm = rx8025_set_alarm, - .alarm_irq_enable = rx8025_alarm_irq_enable, -}; - /* - * Clock precision adjustment support - * * According to the RX8025 SA/NB application manual the frequency and * temperature characteristics can be approximated using the following * equation: @@ -444,11 +414,8 @@ static const struct rtc_class_ops rx8025_rtc_ops = { * a : Coefficient = (-35 +-5) * 10**-9 * ut: Ultimate temperature in degree = +25 +-5 degree * t : Any temperature in degree - * - * Note that the clock adjustment in ppb must be entered (which is - * the negative value of the deviation). */ -static int rx8025_get_clock_adjust(struct device *dev, int *adj) +static int rx8025_read_offset(struct device *dev, long *offset) { struct i2c_client *client = to_i2c_client(dev); int digoff; @@ -457,63 +424,75 @@ static int rx8025_get_clock_adjust(struct device *dev, int *adj) if (digoff < 0) return digoff; - *adj = digoff >= 64 ? digoff - 128 : digoff; - if (*adj > 0) - (*adj)--; - *adj *= -RX8025_ADJ_RESOLUTION; + *offset = digoff >= 64 ? 
digoff - 128 : digoff; + if (*offset > 0) + (*offset)--; + *offset *= RX8025_ADJ_RESOLUTION; return 0; } -static int rx8025_set_clock_adjust(struct device *dev, int adj) +static int rx8025_set_offset(struct device *dev, long offset) { struct i2c_client *client = to_i2c_client(dev); u8 digoff; int err; - adj /= -RX8025_ADJ_RESOLUTION; - if (adj > RX8025_ADJ_DATA_MAX) - adj = RX8025_ADJ_DATA_MAX; - else if (adj < RX8025_ADJ_DATA_MIN) - adj = RX8025_ADJ_DATA_MIN; - else if (adj > 0) - adj++; - else if (adj < 0) - adj += 128; - digoff = adj; + offset /= RX8025_ADJ_RESOLUTION; + if (offset > RX8025_ADJ_DATA_MAX) + offset = RX8025_ADJ_DATA_MAX; + else if (offset < RX8025_ADJ_DATA_MIN) + offset = RX8025_ADJ_DATA_MIN; + else if (offset > 0) + offset++; + else if (offset < 0) + offset += 128; + digoff = offset; err = rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff); if (err) return err; - dev_dbg(dev, "%s: write 0x%02x\n", __func__, digoff); - return 0; } +static const struct rtc_class_ops rx8025_rtc_ops = { + .read_time = rx8025_get_time, + .set_time = rx8025_set_time, + .read_alarm = rx8025_read_alarm, + .set_alarm = rx8025_set_alarm, + .alarm_irq_enable = rx8025_alarm_irq_enable, + .read_offset = rx8025_read_offset, + .set_offset = rx8025_set_offset, +}; + static ssize_t rx8025_sysfs_show_clock_adjust(struct device *dev, struct device_attribute *attr, char *buf) { - int err, adj; + long adj; + int err; - err = rx8025_get_clock_adjust(dev, &adj); + dev_warn_once(dev, "clock_adjust_ppb is deprecated, use offset\n"); + err = rx8025_read_offset(dev, &adj); if (err) return err; - return sprintf(buf, "%d\n", adj); + return sprintf(buf, "%ld\n", -adj); } static ssize_t rx8025_sysfs_store_clock_adjust(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int adj, err; + long adj; + int err; - if (sscanf(buf, "%i", &adj) != 1) + dev_warn_once(dev, "clock_adjust_ppb is deprecated, use offset\n"); + if (kstrtol(buf, 10, &adj) != 0) return -EINVAL; - err = rx8025_set_clock_adjust(dev, adj); + err = rx8025_set_offset(dev, -adj); return err ? 
err : count; } @@ -522,15 +501,14 @@ static DEVICE_ATTR(clock_adjust_ppb, S_IRUGO | S_IWUSR, rx8025_sysfs_show_clock_adjust, rx8025_sysfs_store_clock_adjust); -static int rx8025_sysfs_register(struct device *dev) -{ - return device_create_file(dev, &dev_attr_clock_adjust_ppb); -} +static struct attribute *rx8025_attrs[] = { + &dev_attr_clock_adjust_ppb.attr, + NULL +}; -static void rx8025_sysfs_unregister(struct device *dev) -{ - device_remove_file(dev, &dev_attr_clock_adjust_ppb); -} +static const struct attribute_group rx8025_attr_group = { + .attrs = rx8025_attrs, +}; static int rx8025_probe(struct i2c_client *client, const struct i2c_device_id *id) @@ -559,12 +537,13 @@ static int rx8025_probe(struct i2c_client *client, if (err) return err; - rx8025->rtc = devm_rtc_device_register(&client->dev, client->name, - &rx8025_rtc_ops, THIS_MODULE); - if (IS_ERR(rx8025->rtc)) { - dev_err(&client->dev, "unable to register the class device\n"); + rx8025->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(rx8025->rtc)) return PTR_ERR(rx8025->rtc); - } + + rx8025->rtc->ops = &rx8025_rtc_ops; + rx8025->rtc->range_min = RTC_TIMESTAMP_BEGIN_1900; + rx8025->rtc->range_max = RTC_TIMESTAMP_END_2099; if (client->irq > 0) { dev_info(&client->dev, "IRQ %d supplied\n", client->irq); @@ -572,25 +551,20 @@ static int rx8025_probe(struct i2c_client *client, rx8025_handle_irq, IRQF_ONESHOT, "rx8025", client); - if (err) { - dev_err(&client->dev, "unable to request IRQ, alarms disabled\n"); - client->irq = 0; - } + if (err) + clear_bit(RTC_FEATURE_ALARM, rx8025->rtc->features); } rx8025->rtc->max_user_freq = 1; - /* the rx8025 alarm only supports a minute accuracy */ - rx8025->rtc->uie_unsupported = 1; + set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rx8025->rtc->features); + clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rx8025->rtc->features); - err = rx8025_sysfs_register(&client->dev); - return err; -} + err = rtc_add_group(rx8025->rtc, &rx8025_attr_group); + if (err) + return err; -static int rx8025_remove(struct i2c_client *client) -{ - rx8025_sysfs_unregister(&client->dev); - return 0; + return devm_rtc_register_device(rx8025->rtc); } static struct i2c_driver rx8025_driver = { @@ -598,7 +572,6 @@ static struct i2c_driver rx8025_driver = { .name = "rtc-rx8025", }, .probe = rx8025_probe, - .remove = rx8025_remove, .id_table = rx8025_id, }; diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c index b5bdeda7d767..26278c770731 100644 --- a/drivers/rtc/rtc-s35390a.c +++ b/drivers/rtc/rtc-s35390a.c @@ -285,9 +285,6 @@ static int s35390a_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) alm->time.tm_min, alm->time.tm_hour, alm->time.tm_mday, alm->time.tm_mon, alm->time.tm_year, alm->time.tm_wday); - if (alm->time.tm_sec != 0) - dev_warn(&client->dev, "Alarms are only supported on a per minute basis!\n"); - /* disable interrupt (which deasserts the irq line) */ err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts)); if (err < 0) @@ -491,8 +488,8 @@ static int s35390a_probe(struct i2c_client *client, s35390a->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; s35390a->rtc->range_max = RTC_TIMESTAMP_END_2099; - /* supports per-minute alarms only, therefore set uie_unsupported */ - s35390a->rtc->uie_unsupported = 1; + set_bit(RTC_FEATURE_ALARM_RES_MINUTE, s35390a->rtc->features); + clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, s35390a->rtc->features ); if (status1 & S35390A_FLAG_INT2) rtc_update_irq(s35390a->rtc, 1, RTC_AF); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 
e57d3ca70a78..db529733c9c4 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -127,10 +127,9 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) return ret; } -/* Time read/write */ -static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) +/* Read time from RTC and convert it from BCD */ +static int s3c_rtc_read_time(struct s3c_rtc *info, struct rtc_time *tm) { - struct s3c_rtc *info = dev_get_drvdata(dev); unsigned int have_retried = 0; int ret; @@ -139,54 +138,40 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) return ret; retry_get_time: - rtc_tm->tm_min = readb(info->base + S3C2410_RTCMIN); - rtc_tm->tm_hour = readb(info->base + S3C2410_RTCHOUR); - rtc_tm->tm_mday = readb(info->base + S3C2410_RTCDATE); - rtc_tm->tm_mon = readb(info->base + S3C2410_RTCMON); - rtc_tm->tm_year = readb(info->base + S3C2410_RTCYEAR); - rtc_tm->tm_sec = readb(info->base + S3C2410_RTCSEC); - - /* the only way to work out whether the system was mid-update + tm->tm_min = readb(info->base + S3C2410_RTCMIN); + tm->tm_hour = readb(info->base + S3C2410_RTCHOUR); + tm->tm_mday = readb(info->base + S3C2410_RTCDATE); + tm->tm_mon = readb(info->base + S3C2410_RTCMON); + tm->tm_year = readb(info->base + S3C2410_RTCYEAR); + tm->tm_sec = readb(info->base + S3C2410_RTCSEC); + + /* + * The only way to work out whether the system was mid-update * when we read it is to check the second counter, and if it * is zero, then we re-try the entire read */ - - if (rtc_tm->tm_sec == 0 && !have_retried) { + if (tm->tm_sec == 0 && !have_retried) { have_retried = 1; goto retry_get_time; } - rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); - rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); - rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); - rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday); - rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon); - rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); - s3c_rtc_disable_clk(info); - rtc_tm->tm_year += 100; - rtc_tm->tm_mon -= 1; + tm->tm_sec = bcd2bin(tm->tm_sec); + tm->tm_min = bcd2bin(tm->tm_min); + tm->tm_hour = bcd2bin(tm->tm_hour); + tm->tm_mday = bcd2bin(tm->tm_mday); + tm->tm_mon = bcd2bin(tm->tm_mon); + tm->tm_year = bcd2bin(tm->tm_year); - dev_dbg(dev, "read time %ptR\n", rtc_tm); return 0; } -static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) +/* Convert time to BCD and write it to RTC */ +static int s3c_rtc_write_time(struct s3c_rtc *info, const struct rtc_time *tm) { - struct s3c_rtc *info = dev_get_drvdata(dev); - int year = tm->tm_year - 100; int ret; - dev_dbg(dev, "set time %ptR\n", tm); - - /* we get around y2k by simply not supporting it */ - - if (year < 0 || year >= 100) { - dev_err(dev, "rtc only supports 100 years\n"); - return -EINVAL; - } - ret = s3c_rtc_enable_clk(info); if (ret) return ret; @@ -195,14 +180,48 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) writeb(bin2bcd(tm->tm_min), info->base + S3C2410_RTCMIN); writeb(bin2bcd(tm->tm_hour), info->base + S3C2410_RTCHOUR); writeb(bin2bcd(tm->tm_mday), info->base + S3C2410_RTCDATE); - writeb(bin2bcd(tm->tm_mon + 1), info->base + S3C2410_RTCMON); - writeb(bin2bcd(year), info->base + S3C2410_RTCYEAR); + writeb(bin2bcd(tm->tm_mon), info->base + S3C2410_RTCMON); + writeb(bin2bcd(tm->tm_year), info->base + S3C2410_RTCYEAR); s3c_rtc_disable_clk(info); return 0; } +static int s3c_rtc_gettime(struct device *dev, struct rtc_time *tm) +{ + struct s3c_rtc *info = dev_get_drvdata(dev); + int ret; + + ret = s3c_rtc_read_time(info, tm); + if (ret) + return ret; + 
+ /* Convert internal representation to actual date/time */ + tm->tm_year += 100; + tm->tm_mon -= 1; + + dev_dbg(dev, "read time %ptR\n", tm); + return 0; +} + +static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) +{ + struct s3c_rtc *info = dev_get_drvdata(dev); + struct rtc_time rtc_tm = *tm; + + dev_dbg(dev, "set time %ptR\n", tm); + + /* + * Convert actual date/time to internal representation. + * We get around Y2K by simply not supporting it. + */ + rtc_tm.tm_year -= 100; + rtc_tm.tm_mon += 1; + + return s3c_rtc_write_time(info, &rtc_tm); +} + static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct s3c_rtc *info = dev_get_drvdata(dev); @@ -447,15 +466,20 @@ static int s3c_rtc_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); - /* register RTC and exit */ - info->rtc = devm_rtc_device_register(&pdev->dev, "s3c", &s3c_rtcops, - THIS_MODULE); + info->rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(info->rtc)) { - dev_err(&pdev->dev, "cannot attach rtc\n"); ret = PTR_ERR(info->rtc); goto err_nortc; } + info->rtc->ops = &s3c_rtcops; + info->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + info->rtc->range_max = RTC_TIMESTAMP_END_2099; + + ret = devm_rtc_register_device(info->rtc); + if (ret) + goto err_nortc; + ret = devm_request_irq(&pdev->dev, info->irq_alarm, s3c_rtc_alarmirq, 0, "s3c2410-rtc alarm", info); if (ret) { diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c index fb9c6b709e13..4243fe6d3842 100644 --- a/drivers/rtc/rtc-s5m.c +++ b/drivers/rtc/rtc-s5m.c @@ -861,4 +861,3 @@ module_platform_driver(s5m_rtc_driver); MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); MODULE_DESCRIPTION("Samsung S5M/S2MPS14 RTC driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:s5m-rtc"); diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index adec1b14a8de..711832c758ae 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -673,8 +673,17 @@ static int sun6i_rtc_probe(struct platform_device *pdev) struct sun6i_rtc_dev *chip = sun6i_rtc; int ret; - if (!chip) - return -ENODEV; + if (!chip) { + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + spin_lock_init(&chip->lock); + + chip->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(chip->base)) + return PTR_ERR(chip->base); + } platform_set_drvdata(pdev, chip); diff --git a/drivers/rtc/rtc-tps80031.c b/drivers/rtc/rtc-tps80031.c deleted file mode 100644 index c77b8eab94a0..000000000000 --- a/drivers/rtc/rtc-tps80031.c +++ /dev/null @@ -1,324 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * rtc-tps80031.c -- TI TPS80031/TPS80032 RTC driver - * - * RTC driver for TI TPS80031/TPS80032 Fully Integrated - * Power Management with Power Path and Battery Charger - * - * Copyright (c) 2012, NVIDIA Corporation. - * - * Author: Laxman Dewangan <ldewangan@nvidia.com> - */ - -#include <linux/bcd.h> -#include <linux/device.h> -#include <linux/err.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/mfd/tps80031.h> -#include <linux/platform_device.h> -#include <linux/pm.h> -#include <linux/rtc.h> -#include <linux/slab.h> - -#define ENABLE_ALARM_INT 0x08 -#define ALARM_INT_STATUS 0x40 - -/** - * Setting bit to 1 in STOP_RTC will run the RTC and - * setting this bit to 0 will freeze RTC. 
- */ -#define STOP_RTC 0x1 - -/* Power on reset Values of RTC registers */ -#define TPS80031_RTC_POR_YEAR 0 -#define TPS80031_RTC_POR_MONTH 1 -#define TPS80031_RTC_POR_DAY 1 - -/* Numbers of registers for time and alarms */ -#define TPS80031_RTC_TIME_NUM_REGS 7 -#define TPS80031_RTC_ALARM_NUM_REGS 6 - -/** - * PMU RTC have only 2 nibbles to store year information, so using an - * offset of 100 to set the base year as 2000 for our driver. - */ -#define RTC_YEAR_OFFSET 100 - -struct tps80031_rtc { - struct rtc_device *rtc; - int irq; -}; - -static int tps80031_rtc_read_time(struct device *dev, struct rtc_time *tm) -{ - u8 buff[TPS80031_RTC_TIME_NUM_REGS]; - int ret; - - ret = tps80031_reads(dev->parent, TPS80031_SLAVE_ID1, - TPS80031_SECONDS_REG, TPS80031_RTC_TIME_NUM_REGS, buff); - if (ret < 0) { - dev_err(dev, "reading RTC_SECONDS_REG failed, err = %d\n", ret); - return ret; - } - - tm->tm_sec = bcd2bin(buff[0]); - tm->tm_min = bcd2bin(buff[1]); - tm->tm_hour = bcd2bin(buff[2]); - tm->tm_mday = bcd2bin(buff[3]); - tm->tm_mon = bcd2bin(buff[4]) - 1; - tm->tm_year = bcd2bin(buff[5]) + RTC_YEAR_OFFSET; - tm->tm_wday = bcd2bin(buff[6]); - return 0; -} - -static int tps80031_rtc_set_time(struct device *dev, struct rtc_time *tm) -{ - u8 buff[7]; - int ret; - - buff[0] = bin2bcd(tm->tm_sec); - buff[1] = bin2bcd(tm->tm_min); - buff[2] = bin2bcd(tm->tm_hour); - buff[3] = bin2bcd(tm->tm_mday); - buff[4] = bin2bcd(tm->tm_mon + 1); - buff[5] = bin2bcd(tm->tm_year % RTC_YEAR_OFFSET); - buff[6] = bin2bcd(tm->tm_wday); - - /* Stop RTC while updating the RTC time registers */ - ret = tps80031_clr_bits(dev->parent, TPS80031_SLAVE_ID1, - TPS80031_RTC_CTRL_REG, STOP_RTC); - if (ret < 0) { - dev_err(dev->parent, "Stop RTC failed, err = %d\n", ret); - return ret; - } - - ret = tps80031_writes(dev->parent, TPS80031_SLAVE_ID1, - TPS80031_SECONDS_REG, - TPS80031_RTC_TIME_NUM_REGS, buff); - if (ret < 0) { - dev_err(dev, "writing RTC_SECONDS_REG failed, err %d\n", ret); - return ret; - } - - ret = tps80031_set_bits(dev->parent, TPS80031_SLAVE_ID1, - TPS80031_RTC_CTRL_REG, STOP_RTC); - if (ret < 0) - dev_err(dev->parent, "Start RTC failed, err = %d\n", ret); - return ret; -} - -static int tps80031_rtc_alarm_irq_enable(struct device *dev, - unsigned int enable) -{ - int ret; - - if (enable) - ret = tps80031_set_bits(dev->parent, TPS80031_SLAVE_ID1, - TPS80031_RTC_INTERRUPTS_REG, ENABLE_ALARM_INT); - else - ret = tps80031_clr_bits(dev->parent, TPS80031_SLAVE_ID1, - TPS80031_RTC_INTERRUPTS_REG, ENABLE_ALARM_INT); - if (ret < 0) { - dev_err(dev, "Update on RTC_INT failed, err = %d\n", ret); - return ret; - } - return 0; -} - -static int tps80031_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) -{ - u8 buff[TPS80031_RTC_ALARM_NUM_REGS]; - int ret; - - buff[0] = bin2bcd(alrm->time.tm_sec); - buff[1] = bin2bcd(alrm->time.tm_min); - buff[2] = bin2bcd(alrm->time.tm_hour); - buff[3] = bin2bcd(alrm->time.tm_mday); - buff[4] = bin2bcd(alrm->time.tm_mon + 1); - buff[5] = bin2bcd(alrm->time.tm_year % RTC_YEAR_OFFSET); - ret = tps80031_writes(dev->parent, TPS80031_SLAVE_ID1, - TPS80031_ALARM_SECONDS_REG, - TPS80031_RTC_ALARM_NUM_REGS, buff); - if (ret < 0) { - dev_err(dev, "Writing RTC_ALARM failed, err %d\n", ret); - return ret; - } - return tps80031_rtc_alarm_irq_enable(dev, alrm->enabled); -} - -static int tps80031_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) -{ - u8 buff[6]; - int ret; - - ret = tps80031_reads(dev->parent, TPS80031_SLAVE_ID1, - TPS80031_ALARM_SECONDS_REG, - TPS80031_RTC_ALARM_NUM_REGS, 
buff); - if (ret < 0) { - dev_err(dev->parent, - "reading RTC_ALARM failed, err = %d\n", ret); - return ret; - } - - alrm->time.tm_sec = bcd2bin(buff[0]); - alrm->time.tm_min = bcd2bin(buff[1]); - alrm->time.tm_hour = bcd2bin(buff[2]); - alrm->time.tm_mday = bcd2bin(buff[3]); - alrm->time.tm_mon = bcd2bin(buff[4]) - 1; - alrm->time.tm_year = bcd2bin(buff[5]) + RTC_YEAR_OFFSET; - return 0; -} - -static int clear_alarm_int_status(struct device *dev, struct tps80031_rtc *rtc) -{ - int ret; - u8 buf; - - /** - * As per datasheet, A dummy read of this RTC_STATUS_REG register - * is necessary before each I2C read in order to update the status - * register value. - */ - ret = tps80031_read(dev->parent, TPS80031_SLAVE_ID1, - TPS80031_RTC_STATUS_REG, &buf); - if (ret < 0) { - dev_err(dev, "reading RTC_STATUS failed. err = %d\n", ret); - return ret; - } - - /* clear Alarm status bits.*/ - ret = tps80031_set_bits(dev->parent, TPS80031_SLAVE_ID1, - TPS80031_RTC_STATUS_REG, ALARM_INT_STATUS); - if (ret < 0) { - dev_err(dev, "clear Alarm INT failed, err = %d\n", ret); - return ret; - } - return 0; -} - -static irqreturn_t tps80031_rtc_irq(int irq, void *data) -{ - struct device *dev = data; - struct tps80031_rtc *rtc = dev_get_drvdata(dev); - int ret; - - ret = clear_alarm_int_status(dev, rtc); - if (ret < 0) - return ret; - - rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF); - return IRQ_HANDLED; -} - -static const struct rtc_class_ops tps80031_rtc_ops = { - .read_time = tps80031_rtc_read_time, - .set_time = tps80031_rtc_set_time, - .set_alarm = tps80031_rtc_set_alarm, - .read_alarm = tps80031_rtc_read_alarm, - .alarm_irq_enable = tps80031_rtc_alarm_irq_enable, -}; - -static int tps80031_rtc_probe(struct platform_device *pdev) -{ - struct tps80031_rtc *rtc; - struct rtc_time tm; - int ret; - - rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); - if (!rtc) - return -ENOMEM; - - rtc->irq = platform_get_irq(pdev, 0); - platform_set_drvdata(pdev, rtc); - - /* Start RTC */ - ret = tps80031_set_bits(pdev->dev.parent, TPS80031_SLAVE_ID1, - TPS80031_RTC_CTRL_REG, STOP_RTC); - if (ret < 0) { - dev_err(&pdev->dev, "failed to start RTC. 
err = %d\n", ret); - return ret; - } - - /* If RTC have POR values, set time 01:01:2000 */ - tps80031_rtc_read_time(&pdev->dev, &tm); - if ((tm.tm_year == RTC_YEAR_OFFSET + TPS80031_RTC_POR_YEAR) && - (tm.tm_mon == (TPS80031_RTC_POR_MONTH - 1)) && - (tm.tm_mday == TPS80031_RTC_POR_DAY)) { - tm.tm_year = 2000; - tm.tm_mday = 1; - tm.tm_mon = 1; - ret = tps80031_rtc_set_time(&pdev->dev, &tm); - if (ret < 0) { - dev_err(&pdev->dev, - "RTC set time failed, err = %d\n", ret); - return ret; - } - } - - /* Clear alarm intretupt status if it is there */ - ret = clear_alarm_int_status(&pdev->dev, rtc); - if (ret < 0) { - dev_err(&pdev->dev, "Clear alarm int failed, err = %d\n", ret); - return ret; - } - - rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &tps80031_rtc_ops, THIS_MODULE); - if (IS_ERR(rtc->rtc)) { - ret = PTR_ERR(rtc->rtc); - dev_err(&pdev->dev, "RTC registration failed, err %d\n", ret); - return ret; - } - - ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, - tps80031_rtc_irq, - IRQF_ONESHOT, - dev_name(&pdev->dev), rtc); - if (ret < 0) { - dev_err(&pdev->dev, "request IRQ:%d failed, err = %d\n", - rtc->irq, ret); - return ret; - } - device_set_wakeup_capable(&pdev->dev, 1); - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int tps80031_rtc_suspend(struct device *dev) -{ - struct tps80031_rtc *rtc = dev_get_drvdata(dev); - - if (device_may_wakeup(dev)) - enable_irq_wake(rtc->irq); - return 0; -} - -static int tps80031_rtc_resume(struct device *dev) -{ - struct tps80031_rtc *rtc = dev_get_drvdata(dev); - - if (device_may_wakeup(dev)) - disable_irq_wake(rtc->irq); - return 0; -}; -#endif - -static SIMPLE_DEV_PM_OPS(tps80031_pm_ops, tps80031_rtc_suspend, - tps80031_rtc_resume); - -static struct platform_driver tps80031_rtc_driver = { - .driver = { - .name = "tps80031-rtc", - .pm = &tps80031_pm_ops, - }, - .probe = tps80031_rtc_probe, -}; - -module_platform_driver(tps80031_rtc_driver); - -MODULE_ALIAS("platform:tps80031-rtc"); -MODULE_DESCRIPTION("TI TPS80031/TPS80032 RTC driver"); -MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 2c40fe15da55..6043c832d09e 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -731,7 +731,7 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr, ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0; else ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0; - return snprintf(buf, PAGE_SIZE, ff_flag ? "1\n" : "0\n"); + return sysfs_emit(buf, ff_flag ? "1\n" : "0\n"); } static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr, @@ -773,7 +773,7 @@ dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf) spin_unlock(&dasd_devmap_lock); out: - return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n"); + return sysfs_emit(buf, ro_flag ? "1\n" : "0\n"); } static ssize_t @@ -834,7 +834,7 @@ dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf) erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0; else erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0; - return snprintf(buf, PAGE_SIZE, erplog ? "1\n" : "0\n"); + return sysfs_emit(buf, erplog ? 
"1\n" : "0\n"); } static ssize_t @@ -1033,13 +1033,13 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr, dasd_put_device(device); goto out; } else { - len = snprintf(buf, PAGE_SIZE, "%s\n", - device->discipline->name); + len = sysfs_emit(buf, "%s\n", + device->discipline->name); dasd_put_device(device); return len; } out: - len = snprintf(buf, PAGE_SIZE, "none\n"); + len = sysfs_emit(buf, "none\n"); return len; } @@ -1056,30 +1056,30 @@ dasd_device_status_show(struct device *dev, struct device_attribute *attr, if (!IS_ERR(device)) { switch (device->state) { case DASD_STATE_NEW: - len = snprintf(buf, PAGE_SIZE, "new\n"); + len = sysfs_emit(buf, "new\n"); break; case DASD_STATE_KNOWN: - len = snprintf(buf, PAGE_SIZE, "detected\n"); + len = sysfs_emit(buf, "detected\n"); break; case DASD_STATE_BASIC: - len = snprintf(buf, PAGE_SIZE, "basic\n"); + len = sysfs_emit(buf, "basic\n"); break; case DASD_STATE_UNFMT: - len = snprintf(buf, PAGE_SIZE, "unformatted\n"); + len = sysfs_emit(buf, "unformatted\n"); break; case DASD_STATE_READY: - len = snprintf(buf, PAGE_SIZE, "ready\n"); + len = sysfs_emit(buf, "ready\n"); break; case DASD_STATE_ONLINE: - len = snprintf(buf, PAGE_SIZE, "online\n"); + len = sysfs_emit(buf, "online\n"); break; default: - len = snprintf(buf, PAGE_SIZE, "no stat\n"); + len = sysfs_emit(buf, "no stat\n"); break; } dasd_put_device(device); } else - len = snprintf(buf, PAGE_SIZE, "unknown\n"); + len = sysfs_emit(buf, "unknown\n"); return len; } @@ -1120,7 +1120,7 @@ static ssize_t dasd_vendor_show(struct device *dev, device = dasd_device_from_cdev(to_ccwdev(dev)); vendor = ""; if (IS_ERR(device)) - return snprintf(buf, PAGE_SIZE, "%s\n", vendor); + return sysfs_emit(buf, "%s\n", vendor); if (device->discipline && device->discipline->get_uid && !device->discipline->get_uid(device, &uid)) @@ -1128,7 +1128,7 @@ static ssize_t dasd_vendor_show(struct device *dev, dasd_put_device(device); - return snprintf(buf, PAGE_SIZE, "%s\n", vendor); + return sysfs_emit(buf, "%s\n", vendor); } static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL); @@ -1148,7 +1148,7 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) device = dasd_device_from_cdev(to_ccwdev(dev)); uid_string[0] = 0; if (IS_ERR(device)) - return snprintf(buf, PAGE_SIZE, "%s\n", uid_string); + return sysfs_emit(buf, "%s\n", uid_string); if (device->discipline && device->discipline->get_uid && !device->discipline->get_uid(device, &uid)) { @@ -1183,7 +1183,7 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) } dasd_put_device(device); - return snprintf(buf, PAGE_SIZE, "%s\n", uid_string); + return sysfs_emit(buf, "%s\n", uid_string); } static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL); @@ -1201,7 +1201,7 @@ dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf) eer_flag = dasd_eer_enabled(devmap->device); else eer_flag = 0; - return snprintf(buf, PAGE_SIZE, eer_flag ? "1\n" : "0\n"); + return sysfs_emit(buf, eer_flag ? 
"1\n" : "0\n"); } static ssize_t @@ -1243,7 +1243,7 @@ dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf) device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) return -ENODEV; - len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_expires); + len = sysfs_emit(buf, "%lu\n", device->default_expires); dasd_put_device(device); return len; } @@ -1283,7 +1283,7 @@ dasd_retries_show(struct device *dev, struct device_attribute *attr, char *buf) device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) return -ENODEV; - len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_retries); + len = sysfs_emit(buf, "%lu\n", device->default_retries); dasd_put_device(device); return len; } @@ -1324,7 +1324,7 @@ dasd_timeout_show(struct device *dev, struct device_attribute *attr, device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) return -ENODEV; - len = snprintf(buf, PAGE_SIZE, "%lu\n", device->blk_timeout); + len = sysfs_emit(buf, "%lu\n", device->blk_timeout); dasd_put_device(device); return len; } @@ -1398,11 +1398,11 @@ static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr, return -ENODEV; if (!device->discipline || !device->discipline->hpf_enabled) { dasd_put_device(device); - return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx); + return sysfs_emit(buf, "%d\n", dasd_nofcx); } hpf = device->discipline->hpf_enabled(device); dasd_put_device(device); - return snprintf(buf, PAGE_SIZE, "%d\n", hpf); + return sysfs_emit(buf, "%d\n", hpf); } static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL); @@ -1416,13 +1416,13 @@ static ssize_t dasd_reservation_policy_show(struct device *dev, devmap = dasd_find_busid(dev_name(dev)); if (IS_ERR(devmap)) { - rc = snprintf(buf, PAGE_SIZE, "ignore\n"); + rc = sysfs_emit(buf, "ignore\n"); } else { spin_lock(&dasd_devmap_lock); if (devmap->features & DASD_FEATURE_FAILONSLCK) - rc = snprintf(buf, PAGE_SIZE, "fail\n"); + rc = sysfs_emit(buf, "fail\n"); else - rc = snprintf(buf, PAGE_SIZE, "ignore\n"); + rc = sysfs_emit(buf, "ignore\n"); spin_unlock(&dasd_devmap_lock); } return rc; @@ -1457,14 +1457,14 @@ static ssize_t dasd_reservation_state_show(struct device *dev, device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) - return snprintf(buf, PAGE_SIZE, "none\n"); + return sysfs_emit(buf, "none\n"); if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) - rc = snprintf(buf, PAGE_SIZE, "reserved\n"); + rc = sysfs_emit(buf, "reserved\n"); else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) - rc = snprintf(buf, PAGE_SIZE, "lost\n"); + rc = sysfs_emit(buf, "lost\n"); else - rc = snprintf(buf, PAGE_SIZE, "none\n"); + rc = sysfs_emit(buf, "none\n"); dasd_put_device(device); return rc; } @@ -1531,7 +1531,7 @@ dasd_path_threshold_show(struct device *dev, device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) return -ENODEV; - len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld); + len = sysfs_emit(buf, "%lu\n", device->path_thrhld); dasd_put_device(device); return len; } @@ -1578,7 +1578,7 @@ dasd_path_autodisable_show(struct device *dev, else flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_PATH_AUTODISABLE) != 0; - return snprintf(buf, PAGE_SIZE, flag ? "1\n" : "0\n"); + return sysfs_emit(buf, flag ? 
"1\n" : "0\n"); } static ssize_t @@ -1616,7 +1616,7 @@ dasd_path_interval_show(struct device *dev, device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) return -ENODEV; - len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval); + len = sysfs_emit(buf, "%lu\n", device->path_interval); dasd_put_device(device); return len; } @@ -1662,9 +1662,9 @@ dasd_device_fcs_show(struct device *dev, struct device_attribute *attr, return -ENODEV; fc_sec = dasd_path_get_fcs_device(device); if (fc_sec == -EINVAL) - rc = snprintf(buf, PAGE_SIZE, "Inconsistent\n"); + rc = sysfs_emit(buf, "Inconsistent\n"); else - rc = snprintf(buf, PAGE_SIZE, "%s\n", dasd_path_get_fcs_str(fc_sec)); + rc = sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec)); dasd_put_device(device); return rc; @@ -1677,7 +1677,7 @@ dasd_path_fcs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) struct dasd_path *path = to_dasd_path(kobj); unsigned int fc_sec = path->fc_security; - return snprintf(buf, PAGE_SIZE, "%s\n", dasd_path_get_fcs_str(fc_sec)); + return sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec)); } static struct kobj_attribute path_fcs_attribute = @@ -1698,7 +1698,7 @@ static ssize_t dasd_##_name##_show(struct device *dev, \ val = _func(device); \ dasd_put_device(device); \ \ - return snprintf(buf, PAGE_SIZE, "%d\n", val); \ + return sysfs_emit(buf, "%d\n", val); \ } \ static DEVICE_ATTR(_name, 0444, dasd_##_name##_show, NULL); \ diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 646ec796bb83..dfde0d941c3c 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -1047,24 +1047,24 @@ raw3270_probe (struct ccw_device *cdev) static ssize_t raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%i\n", - ((struct raw3270 *) dev_get_drvdata(dev))->model); + return sysfs_emit(buf, "%i\n", + ((struct raw3270 *)dev_get_drvdata(dev))->model); } static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL); static ssize_t raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%i\n", - ((struct raw3270 *) dev_get_drvdata(dev))->rows); + return sysfs_emit(buf, "%i\n", + ((struct raw3270 *)dev_get_drvdata(dev))->rows); } static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL); static ssize_t raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%i\n", - ((struct raw3270 *) dev_get_drvdata(dev))->cols); + return sysfs_emit(buf, "%i\n", + ((struct raw3270 *)dev_get_drvdata(dev))->cols); } static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL); diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c index 1f5fab617b67..f7e75d9fedf6 100644 --- a/drivers/s390/char/tape_std.c +++ b/drivers/s390/char/tape_std.c @@ -53,7 +53,6 @@ int tape_std_assign(struct tape_device *device) { int rc; - struct timer_list timeout; struct tape_request *request; request = tape_alloc_request(2, 11); @@ -70,7 +69,7 @@ tape_std_assign(struct tape_device *device) * So we set up a timeout for this call. 
*/ timer_setup(&request->timer, tape_std_assign_timeout, 0); - mod_timer(&timeout, jiffies + 2 * HZ); + mod_timer(&request->timer, jiffies + msecs_to_jiffies(2000)); rc = tape_do_io_interruptible(device, request); diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 1097e76982a5..5440f285f349 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -285,7 +285,7 @@ static ssize_t chp_configure_show(struct device *dev, if (status < 0) return status; - return snprintf(buf, PAGE_SIZE, "%d\n", status); + return sysfs_emit(buf, "%d\n", status); } static int cfg_wait_idle(void); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 2bc55ccf3f23..ce9e7517430f 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -437,8 +437,8 @@ static ssize_t dev_busid_show(struct device *dev, struct subchannel *sch = to_subchannel(dev); struct pmcw *pmcw = &sch->schib.pmcw; - if ((pmcw->st == SUBCHANNEL_TYPE_IO || - pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv) + if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) || + (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w)) return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid, pmcw->dev); else diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c index 6c6c04e1b74d..907d67aeac23 100644 --- a/drivers/scsi/elx/libefc_sli/sli4.c +++ b/drivers/scsi/elx/libefc_sli/sli4.c @@ -4145,7 +4145,7 @@ static int sli_get_read_config(struct sli4 *sli4) { struct sli4_rsp_read_config *conf = sli4->bmbx.virt; - u32 i, total, total_size; + u32 i, total; u32 *base; if (sli_cmd_read_config(sli4, sli4->bmbx.virt)) { @@ -4203,8 +4203,7 @@ sli_get_read_config(struct sli4 *sli4) for (i = 0; i < SLI4_RSRC_MAX; i++) { total = sli4->ext[i].number * sli4->ext[i].size; - total_size = BITS_TO_LONGS(total) * sizeof(long); - sli4->ext[i].use_map = kzalloc(total_size, GFP_KERNEL); + sli4->ext[i].use_map = bitmap_zalloc(total, GFP_KERNEL); if (!sli4->ext[i].use_map) { efc_log_err(sli4, "bitmap memory allocation failed %d\n", i); @@ -4743,7 +4742,7 @@ sli_reset(struct sli4 *sli4) sli4->ext[0].base = NULL; for (i = 0; i < SLI4_RSRC_MAX; i++) { - kfree(sli4->ext[i].use_map); + bitmap_free(sli4->ext[i].use_map); sli4->ext[i].use_map = NULL; sli4->ext[i].base = NULL; } @@ -4784,7 +4783,7 @@ sli_teardown(struct sli4 *sli4) for (i = 0; i < SLI4_RSRC_MAX; i++) { sli4->ext[i].base = NULL; - kfree(sli4->ext[i].use_map); + bitmap_free(sli4->ext[i].use_map); sli4->ext[i].use_map = NULL; } diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index cda0135c43db..8049b00b6766 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -388,6 +388,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost->shost_state = SHOST_CREATED; INIT_LIST_HEAD(&shost->__devices); INIT_LIST_HEAD(&shost->__targets); + INIT_LIST_HEAD(&shost->eh_abort_list); INIT_LIST_HEAD(&shost->eh_cmd_q); INIT_LIST_HEAD(&shost->starved_list); init_waitqueue_head(&shost->host_wait); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 27eb652b564f..81dab9b82f79 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -639,8 +639,8 @@ static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc) mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP; current_time = ktime_get_real(); TimeStamp = ktime_to_ms(current_time); - mpi_request->Reserved7 = cpu_to_le32(TimeStamp & 0xFFFFFFFF); - mpi_request->IOCParameterValue = 
cpu_to_le32(TimeStamp >> 32); + mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32); + mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF); init_completion(&ioc->scsih_cmds.done); ioc->put_smid_default(ioc, smid); dinitprintk(ioc, ioc_info(ioc, diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index db6a759de1e9..a0af986633d2 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -142,6 +142,8 @@ #define MPT_MAX_CALLBACKS 32 +#define MPT_MAX_HBA_NUM_PHYS 32 + #define INTERNAL_CMDS_COUNT 10 /* reserved cmds */ /* reserved for issuing internally framed scsi io cmds */ #define INTERNAL_SCSIIO_CMDS_COUNT 3 @@ -798,6 +800,7 @@ struct _sas_phy { * @enclosure_handle: handle for this a member of an enclosure * @device_info: bitwise defining capabilities of this sas_host/expander * @responding: used in _scsih_expander_device_mark_responding + * @nr_phys_allocated: Allocated memory for this many count phys * @phy: a list of phys that make up this sas_host/expander * @sas_port_list: list of ports attached to this sas_host/expander * @port: hba port entry containing node's port number info @@ -813,6 +816,7 @@ struct _sas_node { u16 enclosure_handle; u64 enclosure_logical_id; u8 responding; + u8 nr_phys_allocated; struct hba_port *port; struct _sas_phy *phy; struct list_head sas_port_list; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index cee7170beae8..00792767c620 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3869,7 +3869,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, shost_for_each_device(sdev, ioc->shost) { sas_device_priv_data = sdev->hostdata; - if (!sas_device_priv_data) + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) continue; if (sas_device_priv_data->sas_target->sas_address != sas_address) @@ -6406,11 +6406,26 @@ _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) int i, j, count = 0, lcount = 0; int ret; u64 sas_addr; + u8 num_phys; drsprintk(ioc, ioc_info(ioc, "updating ports for sas_host(0x%016llx)\n", (unsigned long long)ioc->sas_hba.sas_address)); + mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + if (num_phys > ioc->sas_hba.nr_phys_allocated) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.num_phys = num_phys; + port_table = kcalloc(ioc->sas_hba.num_phys, sizeof(struct hba_port), GFP_KERNEL); if (!port_table) @@ -6611,6 +6626,30 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) ioc->sas_hba.phy[i].hba_vphy = 1; } + /* + * Add new HBA phys to STL if these new phys got added as part + * of HBA Firmware upgrade/downgrade operation. + */ + if (!ioc->sas_hba.phy[i].phy) { + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, + &phy_pg0, i))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + ioc->sas_hba.phy[i].phy_id = i; + mpt3sas_transport_add_host_phy(ioc, + &ioc->sas_hba.phy[i], phy_pg0, + ioc->sas_hba.parent_dev); + continue; + } ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. 
AttachedDevHandle); @@ -6622,6 +6661,19 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) attached_handle, i, link_rate, ioc->sas_hba.phy[i].port); } + /* + * Clear the phy details if this phy got disabled as part of + * HBA Firmware upgrade/downgrade operation. + */ + for (i = ioc->sas_hba.num_phys; + i < ioc->sas_hba.nr_phys_allocated; i++) { + if (ioc->sas_hba.phy[i].phy && + ioc->sas_hba.phy[i].phy->negotiated_linkrate >= + SAS_LINK_RATE_1_5_GBPS) + mpt3sas_transport_update_links(ioc, + ioc->sas_hba.sas_address, 0, i, + MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL); + } out: kfree(sas_iounit_pg0); } @@ -6654,7 +6706,10 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) __FILE__, __LINE__, __func__); return; } - ioc->sas_hba.phy = kcalloc(num_phys, + + ioc->sas_hba.nr_phys_allocated = max_t(u8, + MPT_MAX_HBA_NUM_PHYS, num_phys); + ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated, sizeof(struct _sas_phy), GFP_KERNEL); if (!ioc->sas_hba.phy) { ioc_err(ioc, "failure at %s:%d/%s()!\n", diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 30f9545d2285..032efb294ee5 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2762,7 +2762,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) if (fcport->loop_id != FC_NO_LOOP_ID) fcport->logout_on_delete = 1; - qlt_schedule_sess_for_deletion(fcport); + if (!EDIF_NEGOTIATION_PENDING(fcport)) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x911e, + "%s %d schedule session deletion\n", __func__, + __LINE__); + qlt_schedule_sess_for_deletion(fcport); + } } else { qla2x00_port_logout(fcport->vha, fcport); } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 8924eeb9367d..9ebf4a234d9a 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -639,9 +639,9 @@ struct qla_els_pt_arg { u8 els_opcode; u8 vp_idx; __le16 nport_handle; - u16 control_flags; + u16 control_flags, ox_id; __le32 rx_xchg_address; - port_id_t did; + port_id_t did, sid; u32 tx_len, tx_byte_count, rx_len, rx_byte_count; dma_addr_t tx_addr, rx_addr; diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c index ad746c62f0d4..53d2b8562027 100644 --- a/drivers/scsi/qla2xxx/qla_edif.c +++ b/drivers/scsi/qla2xxx/qla_edif.c @@ -218,7 +218,7 @@ fc_port_t *fcport) "%s edif not enabled\n", __func__); goto done; } - if (vha->e_dbell.db_flags != EDB_ACTIVE) { + if (DBELL_INACTIVE(vha)) { ql_dbg(ql_dbg_edif, vha, 0x09102, "%s doorbell not enabled\n", __func__); goto done; @@ -290,63 +290,6 @@ qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid) return false; } -static void qla_edif_reset_auth_wait(struct fc_port *fcport, int state, - int waitonly) -{ - int cnt, max_cnt = 200; - bool traced = false; - - fcport->keep_nport_handle = 1; - - if (!waitonly) { - qla2x00_set_fcport_disc_state(fcport, state); - qlt_schedule_sess_for_deletion(fcport); - } else { - qla2x00_set_fcport_disc_state(fcport, state); - } - - ql_dbg(ql_dbg_edif, fcport->vha, 0xf086, - "%s: waiting for session, max_cnt=%u\n", - __func__, max_cnt); - - cnt = 0; - - if (waitonly) { - /* Marker wait min 10 msecs. 
*/ - msleep(50); - cnt += 50; - } - while (1) { - if (!traced) { - ql_dbg(ql_dbg_edif, fcport->vha, 0xf086, - "%s: session sleep.\n", - __func__); - traced = true; - } - msleep(20); - cnt++; - if (waitonly && (fcport->disc_state == state || - fcport->disc_state == DSC_LOGIN_COMPLETE)) - break; - if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) - break; - if (cnt > max_cnt) - break; - } - - if (!waitonly) { - ql_dbg(ql_dbg_edif, fcport->vha, 0xf086, - "%s: waited for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n", - __func__, fcport->port_name, fcport->loop_id, - fcport->d_id.b24, fcport, fcport->disc_state, cnt); - } else { - ql_dbg(ql_dbg_edif, fcport->vha, 0xf086, - "%s: waited ONLY for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n", - __func__, fcport->port_name, fcport->loop_id, - fcport->d_id.b24, fcport, fcport->disc_state, cnt); - } -} - static void qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl, int index) @@ -529,7 +472,8 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job) struct app_start_reply appreply; struct fc_port *fcport, *tf; - ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app start\n", __func__); + ql_log(ql_log_info, vha, 0x1313, + "EDIF application registration with driver, FC device connections will be re-established.\n"); sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, &appstart, @@ -538,9 +482,9 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job) ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n", __func__, appstart.app_info.app_vid, appstart.app_start_flags); - if (vha->e_dbell.db_flags != EDB_ACTIVE) { + if (DBELL_INACTIVE(vha)) { /* mark doorbell as active since an app is now present */ - vha->e_dbell.db_flags = EDB_ACTIVE; + vha->e_dbell.db_flags |= EDB_ACTIVE; } else { ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n", __func__); @@ -554,37 +498,36 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job) qla2xxx_wake_dpc(vha); } else { list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { + ql_dbg(ql_dbg_edif, vha, 0x2058, + "FCSP - nn %8phN pn %8phN portid=%06x.\n", + fcport->node_name, fcport->port_name, + fcport->d_id.b24); ql_dbg(ql_dbg_edif, vha, 0xf084, - "%s: sess %p %8phC lid %#04x s_id %06x logout %d\n", - __func__, fcport, fcport->port_name, - fcport->loop_id, fcport->d_id.b24, - fcport->logout_on_delete); - - ql_dbg(ql_dbg_edif, vha, 0xf084, - "keep %d els_logo %d disc state %d auth state %d stop state %d\n", - fcport->keep_nport_handle, - fcport->send_els_logo, fcport->disc_state, - fcport->edif.auth_state, fcport->edif.app_stop); + "%s: se_sess %p / sess %p from port %8phC " + "loop_id %#04x s_id %06x logout %d " + "keep %d els_logo %d disc state %d auth state %d" + "stop state %d\n", + __func__, fcport->se_sess, fcport, + fcport->port_name, fcport->loop_id, + fcport->d_id.b24, fcport->logout_on_delete, + fcport->keep_nport_handle, fcport->send_els_logo, + fcport->disc_state, fcport->edif.auth_state, + fcport->edif.app_stop); if (atomic_read(&vha->loop_state) == LOOP_DOWN) break; - if (!(fcport->flags & FCF_FCSP_DEVICE)) - continue; fcport->edif.app_started = 1; - if (fcport->edif.app_stop || - (fcport->disc_state != DSC_LOGIN_COMPLETE && - fcport->disc_state != DSC_LOGIN_PEND && - fcport->disc_state != DSC_DELETED)) { - /* no activity */ - fcport->edif.app_stop = 0; - - ql_dbg(ql_dbg_edif, vha, 0x911e, - "%s wwpn %8phC calling qla_edif_reset_auth_wait\n", - __func__, 
fcport->port_name); - fcport->edif.app_sess_online = 1; - qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0); - } + fcport->login_retry = vha->hw->login_retry_count; + + /* no activity */ + fcport->edif.app_stop = 0; + + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC calling qla_edif_reset_auth_wait\n", + __func__, fcport->port_name); + fcport->edif.app_sess_online = 0; + qlt_schedule_sess_for_deletion(fcport); qla_edif_sa_ctl_init(vha, fcport); } } @@ -601,14 +544,14 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job) appreply.edif_enode_active = vha->pur_cinfo.enode_flags; appreply.edif_edb_active = vha->e_dbell.db_flags; - bsg_job->reply_len = sizeof(struct fc_bsg_reply) + - sizeof(struct app_start_reply); + bsg_job->reply_len = sizeof(struct fc_bsg_reply); SET_DID_STATUS(bsg_reply->result, DID_OK); - sg_copy_from_buffer(bsg_job->reply_payload.sg_list, - bsg_job->reply_payload.sg_cnt, &appreply, - sizeof(struct app_start_reply)); + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + &appreply, + sizeof(struct app_start_reply)); ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app start completed with 0x%x\n", @@ -800,15 +743,15 @@ qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job) ql_dbg(ql_dbg_edif, vha, 0x911e, "%s AUTH complete - RESUME with prli for wwpn %8phC\n", __func__, fcport->port_name); - qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 1); qla24xx_post_prli_work(vha, fcport); } errstate_exit: bsg_job->reply_len = sizeof(struct fc_bsg_reply); - sg_copy_from_buffer(bsg_job->reply_payload.sg_list, - bsg_job->reply_payload.sg_cnt, &appplogireply, - sizeof(struct app_plogi_reply)); + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + &appplogireply, + sizeof(struct app_plogi_reply)); return rval; } @@ -873,7 +816,7 @@ qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job) if (qla_ini_mode_enabled(fcport->vha)) { fcport->send_els_logo = 1; - qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0); + qlt_schedule_sess_for_deletion(fcport); } } @@ -891,7 +834,7 @@ static int qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job) { int32_t rval = 0; - int32_t num_cnt; + int32_t pcnt = 0; struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct app_pinfo_req app_req; struct app_pinfo_reply *app_reply; @@ -903,16 +846,14 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job) bsg_job->request_payload.sg_cnt, &app_req, sizeof(struct app_pinfo_req)); - num_cnt = app_req.num_ports; /* num of ports alloc'd by app */ - app_reply = kzalloc((sizeof(struct app_pinfo_reply) + - sizeof(struct app_pinfo) * num_cnt), GFP_KERNEL); + sizeof(struct app_pinfo) * app_req.num_ports), GFP_KERNEL); + if (!app_reply) { SET_DID_STATUS(bsg_reply->result, DID_ERROR); rval = -1; } else { struct fc_port *fcport = NULL, *tf; - uint32_t pcnt = 0; list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { if (!(fcport->flags & FCF_FCSP_DEVICE)) @@ -924,7 +865,7 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job) "APP request entry - portid=%06x.\n", tdid.b24); /* Ran out of space */ - if (pcnt > app_req.num_ports) + if (pcnt >= app_req.num_ports) break; if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24) @@ -981,9 +922,11 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job) SET_DID_STATUS(bsg_reply->result, DID_OK); } - 
sg_copy_from_buffer(bsg_job->reply_payload.sg_list, - bsg_job->reply_payload.sg_cnt, app_reply, - sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * num_cnt); + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + app_reply, + sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * pcnt); kfree(app_reply); @@ -1000,10 +943,11 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job) { int32_t rval = 0; struct fc_bsg_reply *bsg_reply = bsg_job->reply; - uint32_t ret_size, size; + uint32_t size; struct app_sinfo_req app_req; struct app_stats_reply *app_reply; + uint32_t pcnt = 0; sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, &app_req, @@ -1019,18 +963,12 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job) size = sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * app_req.num_ports); - if (size > bsg_job->reply_payload.payload_len) - ret_size = bsg_job->reply_payload.payload_len; - else - ret_size = size; - app_reply = kzalloc(size, GFP_KERNEL); if (!app_reply) { SET_DID_STATUS(bsg_reply->result, DID_ERROR); rval = -1; } else { struct fc_port *fcport = NULL, *tf; - uint32_t pcnt = 0; list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { if (fcport->edif.enable) { @@ -1054,9 +992,11 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job) SET_DID_STATUS(bsg_reply->result, DID_OK); } + bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, - bsg_job->reply_payload.sg_cnt, app_reply, ret_size); + bsg_job->reply_payload.sg_cnt, app_reply, + sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * pcnt)); kfree(app_reply); @@ -1130,8 +1070,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job) __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[1]); rval = EXT_STATUS_INVALID_PARAM; - bsg_job->reply_len = sizeof(struct fc_bsg_reply); - SET_DID_STATUS(bsg_reply->result, DID_ERROR); + done = false; break; } @@ -1330,7 +1269,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job) goto done; } - if (vha->e_dbell.db_flags != EDB_ACTIVE) { + if (DBELL_INACTIVE(vha)) { ql_log(ql_log_warn, vha, 0x70a1, "App not started\n"); rval = -EIO; SET_DID_STATUS(bsg_reply->result, DID_ERROR); @@ -1651,6 +1590,40 @@ qla_enode_stop(scsi_qla_host_t *vha) spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); } +static void qla_enode_clear(scsi_qla_host_t *vha, port_id_t portid) +{ + unsigned long flags; + struct enode *e, *tmp; + struct purexevent *purex; + LIST_HEAD(enode_list); + + if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s enode not active\n", __func__); + return; + } + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + list_for_each_entry_safe(e, tmp, &vha->pur_cinfo.head, list) { + purex = &e->u.purexinfo; + if (purex->pur_info.pur_sid.b24 == portid.b24) { + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s free ELS sid=%06x. 
xchg %x, nb=%xh\n", + __func__, portid.b24, + purex->pur_info.pur_rx_xchg_address, + purex->pur_info.pur_bytes_rcvd); + + list_del_init(&e->list); + list_add_tail(&e->list, &enode_list); + } + } + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); + + list_for_each_entry_safe(e, tmp, &enode_list, list) { + list_del_init(&e->list); + qla_enode_free(vha, e); + } +} + /* * allocate enode struct and populate buffer * returns: enode pointer with buffers @@ -1695,41 +1668,25 @@ static struct enode * qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2) { struct enode *node_rtn = NULL; - struct enode *list_node = NULL; + struct enode *list_node, *q; unsigned long flags; - struct list_head *pos, *q; uint32_t sid; - uint32_t rw_flag; struct purexevent *purex; /* secure the list from moving under us */ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); - list_for_each_safe(pos, q, &vha->pur_cinfo.head) { - list_node = list_entry(pos, struct enode, list); + list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) { /* node type determines what p1 and p2 are */ purex = &list_node->u.purexinfo; sid = p1; - rw_flag = p2; if (purex->pur_info.pur_sid.b24 == sid) { - if (purex->pur_info.pur_pend == 1 && - rw_flag == PUR_GET) { - /* - * if the receive is in progress - * and its a read/get then can't - * transfer yet - */ - ql_dbg(ql_dbg_edif, vha, 0x9106, - "%s purex xfer in progress for sid=%x\n", - __func__, sid); - } else { - /* found it and its complete */ - node_rtn = list_node; - list_del(pos); - break; - } + /* found it and its complete */ + node_rtn = list_node; + list_del(&list_node->list); + break; } } @@ -1802,7 +1759,8 @@ qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp, qla_els_pt_iocb(vha, els_iocb, a); ql_dbg(ql_dbg_edif, vha, 0x0183, - "Sending ELS reject...\n"); + "Sending ELS reject ox_id %04x s:%06x -> d:%06x\n", + a->ox_id, a->sid.b24, a->did.b24); ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185, vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c)); /* flush iocb to mem before notifying hw doorbell */ @@ -1814,7 +1772,7 @@ qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp, void qla_edb_init(scsi_qla_host_t *vha) { - if (vha->e_dbell.db_flags == EDB_ACTIVE) { + if (DBELL_ACTIVE(vha)) { /* list already init'd - error */ ql_dbg(ql_dbg_edif, vha, 0x09102, "edif db already initialized, cannot reinit\n"); @@ -1850,6 +1808,57 @@ qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node) node->ntype = N_UNDEF; } +static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid) +{ + unsigned long flags; + struct edb_node *e, *tmp; + port_id_t sid; + LIST_HEAD(edb_list); + + if (DBELL_INACTIVE(vha)) { + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s doorbell not enabled\n", __func__); + return; + } + + /* grab lock so list doesn't move */ + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + list_for_each_entry_safe(e, tmp, &vha->e_dbell.head, list) { + switch (e->ntype) { + case VND_CMD_AUTH_STATE_NEEDED: + case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: + sid = e->u.plogi_did; + break; + case VND_CMD_AUTH_STATE_ELS_RCVD: + sid = e->u.els_sid; + break; + case VND_CMD_AUTH_STATE_SAUPDATE_COMPL: + /* app wants to see this */ + continue; + default: + ql_log(ql_log_warn, vha, 0x09102, + "%s unknown node type: %x\n", __func__, e->ntype); + sid.b24 = 0; + break; + } + if (sid.b24 == portid.b24) { + ql_dbg(ql_dbg_edif, vha, 0x910f, + "%s free doorbell event : node type = %x %p\n", + __func__, e->ntype, e); + 
list_del_init(&e->list); + list_add_tail(&e->list, &edb_list); + } + } + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + list_for_each_entry_safe(e, tmp, &edb_list, list) { + qla_edb_node_free(vha, e); + list_del_init(&e->list); + kfree(e); + } +} + /* function called when app is stopping */ void @@ -1858,7 +1867,7 @@ qla_edb_stop(scsi_qla_host_t *vha) unsigned long flags; struct edb_node *node, *q; - if (vha->e_dbell.db_flags != EDB_ACTIVE) { + if (DBELL_INACTIVE(vha)) { /* doorbell list not enabled */ ql_dbg(ql_dbg_edif, vha, 0x09102, "%s doorbell not enabled\n", __func__); @@ -1909,7 +1918,7 @@ qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr) { unsigned long flags; - if (vha->e_dbell.db_flags != EDB_ACTIVE) { + if (DBELL_INACTIVE(vha)) { /* doorbell list not enabled */ ql_dbg(ql_dbg_edif, vha, 0x09102, "%s doorbell not enabled\n", __func__); @@ -1940,7 +1949,7 @@ qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, return; } - if (vha->e_dbell.db_flags != EDB_ACTIVE) { + if (DBELL_INACTIVE(vha)) { if (fcport) fcport->edif.auth_state = dbtype; /* doorbell list not enabled */ @@ -2035,7 +2044,7 @@ qla_edif_timer(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) { - if (vha->e_dbell.db_flags != EDB_ACTIVE && + if (DBELL_INACTIVE(vha) && ha->edif_post_stop_cnt_down) { ha->edif_post_stop_cnt_down--; @@ -2073,7 +2082,7 @@ edif_doorbell_show(struct device *dev, struct device_attribute *attr, sz = 256; /* stop new threads from waiting if we're not init'd */ - if (vha->e_dbell.db_flags != EDB_ACTIVE) { + if (DBELL_INACTIVE(vha)) { ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122, "%s error - edif db not enabled\n", __func__); return 0; @@ -2346,6 +2355,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp) a.tx_addr = vha->hw->elsrej.cdma; a.vp_idx = vha->vp_idx; a.control_flags = EPD_ELS_RJT; + a.ox_id = le16_to_cpu(p->ox_id); sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16); @@ -2357,7 +2367,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp) return; } - if (totlen > MAX_PAYLOAD) { + if (totlen > ELS_MAX_PAYLOAD) { ql_dbg(ql_dbg_edif, vha, 0x0910d, "%s WARNING: verbose ELS frame received (totlen=%x)\n", __func__, totlen); @@ -2387,7 +2397,6 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp) purex = &ptr->u.purexinfo; purex->pur_info.pur_sid = a.did; - purex->pur_info.pur_pend = 0; purex->pur_info.pur_bytes_rcvd = totlen; purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr); purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle); @@ -2396,6 +2405,8 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp) purex->pur_info.pur_did.b.al_pa = p->d_id[0]; purex->pur_info.vp_idx = p->vp_idx; + a.sid = purex->pur_info.pur_did; + rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp, purex->msgp_len); if (rc) { @@ -2419,7 +2430,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp) fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid); - if (host->e_dbell.db_flags != EDB_ACTIVE || + if (DBELL_INACTIVE(vha) || (fcport && EDIF_SESSION_DOWN(fcport))) { ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n", __func__, host->e_dbell.db_flags, @@ -2436,7 +2447,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp) ql_dbg(ql_dbg_edif, host, 0x0910c, "%s COMPLETE purex->pur_info.pur_bytes_rcvd 
=%xh s:%06x -> d:%06x xchg=%xh\n", __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24, - purex->pur_info.pur_did.b24, p->rx_xchg_addr); + purex->pur_info.pur_did.b24, purex->pur_info.pur_rx_xchg_address); qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL); } @@ -3139,18 +3150,14 @@ static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport, /* release any sadb entries -- only done at teardown */ void qla_edif_sadb_release(struct qla_hw_data *ha) { - struct list_head *pos; - struct list_head *tmp; - struct edif_sa_index_entry *entry; + struct edif_sa_index_entry *entry, *tmp; - list_for_each_safe(pos, tmp, &ha->sadb_rx_index_list) { - entry = list_entry(pos, struct edif_sa_index_entry, next); + list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) { list_del(&entry->next); kfree(entry); } - list_for_each_safe(pos, tmp, &ha->sadb_tx_index_list) { - entry = list_entry(pos, struct edif_sa_index_entry, next); + list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) { list_del(&entry->next); kfree(entry); } @@ -3449,7 +3456,7 @@ done: void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess) { - if (sess->edif.app_sess_online && vha->e_dbell.db_flags & EDB_ACTIVE) { + if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) { ql_dbg(ql_dbg_disc, vha, 0xf09c, "%s: sess %8phN send port_offline event\n", __func__, sess->port_name); @@ -3459,3 +3466,12 @@ void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess) qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24); } } + +void qla_edif_clear_appdata(struct scsi_qla_host *vha, struct fc_port *fcport) +{ + if (!(fcport->flags & FCF_FCSP_DEVICE)) + return; + + qla_edb_clear(vha, fcport->d_id); + qla_enode_clear(vha, fcport->d_id); +} diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h index 9e8f28d0caa1..a965ca8e47ce 100644 --- a/drivers/scsi/qla2xxx/qla_edif.h +++ b/drivers/scsi/qla2xxx/qla_edif.h @@ -41,9 +41,12 @@ struct pur_core { }; enum db_flags_t { - EDB_ACTIVE = 0x1, + EDB_ACTIVE = BIT_0, }; +#define DBELL_ACTIVE(_v) (_v->e_dbell.db_flags & EDB_ACTIVE) +#define DBELL_INACTIVE(_v) (!(_v->e_dbell.db_flags & EDB_ACTIVE)) + struct edif_dbell { enum db_flags_t db_flags; spinlock_t db_lock; @@ -93,7 +96,6 @@ struct sa_update_28xx { }; #define NUM_ENTRIES 256 -#define MAX_PAYLOAD 1024 #define PUR_GET 1 struct dinfo { @@ -102,7 +104,6 @@ struct dinfo { }; struct pur_ninfo { - unsigned int pur_pend:1; port_id_t pur_sid; port_id_t pur_did; uint8_t vp_idx; @@ -128,9 +129,15 @@ struct enode { } u; }; +#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES)) + #define EDIF_SESSION_DOWN(_s) \ (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \ _s->disc_state == DSC_DELETED || \ !_s->edif.app_sess_online)) +#define EDIF_NEGOTIATION_PENDING(_fcport) \ + (DBELL_ACTIVE(_fcport->vha) && \ + (_fcport->disc_state == DSC_LOGIN_AUTH_PEND)) + #endif /* __QLA_EDIF_H */ diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h index 58b718d35d19..53026d82ebff 100644 --- a/drivers/scsi/qla2xxx/qla_edif_bsg.h +++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h @@ -8,7 +8,7 @@ #define __QLA_EDIF_BSG_H /* BSG Vendor specific commands */ -#define ELS_MAX_PAYLOAD 1024 +#define ELS_MAX_PAYLOAD 2112 #ifndef WWN_SIZE #define WWN_SIZE 8 #endif diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 3c4fa8bac88d..8d8503a28479 100644 --- 
a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -142,6 +142,8 @@ void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport, void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport); int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsgjob); void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess); +void qla_edif_clear_appdata(struct scsi_qla_host *vha, + struct fc_port *fcport); const char *sc_to_str(uint16_t cmd); /* @@ -171,7 +173,6 @@ extern int ql2xasynctmfenable; extern int ql2xgffidenable; extern int ql2xenabledif; extern int ql2xenablehba_err_chk; -extern int ql2xtargetreset; extern int ql2xdontresethba; extern uint64_t ql2xmaxlun; extern int ql2xmdcapmask; @@ -818,7 +819,6 @@ extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *); extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *); extern void qlafx00_timer_routine(scsi_qla_host_t *); extern int qlafx00_rescan_isp(scsi_qla_host_t *); -extern int qlafx00_loop_reset(scsi_qla_host_t *vha); /* qla82xx related functions */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 908a72ebf7c2..070b636802d0 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -330,12 +330,9 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY; } else { if (vha->hw->flags.edif_enabled && - vha->e_dbell.db_flags & EDB_ACTIVE) { + DBELL_ACTIVE(vha)) { lio->u.logio.flags |= (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI); - ql_dbg(ql_dbg_disc, vha, 0x2072, - "Async-login: w/ FCSP %8phC hdl=%x, loopid=%x portid=%06x\n", - fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24); } else { lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; } @@ -344,12 +341,14 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, if (NVME_TARGET(vha->hw, fcport)) lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; + rval = qla2x00_start_sp(sp); + ql_dbg(ql_dbg_disc, vha, 0x2072, - "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n", + "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n", fcport->port_name, sp->handle, fcport->loop_id, - fcport->d_id.b24, fcport->login_retry); + fcport->d_id.b24, fcport->login_retry, + lio->u.logio.flags & SRB_LOGIN_FCSP ? 
"FCSP" : ""); - rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { fcport->flags |= FCF_LOGIN_NEEDED; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); @@ -862,7 +861,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, break; case DSC_LS_PLOGI_COMP: if (vha->hw->flags.edif_enabled && - vha->e_dbell.db_flags & EDB_ACTIVE) { + DBELL_ACTIVE(vha)) { /* check to see if App support secure or not */ qla24xx_post_gpdb_work(vha, fcport, 0); break; @@ -987,8 +986,6 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], sp->u.iocb_cmd.u.mbx.in_mb[2]); - if (res == QLA_FUNCTION_TIMEOUT) - return; sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); memset(&ea, 0, sizeof(ea)); @@ -1026,8 +1023,8 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { - list_del_init(&fcport->gnl_entry); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + list_del_init(&fcport->gnl_entry); fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); ea.fcport = fcport; @@ -1454,7 +1451,7 @@ static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport, qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, fcport->d_id.b24); - if (vha->e_dbell.db_flags == EDB_ACTIVE) { + if (DBELL_ACTIVE(vha)) { ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n", __func__, __LINE__, fcport->port_name); @@ -1786,16 +1783,72 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) fc_port_t *fcport; unsigned long flags; - fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); - if (fcport) { - if (fcport->flags & FCF_FCP2_DEVICE) { - ql_dbg(ql_dbg_disc, vha, 0x2115, - "Delaying session delete for FCP2 portid=%06x %8phC ", - fcport->d_id.b24, fcport->port_name); - return; + switch (ea->id.b.rsvd_1) { + case RSCN_PORT_ADDR: + fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); + if (fcport) { + if (fcport->flags & FCF_FCP2_DEVICE) { + ql_dbg(ql_dbg_disc, vha, 0x2115, + "Delaying session delete for FCP2 portid=%06x %8phC ", + fcport->d_id.b24, fcport->port_name); + return; + } + + if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) { + /* + * On ipsec start by remote port, Target port + * may use RSCN to trigger initiator to + * relogin. If driver is already in the + * process of a relogin, then ignore the RSCN + * and allow the current relogin to continue. + * This reduces thrashing of the connection. + */ + if (atomic_read(&fcport->state) == FCS_ONLINE) { + /* + * If state = online, then set scan_needed=1 to do relogin. 
+ * Otherwise we're already in the middle of a relogin + */ + fcport->scan_needed = 1; + fcport->rscn_gen++; + } + } else { + fcport->scan_needed = 1; + fcport->rscn_gen++; + } + } + break; + case RSCN_AREA_ADDR: + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->flags & FCF_FCP2_DEVICE) + continue; + + if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) { + fcport->scan_needed = 1; + fcport->rscn_gen++; + } + } + break; + case RSCN_DOM_ADDR: + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->flags & FCF_FCP2_DEVICE) + continue; + + if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) { + fcport->scan_needed = 1; + fcport->rscn_gen++; + } + } + break; + case RSCN_FAB_ADDR: + default: + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->flags & FCF_FCP2_DEVICE) + continue; + + fcport->scan_needed = 1; + fcport->rscn_gen++; } - fcport->scan_needed = 1; - fcport->rscn_gen++; + break; } spin_lock_irqsave(&vha->work_lock, flags); @@ -4187,7 +4240,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) * fw shal not send PRLI after PLOGI Acc */ if (ha->flags.edif_enabled && - vha->e_dbell.db_flags & EDB_ACTIVE) { + DBELL_ACTIVE(vha)) { ha->fw_options[3] |= BIT_15; ha->flags.n2n_fw_acc_sec = 1; } else { @@ -4433,6 +4486,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha) (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); } + /* ELS pass through payload is limit by frame size. */ + if (ha->flags.edif_enabled) + mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD); + rval = qla2x00_init_firmware(vha, ha->init_cb_size); next_check: if (rval) { @@ -5339,8 +5396,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) * use link up to wake up app to get ready for * authentication. */ - if (ha->flags.edif_enabled && - !(vha->e_dbell.db_flags & EDB_ACTIVE)) + if (ha->flags.edif_enabled && DBELL_INACTIVE(vha)) qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); @@ -5833,6 +5889,10 @@ void qla_register_fcport_fn(struct work_struct *work) qla2x00_update_fcport(fcport->vha, fcport); + ql_dbg(ql_dbg_disc, fcport->vha, 0x911e, + "%s rscn gen %d/%d next DS %d\n", __func__, + rscn_gen, fcport->rscn_gen, fcport->next_disc_state); + if (rscn_gen != fcport->rscn_gen) { /* RSCN(s) came in while registration */ switch (fcport->next_disc_state) { diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 9d4ad1d2b00a..ed604f2185bf 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -3034,8 +3034,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, elsio->u.els_plogi.els_cmd = els_opcode; elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; - if (els_opcode == ELS_DCMD_PLOGI && vha->hw->flags.edif_enabled && - vha->e_dbell.db_flags & EDB_ACTIVE) { + if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) { struct fc_els_flogi *p = ptr; p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index b26f2699adb2..aaf6504570fd 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2233,6 +2233,10 @@ qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req, } } else if (comp_status == CS_PORT_LOGGED_OUT) { + ql_dbg(ql_dbg_disc, vha, 0x911e, + "%s %d schedule session deletion\n", + __func__, __LINE__); + els->u.els_plogi.len = 0; res = DID_IMM_RETRY << 16; qlt_schedule_sess_for_deletion(sp->fcport); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c 
b/drivers/scsi/qla2xxx/qla_mbx.c index 73a353153d33..10d2655ef676 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1695,10 +1695,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; if (IS_FWI2_CAPABLE(vha->hw)) mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; - if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) { - mcp->in_mb |= MBX_15; - mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23; - } + if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) + mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 6e920da64863..350b0c4346fb 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -739,29 +739,6 @@ qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag) } int -qlafx00_loop_reset(scsi_qla_host_t *vha) -{ - int ret; - struct fc_port *fcport; - struct qla_hw_data *ha = vha->hw; - - if (ql2xtargetreset) { - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->port_type != FCT_TARGET) - continue; - - ret = ha->isp_ops->target_reset(fcport, 0, 0); - if (ret != QLA_SUCCESS) { - ql_dbg(ql_dbg_taskm, vha, 0x803d, - "Bus Reset failed: Reset=%d " - "d_id=%x.\n", ret, fcport->d_id.b24); - } - } - } - return QLA_SUCCESS; -} - -int qlafx00_iospace_config(struct qla_hw_data *ha) { if (pci_request_selected_regions(ha->pdev, ha->bars, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index c3ff50ffe205..abcd30917263 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -202,12 +202,6 @@ MODULE_PARM_DESC(ql2xdbwr, " 0 -- Regular doorbell.\n" " 1 -- CAMRAM doorbell (faster).\n"); -int ql2xtargetreset = 1; -module_param(ql2xtargetreset, int, S_IRUGO); -MODULE_PARM_DESC(ql2xtargetreset, - "Enable target reset." 
- "Default is 1 - use hw defaults."); - int ql2xgffidenable; module_param(ql2xgffidenable, int, S_IRUGO); MODULE_PARM_DESC(ql2xgffidenable, @@ -1695,27 +1689,10 @@ int qla2x00_loop_reset(scsi_qla_host_t *vha) { int ret; - struct fc_port *fcport; struct qla_hw_data *ha = vha->hw; - if (IS_QLAFX00(ha)) { - return qlafx00_loop_reset(vha); - } - - if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) { - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->port_type != FCT_TARGET) - continue; - - ret = ha->isp_ops->target_reset(fcport, 0, 0); - if (ret != QLA_SUCCESS) { - ql_dbg(ql_dbg_taskm, vha, 0x802c, - "Bus Reset failed: Reset=%d " - "d_id=%x.\n", ret, fcport->d_id.b24); - } - } - } - + if (IS_QLAFX00(ha)) + return QLA_SUCCESS; if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { atomic_set(&vha->loop_state, LOOP_DOWN); @@ -3908,13 +3885,13 @@ qla2x00_remove_one(struct pci_dev *pdev) static inline void qla24xx_free_purex_list(struct purex_list *list) { - struct list_head *item, *next; + struct purex_item *item, *next; ulong flags; spin_lock_irqsave(&list->lock, flags); - list_for_each_safe(item, next, &list->head) { - list_del(item); - kfree(list_entry(item, struct purex_item, list)); + list_for_each_entry_safe(item, next, &list->head, list) { + list_del(&item->list); + kfree(item); } spin_unlock_irqrestore(&list->lock, flags); } @@ -4375,7 +4352,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, /* allocate the purex dma pool */ ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev, - MAX_PAYLOAD, 8, 0); + ELS_MAX_PAYLOAD, 8, 0); if (!ha->purex_dma_pool) { ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 7d8242c120fc..8993d438e0b7 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1003,6 +1003,7 @@ void qlt_free_session_done(struct work_struct *work) "%s bypassing release_all_sadb\n", __func__); } + qla_edif_clear_appdata(vha, sess); qla_edif_sess_down(vha, sess); } qla2x00_mark_device_lost(vha, sess, 0); @@ -4812,7 +4813,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha, } if (vha->hw->flags.edif_enabled) { - if (!(vha->e_dbell.db_flags & EDB_ACTIVE)) { + if (DBELL_INACTIVE(vha)) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d Term INOT due to app not started lid=%d, NportID %06X ", __func__, __LINE__, loop_id, port_id.b24); diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 4b117165bf8b..27e440f8a702 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -6,9 +6,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "10.02.07.100-k" +#define QLA2XXX_VERSION "10.02.07.200-k" #define QLA_DRIVER_MAJOR_VER 10 #define QLA_DRIVER_MINOR_VER 2 #define QLA_DRIVER_PATCH_VER 7 -#define QLA_DRIVER_BETA_VER 100 +#define QLA_DRIVER_BETA_VER 200 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 06e7266018c7..3c0da3770edf 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1189,7 +1189,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr, __func__, off_dst, scsi_bufflen(scp), act_len, scsi_get_resid(scp)); n = scsi_bufflen(scp) - (off_dst + act_len); - scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n)); + scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n)); return 0; } @@ -1562,7 +1562,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info 
*devip) unsigned char pq_pdt; unsigned char *arr; unsigned char *cmd = scp->cmnd; - int alloc_len, n, ret; + u32 alloc_len, n; + int ret; bool have_wlun, is_disk, is_zbc, is_disk_zbc; alloc_len = get_unaligned_be16(cmd + 3); @@ -1585,7 +1586,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) kfree(arr); return check_condition_result; } else if (0x1 & cmd[1]) { /* EVPD bit set */ - int lu_id_num, port_group_id, target_dev_id, len; + int lu_id_num, port_group_id, target_dev_id; + u32 len; char lu_id_str[6]; int host_no = devip->sdbg_host->shost->host_no; @@ -1676,9 +1678,9 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) kfree(arr); return check_condition_result; } - len = min(get_unaligned_be16(arr + 2) + 4, alloc_len); + len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len); ret = fill_from_dev_buffer(scp, arr, - min(len, SDEBUG_MAX_INQ_ARR_SZ)); + min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ)); kfree(arr); return ret; } @@ -1714,7 +1716,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) } put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */ ret = fill_from_dev_buffer(scp, arr, - min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ)); + min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ)); kfree(arr); return ret; } @@ -1729,8 +1731,8 @@ static int resp_requests(struct scsi_cmnd *scp, unsigned char *cmd = scp->cmnd; unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */ bool dsense = !!(cmd[1] & 1); - int alloc_len = cmd[4]; - int len = 18; + u32 alloc_len = cmd[4]; + u32 len = 18; int stopped_state = atomic_read(&devip->stopped); memset(arr, 0, sizeof(arr)); @@ -1774,7 +1776,7 @@ static int resp_requests(struct scsi_cmnd *scp, arr[7] = 0xa; } } - return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len)); + return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len)); } static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) @@ -2312,7 +2314,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp, { int pcontrol, pcode, subpcode, bd_len; unsigned char dev_spec; - int alloc_len, offset, len, target_dev_id; + u32 alloc_len, offset, len; + int target_dev_id; int target = scp->device->id; unsigned char *ap; unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; @@ -2468,7 +2471,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp, arr[0] = offset - 1; else put_unaligned_be16((offset - 2), arr + 0); - return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset)); + return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset)); } #define SDEBUG_MAX_MSELECT_SZ 512 @@ -2499,11 +2502,11 @@ static int resp_mode_select(struct scsi_cmnd *scp, __func__, param_len, res); md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); - if (md_len > 2) { + off = bd_len + (mselect6 ? 4 : 8); + if (md_len > 2 || off >= res) { mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); return check_condition_result; } - off = bd_len + (mselect6 ? 
4 : 8); mpage = arr[off] & 0x3f; ps = !!(arr[off] & 0x80); if (ps) { @@ -2583,7 +2586,8 @@ static int resp_ie_l_pg(unsigned char *arr) static int resp_log_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { - int ppc, sp, pcode, subpcode, alloc_len, len, n; + int ppc, sp, pcode, subpcode; + u32 alloc_len, len, n; unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; unsigned char *cmd = scp->cmnd; @@ -2653,9 +2657,9 @@ static int resp_log_sense(struct scsi_cmnd *scp, mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); return check_condition_result; } - len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len); + len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len); return fill_from_dev_buffer(scp, arr, - min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ)); + min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ)); } static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip) @@ -4259,6 +4263,8 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) mk_sense_invalid_opcode(scp); return check_condition_result; } + if (vnum == 0) + return 0; /* not an error */ a_num = is_bytchk3 ? 1 : vnum; /* Treat following check like one for read (i.e. no write) access */ ret = check_device_access_params(scp, lba, a_num, false); @@ -4322,6 +4328,8 @@ static int resp_report_zones(struct scsi_cmnd *scp, } zs_lba = get_unaligned_be64(cmd + 2); alloc_len = get_unaligned_be32(cmd + 10); + if (alloc_len == 0) + return 0; /* not an error */ rep_opts = cmd[14] & 0x3f; partial = cmd[14] & 0x80; @@ -4426,7 +4434,7 @@ static int resp_report_zones(struct scsi_cmnd *scp, put_unaligned_be64(sdebug_capacity - 1, arr + 8); rep_len = (unsigned long)desc - (unsigned long)arr; - ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len)); + ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len)); fini: read_unlock(macc_lckp); @@ -4649,6 +4657,7 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip, struct sdeb_zone_state *zsp) { enum sdebug_z_cond zc; + struct sdeb_store_info *sip = devip2sip(devip, false); if (zbc_zone_is_conv(zsp)) return; @@ -4660,6 +4669,10 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip, if (zsp->z_cond == ZC4_CLOSED) devip->nr_closed--; + if (zsp->z_wp > zsp->z_start) + memset(sip->storep + zsp->z_start * sdebug_sector_size, 0, + (zsp->z_wp - zsp->z_start) * sdebug_sector_size); + zsp->z_non_seq_resource = false; zsp->z_wp = zsp->z_start; zsp->z_cond = ZC1_EMPTY; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index a531336f6a0a..2371edbc3af4 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -133,6 +133,23 @@ static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd) return true; } +static void scsi_eh_complete_abort(struct scsi_cmnd *scmd, struct Scsi_Host *shost) +{ + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + list_del_init(&scmd->eh_entry); + /* + * If the abort succeeds, and there is no further + * EH action, clear the ->last_reset time. + */ + if (list_empty(&shost->eh_abort_list) && + list_empty(&shost->eh_cmd_q)) + if (shost->eh_deadline != -1) + shost->last_reset = 0; + spin_unlock_irqrestore(shost->host_lock, flags); +} + /** * scmd_eh_abort_handler - Handle command aborts * @work: command to be aborted. 
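
The scsi_error.c hunks in this patch introduce a per-host eh_abort_list: a command is linked onto it when its abort work is scheduled, and unlinked again on every completion path, always under host_lock, so that shost->last_reset can be cleared once the last outstanding abort has finished. As a rough illustration of that bookkeeping pattern only (the demo_* names and the reduced structures below are invented for this sketch and are not part of the patch or of the SCSI midlayer):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct demo_cmd {
	struct list_head eh_entry;	/* links the command on the abort list */
};

struct demo_host {
	spinlock_t lock;		/* stands in for shost->host_lock */
	struct list_head eh_abort_list;	/* commands with a scheduled abort */
	struct list_head eh_cmd_q;	/* commands queued for error handling */
	int eh_deadline;
	unsigned long last_reset;
};

/* On abort scheduling: record the deadline base and track the command. */
static void demo_schedule_abort(struct demo_host *host, struct demo_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->eh_deadline != -1 && !host->last_reset)
		host->last_reset = jiffies;
	list_add_tail(&cmd->eh_entry, &host->eh_abort_list);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * On abort completion: untrack the command; if no aborts remain and no
 * other error handling is pending, drop the deadline base again.
 */
static void demo_complete_abort(struct demo_host *host, struct demo_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	list_del_init(&cmd->eh_entry);
	if (list_empty(&host->eh_abort_list) && list_empty(&host->eh_cmd_q) &&
	    host->eh_deadline != -1)
		host->last_reset = 0;
	spin_unlock_irqrestore(&host->lock, flags);
}

The list head itself has to be initialised when the command is set up, which is what the INIT_LIST_HEAD(&cmd->eh_entry) addition in the scsi_lib.c hunk further down provides.
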
@@ -150,6 +167,7 @@ scmd_eh_abort_handler(struct work_struct *work) container_of(work, struct scsi_cmnd, abort_work.work); struct scsi_device *sdev = scmd->device; enum scsi_disposition rtn; + unsigned long flags; if (scsi_host_eh_past_deadline(sdev->host)) { SCSI_LOG_ERROR_RECOVERY(3, @@ -173,12 +191,14 @@ scmd_eh_abort_handler(struct work_struct *work) SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_WARNING, scmd, "retry aborted command\n")); + scsi_eh_complete_abort(scmd, sdev->host); scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); return; } else { SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_WARNING, scmd, "finish aborted command\n")); + scsi_eh_complete_abort(scmd, sdev->host); scsi_finish_command(scmd); return; } @@ -191,6 +211,9 @@ scmd_eh_abort_handler(struct work_struct *work) } } + spin_lock_irqsave(sdev->host->host_lock, flags); + list_del_init(&scmd->eh_entry); + spin_unlock_irqrestore(sdev->host->host_lock, flags); scsi_eh_scmd_add(scmd); } @@ -221,6 +244,8 @@ scsi_abort_command(struct scsi_cmnd *scmd) spin_lock_irqsave(shost->host_lock, flags); if (shost->eh_deadline != -1 && !shost->last_reset) shost->last_reset = jiffies; + BUG_ON(!list_empty(&scmd->eh_entry)); + list_add_tail(&scmd->eh_entry, &shost->eh_abort_list); spin_unlock_irqrestore(shost->host_lock, flags); scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 34412eac4566..400df3354cd6 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -347,6 +347,8 @@ static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq, { struct scsi_request *req = scsi_req(rq); + if (hdr->cmd_len < 6) + return -EMSGSIZE; if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len)) return -EFAULT; if (!scsi_cmd_allowed(req->cmd, mode)) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b731c2983515..621d841d819a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1153,6 +1153,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) cmd->sense_buffer = buf; cmd->prot_sdb = prot; cmd->flags = flags; + INIT_LIST_HEAD(&cmd->eh_entry); INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); cmd->jiffies_at_alloc = jiffies_at_alloc; cmd->retries = retries; @@ -1184,8 +1185,6 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev, } cmd->cmd_len = scsi_req(req)->cmd_len; - if (cmd->cmd_len == 0) - cmd->cmd_len = scsi_command_size(cmd->cmnd); cmd->cmnd = scsi_req(req)->cmd; cmd->transfersize = blk_rq_bytes(req); cmd->allowed = scsi_req(req)->retries; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index d3d362289ecc..d4edce930a4a 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -792,6 +792,7 @@ store_state_field(struct device *dev, struct device_attribute *attr, int i, ret; struct scsi_device *sdev = to_scsi_device(dev); enum scsi_device_state state = 0; + bool rescan_dev = false; for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { const int len = strlen(sdev_states[i].name); @@ -810,20 +811,27 @@ store_state_field(struct device *dev, struct device_attribute *attr, } mutex_lock(&sdev->state_mutex); - ret = scsi_device_set_state(sdev, state); - /* - * If the device state changes to SDEV_RUNNING, we need to - * run the queue to avoid I/O hang, and rescan the device - * to revalidate it. 
Running the queue first is necessary - * because another thread may be waiting inside - * blk_mq_freeze_queue_wait() and because that call may be - * waiting for pending I/O to finish. - */ - if (ret == 0 && state == SDEV_RUNNING) { + if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) { + ret = 0; + } else { + ret = scsi_device_set_state(sdev, state); + if (ret == 0 && state == SDEV_RUNNING) + rescan_dev = true; + } + mutex_unlock(&sdev->state_mutex); + + if (rescan_dev) { + /* + * If the device state changes to SDEV_RUNNING, we need to + * run the queue to avoid I/O hang, and rescan the device + * to revalidate it. Running the queue first is necessary + * because another thread may be waiting inside + * blk_mq_freeze_queue_wait() and because that call may be + * waiting for pending I/O to finish. + */ blk_mq_run_hw_queues(sdev->request_queue, true); scsi_rescan_device(dev); } - mutex_unlock(&sdev->state_mutex); return ret == 0 ? count : -EINVAL; } @@ -1383,6 +1391,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) * We're treating error on bsg register as non-fatal, so * pretend nothing went wrong. */ + error = PTR_ERR(sdev->bsg_dev); sdev_printk(KERN_INFO, sdev, "Failed to register bsg queue, errno=%d\n", error); @@ -1580,7 +1589,6 @@ static struct device_type scsi_dev_type = { void scsi_sysfs_device_initialize(struct scsi_device *sdev) { - int i, j = 0; unsigned long flags; struct Scsi_Host *shost = sdev->host; struct scsi_host_template *hostt = shost->hostt; @@ -1592,15 +1600,7 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev) scsi_enable_async_suspend(&sdev->sdev_gendev); dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu", sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); - sdev->gendev_attr_groups[j++] = &scsi_sdev_attr_group; - if (hostt->sdev_groups) { - for (i = 0; hostt->sdev_groups[i] && - j < ARRAY_SIZE(sdev->gendev_attr_groups); - i++, j++) { - sdev->gendev_attr_groups[j] = hostt->sdev_groups[i]; - } - } - WARN_ON_ONCE(j >= ARRAY_SIZE(sdev->gendev_attr_groups)); + sdev->sdev_gendev.groups = hostt->sdev_groups; device_initialize(&sdev->sdev_dev); sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 78343d3f9385..554b6f784223 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1899,12 +1899,12 @@ static void session_recovery_timedout(struct work_struct *work) } spin_unlock_irqrestore(&session->lock, flags); - if (session->transport->session_recovery_timedout) - session->transport->session_recovery_timedout(session); - ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n"); scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n"); + + if (session->transport->session_recovery_timedout) + session->transport->session_recovery_timedout(session); } static void __iscsi_unblock_session(struct work_struct *work) diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 50f8dc960ae2..8e4af111c078 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -693,7 +693,6 @@ static int sr_probe(struct device *dev) cd->device = sdev; cd->disk = disk; cd->driver = &sr_template; - cd->disk = disk; cd->capacity = 0x1fffff; cd->device->changed = 1; /* force recheck CD type */ cd->media_present = 1; diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c index 4e1ff209b933..4a0bbcf1757a 100644 --- a/drivers/scsi/ufs/ufs-debugfs.c 
+++ b/drivers/scsi/ufs/ufs-debugfs.c @@ -8,6 +8,18 @@ static struct dentry *ufs_debugfs_root; +struct ufs_debugfs_attr { + const char *name; + mode_t mode; + const struct file_operations *fops; +}; + +/* @file corresponds to a debugfs attribute in directory hba->debugfs_root. */ +static inline struct ufs_hba *hba_from_file(const struct file *file) +{ + return d_inode(file->f_path.dentry->d_parent)->i_private; +} + void __init ufs_debugfs_init(void) { ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL); @@ -20,7 +32,7 @@ void ufs_debugfs_exit(void) static int ufs_debugfs_stats_show(struct seq_file *s, void *data) { - struct ufs_hba *hba = s->private; + struct ufs_hba *hba = hba_from_file(s->file); struct ufs_event_hist *e = hba->ufs_stats.event; #define PRT(fmt, typ) \ @@ -126,13 +138,93 @@ static void ufs_debugfs_restart_ee(struct work_struct *work) ufs_debugfs_put_user_access(hba); } +static int ufs_saved_err_show(struct seq_file *s, void *data) +{ + struct ufs_debugfs_attr *attr = s->private; + struct ufs_hba *hba = hba_from_file(s->file); + const int *p; + + if (strcmp(attr->name, "saved_err") == 0) { + p = &hba->saved_err; + } else if (strcmp(attr->name, "saved_uic_err") == 0) { + p = &hba->saved_uic_err; + } else { + return -ENOENT; + } + + seq_printf(s, "%d\n", *p); + return 0; +} + +static ssize_t ufs_saved_err_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ufs_debugfs_attr *attr = file->f_inode->i_private; + struct ufs_hba *hba = hba_from_file(file); + char val_str[16] = { }; + int val, ret; + + if (count > sizeof(val_str)) + return -EINVAL; + if (copy_from_user(val_str, buf, count)) + return -EFAULT; + ret = kstrtoint(val_str, 0, &val); + if (ret < 0) + return ret; + + spin_lock_irq(hba->host->host_lock); + if (strcmp(attr->name, "saved_err") == 0) { + hba->saved_err = val; + } else if (strcmp(attr->name, "saved_uic_err") == 0) { + hba->saved_uic_err = val; + } else { + ret = -ENOENT; + } + if (ret == 0) + ufshcd_schedule_eh_work(hba); + spin_unlock_irq(hba->host->host_lock); + + return ret < 0 ? 
ret : count; +} + +static int ufs_saved_err_open(struct inode *inode, struct file *file) +{ + return single_open(file, ufs_saved_err_show, inode->i_private); +} + +static const struct file_operations ufs_saved_err_fops = { + .owner = THIS_MODULE, + .open = ufs_saved_err_open, + .read = seq_read, + .write = ufs_saved_err_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct ufs_debugfs_attr ufs_attrs[] = { + { "stats", 0400, &ufs_debugfs_stats_fops }, + { "saved_err", 0600, &ufs_saved_err_fops }, + { "saved_uic_err", 0600, &ufs_saved_err_fops }, + { } +}; + void ufs_debugfs_hba_init(struct ufs_hba *hba) { + const struct ufs_debugfs_attr *attr; + struct dentry *root; + /* Set default exception event rate limit period to 20ms */ hba->debugfs_ee_rate_limit_ms = 20; INIT_DELAYED_WORK(&hba->debugfs_ee_work, ufs_debugfs_restart_ee); - hba->debugfs_root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root); - debugfs_create_file("stats", 0400, hba->debugfs_root, hba, &ufs_debugfs_stats_fops); + + root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root); + if (IS_ERR_OR_NULL(root)) + return; + hba->debugfs_root = root; + d_inode(root)->i_private = hba; + for (attr = ufs_attrs; attr->name; attr++) + debugfs_create_file(attr->name, attr->mode, root, (void *)attr, + attr->fops); debugfs_create_file("exception_event_mask", 0600, hba->debugfs_root, hba, &ee_usr_mask_fops); debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root, diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c index 30d0c1aba0c7..cd26bc82462e 100644 --- a/drivers/scsi/ufs/ufs-exynos.c +++ b/drivers/scsi/ufs/ufs-exynos.c @@ -12,8 +12,10 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/mfd/syscon.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/regmap.h> #include "ufshcd.h" #include "ufshcd-pltfrm.h" @@ -48,10 +50,11 @@ #define HCI_ERR_EN_T_LAYER 0x84 #define HCI_ERR_EN_DME_LAYER 0x88 #define HCI_CLKSTOP_CTRL 0xB0 +#define REFCLKOUT_STOP BIT(4) #define REFCLK_STOP BIT(2) #define UNIPRO_MCLK_STOP BIT(1) #define UNIPRO_PCLK_STOP BIT(0) -#define CLK_STOP_MASK (REFCLK_STOP |\ +#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\ UNIPRO_MCLK_STOP |\ UNIPRO_PCLK_STOP) #define HCI_MISC 0xB4 @@ -74,6 +77,52 @@ UIC_TRANSPORT_NO_CONNECTION_RX |\ UIC_TRANSPORT_BAD_TC) +/* FSYS UFS Shareability */ +#define UFS_WR_SHARABLE BIT(2) +#define UFS_RD_SHARABLE BIT(1) +#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE) +#define UFS_SHAREABILITY_OFFSET 0x710 + +/* Multi-host registers */ +#define MHCTRL 0xC4 +#define MHCTRL_EN_VH_MASK (0xE) +#define MHCTRL_EN_VH(vh) (vh << 1) +#define PH2VH_MBOX 0xD8 + +#define MH_MSG_MASK (0xFF) + +#define MH_MSG(id, msg) ((id << 8) | (msg & 0xFF)) +#define MH_MSG_PH_READY 0x1 +#define MH_MSG_VH_READY 0x2 + +#define ALLOW_INQUIRY BIT(25) +#define ALLOW_MODE_SELECT BIT(24) +#define ALLOW_MODE_SENSE BIT(23) +#define ALLOW_PRE_FETCH GENMASK(22, 21) +#define ALLOW_READ_CMD_ALL GENMASK(20, 18) /* read_6/10/16 */ +#define ALLOW_READ_BUFFER BIT(17) +#define ALLOW_READ_CAPACITY GENMASK(16, 15) +#define ALLOW_REPORT_LUNS BIT(14) +#define ALLOW_REQUEST_SENSE BIT(13) +#define ALLOW_SYNCHRONIZE_CACHE GENMASK(8, 7) +#define ALLOW_TEST_UNIT_READY BIT(6) +#define ALLOW_UNMAP BIT(5) +#define ALLOW_VERIFY BIT(4) +#define ALLOW_WRITE_CMD_ALL GENMASK(3, 1) /* write_6/10/16 */ + +#define ALLOW_TRANS_VH_DEFAULT (ALLOW_INQUIRY | ALLOW_MODE_SELECT | \ + ALLOW_MODE_SENSE 
| ALLOW_PRE_FETCH | \ + ALLOW_READ_CMD_ALL | ALLOW_READ_BUFFER | \ + ALLOW_READ_CAPACITY | ALLOW_REPORT_LUNS | \ + ALLOW_REQUEST_SENSE | ALLOW_SYNCHRONIZE_CACHE | \ + ALLOW_TEST_UNIT_READY | ALLOW_UNMAP | \ + ALLOW_VERIFY | ALLOW_WRITE_CMD_ALL) + +#define HCI_MH_ALLOWABLE_TRAN_OF_VH 0x30C +#define HCI_MH_IID_IN_TASK_TAG 0X308 + +#define PH_READY_TIMEOUT_MS (5 * MSEC_PER_SEC) + enum { UNIPRO_L1_5 = 0,/* PHY Adapter */ UNIPRO_L2, /* Data Link */ @@ -149,6 +198,117 @@ static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) return 0; } +static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) +{ + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + + /* IO Coherency setting */ + if (ufs->sysreg) { + return regmap_update_bits(ufs->sysreg, + ufs->shareability_reg_offset, + UFS_SHARABLE, UFS_SHARABLE); + } + + attr->tx_dif_p_nsec = 3200000; + + return 0; +} + +static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + + /* Enable Virtual Host #1 */ + ufshcd_rmwl(hba, MHCTRL_EN_VH_MASK, MHCTRL_EN_VH(1), MHCTRL); + /* Default VH Transfer permissions */ + hci_writel(ufs, ALLOW_TRANS_VH_DEFAULT, HCI_MH_ALLOWABLE_TRAN_OF_VH); + /* IID information is replaced in TASKTAG[7:5] instead of IID in UCD */ + hci_writel(ufs, 0x1, HCI_MH_IID_IN_TASK_TAG); + + return 0; +} + +static int exynosauto_ufs_pre_link(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + int i; + u32 tx_line_reset_period, rx_line_reset_period; + + rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC; + tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC; + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40); + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0); + + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i), + (rx_line_reset_period >> 16) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i), + (rx_line_reset_period >> 8) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i), + (rx_line_reset_period) & 0xFF); + + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x79); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 0xf6); + } + + for_each_ufs_tx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + /* Not to affect VND_TX_LINERESET_PVALUE to VND_TX_CLK_PRD */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i), + 0x02); + + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i), + (tx_line_reset_period >> 16) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i), + (tx_line_reset_period >> 8) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i), + (tx_line_reset_period) & 0xFF); + + /* TX PWM Gear Capability / PWM_G1_ONLY */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 0x1); + } + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0); + + ufshcd_dme_set(hba, UIC_ARG_MIB(0xa011), 0x8000); + + return 0; +} + +static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + + /* PACP_PWR_req and delivered to the remote DME */ + ufshcd_dme_set(hba, 
UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000); + + return 0; +} + +static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + u32 enabled_vh; + + enabled_vh = ufshcd_readl(hba, MHCTRL) & MHCTRL_EN_VH_MASK; + + /* Send physical host ready message to virtual hosts */ + ufshcd_writel(hba, MH_MSG(enabled_vh, MH_MSG_PH_READY), PH2VH_MBOX); + + return 0; +} + static int exynos7_ufs_pre_link(struct exynos_ufs *ufs) { struct ufs_hba *hba = ufs->hba; @@ -793,6 +953,27 @@ static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index) } } +static int exynos_ufs_setup_clocks(struct ufs_hba *hba, bool on, + enum ufs_notify_change_status status) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (!ufs) + return 0; + + if (on && status == PRE_CHANGE) { + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_disable_auto_ctrl_hcc(ufs); + exynos_ufs_ungate_clks(ufs); + } else if (!on && status == POST_CHANGE) { + exynos_ufs_gate_clks(ufs); + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_enable_auto_ctrl_hcc(ufs); + } + + return 0; +} + static int exynos_ufs_pre_link(struct ufs_hba *hba) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); @@ -808,8 +989,12 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba) /* m-phy */ exynos_ufs_phy_init(ufs); - exynos_ufs_config_phy_time_attr(ufs); - exynos_ufs_config_phy_cap_attr(ufs); + if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) { + exynos_ufs_config_phy_time_attr(ufs); + exynos_ufs_config_phy_cap_attr(ufs); + } + + exynos_ufs_setup_clocks(hba, true, PRE_CHANGE); if (ufs->drv_data->pre_link) ufs->drv_data->pre_link(ufs); @@ -893,17 +1078,10 @@ static int exynos_ufs_post_link(struct ufs_hba *hba) static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs) { struct device_node *np = dev->of_node; - struct exynos_ufs_drv_data *drv_data = &exynos_ufs_drvs; struct exynos_ufs_uic_attr *attr; int ret = 0; - while (drv_data->compatible) { - if (of_device_is_compatible(np, drv_data->compatible)) { - ufs->drv_data = drv_data; - break; - } - drv_data++; - } + ufs->drv_data = device_get_match_data(dev); if (ufs->drv_data && ufs->drv_data->uic_attr) { attr = ufs->drv_data->uic_attr; @@ -913,6 +1091,17 @@ static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs) goto out; } + ufs->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg"); + if (IS_ERR(ufs->sysreg)) + ufs->sysreg = NULL; + else { + if (of_property_read_u32_index(np, "samsung,sysreg", 1, + &ufs->shareability_reg_offset)) { + dev_warn(dev, "can't get an offset from sysreg. 
Set to default value\n"); + ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET; + } + } + ufs->pclk_avail_min = PCLK_AVAIL_MIN; ufs->pclk_avail_max = PCLK_AVAIL_MAX; @@ -927,6 +1116,18 @@ out: return ret; } +static inline void exynos_ufs_priv_init(struct ufs_hba *hba, + struct exynos_ufs *ufs) +{ + ufs->hba = hba; + ufs->opts = ufs->drv_data->opts; + ufs->rx_sel_idx = PA_MAXDATALANES; + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX) + ufs->rx_sel_idx = 0; + hba->priv = (void *)ufs; + hba->quirks = ufs->drv_data->quirks; +} + static int exynos_ufs_init(struct ufs_hba *hba) { struct device *dev = hba->dev; @@ -976,13 +1177,8 @@ static int exynos_ufs_init(struct ufs_hba *hba) if (ret) goto phy_off; - ufs->hba = hba; - ufs->opts = ufs->drv_data->opts; - ufs->rx_sel_idx = PA_MAXDATALANES; - if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX) - ufs->rx_sel_idx = 0; - hba->priv = (void *)ufs; - hba->quirks = ufs->drv_data->quirks; + exynos_ufs_priv_init(hba, ufs); + if (ufs->drv_data->drv_init) { ret = ufs->drv_data->drv_init(dev, ufs); if (ret) { @@ -1110,6 +1306,12 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba, switch (status) { case PRE_CHANGE: + if (ufs->drv_data->pre_hce_enable) { + ret = ufs->drv_data->pre_hce_enable(ufs); + if (ret) + return ret; + } + ret = exynos_ufs_host_reset(hba); if (ret) return ret; @@ -1119,6 +1321,10 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba, exynos_ufs_calc_pwm_clk_div(ufs); if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)) exynos_ufs_enable_auto_ctrl_hcc(ufs); + + if (ufs->drv_data->post_hce_enable) + ret = ufs->drv_data->post_hce_enable(ufs); + break; } @@ -1202,12 +1408,77 @@ static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) return 0; } +static int exynosauto_ufs_vh_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + if (status == POST_CHANGE) { + ufshcd_set_link_active(hba); + ufshcd_set_ufs_dev_active(hba); + } + + return 0; +} + +static int exynosauto_ufs_vh_wait_ph_ready(struct ufs_hba *hba) +{ + u32 mbox; + ktime_t start, stop; + + start = ktime_get(); + stop = ktime_add(start, ms_to_ktime(PH_READY_TIMEOUT_MS)); + + do { + mbox = ufshcd_readl(hba, PH2VH_MBOX); + /* TODO: Mailbox message protocols between the PH and VHs are + * not implemented yet. 
This will be supported later + */ + if ((mbox & MH_MSG_MASK) == MH_MSG_PH_READY) + return 0; + + usleep_range(40, 50); + } while (ktime_before(ktime_get(), stop)); + + return -ETIME; +} + +static int exynosauto_ufs_vh_init(struct ufs_hba *hba) +{ + struct device *dev = hba->dev; + struct platform_device *pdev = to_platform_device(dev); + struct exynos_ufs *ufs; + int ret; + + ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL); + if (!ufs) + return -ENOMEM; + + /* exynos-specific hci */ + ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci"); + if (IS_ERR(ufs->reg_hci)) { + dev_err(dev, "cannot ioremap for hci vendor register\n"); + return PTR_ERR(ufs->reg_hci); + } + + ret = exynosauto_ufs_vh_wait_ph_ready(hba); + if (ret) + return ret; + + ufs->drv_data = device_get_match_data(dev); + if (!ufs->drv_data) + return -ENODEV; + + exynos_ufs_priv_init(hba, ufs); + + return 0; +} + static struct ufs_hba_variant_ops ufs_hba_exynos_ops = { .name = "exynos_ufs", .init = exynos_ufs_init, .hce_enable_notify = exynos_ufs_hce_enable_notify, .link_startup_notify = exynos_ufs_link_startup_notify, .pwr_change_notify = exynos_ufs_pwr_change_notify, + .setup_clocks = exynos_ufs_setup_clocks, .setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req, .setup_task_mgmt = exynos_ufs_specify_nexus_t_tm_req, .hibern8_notify = exynos_ufs_hibern8_notify, @@ -1215,12 +1486,24 @@ static struct ufs_hba_variant_ops ufs_hba_exynos_ops = { .resume = exynos_ufs_resume, }; +static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = { + .name = "exynosauto_ufs_vh", + .init = exynosauto_ufs_vh_init, + .link_startup_notify = exynosauto_ufs_vh_link_startup_notify, +}; + static int exynos_ufs_probe(struct platform_device *pdev) { int err; struct device *dev = &pdev->dev; + const struct ufs_hba_variant_ops *vops = &ufs_hba_exynos_ops; + const struct exynos_ufs_drv_data *drv_data = + device_get_match_data(dev); - err = ufshcd_pltfrm_init(pdev, &ufs_hba_exynos_ops); + if (drv_data && drv_data->vops) + vops = drv_data->vops; + + err = ufshcd_pltfrm_init(pdev, vops); if (err) dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); @@ -1261,8 +1544,35 @@ static struct exynos_ufs_uic_attr exynos7_uic_attr = { .pa_dbg_option_suite = 0x30103, }; +static struct exynos_ufs_drv_data exynosauto_ufs_drvs = { + .uic_attr = &exynos7_uic_attr, + .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR | + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | + UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING, + .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL | + EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | + EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX, + .drv_init = exynosauto_ufs_drv_init, + .post_hce_enable = exynosauto_ufs_post_hce_enable, + .pre_link = exynosauto_ufs_pre_link, + .pre_pwr_change = exynosauto_ufs_pre_pwr_change, + .post_pwr_change = exynosauto_ufs_post_pwr_change, +}; + +static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = { + .vops = &ufs_hba_exynosauto_vh_ops, + .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR | + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | + UFSHCI_QUIRK_BROKEN_HCE | + UFSHCD_QUIRK_BROKEN_UIC_CMD | + UFSHCD_QUIRK_SKIP_PH_CONFIGURATION | + UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING, + .opts = EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX, +}; + static struct exynos_ufs_drv_data exynos_ufs_drvs = { - .compatible = "samsung,exynos7-ufs", .uic_attr = &exynos7_uic_attr, .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR | @@ -1287,6 +1597,10 @@ static struct exynos_ufs_drv_data 
exynos_ufs_drvs = { static const struct of_device_id exynos_ufs_of_match[] = { { .compatible = "samsung,exynos7-ufs", .data = &exynos_ufs_drvs }, + { .compatible = "samsung,exynosautov9-ufs", + .data = &exynosauto_ufs_drvs }, + { .compatible = "samsung,exynosautov9-ufs-vh", + .data = &exynosauto_ufs_vh_drvs }, {}, }; diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h index dadf4fd10dd8..1c33e5466082 100644 --- a/drivers/scsi/ufs/ufs-exynos.h +++ b/drivers/scsi/ufs/ufs-exynos.h @@ -56,6 +56,22 @@ #define TX_GRAN_NVAL_10_08 0x0296 #define TX_GRAN_NVAL_H(v) (((v) >> 8) & 0x3) +#define VND_TX_CLK_PRD 0xAA +#define VND_TX_CLK_PRD_EN 0xA9 +#define VND_TX_LINERESET_PVALUE0 0xAD +#define VND_TX_LINERESET_PVALUE1 0xAC +#define VND_TX_LINERESET_PVALUE2 0xAB + +#define TX_LINE_RESET_TIME 3200 + +#define VND_RX_CLK_PRD 0x12 +#define VND_RX_CLK_PRD_EN 0x11 +#define VND_RX_LINERESET_VALUE0 0x1D +#define VND_RX_LINERESET_VALUE1 0x1C +#define VND_RX_LINERESET_VALUE2 0x1B + +#define RX_LINE_RESET_TIME 1000 + #define RX_FILLER_ENABLE 0x0316 #define RX_FILLER_EN (1 << 1) #define RX_LINERESET_VAL 0x0317 @@ -99,7 +115,7 @@ struct exynos_ufs; #define PA_HIBERN8TIME_VAL 0x20 #define PCLK_AVAIL_MIN 70000000 -#define PCLK_AVAIL_MAX 133000000 +#define PCLK_AVAIL_MAX 167000000 struct exynos_ufs_uic_attr { /* TX Attributes */ @@ -142,7 +158,7 @@ struct exynos_ufs_uic_attr { }; struct exynos_ufs_drv_data { - char *compatible; + const struct ufs_hba_variant_ops *vops; struct exynos_ufs_uic_attr *uic_attr; unsigned int quirks; unsigned int opts; @@ -154,6 +170,8 @@ struct exynos_ufs_drv_data { struct ufs_pa_layer_attr *pwr); int (*post_pwr_change)(struct exynos_ufs *ufs, struct ufs_pa_layer_attr *pwr); + int (*pre_hce_enable)(struct exynos_ufs *ufs); + int (*post_hce_enable)(struct exynos_ufs *ufs); }; struct ufs_phy_time_cfg { @@ -191,7 +209,9 @@ struct exynos_ufs { struct ufs_pa_layer_attr dev_req_params; struct ufs_phy_time_cfg t_cfg; ktime_t entry_hibern8_t; - struct exynos_ufs_drv_data *drv_data; + const struct exynos_ufs_drv_data *drv_data; + struct regmap *sysreg; + u32 shareability_reg_offset; u32 opts; #define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL BIT(0) @@ -199,6 +219,7 @@ struct exynos_ufs { #define EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL BIT(2) #define EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX BIT(3) #define EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER BIT(4) +#define EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR BIT(5) }; #define for_each_ufs_rx_lane(ufs, i) \ diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index fc5b214347b3..5393b5c9dd9c 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -1189,6 +1189,7 @@ static int ufs_mtk_probe(struct platform_device *pdev) } link = device_link_add(dev, &reset_pdev->dev, DL_FLAG_AUTOPROBE_CONSUMER); + put_device(&reset_pdev->dev); if (!link) { dev_notice(dev, "add reset device_link fail\n"); goto skip_reset; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 5c6a58a666d2..13c09dbd99b9 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -132,6 +132,14 @@ enum { UFSHCD_CAN_QUEUE = 32, }; +static const char *const ufshcd_state_name[] = { + [UFSHCD_STATE_RESET] = "reset", + [UFSHCD_STATE_OPERATIONAL] = "operational", + [UFSHCD_STATE_ERROR] = "error", + [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal", + [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal", +}; + /* UFSHCD error handling flags */ enum { UFSHCD_EH_IN_PROGRESS = (1 << 0), @@ -236,7 +244,6 @@ static int 
ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); static irqreturn_t ufshcd_intr(int irq, void *__hba); static int ufshcd_change_power_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *pwr_mode); -static void ufshcd_schedule_eh_work(struct ufs_hba *hba); static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, @@ -711,7 +718,7 @@ static inline bool ufshcd_is_device_present(struct ufs_hba *hba) * This function is used to get the OCS field from UTRD * Returns the OCS field in the UTRD */ -static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) +static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) { return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS; } @@ -2323,6 +2330,9 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) int ret; unsigned long flags; + if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) + return 0; + ufshcd_hold(hba, false); mutex_lock(&hba->uic_cmd_mutex); ufshcd_add_delay_before_dme_cmd(hba); @@ -2367,17 +2377,24 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) sizeof(struct ufshcd_sg_entry))); else lrbp->utr_descriptor_ptr->prd_table_length = - cpu_to_le16((u16) (sg_segments)); + cpu_to_le16(sg_segments); - prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr; + prd_table = lrbp->ucd_prdt_ptr; scsi_for_each_sg(cmd, sg, sg_segments, i) { - prd_table[i].size = - cpu_to_le32(((u32) sg_dma_len(sg))-1); - prd_table[i].base_addr = - cpu_to_le32(lower_32_bits(sg->dma_address)); - prd_table[i].upper_addr = - cpu_to_le32(upper_32_bits(sg->dma_address)); + const unsigned int len = sg_dma_len(sg); + + /* + * From the UFSHCI spec: "Data Byte Count (DBC): A '0' + * based value that indicates the length, in bytes, of + * the data block. A maximum of length of 256KB may + * exist for any entry. Bits 1:0 of this field shall be + * 11b to indicate Dword granularity. A value of '3' + * indicates 4 bytes, '7' indicates 8 bytes, etc." 
+ */ + WARN_ONCE(len > 256 * 1024, "len = %#x\n", len); + prd_table[i].size = cpu_to_le32(len - 1); + prd_table[i].addr = cpu_to_le64(sg->dma_address); prd_table[i].reserved = 0; } } else { @@ -2661,7 +2678,7 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) lrb->ucd_req_dma_addr = cmd_desc_element_addr; lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset; - lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table; + lrb->ucd_prdt_ptr = cmd_descp[i].prd_table; lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset; } @@ -5084,7 +5101,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { int result = 0; int scsi_status; - int ocs; + enum utp_ocs ocs; /* overall command status of utrd */ ocs = ufshcd_get_tr_ocs(lrbp); @@ -5243,11 +5260,9 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) * __ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance * @completed_reqs: bitmask that indicates which requests to complete - * @retry_requests: whether to ask the SCSI core to retry completed requests */ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, - unsigned long completed_reqs, - bool retry_requests) + unsigned long completed_reqs) { struct ufshcd_lrb *lrbp; struct scsi_cmnd *cmd; @@ -5263,8 +5278,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) ufshcd_update_monitor(hba, lrbp); ufshcd_add_command_trace(hba, index, UFS_CMD_COMP); - result = retry_requests ? DID_BUS_BUSY << 16 : - ufshcd_transfer_rsp_status(hba, lrbp); + result = ufshcd_transfer_rsp_status(hba, lrbp); scsi_dma_unmap(cmd); cmd->result = result; /* Mark completed command as NULL in LRB */ @@ -5290,14 +5304,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, /** * ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance - * @retry_requests: whether or not to ask to retry requests * * Returns * IRQ_HANDLED - If interrupt is valid * IRQ_NONE - If invalid interrupt */ -static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba, - bool retry_requests) +static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) { unsigned long completed_reqs, flags; u32 tr_doorbell; @@ -5326,8 +5338,7 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba, spin_unlock_irqrestore(&hba->outstanding_lock, flags); if (completed_reqs) { - __ufshcd_transfer_req_compl(hba, completed_reqs, - retry_requests); + __ufshcd_transfer_req_compl(hba, completed_reqs); return IRQ_HANDLED; } else { return IRQ_NONE; @@ -5826,13 +5837,7 @@ out: /* Complete requests that have door-bell cleared */ static void ufshcd_complete_requests(struct ufs_hba *hba) { - ufshcd_transfer_req_compl(hba, /*retry_requests=*/false); - ufshcd_tmc_handler(hba); -} - -static void ufshcd_retry_aborted_requests(struct ufs_hba *hba) -{ - ufshcd_transfer_req_compl(hba, /*retry_requests=*/true); + ufshcd_transfer_req_compl(hba); ufshcd_tmc_handler(hba); } @@ -5914,9 +5919,10 @@ static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); } -/* host lock must be held before calling this func */ -static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba) +void ufshcd_schedule_eh_work(struct ufs_hba *hba) { + 
lockdep_assert_held(hba->host->host_lock); + /* handle fatal errors only when link is not in error state */ if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { if (hba->force_reset || ufshcd_is_link_broken(hba) || @@ -6076,6 +6082,13 @@ static void ufshcd_err_handler(struct work_struct *work) hba = container_of(work, struct ufs_hba, eh_work); + dev_info(hba->dev, + "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n", + __func__, ufshcd_state_name[hba->ufshcd_state], + hba->is_powered, hba->shutting_down, hba->saved_err, + hba->saved_uic_err, hba->force_reset, + ufshcd_is_link_broken(hba) ? "; link is broken" : ""); + down(&hba->host_sem); spin_lock_irqsave(hba->host->host_lock, flags); if (ufshcd_err_handling_should_stop(hba)) { @@ -6170,6 +6183,8 @@ again: err_xfer = true; goto lock_skip_pending_xfer_clear; } + dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag, + hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1); } /* Clear pending task management requests */ @@ -6181,7 +6196,8 @@ again: } lock_skip_pending_xfer_clear: - ufshcd_retry_aborted_requests(hba); + /* Complete the requests that are cleared by s/w */ + ufshcd_complete_requests(hba); spin_lock_irqsave(hba->host->host_lock, flags); hba->silence_err_logs = false; @@ -6249,6 +6265,9 @@ skip_err_handling: spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_err_handling_unprepare(hba); up(&hba->host_sem); + + dev_info(hba->dev, "%s finished; HBA state %s\n", __func__, + ufshcd_state_name[hba->ufshcd_state]); } /** @@ -6434,9 +6453,8 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) irqreturn_t ret = IRQ_NONE; int tag; - pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); - spin_lock_irqsave(hba->host->host_lock, flags); + pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); issued = hba->outstanding_tasks & ~pending; for_each_set_bit(tag, &issued, hba->nutmrs) { struct request *req = hba->tmf_rqs[tag]; @@ -6473,7 +6491,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) retval |= ufshcd_tmc_handler(hba); if (intr_status & UTP_TRANSFER_REQ_COMPL) - retval |= ufshcd_transfer_req_compl(hba, /*retry_requests=*/false); + retval |= ufshcd_transfer_req_compl(hba); return retval; } @@ -6545,6 +6563,10 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) err = ufshcd_wait_for_register(hba, REG_UTP_TASK_REQ_DOOR_BELL, mask, 0, 1000, 1000); + + dev_err(hba->dev, "Clearing task management function with tag %d %s\n", + tag, err ? "succeeded" : "failed"); + out: return err; } @@ -6593,11 +6615,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, err = wait_for_completion_io_timeout(&wait, msecs_to_jiffies(TM_CMD_TIMEOUT)); if (!err) { - /* - * Make sure that ufshcd_compl_tm() does not trigger a - * use-after-free. 
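On the ufshcd_tmc_handler() hunk above, which moves the doorbell read inside host_lock: the hardware snapshot and hba->outstanding_tasks need to be observed under the same lock, otherwise a tag issued between the two reads can look completed. A minimal sketch of that pattern, with a hypothetical example_ctrl structure standing in for the real adapter state:

/* Illustrative sketch only; struct example_ctrl and its fields are hypothetical. */
struct example_ctrl {
        spinlock_t lock;
        void __iomem *doorbell;
        unsigned long outstanding;
};

static unsigned long example_completed_tags(struct example_ctrl *c)
{
        unsigned long pending, issued, flags;

        /*
         * Snapshot the doorbell register and the software bookkeeping
         * under one lock; a doorbell value read outside the lock could
         * race with a concurrent submission and misreport a just-issued
         * tag as completed.
         */
        spin_lock_irqsave(&c->lock, flags);
        pending = readl(c->doorbell);
        issued = c->outstanding & ~pending;
        spin_unlock_irqrestore(&c->lock, flags);

        return issued;
}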
- */ - req->end_io_data = NULL; ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR); dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", __func__, tm_function); @@ -6637,7 +6654,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, u8 tm_function, u8 *tm_response) { struct utp_task_req_desc treq = { { 0 }, }; - int ocs_value, err; + enum utp_ocs ocs_value; + int err; /* Configure task request descriptor */ treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); @@ -6815,7 +6833,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, int err; enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY; struct utp_task_req_desc treq = { { 0 }, }; - int ocs_value; + enum utp_ocs ocs_value; u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC; switch (msgcode) { @@ -6893,7 +6911,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) err = ufshcd_clear_cmd(hba, pos); if (err) break; - __ufshcd_transfer_req_compl(hba, 1U << pos, false); + __ufshcd_transfer_req_compl(hba, 1U << pos); } } @@ -7055,7 +7073,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) dev_err(hba->dev, "%s: cmd was completed, but without a notifying intr, tag = %d", __func__, tag); - __ufshcd_transfer_req_compl(hba, 1UL << tag, /*retry_requests=*/false); + __ufshcd_transfer_req_compl(hba, 1UL << tag); goto release; } @@ -7092,6 +7110,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) goto release; } + lrbp->cmd = NULL; err = SUCCESS; release: @@ -7121,7 +7140,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) ufshpb_reset_host(hba); ufshcd_hba_stop(hba); hba->silence_err_logs = true; - ufshcd_retry_aborted_requests(hba); + ufshcd_complete_requests(hba); hba->silence_err_logs = false; /* scale up clocks to max frequency before full reinitialization */ @@ -8002,6 +8021,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) if (ret) goto out; + if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) + goto out; + /* Debug counters initialization */ ufshcd_clear_dbg_ufs_stats(hba); @@ -9772,6 +9794,11 @@ static int __init ufshcd_core_init(void) { int ret; + /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */ + static_assert(sizeof(struct utp_transfer_cmd_desc) == + 2 * ALIGNED_UPIU_SIZE + + SG_ALL * sizeof(struct ufshcd_sg_entry)); + ufs_debugfs_init(); ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index dd765863b05f..54750d72c8fb 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -589,6 +589,18 @@ enum ufshcd_quirks { * This quirk allows only sg entries aligned with page size. */ UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14, + + /* + * This quirk needs to be enabled if the host controller does not + * support UIC command + */ + UFSHCD_QUIRK_BROKEN_UIC_CMD = 1 << 15, + + /* + * This quirk needs to be enabled if the host controller cannot + * support physical host configuration. 
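The static_assert() added to ufshcd_core_init() above guards the DMA layout now that ufshcd_sg_entry carries a single __le64 address. A small illustrative sketch of the same compile-time check, using hypothetical example_* types rather than the real descriptor structures:

/* Illustrative sketch only; the example_* types are hypothetical. */
struct example_entry {
        __le64 addr;
        __le32 reserved;
        __le32 size;
};

struct example_table {
        struct example_entry entries[4];
};

/* Fail the build, rather than corrupting DMA layouts at run time,
 * if the compiler ever inserts padding between or inside entries. */
static_assert(sizeof(struct example_entry) == 16);
static_assert(sizeof(struct example_table) == 4 * sizeof(struct example_entry));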
+ */ + UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16, }; enum ufshcd_caps { @@ -1023,6 +1035,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk); void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val); void ufshcd_hba_stop(struct ufs_hba *hba); +void ufshcd_schedule_eh_work(struct ufs_hba *hba); static inline void check_upiu_size(void) { diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index de95be5d11d4..6a295c88d850 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -389,7 +389,7 @@ enum { }; /* Overall command status values */ -enum { +enum utp_ocs { OCS_SUCCESS = 0x0, OCS_INVALID_CMD_TABLE_ATTR = 0x1, OCS_INVALID_PRDT_ATTR = 0x2, @@ -402,6 +402,9 @@ enum { OCS_INVALID_CRYPTO_CONFIG = 0x9, OCS_GENERAL_CRYPTO_ERROR = 0xA, OCS_INVALID_COMMAND_STATUS = 0x0F, +}; + +enum { MASK_OCS = 0x0F, }; @@ -412,20 +415,18 @@ enum { /** * struct ufshcd_sg_entry - UFSHCI PRD Entry - * @base_addr: Lower 32bit physical address DW-0 - * @upper_addr: Upper 32bit physical address DW-1 + * @addr: Physical address; DW-0 and DW-1. * @reserved: Reserved for future use DW-2 * @size: size of physical segment DW-3 */ struct ufshcd_sg_entry { - __le32 base_addr; - __le32 upper_addr; + __le64 addr; __le32 reserved; __le32 size; }; /** - * struct utp_transfer_cmd_desc - UFS Command Descriptor structure + * struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD) * @command_upiu: Command UPIU Frame address * @response_upiu: Response UPIU Frame address * @prd_table: Physical Region Descriptor @@ -451,7 +452,7 @@ struct request_desc_header { }; /** - * struct utp_transfer_req_desc - UTRD structure + * struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD) * @header: UTRD header DW-0 to DW-3 * @command_desc_base_addr_lo: UCD base address low DW-4 * @command_desc_base_addr_hi: UCD base address high DW-5 diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index f4eca441b433..ded5ba9b1466 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -331,7 +331,7 @@ ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, cdb[0] = UFSHPB_READ; if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ) - ppn_tmp = swab64(ppn); + ppn_tmp = (__force __be64)swab64((__force u64)ppn); /* ppn value is stored as big-endian in the host memory */ memcpy(&cdb[6], &ppn_tmp, sizeof(__be64)); @@ -394,8 +394,6 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) if (!ufshpb_is_supported_chunk(hpb, transfer_len)) return 0; - WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH); - if (hpb->is_hcm) { /* * in host control mode, reads are the main source for @@ -1572,7 +1570,7 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba, if (ufshpb_is_legacy(hba)) hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH; else - hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH; + hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd; hpb->lu_pinned_start = hpb_lu_info->pinned_start; hpb->lu_pinned_end = hpb_lu_info->num_pinned ? 
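For the two quirk bits introduced in ufshcd.h above, a hypothetical vendor hook might opt in as sketched below; per the hunks earlier in this series, the core then returns early from ufshcd_send_uic_cmd() and skips the post-link configuration in ufshcd_probe_hba(). The function name is made up for illustration.

/* Illustrative sketch only; example_host_init() is a hypothetical vendor hook. */
static int example_host_init(struct ufs_hba *hba)
{
        /*
         * A controller that cannot issue UIC commands or complete the
         * physical-layer configuration opts in here; the core checks
         * these bits in ufshcd_send_uic_cmd() and ufshcd_probe_hba().
         */
        hba->quirks |= UFSHCD_QUIRK_BROKEN_UIC_CMD |
                       UFSHCD_QUIRK_SKIP_PH_CONFIGURATION;
        return 0;
}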
@@ -2582,7 +2580,7 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) { struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev; int version, ret; - u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW; + int max_single_cmd; hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL]; @@ -2598,18 +2596,22 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) if (version == HPB_SUPPORT_LEGACY_VERSION) hpb_dev_info->is_legacy = true; - ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, - QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd); - if (ret) - dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed", - __func__); - hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd; - /* * Get the number of user logical unit to check whether all * scsi_device finish initialization */ hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU]; + + if (hpb_dev_info->is_legacy) + return; + + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd); + + if (ret) + hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH; + else + hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH); } void ufshpb_init(struct ufs_hba *hba) diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h index f15d8fdbce2e..b475dbd78988 100644 --- a/drivers/scsi/ufs/ufshpb.h +++ b/drivers/scsi/ufs/ufshpb.h @@ -31,7 +31,6 @@ /* hpb support chunk size */ #define HPB_LEGACY_CHUNK_HIGH 1 -#define HPB_MULTI_CHUNK_LOW 7 #define HPB_MULTI_CHUNK_HIGH 255 /* hpb vender defined opcode */ diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 19f7d7b90625..28e1d98ae102 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -977,7 +977,6 @@ static unsigned int features[] = { static struct virtio_driver virtio_scsi_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), - .suppress_used_validation = true, .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index bd0fbcdbdefe..e24e220e56ee 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c @@ -834,8 +834,10 @@ static int __init maple_bus_init(void) maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN); - if (!maple_queue_cache) + if (!maple_queue_cache) { + retval = -ENOMEM; goto cleanup_bothirqs; + } INIT_LIST_HEAD(&maple_waitq); INIT_LIST_HEAD(&maple_sentq); @@ -848,6 +850,7 @@ static int __init maple_bus_init(void) if (!mdev[i]) { while (i-- > 0) maple_free_dev(mdev[i]); + retval = -ENOMEM; goto cleanup_cache; } baseunits[i] = mdev[i]; diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 8b3d268ac63c..b808c94641fa 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -37,6 +37,7 @@ #define CQSPI_NEEDS_WR_DELAY BIT(0) #define CQSPI_DISABLE_DAC_MODE BIT(1) #define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2) +#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3) /* Capabilities */ #define CQSPI_SUPPORTS_OCTAL BIT(0) @@ -86,6 +87,7 @@ struct cqspi_st { struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; bool use_dma_read; u32 pd_dev_id; + bool wr_completion; }; struct cqspi_driver_platdata { @@ -996,9 +998,11 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata, * polling on the controller's side. 
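The maple bus hunks above fix a common error-path bug: jumping to a cleanup label while the return value still holds 0. A self-contained sketch of the intended pattern, with hypothetical names and buffer sizes:

/* Illustrative sketch only; example_do_work() and its buffers are hypothetical. */
static int example_do_work(void)
{
        void *a, *b;
        int retval = 0;

        a = kzalloc(64, GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        b = kzalloc(64, GFP_KERNEL);
        if (!b) {
                /* The bug class being fixed: goto with retval still 0. */
                retval = -ENOMEM;
                goto free_a;
        }

        /* ... use a and b, then release them ... */
        kfree(b);
free_a:
        kfree(a);
        return retval;
}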
spinand and spi-nor will take * care of polling the status register. */ - reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL); - reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL; - writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL); + if (cqspi->wr_completion) { + reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL); + reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL; + writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL); + } reg = readl(reg_base + CQSPI_REG_SIZE); reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; @@ -1736,6 +1740,10 @@ static int cqspi_probe(struct platform_device *pdev) cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); master->max_speed_hz = cqspi->master_ref_clk_hz; + + /* write completion is supported by default */ + cqspi->wr_completion = true; + ddata = of_device_get_match_data(dev); if (ddata) { if (ddata->quirks & CQSPI_NEEDS_WR_DELAY) @@ -1747,6 +1755,8 @@ static int cqspi_probe(struct platform_device *pdev) cqspi->use_direct_mode = true; if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA) cqspi->use_dma_read = true; + if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION) + cqspi->wr_completion = false; if (of_device_is_compatible(pdev->dev.of_node, "xlnx,versal-ospi-1.0")) @@ -1859,6 +1869,10 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = { .quirks = CQSPI_DISABLE_DAC_MODE, }; +static const struct cqspi_driver_platdata socfpga_qspi = { + .quirks = CQSPI_NO_SUPPORT_WR_COMPLETION, +}; + static const struct cqspi_driver_platdata versal_ospi = { .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA, @@ -1887,6 +1901,10 @@ static const struct of_device_id cqspi_dt_ids[] = { .compatible = "xlnx,versal-ospi-1.0", .data = (void *)&versal_ospi, }, + { + .compatible = "intel,socfpga-qspi", + .data = (void *)&socfpga_qspi, + }, { /* end of table */ } }; diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 5d98611dd999..c72e501c270f 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -912,7 +912,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev) ret = devm_spi_register_controller(&pdev->dev, controller); if (ret < 0) { - dev_err(&pdev->dev, "spi_register_controller error.\n"); + dev_err_probe(&pdev->dev, ret, "spi_register_controller error: %i\n", ret); goto out_pm_get; } diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index 27a446faf143..e2affaee4e76 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -491,22 +491,26 @@ static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas) int ret; mas->tx = dma_request_chan(mas->dev, "tx"); - ret = dev_err_probe(mas->dev, IS_ERR(mas->tx), "Failed to get tx DMA ch\n"); - if (ret < 0) + if (IS_ERR(mas->tx)) { + ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx), + "Failed to get tx DMA ch\n"); goto err_tx; + } mas->rx = dma_request_chan(mas->dev, "rx"); - ret = dev_err_probe(mas->dev, IS_ERR(mas->rx), "Failed to get rx DMA ch\n"); - if (ret < 0) + if (IS_ERR(mas->rx)) { + ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx), + "Failed to get rx DMA ch\n"); goto err_rx; + } return 0; err_rx: + mas->rx = NULL; dma_release_channel(mas->tx); - mas->tx = NULL; err_tx: - mas->rx = NULL; + mas->tx = NULL; return ret; } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index b23e675953e1..fdd530b150a7 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -3099,12 +3099,6 @@ void spi_unregister_controller(struct spi_controller *ctlr) device_del(&ctlr->dev); - /* Release the last reference on the controller if its 
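The spi-geni-qcom hunk above is worth noting as a usage rule: dev_err_probe() expects the error code itself, for example PTR_ERR(chan), not the boolean result of IS_ERR(), and it returns that code so probe deferral stays quiet in the log. A minimal sketch under that assumption; example_request_tx_chan() is a hypothetical helper:

/* Illustrative sketch only; example_request_tx_chan() is hypothetical. */
static int example_request_tx_chan(struct device *dev, struct dma_chan **out)
{
        struct dma_chan *ch = dma_request_chan(dev, "tx");

        if (IS_ERR(ch))
                /* Pass the errno, not IS_ERR(); dev_err_probe() returns it. */
                return dev_err_probe(dev, PTR_ERR(ch),
                                     "Failed to get tx DMA ch\n");

        *out = ch;
        return 0;
}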
driver - * has not yet been converted to devm_spi_alloc_master/slave(). - */ - if (!ctlr->devm_allocated) - put_device(&ctlr->dev); - /* free bus id */ mutex_lock(&board_lock); if (found == ctlr) @@ -3113,6 +3107,12 @@ void spi_unregister_controller(struct spi_controller *ctlr) if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) mutex_unlock(&ctlr->add_lock); + + /* Release the last reference on the controller if its driver + * has not yet been converted to devm_spi_alloc_master/slave(). + */ + if (!ctlr->devm_allocated) + put_device(&ctlr->dev); } EXPORT_SYMBOL_GPL(spi_unregister_controller); diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 59af251e7576..7fec86946131 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -66,8 +66,6 @@ source "drivers/staging/gdm724x/Kconfig" source "drivers/staging/fwserial/Kconfig" -source "drivers/staging/netlogic/Kconfig" - source "drivers/staging/gs_fpgaboot/Kconfig" source "drivers/staging/unisys/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 76f413470bc8..e66e19c45425 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_RTL8723BS) += rtl8723bs/ obj-$(CONFIG_R8712U) += rtl8712/ obj-$(CONFIG_R8188EU) += r8188eu/ obj-$(CONFIG_RTS5208) += rts5208/ -obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/ obj-$(CONFIG_OCTEON_ETHERNET) += octeon/ obj-$(CONFIG_OCTEON_USB) += octeon-usb/ obj-$(CONFIG_VT6655) += vt6655/ diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c index cf263a58a148..6fd549a424d5 100644 --- a/drivers/staging/fbtft/fb_ssd1351.c +++ b/drivers/staging/fbtft/fb_ssd1351.c @@ -187,7 +187,6 @@ static struct fbtft_display display = { }, }; -#ifdef CONFIG_FB_BACKLIGHT static int update_onboard_backlight(struct backlight_device *bd) { struct fbtft_par *par = bl_get_data(bd); @@ -231,9 +230,6 @@ static void register_onboard_backlight(struct fbtft_par *par) if (!par->fbtftops.unregister_backlight) par->fbtftops.unregister_backlight = fbtft_unregister_backlight; } -#else -static void register_onboard_backlight(struct fbtft_par *par) { }; -#endif FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display); diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index ecb5f75f6dd5..f2684d2d6851 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -128,7 +128,6 @@ static int fbtft_request_gpios(struct fbtft_par *par) return 0; } -#ifdef CONFIG_FB_BACKLIGHT static int fbtft_backlight_update_status(struct backlight_device *bd) { struct fbtft_par *par = bl_get_data(bd); @@ -161,6 +160,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par) par->info->bl_dev = NULL; } } +EXPORT_SYMBOL(fbtft_unregister_backlight); static const struct backlight_ops fbtft_bl_ops = { .get_brightness = fbtft_backlight_get_brightness, @@ -198,12 +198,7 @@ void fbtft_register_backlight(struct fbtft_par *par) if (!par->fbtftops.unregister_backlight) par->fbtftops.unregister_backlight = fbtft_unregister_backlight; } -#else -void fbtft_register_backlight(struct fbtft_par *par) { }; -void fbtft_unregister_backlight(struct fbtft_par *par) { }; -#endif EXPORT_SYMBOL(fbtft_register_backlight); -EXPORT_SYMBOL(fbtft_unregister_backlight); static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) @@ -853,13 +848,11 @@ int fbtft_register_framebuffer(struct fb_info *fb_info) fb_info->fix.smem_len >> 10, text1, HZ / fb_info->fbdefio->delay, text2); -#ifdef 
CONFIG_FB_BACKLIGHT /* Turn on backlight if available */ if (fb_info->bl_dev) { fb_info->bl_dev->props.power = FB_BLANK_UNBLANK; fb_info->bl_dev->ops->update_status(fb_info->bl_dev); } -#endif return 0; diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c index 1ed4772d2771..843760675876 100644 --- a/drivers/staging/greybus/audio_helper.c +++ b/drivers/staging/greybus/audio_helper.c @@ -192,7 +192,11 @@ int gbaudio_remove_component_controls(struct snd_soc_component *component, unsigned int num_controls) { struct snd_card *card = component->card->snd_card; + int err; - return gbaudio_remove_controls(card, component->dev, controls, - num_controls, component->name_prefix); + down_write(&card->controls_rwsem); + err = gbaudio_remove_controls(card, component->dev, controls, + num_controls, component->name_prefix); + up_write(&card->controls_rwsem); + return err; } diff --git a/drivers/staging/netlogic/Kconfig b/drivers/staging/netlogic/Kconfig deleted file mode 100644 index e1712606ee3c..000000000000 --- a/drivers/staging/netlogic/Kconfig +++ /dev/null @@ -1,9 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -config NETLOGIC_XLR_NET - tristate "Netlogic XLR/XLS network device" - depends on CPU_XLR - depends on NETDEVICES - select PHYLIB - help - This driver support Netlogic XLR/XLS on chip gigabit - Ethernet. diff --git a/drivers/staging/netlogic/Makefile b/drivers/staging/netlogic/Makefile deleted file mode 100644 index 7e2902af26a3..000000000000 --- a/drivers/staging/netlogic/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_NETLOGIC_XLR_NET) += xlr_net.o platform_net.o diff --git a/drivers/staging/netlogic/TODO b/drivers/staging/netlogic/TODO deleted file mode 100644 index 20e22ecb9903..000000000000 --- a/drivers/staging/netlogic/TODO +++ /dev/null @@ -1,11 +0,0 @@ -* Implementing 64bit stat counter in software -* All memory allocation should be changed to DMA allocations -* Changing comments into linux standard format - -Please send patches -To: -Ganesan Ramalingam <ganesanr@broadcom.com> -Greg Kroah-Hartman <gregkh@linuxfoundation.org> -Cc: -Jayachandran Chandrashekaran Nair <jchandra@broadcom.com> - diff --git a/drivers/staging/netlogic/platform_net.c b/drivers/staging/netlogic/platform_net.c deleted file mode 100644 index 8be9d0b0c22c..000000000000 --- a/drivers/staging/netlogic/platform_net.c +++ /dev/null @@ -1,219 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -/* - * Copyright (c) 2003-2012 Broadcom Corporation - * All Rights Reserved - */ - -#include <linux/device.h> -#include <linux/platform_device.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/delay.h> -#include <linux/ioport.h> -#include <linux/resource.h> -#include <linux/phy.h> - -#include <asm/netlogic/haldefs.h> -#include <asm/netlogic/common.h> -#include <asm/netlogic/xlr/fmn.h> -#include <asm/netlogic/xlr/xlr.h> -#include <asm/netlogic/psb-bootinfo.h> -#include <asm/netlogic/xlr/pic.h> -#include <asm/netlogic/xlr/iomap.h> - -#include "platform_net.h" - -/* Linux Net */ -#define MAX_NUM_GMAC 8 -#define MAX_NUM_XLS_GMAC 8 -#define MAX_NUM_XLR_GMAC 4 - -static u32 xlr_gmac_offsets[] = { - NETLOGIC_IO_GMAC_0_OFFSET, NETLOGIC_IO_GMAC_1_OFFSET, - NETLOGIC_IO_GMAC_2_OFFSET, NETLOGIC_IO_GMAC_3_OFFSET, - NETLOGIC_IO_GMAC_4_OFFSET, NETLOGIC_IO_GMAC_5_OFFSET, - NETLOGIC_IO_GMAC_6_OFFSET, NETLOGIC_IO_GMAC_7_OFFSET -}; - -static u32 xlr_gmac_irqs[] = { PIC_GMAC_0_IRQ, PIC_GMAC_1_IRQ, - PIC_GMAC_2_IRQ, 
PIC_GMAC_3_IRQ, - PIC_GMAC_4_IRQ, PIC_GMAC_5_IRQ, - PIC_GMAC_6_IRQ, PIC_GMAC_7_IRQ -}; - -static struct resource xlr_net0_res[8]; -static struct resource xlr_net1_res[8]; -static u32 __iomem *gmac4_addr; -static u32 __iomem *gpio_addr; - -static void xlr_resource_init(struct resource *res, int offset, int irq) -{ - res->name = "gmac"; - - res->start = CPHYSADDR(nlm_mmio_base(offset)); - res->end = res->start + 0xfff; - res->flags = IORESOURCE_MEM; - - res++; - res->name = "gmac"; - res->start = irq; - res->end = irq; - res->flags = IORESOURCE_IRQ; -} - -static struct platform_device *gmac_controller2_init(void *gmac0_addr) -{ - int mac; - static struct xlr_net_data ndata1 = { - .phy_interface = PHY_INTERFACE_MODE_SGMII, - .rfr_station = FMN_STNID_GMAC1_FR_0, - .bucket_size = xlr_board_fmn_config.bucket_size, - .gmac_fmn_info = &xlr_board_fmn_config.gmac[1], - }; - - static struct platform_device xlr_net_dev1 = { - .name = "xlr-net", - .id = 1, - .dev.platform_data = &ndata1, - }; - - gmac4_addr = - ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_4_OFFSET)), - 0xfff); - ndata1.serdes_addr = gmac4_addr; - ndata1.pcs_addr = gmac4_addr; - ndata1.mii_addr = gmac0_addr; - ndata1.gpio_addr = gpio_addr; - ndata1.cpu_mask = nlm_current_node()->coremask; - - xlr_net_dev1.resource = xlr_net1_res; - - for (mac = 0; mac < 4; mac++) { - ndata1.tx_stnid[mac] = FMN_STNID_GMAC1_TX0 + mac; - ndata1.phy_addr[mac] = mac + 4 + 0x10; - - xlr_resource_init(&xlr_net1_res[mac * 2], - xlr_gmac_offsets[mac + 4], - xlr_gmac_irqs[mac + 4]); - } - xlr_net_dev1.num_resources = 8; - - return &xlr_net_dev1; -} - -static void xls_gmac_init(void) -{ - int mac; - struct platform_device *xlr_net_dev1; - void __iomem *gmac0_addr = - ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)), - 0xfff); - - static struct xlr_net_data ndata0 = { - .rfr_station = FMN_STNID_GMACRFR_0, - .bucket_size = xlr_board_fmn_config.bucket_size, - .gmac_fmn_info = &xlr_board_fmn_config.gmac[0], - }; - - static struct platform_device xlr_net_dev0 = { - .name = "xlr-net", - .id = 0, - }; - xlr_net_dev0.dev.platform_data = &ndata0; - ndata0.serdes_addr = gmac0_addr; - ndata0.pcs_addr = gmac0_addr; - ndata0.mii_addr = gmac0_addr; - - /* Passing GPIO base for serdes init. 
Only needed on sgmii ports */ - gpio_addr = - ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET)), - 0xfff); - ndata0.gpio_addr = gpio_addr; - ndata0.cpu_mask = nlm_current_node()->coremask; - - xlr_net_dev0.resource = xlr_net0_res; - - switch (nlm_prom_info.board_major_version) { - case 12: - /* first block RGMII or XAUI, use RGMII */ - ndata0.phy_interface = PHY_INTERFACE_MODE_RGMII; - ndata0.tx_stnid[0] = FMN_STNID_GMAC0_TX0; - ndata0.phy_addr[0] = 0; - - xlr_net_dev0.num_resources = 2; - - xlr_resource_init(&xlr_net0_res[0], xlr_gmac_offsets[0], - xlr_gmac_irqs[0]); - platform_device_register(&xlr_net_dev0); - - /* second block is XAUI, not supported yet */ - break; - default: - /* default XLS config, all ports SGMII */ - ndata0.phy_interface = PHY_INTERFACE_MODE_SGMII; - for (mac = 0; mac < 4; mac++) { - ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac; - ndata0.phy_addr[mac] = mac + 0x10; - - xlr_resource_init(&xlr_net0_res[mac * 2], - xlr_gmac_offsets[mac], - xlr_gmac_irqs[mac]); - } - xlr_net_dev0.num_resources = 8; - platform_device_register(&xlr_net_dev0); - - xlr_net_dev1 = gmac_controller2_init(gmac0_addr); - platform_device_register(xlr_net_dev1); - } -} - -static void xlr_gmac_init(void) -{ - int mac; - - /* assume all GMACs for now */ - static struct xlr_net_data ndata0 = { - .phy_interface = PHY_INTERFACE_MODE_RGMII, - .serdes_addr = NULL, - .pcs_addr = NULL, - .rfr_station = FMN_STNID_GMACRFR_0, - .bucket_size = xlr_board_fmn_config.bucket_size, - .gmac_fmn_info = &xlr_board_fmn_config.gmac[0], - .gpio_addr = NULL, - }; - - static struct platform_device xlr_net_dev0 = { - .name = "xlr-net", - .id = 0, - .dev.platform_data = &ndata0, - }; - ndata0.mii_addr = - ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)), - 0xfff); - - ndata0.cpu_mask = nlm_current_node()->coremask; - - for (mac = 0; mac < MAX_NUM_XLR_GMAC; mac++) { - ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac; - ndata0.phy_addr[mac] = mac; - xlr_resource_init(&xlr_net0_res[mac * 2], xlr_gmac_offsets[mac], - xlr_gmac_irqs[mac]); - } - xlr_net_dev0.num_resources = 8; - xlr_net_dev0.resource = xlr_net0_res; - - platform_device_register(&xlr_net_dev0); -} - -static int __init xlr_net_init(void) -{ - if (nlm_chip_is_xls()) - xls_gmac_init(); - else - xlr_gmac_init(); - - return 0; -} - -arch_initcall(xlr_net_init); diff --git a/drivers/staging/netlogic/platform_net.h b/drivers/staging/netlogic/platform_net.h deleted file mode 100644 index c8d4c13424c6..000000000000 --- a/drivers/staging/netlogic/platform_net.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ -/* - * Copyright (c) 2003-2012 Broadcom Corporation - * All Rights Reserved - */ - -#define PORTS_PER_CONTROLLER 4 - -struct xlr_net_data { - int cpu_mask; - u32 __iomem *mii_addr; - u32 __iomem *serdes_addr; - u32 __iomem *pcs_addr; - u32 __iomem *gpio_addr; - int phy_interface; - int rfr_station; - int tx_stnid[PORTS_PER_CONTROLLER]; - int *bucket_size; - int phy_addr[PORTS_PER_CONTROLLER]; - struct xlr_fmn_info *gmac_fmn_info; -}; diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c deleted file mode 100644 index 69ea61faf8fa..000000000000 --- a/drivers/staging/netlogic/xlr_net.c +++ /dev/null @@ -1,1080 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -/* - * Copyright (c) 2003-2012 Broadcom Corporation - * All Rights Reserved - */ - -#include <linux/phy.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/smp.h> -#include 
<linux/ethtool.h> -#include <linux/module.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/jiffies.h> -#include <linux/interrupt.h> -#include <linux/platform_device.h> - -#include <asm/mipsregs.h> -/* - * fmn.h - For FMN credit configuration and registering fmn_handler. - * FMN is communication mechanism that allows processing agents within - * XLR/XLS to communicate each other. - */ -#include <asm/netlogic/xlr/fmn.h> - -#include "platform_net.h" -#include "xlr_net.h" - -/* - * The readl/writel implementation byteswaps on XLR/XLS, so - * we need to use __raw_ IO to read the NAE registers - * because they are in the big-endian MMIO area on the SoC. - */ -static inline void xlr_nae_wreg(u32 __iomem *base, unsigned int reg, u32 val) -{ - __raw_writel(val, base + reg); -} - -static inline u32 xlr_nae_rdreg(u32 __iomem *base, unsigned int reg) -{ - return __raw_readl(base + reg); -} - -static inline void xlr_reg_update(u32 *base_addr, u32 off, u32 val, u32 mask) -{ - u32 tmp; - - tmp = xlr_nae_rdreg(base_addr, off); - xlr_nae_wreg(base_addr, off, (tmp & ~mask) | (val & mask)); -} - -#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES - -static int send_to_rfr_fifo(struct xlr_net_priv *priv, void *addr) -{ - struct nlm_fmn_msg msg; - int ret = 0, num_try = 0, stnid; - unsigned long paddr, mflags; - - paddr = virt_to_bus(addr); - msg.msg0 = (u64)paddr & 0xffffffffe0ULL; - msg.msg1 = 0; - msg.msg2 = 0; - msg.msg3 = 0; - stnid = priv->nd->rfr_station; - do { - mflags = nlm_cop2_enable_irqsave(); - ret = nlm_fmn_send(1, 0, stnid, &msg); - nlm_cop2_disable_irqrestore(mflags); - if (ret == 0) - return 0; - } while (++num_try < 10000); - - netdev_err(priv->ndev, "Send to RFR failed in RX path\n"); - return ret; -} - -static inline unsigned char *xlr_alloc_skb(void) -{ - struct sk_buff *skb; - int buf_len = sizeof(struct sk_buff *); - unsigned char *skb_data; - - /* skb->data is cache aligned */ - skb = alloc_skb(XLR_RX_BUF_SIZE, GFP_ATOMIC); - if (!skb) - return NULL; - skb_data = skb->data; - skb_reserve(skb, MAC_SKB_BACK_PTR_SIZE); - memcpy(skb_data, &skb, buf_len); - - return skb->data; -} - -static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code, - struct nlm_fmn_msg *msg, void *arg) -{ - struct sk_buff *skb; - void *skb_data = NULL; - struct net_device *ndev; - struct xlr_net_priv *priv; - u32 port, length; - unsigned char *addr; - struct xlr_adapter *adapter = arg; - - length = (msg->msg0 >> 40) & 0x3fff; - if (length == 0) { - addr = bus_to_virt(msg->msg0 & 0xffffffffffULL); - addr = addr - MAC_SKB_BACK_PTR_SIZE; - skb = (struct sk_buff *)(*(unsigned long *)addr); - dev_kfree_skb_any((struct sk_buff *)addr); - } else { - addr = (unsigned char *) - bus_to_virt(msg->msg0 & 0xffffffffe0ULL); - length = length - BYTE_OFFSET - MAC_CRC_LEN; - port = ((int)msg->msg0) & 0x0f; - addr = addr - MAC_SKB_BACK_PTR_SIZE; - skb = (struct sk_buff *)(*(unsigned long *)addr); - skb->dev = adapter->netdev[port]; - if (!skb->dev) - return; - ndev = skb->dev; - priv = netdev_priv(ndev); - - /* 16 byte IP header align */ - skb_reserve(skb, BYTE_OFFSET); - skb_put(skb, length); - skb->protocol = eth_type_trans(skb, skb->dev); - netif_rx(skb); - /* Fill rx ring */ - skb_data = xlr_alloc_skb(); - if (skb_data) - send_to_rfr_fifo(priv, skb_data); - } -} - -static struct phy_device *xlr_get_phydev(struct xlr_net_priv *priv) -{ - return mdiobus_get_phy(priv->mii_bus, priv->phy_addr); -} - -/* - * Ethtool operation - */ -static int xlr_get_link_ksettings(struct net_device *ndev, - 
struct ethtool_link_ksettings *ecmd) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = xlr_get_phydev(priv); - - if (!phydev) - return -ENODEV; - - phy_ethtool_ksettings_get(phydev, ecmd); - - return 0; -} - -static int xlr_set_link_ksettings(struct net_device *ndev, - const struct ethtool_link_ksettings *ecmd) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = xlr_get_phydev(priv); - - if (!phydev) - return -ENODEV; - return phy_ethtool_ksettings_set(phydev, ecmd); -} - -static const struct ethtool_ops xlr_ethtool_ops = { - .get_link_ksettings = xlr_get_link_ksettings, - .set_link_ksettings = xlr_set_link_ksettings, -}; - -/* - * Net operations - */ -static int xlr_net_fill_rx_ring(struct net_device *ndev) -{ - void *skb_data; - struct xlr_net_priv *priv = netdev_priv(ndev); - int i; - - for (i = 0; i < MAX_FRIN_SPILL / 4; i++) { - skb_data = xlr_alloc_skb(); - if (!skb_data) - return -ENOMEM; - send_to_rfr_fifo(priv, skb_data); - } - netdev_info(ndev, "Rx ring setup done\n"); - return 0; -} - -static int xlr_net_open(struct net_device *ndev) -{ - u32 err; - struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = xlr_get_phydev(priv); - - /* schedule a link state check */ - phy_start(phydev); - - err = phy_start_aneg(phydev); - if (err) { - pr_err("Autoneg failed\n"); - return err; - } - /* Setup the speed from PHY to internal reg*/ - xlr_set_gmac_speed(priv); - - netif_tx_start_all_queues(ndev); - - return 0; -} - -static int xlr_net_stop(struct net_device *ndev) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = xlr_get_phydev(priv); - - phy_stop(phydev); - netif_tx_stop_all_queues(ndev); - return 0; -} - -static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr, - struct sk_buff *skb) -{ - unsigned long physkb = virt_to_phys(skb); - int cpu_core = nlm_core_id(); - int fr_stn_id = cpu_core * 8 + XLR_FB_STN; /* FB to 6th bucket */ - - msg->msg0 = (((u64)1 << 63) | /* End of packet descriptor */ - ((u64)127 << 54) | /* No Free back */ - (u64)skb->len << 40 | /* Length of data */ - ((u64)addr)); - msg->msg1 = (((u64)1 << 63) | - ((u64)fr_stn_id << 54) | /* Free back id */ - (u64)0 << 40 | /* Set len to 0 */ - ((u64)physkb & 0xffffffff)); /* 32bit address */ - msg->msg2 = 0; - msg->msg3 = 0; -} - -static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb, - struct net_device *ndev) -{ - struct nlm_fmn_msg msg; - struct xlr_net_priv *priv = netdev_priv(ndev); - int ret; - u32 flags; - - xlr_make_tx_desc(&msg, virt_to_phys(skb->data), skb); - flags = nlm_cop2_enable_irqsave(); - ret = nlm_fmn_send(2, 0, priv->tx_stnid, &msg); - nlm_cop2_disable_irqrestore(flags); - if (ret) - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; -} - -static void xlr_hw_set_mac_addr(struct net_device *ndev) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - - /* set mac station address */ - xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0, - ((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) | - (ndev->dev_addr[3] << 8) | (ndev->dev_addr[2]))); - xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0 + 1, - ((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16))); - - xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2 + 1, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3 + 1, 0xffffffff); - - xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG, - (1 << 
O_MAC_FILTER_CONFIG__BROADCAST_EN) | - (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) | - (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)); - - if (priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII || - priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII) - xlr_reg_update(priv->base_addr, R_IPG_IFG, MAC_B2B_IPG, 0x7f); -} - -static int xlr_net_set_mac_addr(struct net_device *ndev, void *data) -{ - int err; - - err = eth_mac_addr(ndev, data); - if (err) - return err; - xlr_hw_set_mac_addr(ndev); - return 0; -} - -static void xlr_set_rx_mode(struct net_device *ndev) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - u32 regval; - - regval = xlr_nae_rdreg(priv->base_addr, R_MAC_FILTER_CONFIG); - - if (ndev->flags & IFF_PROMISC) { - regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) | - (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) | - (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) | - (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN); - } else { - regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) | - (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN)); - } - - xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG, regval); -} - -static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - - stats->rx_packets = xlr_nae_rdreg(priv->base_addr, RX_PACKET_COUNTER); - stats->tx_packets = xlr_nae_rdreg(priv->base_addr, TX_PACKET_COUNTER); - stats->rx_bytes = xlr_nae_rdreg(priv->base_addr, RX_BYTE_COUNTER); - stats->tx_bytes = xlr_nae_rdreg(priv->base_addr, TX_BYTE_COUNTER); - stats->tx_errors = xlr_nae_rdreg(priv->base_addr, TX_FCS_ERROR_COUNTER); - stats->rx_dropped = xlr_nae_rdreg(priv->base_addr, - RX_DROP_PACKET_COUNTER); - stats->tx_dropped = xlr_nae_rdreg(priv->base_addr, - TX_DROP_FRAME_COUNTER); - - stats->multicast = xlr_nae_rdreg(priv->base_addr, - RX_MULTICAST_PACKET_COUNTER); - stats->collisions = xlr_nae_rdreg(priv->base_addr, - TX_TOTAL_COLLISION_COUNTER); - - stats->rx_length_errors = xlr_nae_rdreg(priv->base_addr, - RX_FRAME_LENGTH_ERROR_COUNTER); - stats->rx_over_errors = xlr_nae_rdreg(priv->base_addr, - RX_DROP_PACKET_COUNTER); - stats->rx_crc_errors = xlr_nae_rdreg(priv->base_addr, - RX_FCS_ERROR_COUNTER); - stats->rx_frame_errors = xlr_nae_rdreg(priv->base_addr, - RX_ALIGNMENT_ERROR_COUNTER); - - stats->rx_fifo_errors = xlr_nae_rdreg(priv->base_addr, - RX_DROP_PACKET_COUNTER); - stats->rx_missed_errors = xlr_nae_rdreg(priv->base_addr, - RX_CARRIER_SENSE_ERROR_COUNTER); - - stats->rx_errors = (stats->rx_over_errors + stats->rx_crc_errors + - stats->rx_frame_errors + stats->rx_fifo_errors + - stats->rx_missed_errors); - - stats->tx_aborted_errors = xlr_nae_rdreg(priv->base_addr, - TX_EXCESSIVE_COLLISION_PACKET_COUNTER); - stats->tx_carrier_errors = xlr_nae_rdreg(priv->base_addr, - TX_DROP_FRAME_COUNTER); - stats->tx_fifo_errors = xlr_nae_rdreg(priv->base_addr, - TX_DROP_FRAME_COUNTER); -} - -static const struct net_device_ops xlr_netdev_ops = { - .ndo_open = xlr_net_open, - .ndo_stop = xlr_net_stop, - .ndo_start_xmit = xlr_net_start_xmit, - .ndo_select_queue = dev_pick_tx_cpu_id, - .ndo_set_mac_address = xlr_net_set_mac_addr, - .ndo_set_rx_mode = xlr_set_rx_mode, - .ndo_get_stats64 = xlr_stats, -}; - -/* - * Gmac init - */ -static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0, - int reg_start_1, int reg_size, int size) -{ - void *spill; - u32 *base; - unsigned long phys_addr; - u32 spill_size; - - base = priv->base_addr; - spill_size = size; - spill = kmalloc(spill_size + SMP_CACHE_BYTES, GFP_KERNEL); - if (!spill) - 
return ZERO_SIZE_PTR; - - spill = PTR_ALIGN(spill, SMP_CACHE_BYTES); - phys_addr = virt_to_phys(spill); - dev_dbg(&priv->ndev->dev, "Allocated spill %d bytes at %lx\n", - size, phys_addr); - xlr_nae_wreg(base, reg_start_0, (phys_addr >> 5) & 0xffffffff); - xlr_nae_wreg(base, reg_start_1, ((u64)phys_addr >> 37) & 0x07); - xlr_nae_wreg(base, reg_size, spill_size); - - return spill; -} - -/* - * Configure the 6 FIFO's that are used by the network accelarator to - * communicate with the rest of the XLx device. 4 of the FIFO's are for - * packets from NA --> cpu (called Class FIFO's) and 2 are for feeding - * the NA with free descriptors. - */ -static void xlr_config_fifo_spill_area(struct xlr_net_priv *priv) -{ - priv->frin_spill = xlr_config_spill(priv, - R_REG_FRIN_SPILL_MEM_START_0, - R_REG_FRIN_SPILL_MEM_START_1, - R_REG_FRIN_SPILL_MEM_SIZE, - MAX_FRIN_SPILL * sizeof(u64)); - priv->frout_spill = xlr_config_spill(priv, - R_FROUT_SPILL_MEM_START_0, - R_FROUT_SPILL_MEM_START_1, - R_FROUT_SPILL_MEM_SIZE, - MAX_FROUT_SPILL * sizeof(u64)); - priv->class_0_spill = xlr_config_spill(priv, - R_CLASS0_SPILL_MEM_START_0, - R_CLASS0_SPILL_MEM_START_1, - R_CLASS0_SPILL_MEM_SIZE, - MAX_CLASS_0_SPILL * sizeof(u64)); - priv->class_1_spill = xlr_config_spill(priv, - R_CLASS1_SPILL_MEM_START_0, - R_CLASS1_SPILL_MEM_START_1, - R_CLASS1_SPILL_MEM_SIZE, - MAX_CLASS_1_SPILL * sizeof(u64)); - priv->class_2_spill = xlr_config_spill(priv, - R_CLASS2_SPILL_MEM_START_0, - R_CLASS2_SPILL_MEM_START_1, - R_CLASS2_SPILL_MEM_SIZE, - MAX_CLASS_2_SPILL * sizeof(u64)); - priv->class_3_spill = xlr_config_spill(priv, - R_CLASS3_SPILL_MEM_START_0, - R_CLASS3_SPILL_MEM_START_1, - R_CLASS3_SPILL_MEM_SIZE, - MAX_CLASS_3_SPILL * sizeof(u64)); -} - -/* - * Configure PDE to Round-Robin distribution of packets to the - * available cpu - */ -static void xlr_config_pde(struct xlr_net_priv *priv) -{ - int i = 0; - u64 bkt_map = 0; - - /* Each core has 8 buckets(station) */ - for (i = 0; i < hweight32(priv->nd->cpu_mask); i++) - bkt_map |= (0xff << (i * 8)); - - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0, (bkt_map & 0xffffffff)); - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0 + 1, - ((bkt_map >> 32) & 0xffffffff)); - - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1, (bkt_map & 0xffffffff)); - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1 + 1, - ((bkt_map >> 32) & 0xffffffff)); - - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2, (bkt_map & 0xffffffff)); - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2 + 1, - ((bkt_map >> 32) & 0xffffffff)); - - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3, (bkt_map & 0xffffffff)); - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3 + 1, - ((bkt_map >> 32) & 0xffffffff)); -} - -/* - * Setup the Message ring credits, bucket size and other - * common configuration - */ -static int xlr_config_common(struct xlr_net_priv *priv) -{ - struct xlr_fmn_info *gmac = priv->nd->gmac_fmn_info; - int start_stn_id = gmac->start_stn_id; - int end_stn_id = gmac->end_stn_id; - int *bucket_size = priv->nd->bucket_size; - int i, j, err; - - /* Setting non-core MsgBktSize(0x321 - 0x325) */ - for (i = start_stn_id; i <= end_stn_id; i++) { - xlr_nae_wreg(priv->base_addr, - R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id, - bucket_size[i]); - } - - /* - * Setting non-core Credit counter register - * Distributing Gmac's credit to CPU's - */ - for (i = 0; i < 8; i++) { - for (j = 0; j < 8; j++) - xlr_nae_wreg(priv->base_addr, - (R_CC_CPU0_0 + (i * 8)) + j, - gmac->credit_config[(i * 8) + j]); - } - - xlr_nae_wreg(priv->base_addr, R_MSG_TX_THRESHOLD, 3); - 
xlr_nae_wreg(priv->base_addr, R_DMACR0, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_DMACR1, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_DMACR2, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_DMACR3, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_FREEQCARVE, 0); - - err = xlr_net_fill_rx_ring(priv->ndev); - if (err) - return err; - nlm_register_fmn_handler(start_stn_id, end_stn_id, xlr_net_fmn_handler, - priv->adapter); - return 0; -} - -static void xlr_config_translate_table(struct xlr_net_priv *priv) -{ - u32 cpu_mask; - u32 val; - int bkts[32]; /* one bucket is assumed for each cpu */ - int b1, b2, c1, c2, i, j, k; - int use_bkt; - - use_bkt = 0; - cpu_mask = priv->nd->cpu_mask; - - pr_info("Using %s-based distribution\n", - (use_bkt) ? "bucket" : "class"); - j = 0; - for (i = 0; i < 32; i++) { - if ((1 << i) & cpu_mask) { - /* for each cpu, mark the 4+threadid bucket */ - bkts[j] = ((i / 4) * 8) + (i % 4); - j++; - } - } - - /*configure the 128 * 9 Translation table to send to available buckets*/ - k = 0; - c1 = 3; - c2 = 0; - for (i = 0; i < 64; i++) { - /* - * On use_bkt set the b0, b1 are used, else - * the 4 classes are used, here implemented - * a logic to distribute the packets to the - * buckets equally or based on the class - */ - c1 = (c1 + 1) & 3; - c2 = (c1 + 1) & 3; - b1 = bkts[k]; - k = (k + 1) % j; - b2 = bkts[k]; - k = (k + 1) % j; - - val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) | - (c2 << 7) | (b2 << 1) | (use_bkt << 0)); - dev_dbg(&priv->ndev->dev, "Table[%d] b1=%d b2=%d c1=%d c2=%d\n", - i, b1, b2, c1, c2); - xlr_nae_wreg(priv->base_addr, R_TRANSLATETABLE + i, val); - c1 = c2; - } -} - -static void xlr_config_parser(struct xlr_net_priv *priv) -{ - u32 val; - - /* Mark it as ETHERNET type */ - xlr_nae_wreg(priv->base_addr, R_L2TYPE_0, 0x01); - - /* Use 7bit CRChash for flow classification with 127 as CRC polynomial*/ - xlr_nae_wreg(priv->base_addr, R_PARSERCONFIGREG, - ((0x7f << 8) | (1 << 1))); - - /* configure the parser : L2 Type is configured in the bootloader */ - /* extract IP: src, dest protocol */ - xlr_nae_wreg(priv->base_addr, R_L3CTABLE, - (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) | - (0x0800 << 0)); - xlr_nae_wreg(priv->base_addr, R_L3CTABLE + 1, - (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) | - (16 << 4) | 4); - - /* Configure to extract SRC port and Dest port for TCP and UDP pkts */ - xlr_nae_wreg(priv->base_addr, R_L4CTABLE, 6); - xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 2, 17); - val = ((0 << 21) | (2 << 17) | (2 << 11) | (2 << 7)); - xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 1, val); - xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 3, val); - - xlr_config_translate_table(priv); -} - -static int xlr_phy_write(u32 *base_addr, int phy_addr, int regnum, u16 val) -{ - unsigned long timeout, stoptime, checktime; - int timedout; - - /* 100ms timeout*/ - timeout = msecs_to_jiffies(100); - stoptime = jiffies + timeout; - timedout = 0; - - xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, (phy_addr << 8) | regnum); - - /* Write the data which starts the write cycle */ - xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32)val); - - /* poll for the read cycle to complete */ - while (!timedout) { - checktime = jiffies; - if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0) - break; - timedout = time_after(checktime, stoptime); - } - if (timedout) { - pr_info("Phy device write err: device busy"); - return -EBUSY; - } - - return 0; -} - -static int xlr_phy_read(u32 *base_addr, int phy_addr, int regnum) -{ - unsigned long timeout, stoptime, 
checktime; - int timedout; - - /* 100ms timeout*/ - timeout = msecs_to_jiffies(100); - stoptime = jiffies + timeout; - timedout = 0; - - /* setup the phy reg to be used */ - xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, - (phy_addr << 8) | (regnum << 0)); - - /* Issue the read command */ - xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND, - (1 << O_MII_MGMT_COMMAND__rstat)); - - /* poll for the read cycle to complete */ - while (!timedout) { - checktime = jiffies; - if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0) - break; - timedout = time_after(checktime, stoptime); - } - if (timedout) { - pr_info("Phy device read err: device busy"); - return -EBUSY; - } - - /* clear the read cycle */ - xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND, 0); - - /* Read the data */ - return xlr_nae_rdreg(base_addr, R_MII_MGMT_STATUS); -} - -static int xlr_mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 val) -{ - struct xlr_net_priv *priv = bus->priv; - int ret; - - ret = xlr_phy_write(priv->mii_addr, phy_addr, regnum, val); - dev_dbg(&priv->ndev->dev, "mii_write phy %d : %d <- %x [%x]\n", - phy_addr, regnum, val, ret); - return ret; -} - -static int xlr_mii_read(struct mii_bus *bus, int phy_addr, int regnum) -{ - struct xlr_net_priv *priv = bus->priv; - int ret; - - ret = xlr_phy_read(priv->mii_addr, phy_addr, regnum); - dev_dbg(&priv->ndev->dev, "mii_read phy %d : %d [%x]\n", - phy_addr, regnum, ret); - return ret; -} - -/* - * XLR ports are RGMII. XLS ports are SGMII mostly except the port0, - * which can be configured either SGMII or RGMII, considered SGMII - * by default, if board setup to RGMII the port_type need to set - * accordingly.Serdes and PCS layer need to configured for SGMII - */ -static void xlr_sgmii_init(struct xlr_net_priv *priv) -{ - int phy; - - xlr_phy_write(priv->serdes_addr, 26, 0, 0x6DB0); - xlr_phy_write(priv->serdes_addr, 26, 1, 0xFFFF); - xlr_phy_write(priv->serdes_addr, 26, 2, 0xB6D0); - xlr_phy_write(priv->serdes_addr, 26, 3, 0x00FF); - xlr_phy_write(priv->serdes_addr, 26, 4, 0x0000); - xlr_phy_write(priv->serdes_addr, 26, 5, 0x0000); - xlr_phy_write(priv->serdes_addr, 26, 6, 0x0005); - xlr_phy_write(priv->serdes_addr, 26, 7, 0x0001); - xlr_phy_write(priv->serdes_addr, 26, 8, 0x0000); - xlr_phy_write(priv->serdes_addr, 26, 9, 0x0000); - xlr_phy_write(priv->serdes_addr, 26, 10, 0x0000); - - /* program GPIO values for serdes init parameters */ - xlr_nae_wreg(priv->gpio_addr, 0x20, 0x7e6802); - xlr_nae_wreg(priv->gpio_addr, 0x10, 0x7104); - - xlr_nae_wreg(priv->gpio_addr, 0x22, 0x7e6802); - xlr_nae_wreg(priv->gpio_addr, 0x21, 0x7104); - - /* enable autoneg - more magic */ - phy = priv->phy_addr % 4 + 27; - xlr_phy_write(priv->pcs_addr, phy, 0, 0x1000); - xlr_phy_write(priv->pcs_addr, phy, 0, 0x0200); -} - -void xlr_set_gmac_speed(struct xlr_net_priv *priv) -{ - struct phy_device *phydev = xlr_get_phydev(priv); - int speed; - - if (phydev->interface == PHY_INTERFACE_MODE_SGMII) - xlr_sgmii_init(priv); - - if (phydev->speed != priv->phy_speed) { - speed = phydev->speed; - if (speed == SPEED_1000) { - /* Set interface to Byte mode */ - xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217); - priv->phy_speed = speed; - } else if (speed == SPEED_100 || speed == SPEED_10) { - /* Set interface to Nibble mode */ - xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7117); - priv->phy_speed = speed; - } - /* Set SGMII speed in Interface control reg */ - if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { - if (speed == SPEED_10) - xlr_nae_wreg(priv->base_addr, - R_INTERFACE_CONTROL, 
- SGMII_SPEED_10); - if (speed == SPEED_100) - xlr_nae_wreg(priv->base_addr, - R_INTERFACE_CONTROL, - SGMII_SPEED_100); - if (speed == SPEED_1000) - xlr_nae_wreg(priv->base_addr, - R_INTERFACE_CONTROL, - SGMII_SPEED_1000); - } - if (speed == SPEED_10) - xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x2); - if (speed == SPEED_100) - xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x1); - if (speed == SPEED_1000) - xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x0); - } - pr_info("gmac%d : %dMbps\n", priv->port_id, priv->phy_speed); -} - -static void xlr_gmac_link_adjust(struct net_device *ndev) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = xlr_get_phydev(priv); - u32 intreg; - - intreg = xlr_nae_rdreg(priv->base_addr, R_INTREG); - if (phydev->link) { - if (phydev->speed != priv->phy_speed) { - xlr_set_gmac_speed(priv); - pr_info("gmac%d : Link up\n", priv->port_id); - } - } else { - xlr_set_gmac_speed(priv); - pr_info("gmac%d : Link down\n", priv->port_id); - } -} - -static int xlr_mii_probe(struct xlr_net_priv *priv) -{ - struct phy_device *phydev = xlr_get_phydev(priv); - - if (!phydev) { - pr_err("no PHY found on phy_addr %d\n", priv->phy_addr); - return -ENODEV; - } - - /* Attach MAC to PHY */ - phydev = phy_connect(priv->ndev, phydev_name(phydev), - xlr_gmac_link_adjust, priv->nd->phy_interface); - - if (IS_ERR(phydev)) { - pr_err("could not attach PHY\n"); - return PTR_ERR(phydev); - } - phydev->supported &= (ADVERTISED_10baseT_Full - | ADVERTISED_10baseT_Half - | ADVERTISED_100baseT_Full - | ADVERTISED_100baseT_Half - | ADVERTISED_1000baseT_Full - | ADVERTISED_Autoneg - | ADVERTISED_MII); - - phydev->advertising = phydev->supported; - phy_attached_info(phydev); - return 0; -} - -static int xlr_setup_mdio(struct xlr_net_priv *priv, - struct platform_device *pdev) -{ - int err; - - priv->mii_bus = mdiobus_alloc(); - if (!priv->mii_bus) { - pr_err("mdiobus alloc failed\n"); - return -ENOMEM; - } - - priv->mii_bus->priv = priv; - priv->mii_bus->name = "xlr-mdio"; - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", - priv->mii_bus->name, priv->port_id); - priv->mii_bus->read = xlr_mii_read; - priv->mii_bus->write = xlr_mii_write; - priv->mii_bus->parent = &pdev->dev; - - /* Scan only the enabled address */ - priv->mii_bus->phy_mask = ~(1 << priv->phy_addr); - - /* setting clock divisor to 54 */ - xlr_nae_wreg(priv->base_addr, R_MII_MGMT_CONFIG, 0x7); - - err = mdiobus_register(priv->mii_bus); - if (err) { - mdiobus_free(priv->mii_bus); - pr_err("mdio bus registration failed\n"); - return err; - } - - pr_info("Registered mdio bus id : %s\n", priv->mii_bus->id); - err = xlr_mii_probe(priv); - if (err) { - mdiobus_free(priv->mii_bus); - return err; - } - return 0; -} - -static void xlr_port_enable(struct xlr_net_priv *priv) -{ - u32 prid = (read_c0_prid() & 0xf000); - - /* Setup MAC_CONFIG reg if (xls & rgmii) */ - if ((prid == 0x8000 || prid == 0x4000 || prid == 0xc000) && - priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII) - xlr_reg_update(priv->base_addr, R_RX_CONTROL, - (1 << O_RX_CONTROL__RGMII), - (1 << O_RX_CONTROL__RGMII)); - - /* Rx Tx enable */ - xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1, - ((1 << O_MAC_CONFIG_1__rxen) | - (1 << O_MAC_CONFIG_1__txen) | - (1 << O_MAC_CONFIG_1__rxfc) | - (1 << O_MAC_CONFIG_1__txfc)), - ((1 << O_MAC_CONFIG_1__rxen) | - (1 << O_MAC_CONFIG_1__txen) | - (1 << O_MAC_CONFIG_1__rxfc) | - (1 << O_MAC_CONFIG_1__txfc))); - - /* Setup tx control reg */ - xlr_reg_update(priv->base_addr, R_TX_CONTROL, - ((1 << 
O_TX_CONTROL__TXENABLE) | - (512 << O_TX_CONTROL__TXTHRESHOLD)), 0x3fff); - - /* Setup rx control reg */ - xlr_reg_update(priv->base_addr, R_RX_CONTROL, - 1 << O_RX_CONTROL__RXENABLE, - 1 << O_RX_CONTROL__RXENABLE); -} - -static void xlr_port_disable(struct xlr_net_priv *priv) -{ - /* Setup MAC_CONFIG reg */ - /* Rx Tx disable*/ - xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1, - ((1 << O_MAC_CONFIG_1__rxen) | - (1 << O_MAC_CONFIG_1__txen) | - (1 << O_MAC_CONFIG_1__rxfc) | - (1 << O_MAC_CONFIG_1__txfc)), 0x0); - - /* Setup tx control reg */ - xlr_reg_update(priv->base_addr, R_TX_CONTROL, - ((1 << O_TX_CONTROL__TXENABLE) | - (512 << O_TX_CONTROL__TXTHRESHOLD)), 0); - - /* Setup rx control reg */ - xlr_reg_update(priv->base_addr, R_RX_CONTROL, - 1 << O_RX_CONTROL__RXENABLE, 0); -} - -/* - * Initialization of gmac - */ -static int xlr_gmac_init(struct xlr_net_priv *priv, - struct platform_device *pdev) -{ - int ret; - - pr_info("Initializing the gmac%d\n", priv->port_id); - - xlr_port_disable(priv); - - xlr_nae_wreg(priv->base_addr, R_DESC_PACK_CTRL, - (1 << O_DESC_PACK_CTRL__MAXENTRY) | - (BYTE_OFFSET << O_DESC_PACK_CTRL__BYTEOFFSET) | - (1600 << O_DESC_PACK_CTRL__REGULARSIZE)); - - ret = xlr_setup_mdio(priv, pdev); - if (ret) - return ret; - xlr_port_enable(priv); - - /* Enable Full-duplex/1000Mbps/CRC */ - xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217); - /* speed 2.5Mhz */ - xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x02); - /* Setup Interrupt mask reg */ - xlr_nae_wreg(priv->base_addr, R_INTMASK, (1 << O_INTMASK__TXILLEGAL) | - (1 << O_INTMASK__MDINT) | (1 << O_INTMASK__TXFETCHERROR) | - (1 << O_INTMASK__P2PSPILLECC) | (1 << O_INTMASK__TAGFULL) | - (1 << O_INTMASK__UNDERRUN) | (1 << O_INTMASK__ABORT)); - - /* Clear all stats */ - xlr_reg_update(priv->base_addr, R_STATCTRL, 0, 1 << O_STATCTRL__CLRCNT); - xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2, 1 << 2); - return 0; -} - -static int xlr_net_probe(struct platform_device *pdev) -{ - struct xlr_net_priv *priv = NULL; - struct net_device *ndev; - struct resource *res; - struct xlr_adapter *adapter; - int err, port; - - pr_info("XLR/XLS Ethernet Driver controller %d\n", pdev->id); - /* - * Allocate our adapter data structure and attach it to the device. - */ - adapter = devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL); - if (!adapter) - return -ENOMEM; - - /* - * XLR and XLS have 1 and 2 NAE controller respectively - * Each controller has 4 gmac ports, mapping each controller - * under one parent device, 4 gmac ports under one device. 
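The deleted driver addresses register fields through O_ (bit offset) and W_ (field width) macros and updates them read-modify-write via xlr_reg_update(), as in the xlr_port_enable() calls above. A generic, illustrative sketch of that idiom with hypothetical names:

/* Illustrative sketch only; the example_* helpers are hypothetical. */
static inline u32 example_field_mask(unsigned int offset, unsigned int width)
{
        /* Build a contiguous mask of 'width' bits starting at 'offset'. */
        return (width >= 32 ? ~0u : ((1u << width) - 1)) << offset;
}

static inline u32 example_field_update(u32 regval, unsigned int offset,
                                       unsigned int width, u32 val)
{
        u32 mask = example_field_mask(offset, width);

        /* Clear the field, insert the new value, leave other bits intact. */
        return (regval & ~mask) | ((val << offset) & mask);
}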
- */ - for (port = 0; port < pdev->num_resources / 2; port++) { - ndev = alloc_etherdev_mq(sizeof(struct xlr_net_priv), 32); - if (!ndev) { - dev_err(&pdev->dev, - "Allocation of Ethernet device failed\n"); - return -ENOMEM; - } - - priv = netdev_priv(ndev); - priv->pdev = pdev; - priv->ndev = ndev; - priv->port_id = (pdev->id * 4) + port; - priv->nd = (struct xlr_net_data *)pdev->dev.platform_data; - priv->base_addr = devm_platform_ioremap_resource(pdev, port); - if (IS_ERR(priv->base_addr)) { - err = PTR_ERR(priv->base_addr); - goto err_gmac; - } - priv->adapter = adapter; - adapter->netdev[port] = ndev; - - res = platform_get_resource(pdev, IORESOURCE_IRQ, port); - if (!res) { - dev_err(&pdev->dev, "No irq resource for MAC %d\n", - priv->port_id); - err = -ENODEV; - goto err_gmac; - } - - ndev->irq = res->start; - - priv->phy_addr = priv->nd->phy_addr[port]; - priv->tx_stnid = priv->nd->tx_stnid[port]; - priv->mii_addr = priv->nd->mii_addr; - priv->serdes_addr = priv->nd->serdes_addr; - priv->pcs_addr = priv->nd->pcs_addr; - priv->gpio_addr = priv->nd->gpio_addr; - - ndev->netdev_ops = &xlr_netdev_ops; - ndev->watchdog_timeo = HZ; - - /* Setup Mac address and Rx mode */ - eth_hw_addr_random(ndev); - xlr_hw_set_mac_addr(ndev); - xlr_set_rx_mode(ndev); - - priv->num_rx_desc += MAX_NUM_DESC_SPILL; - ndev->ethtool_ops = &xlr_ethtool_ops; - SET_NETDEV_DEV(ndev, &pdev->dev); - - xlr_config_fifo_spill_area(priv); - /* Configure PDE to Round-Robin pkt distribution */ - xlr_config_pde(priv); - xlr_config_parser(priv); - - /* Call init with respect to port */ - if (strcmp(res->name, "gmac") == 0) { - err = xlr_gmac_init(priv, pdev); - if (err) { - dev_err(&pdev->dev, "gmac%d init failed\n", - priv->port_id); - goto err_gmac; - } - } - - if (priv->port_id == 0 || priv->port_id == 4) { - err = xlr_config_common(priv); - if (err) - goto err_netdev; - } - - err = register_netdev(ndev); - if (err) { - dev_err(&pdev->dev, - "Registering netdev failed for gmac%d\n", - priv->port_id); - goto err_netdev; - } - platform_set_drvdata(pdev, priv); - } - - return 0; - -err_netdev: - mdiobus_free(priv->mii_bus); -err_gmac: - free_netdev(ndev); - return err; -} - -static int xlr_net_remove(struct platform_device *pdev) -{ - struct xlr_net_priv *priv = platform_get_drvdata(pdev); - - unregister_netdev(priv->ndev); - mdiobus_unregister(priv->mii_bus); - mdiobus_free(priv->mii_bus); - free_netdev(priv->ndev); - return 0; -} - -static struct platform_driver xlr_net_driver = { - .probe = xlr_net_probe, - .remove = xlr_net_remove, - .driver = { - .name = "xlr-net", - }, -}; - -module_platform_driver(xlr_net_driver); - -MODULE_AUTHOR("Ganesan Ramalingam <ganesanr@broadcom.com>"); -MODULE_DESCRIPTION("Ethernet driver for Netlogic XLR/XLS"); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_ALIAS("platform:xlr-net"); diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h deleted file mode 100644 index 8365b744f9b3..000000000000 --- a/drivers/staging/netlogic/xlr_net.h +++ /dev/null @@ -1,1079 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ -/* - * Copyright (c) 2003-2012 Broadcom Corporation - * All Rights Reserved - */ - -/* #define MAC_SPLIT_MODE */ - -#define MAC_SPACING 0x400 -#define XGMAC_SPACING 0x400 - -/* PE-MCXMAC register and bit field definitions */ -#define R_MAC_CONFIG_1 0x00 -#define O_MAC_CONFIG_1__srst 31 -#define O_MAC_CONFIG_1__simr 30 -#define O_MAC_CONFIG_1__hrrmc 18 -#define W_MAC_CONFIG_1__hrtmc 2 -#define O_MAC_CONFIG_1__hrrfn 16 -#define 
W_MAC_CONFIG_1__hrtfn 2 -#define O_MAC_CONFIG_1__intlb 8 -#define O_MAC_CONFIG_1__rxfc 5 -#define O_MAC_CONFIG_1__txfc 4 -#define O_MAC_CONFIG_1__srxen 3 -#define O_MAC_CONFIG_1__rxen 2 -#define O_MAC_CONFIG_1__stxen 1 -#define O_MAC_CONFIG_1__txen 0 -#define R_MAC_CONFIG_2 0x01 -#define O_MAC_CONFIG_2__prlen 12 -#define W_MAC_CONFIG_2__prlen 4 -#define O_MAC_CONFIG_2__speed 8 -#define W_MAC_CONFIG_2__speed 2 -#define O_MAC_CONFIG_2__hugen 5 -#define O_MAC_CONFIG_2__flchk 4 -#define O_MAC_CONFIG_2__crce 1 -#define O_MAC_CONFIG_2__fulld 0 -#define R_IPG_IFG 0x02 -#define O_IPG_IFG__ipgr1 24 -#define W_IPG_IFG__ipgr1 7 -#define O_IPG_IFG__ipgr2 16 -#define W_IPG_IFG__ipgr2 7 -#define O_IPG_IFG__mifg 8 -#define W_IPG_IFG__mifg 8 -#define O_IPG_IFG__ipgt 0 -#define W_IPG_IFG__ipgt 7 -#define R_HALF_DUPLEX 0x03 -#define O_HALF_DUPLEX__abebt 24 -#define W_HALF_DUPLEX__abebt 4 -#define O_HALF_DUPLEX__abebe 19 -#define O_HALF_DUPLEX__bpnb 18 -#define O_HALF_DUPLEX__nobo 17 -#define O_HALF_DUPLEX__edxsdfr 16 -#define O_HALF_DUPLEX__retry 12 -#define W_HALF_DUPLEX__retry 4 -#define O_HALF_DUPLEX__lcol 0 -#define W_HALF_DUPLEX__lcol 10 -#define R_MAXIMUM_FRAME_LENGTH 0x04 -#define O_MAXIMUM_FRAME_LENGTH__maxf 0 -#define W_MAXIMUM_FRAME_LENGTH__maxf 16 -#define R_TEST 0x07 -#define O_TEST__mbof 3 -#define O_TEST__rthdf 2 -#define O_TEST__tpause 1 -#define O_TEST__sstct 0 -#define R_MII_MGMT_CONFIG 0x08 -#define O_MII_MGMT_CONFIG__scinc 5 -#define O_MII_MGMT_CONFIG__spre 4 -#define O_MII_MGMT_CONFIG__clks 3 -#define W_MII_MGMT_CONFIG__clks 3 -#define R_MII_MGMT_COMMAND 0x09 -#define O_MII_MGMT_COMMAND__scan 1 -#define O_MII_MGMT_COMMAND__rstat 0 -#define R_MII_MGMT_ADDRESS 0x0A -#define O_MII_MGMT_ADDRESS__fiad 8 -#define W_MII_MGMT_ADDRESS__fiad 5 -#define O_MII_MGMT_ADDRESS__fgad 5 -#define W_MII_MGMT_ADDRESS__fgad 0 -#define R_MII_MGMT_WRITE_DATA 0x0B -#define O_MII_MGMT_WRITE_DATA__ctld 0 -#define W_MII_MGMT_WRITE_DATA__ctld 16 -#define R_MII_MGMT_STATUS 0x0C -#define R_MII_MGMT_INDICATORS 0x0D -#define O_MII_MGMT_INDICATORS__nvalid 2 -#define O_MII_MGMT_INDICATORS__scan 1 -#define O_MII_MGMT_INDICATORS__busy 0 -#define R_INTERFACE_CONTROL 0x0E -#define O_INTERFACE_CONTROL__hrstint 31 -#define O_INTERFACE_CONTROL__tbimode 27 -#define O_INTERFACE_CONTROL__ghdmode 26 -#define O_INTERFACE_CONTROL__lhdmode 25 -#define O_INTERFACE_CONTROL__phymod 24 -#define O_INTERFACE_CONTROL__hrrmi 23 -#define O_INTERFACE_CONTROL__rspd 16 -#define O_INTERFACE_CONTROL__hr100 15 -#define O_INTERFACE_CONTROL__frcq 10 -#define O_INTERFACE_CONTROL__nocfr 9 -#define O_INTERFACE_CONTROL__dlfct 8 -#define O_INTERFACE_CONTROL__enjab 0 -#define R_INTERFACE_STATUS 0x0F -#define O_INTERFACE_STATUS__xsdfr 9 -#define O_INTERFACE_STATUS__ssrr 8 -#define W_INTERFACE_STATUS__ssrr 5 -#define O_INTERFACE_STATUS__miilf 3 -#define O_INTERFACE_STATUS__locar 2 -#define O_INTERFACE_STATUS__sqerr 1 -#define O_INTERFACE_STATUS__jabber 0 -#define R_STATION_ADDRESS_LS 0x10 -#define R_STATION_ADDRESS_MS 0x11 - -/* A-XGMAC register and bit field definitions */ -#define R_XGMAC_CONFIG_0 0x00 -#define O_XGMAC_CONFIG_0__hstmacrst 31 -#define O_XGMAC_CONFIG_0__hstrstrctl 23 -#define O_XGMAC_CONFIG_0__hstrstrfn 22 -#define O_XGMAC_CONFIG_0__hstrsttctl 18 -#define O_XGMAC_CONFIG_0__hstrsttfn 17 -#define O_XGMAC_CONFIG_0__hstrstmiim 16 -#define O_XGMAC_CONFIG_0__hstloopback 8 -#define R_XGMAC_CONFIG_1 0x01 -#define O_XGMAC_CONFIG_1__hsttctlen 31 -#define O_XGMAC_CONFIG_1__hsttfen 30 -#define O_XGMAC_CONFIG_1__hstrctlen 29 -#define 
O_XGMAC_CONFIG_1__hstrfen 28 -#define O_XGMAC_CONFIG_1__tfen 26 -#define O_XGMAC_CONFIG_1__rfen 24 -#define O_XGMAC_CONFIG_1__hstrctlshrtp 12 -#define O_XGMAC_CONFIG_1__hstdlyfcstx 10 -#define W_XGMAC_CONFIG_1__hstdlyfcstx 2 -#define O_XGMAC_CONFIG_1__hstdlyfcsrx 8 -#define W_XGMAC_CONFIG_1__hstdlyfcsrx 2 -#define O_XGMAC_CONFIG_1__hstppen 7 -#define O_XGMAC_CONFIG_1__hstbytswp 6 -#define O_XGMAC_CONFIG_1__hstdrplt64 5 -#define O_XGMAC_CONFIG_1__hstprmscrx 4 -#define O_XGMAC_CONFIG_1__hstlenchk 3 -#define O_XGMAC_CONFIG_1__hstgenfcs 2 -#define O_XGMAC_CONFIG_1__hstpadmode 0 -#define W_XGMAC_CONFIG_1__hstpadmode 2 -#define R_XGMAC_CONFIG_2 0x02 -#define O_XGMAC_CONFIG_2__hsttctlfrcp 31 -#define O_XGMAC_CONFIG_2__hstmlnkflth 27 -#define O_XGMAC_CONFIG_2__hstalnkflth 26 -#define O_XGMAC_CONFIG_2__rflnkflt 24 -#define W_XGMAC_CONFIG_2__rflnkflt 2 -#define O_XGMAC_CONFIG_2__hstipgextmod 16 -#define W_XGMAC_CONFIG_2__hstipgextmod 5 -#define O_XGMAC_CONFIG_2__hstrctlfrcp 15 -#define O_XGMAC_CONFIG_2__hstipgexten 5 -#define O_XGMAC_CONFIG_2__hstmipgext 0 -#define W_XGMAC_CONFIG_2__hstmipgext 5 -#define R_XGMAC_CONFIG_3 0x03 -#define O_XGMAC_CONFIG_3__hstfltrfrm 31 -#define W_XGMAC_CONFIG_3__hstfltrfrm 16 -#define O_XGMAC_CONFIG_3__hstfltrfrmdc 15 -#define W_XGMAC_CONFIG_3__hstfltrfrmdc 16 -#define R_XGMAC_STATION_ADDRESS_LS 0x04 -#define O_XGMAC_STATION_ADDRESS_LS__hstmacadr0 0 -#define W_XGMAC_STATION_ADDRESS_LS__hstmacadr0 32 -#define R_XGMAC_STATION_ADDRESS_MS 0x05 -#define R_XGMAC_MAX_FRAME_LEN 0x08 -#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 16 -#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 14 -#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 0 -#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 16 -#define R_XGMAC_REV_LEVEL 0x0B -#define O_XGMAC_REV_LEVEL__revlvl 0 -#define W_XGMAC_REV_LEVEL__revlvl 15 -#define R_XGMAC_MIIM_COMMAND 0x10 -#define O_XGMAC_MIIM_COMMAND__hstldcmd 3 -#define O_XGMAC_MIIM_COMMAND__hstmiimcmd 0 -#define W_XGMAC_MIIM_COMMAND__hstmiimcmd 3 -#define R_XGMAC_MIIM_FILED 0x11 -#define O_XGMAC_MIIM_FILED__hststfield 30 -#define W_XGMAC_MIIM_FILED__hststfield 2 -#define O_XGMAC_MIIM_FILED__hstopfield 28 -#define W_XGMAC_MIIM_FILED__hstopfield 2 -#define O_XGMAC_MIIM_FILED__hstphyadx 23 -#define W_XGMAC_MIIM_FILED__hstphyadx 5 -#define O_XGMAC_MIIM_FILED__hstregadx 18 -#define W_XGMAC_MIIM_FILED__hstregadx 5 -#define O_XGMAC_MIIM_FILED__hsttafield 16 -#define W_XGMAC_MIIM_FILED__hsttafield 2 -#define O_XGMAC_MIIM_FILED__miimrddat 0 -#define W_XGMAC_MIIM_FILED__miimrddat 16 -#define R_XGMAC_MIIM_CONFIG 0x12 -#define O_XGMAC_MIIM_CONFIG__hstnopram 7 -#define O_XGMAC_MIIM_CONFIG__hstclkdiv 0 -#define W_XGMAC_MIIM_CONFIG__hstclkdiv 7 -#define R_XGMAC_MIIM_LINK_FAIL_VECTOR 0x13 -#define O_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 0 -#define W_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 32 -#define R_XGMAC_MIIM_INDICATOR 0x14 -#define O_XGMAC_MIIM_INDICATOR__miimphylf 4 -#define O_XGMAC_MIIM_INDICATOR__miimmoncplt 3 -#define O_XGMAC_MIIM_INDICATOR__miimmonvld 2 -#define O_XGMAC_MIIM_INDICATOR__miimmon 1 -#define O_XGMAC_MIIM_INDICATOR__miimbusy 0 - -/* GMAC stats registers */ -#define R_RBYT 0x27 -#define R_RPKT 0x28 -#define R_RFCS 0x29 -#define R_RMCA 0x2A -#define R_RBCA 0x2B -#define R_RXCF 0x2C -#define R_RXPF 0x2D -#define R_RXUO 0x2E -#define R_RALN 0x2F -#define R_RFLR 0x30 -#define R_RCDE 0x31 -#define R_RCSE 0x32 -#define R_RUND 0x33 -#define R_ROVR 0x34 -#define R_TBYT 0x38 -#define R_TPKT 0x39 -#define R_TMCA 0x3A -#define R_TBCA 0x3B -#define R_TXPF 0x3C -#define R_TDFR 0x3D -#define 
R_TEDF 0x3E -#define R_TSCL 0x3F -#define R_TMCL 0x40 -#define R_TLCL 0x41 -#define R_TXCL 0x42 -#define R_TNCL 0x43 -#define R_TJBR 0x46 -#define R_TFCS 0x47 -#define R_TXCF 0x48 -#define R_TOVR 0x49 -#define R_TUND 0x4A -#define R_TFRG 0x4B - -/* Glue logic register and bit field definitions */ -#define R_MAC_ADDR0 0x50 -#define R_MAC_ADDR1 0x52 -#define R_MAC_ADDR2 0x54 -#define R_MAC_ADDR3 0x56 -#define R_MAC_ADDR_MASK2 0x58 -#define R_MAC_ADDR_MASK3 0x5A -#define R_MAC_FILTER_CONFIG 0x5C -#define O_MAC_FILTER_CONFIG__BROADCAST_EN 10 -#define O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN 9 -#define O_MAC_FILTER_CONFIG__ALL_MCAST_EN 8 -#define O_MAC_FILTER_CONFIG__ALL_UCAST_EN 7 -#define O_MAC_FILTER_CONFIG__HASH_MCAST_EN 6 -#define O_MAC_FILTER_CONFIG__HASH_UCAST_EN 5 -#define O_MAC_FILTER_CONFIG__ADDR_MATCH_DISC 4 -#define O_MAC_FILTER_CONFIG__MAC_ADDR3_VALID 3 -#define O_MAC_FILTER_CONFIG__MAC_ADDR2_VALID 2 -#define O_MAC_FILTER_CONFIG__MAC_ADDR1_VALID 1 -#define O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID 0 -#define R_HASH_TABLE_VECTOR 0x30 -#define R_TX_CONTROL 0x0A0 -#define O_TX_CONTROL__TX15HALT 31 -#define O_TX_CONTROL__TX14HALT 30 -#define O_TX_CONTROL__TX13HALT 29 -#define O_TX_CONTROL__TX12HALT 28 -#define O_TX_CONTROL__TX11HALT 27 -#define O_TX_CONTROL__TX10HALT 26 -#define O_TX_CONTROL__TX9HALT 25 -#define O_TX_CONTROL__TX8HALT 24 -#define O_TX_CONTROL__TX7HALT 23 -#define O_TX_CONTROL__TX6HALT 22 -#define O_TX_CONTROL__TX5HALT 21 -#define O_TX_CONTROL__TX4HALT 20 -#define O_TX_CONTROL__TX3HALT 19 -#define O_TX_CONTROL__TX2HALT 18 -#define O_TX_CONTROL__TX1HALT 17 -#define O_TX_CONTROL__TX0HALT 16 -#define O_TX_CONTROL__TXIDLE 15 -#define O_TX_CONTROL__TXENABLE 14 -#define O_TX_CONTROL__TXTHRESHOLD 0 -#define W_TX_CONTROL__TXTHRESHOLD 14 -#define R_RX_CONTROL 0x0A1 -#define O_RX_CONTROL__RGMII 10 -#define O_RX_CONTROL__SOFTRESET 2 -#define O_RX_CONTROL__RXHALT 1 -#define O_RX_CONTROL__RXENABLE 0 -#define R_DESC_PACK_CTRL 0x0A2 -#define O_DESC_PACK_CTRL__BYTEOFFSET 17 -#define W_DESC_PACK_CTRL__BYTEOFFSET 3 -#define O_DESC_PACK_CTRL__PREPADENABLE 16 -#define O_DESC_PACK_CTRL__MAXENTRY 14 -#define W_DESC_PACK_CTRL__MAXENTRY 2 -#define O_DESC_PACK_CTRL__REGULARSIZE 0 -#define W_DESC_PACK_CTRL__REGULARSIZE 14 -#define R_STATCTRL 0x0A3 -#define O_STATCTRL__OVERFLOWEN 4 -#define O_STATCTRL__GIG 3 -#define O_STATCTRL__STEN 2 -#define O_STATCTRL__CLRCNT 1 -#define O_STATCTRL__AUTOZ 0 -#define R_L2ALLOCCTRL 0x0A4 -#define O_L2ALLOCCTRL__TXL2ALLOCATE 9 -#define W_L2ALLOCCTRL__TXL2ALLOCATE 9 -#define O_L2ALLOCCTRL__RXL2ALLOCATE 0 -#define W_L2ALLOCCTRL__RXL2ALLOCATE 9 -#define R_INTMASK 0x0A5 -#define O_INTMASK__SPI4TXERROR 28 -#define O_INTMASK__SPI4RXERROR 27 -#define O_INTMASK__RGMIIHALFDUPCOLLISION 27 -#define O_INTMASK__ABORT 26 -#define O_INTMASK__UNDERRUN 25 -#define O_INTMASK__DISCARDPACKET 24 -#define O_INTMASK__ASYNCFIFOFULL 23 -#define O_INTMASK__TAGFULL 22 -#define O_INTMASK__CLASS3FULL 21 -#define O_INTMASK__C3EARLYFULL 20 -#define O_INTMASK__CLASS2FULL 19 -#define O_INTMASK__C2EARLYFULL 18 -#define O_INTMASK__CLASS1FULL 17 -#define O_INTMASK__C1EARLYFULL 16 -#define O_INTMASK__CLASS0FULL 15 -#define O_INTMASK__C0EARLYFULL 14 -#define O_INTMASK__RXDATAFULL 13 -#define O_INTMASK__RXEARLYFULL 12 -#define O_INTMASK__RFREEEMPTY 9 -#define O_INTMASK__RFEARLYEMPTY 8 -#define O_INTMASK__P2PSPILLECC 7 -#define O_INTMASK__FREEDESCFULL 5 -#define O_INTMASK__FREEEARLYFULL 4 -#define O_INTMASK__TXFETCHERROR 3 -#define O_INTMASK__STATCARRY 2 -#define O_INTMASK__MDINT 1 -#define O_INTMASK__TXILLEGAL 0 
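
Editorial aside, not part of the patch: the removed xlr_net.h encodes each register field as an O_<REG>__<FIELD> bit offset, paired where applicable with a W_<REG>__<FIELD> field width. A minimal, hedged sketch of how such offset/width pairs are conventionally folded into a read-modify-write helper follows; the xlr_field_update() helper and the user-space test harness are hypothetical illustrations that only reuse the TX_CONTROL constants quoted in the deleted driver code above, not kernel APIs.

#include <stdint.h>
#include <stdio.h>

/* Offset (O_) and width (W_) values copied from the deleted header above. */
#define O_TX_CONTROL__TXENABLE    14
#define O_TX_CONTROL__TXTHRESHOLD 0
#define W_TX_CONTROL__TXTHRESHOLD 14

/* Hypothetical helper: read-modify-write of one field in a shadow register value. */
static inline uint32_t xlr_field_update(uint32_t reg, unsigned int offset,
					unsigned int width, uint32_t val)
{
	uint32_t mask = ((1u << width) - 1) << offset;

	return (reg & ~mask) | ((val << offset) & mask);
}

int main(void)
{
	uint32_t tx_control = 0;

	/* Program a 512-entry TX threshold, then enable the transmitter. */
	tx_control = xlr_field_update(tx_control, O_TX_CONTROL__TXTHRESHOLD,
				      W_TX_CONTROL__TXTHRESHOLD, 512);
	tx_control |= 1u << O_TX_CONTROL__TXENABLE;

	printf("TX_CONTROL shadow value: 0x%08x\n", tx_control); /* prints 0x00004200 */
	return 0;
}

The 0x3fff mask passed to xlr_reg_update() in the deleted xlr_port_enable()/xlr_port_disable() appears to be the same (1 << 14) - 1 construction for the 14-bit TXTHRESHOLD field.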
-#define R_INTREG 0x0A6 -#define O_INTREG__SPI4TXERROR 28 -#define O_INTREG__SPI4RXERROR 27 -#define O_INTREG__RGMIIHALFDUPCOLLISION 27 -#define O_INTREG__ABORT 26 -#define O_INTREG__UNDERRUN 25 -#define O_INTREG__DISCARDPACKET 24 -#define O_INTREG__ASYNCFIFOFULL 23 -#define O_INTREG__TAGFULL 22 -#define O_INTREG__CLASS3FULL 21 -#define O_INTREG__C3EARLYFULL 20 -#define O_INTREG__CLASS2FULL 19 -#define O_INTREG__C2EARLYFULL 18 -#define O_INTREG__CLASS1FULL 17 -#define O_INTREG__C1EARLYFULL 16 -#define O_INTREG__CLASS0FULL 15 -#define O_INTREG__C0EARLYFULL 14 -#define O_INTREG__RXDATAFULL 13 -#define O_INTREG__RXEARLYFULL 12 -#define O_INTREG__RFREEEMPTY 9 -#define O_INTREG__RFEARLYEMPTY 8 -#define O_INTREG__P2PSPILLECC 7 -#define O_INTREG__FREEDESCFULL 5 -#define O_INTREG__FREEEARLYFULL 4 -#define O_INTREG__TXFETCHERROR 3 -#define O_INTREG__STATCARRY 2 -#define O_INTREG__MDINT 1 -#define O_INTREG__TXILLEGAL 0 -#define R_TXRETRY 0x0A7 -#define O_TXRETRY__COLLISIONRETRY 6 -#define O_TXRETRY__BUSERRORRETRY 5 -#define O_TXRETRY__UNDERRUNRETRY 4 -#define O_TXRETRY__RETRIES 0 -#define W_TXRETRY__RETRIES 4 -#define R_CORECONTROL 0x0A8 -#define O_CORECONTROL__ERRORTHREAD 4 -#define W_CORECONTROL__ERRORTHREAD 7 -#define O_CORECONTROL__SHUTDOWN 2 -#define O_CORECONTROL__SPEED 0 -#define W_CORECONTROL__SPEED 2 -#define R_BYTEOFFSET0 0x0A9 -#define R_BYTEOFFSET1 0x0AA -#define R_L2TYPE_0 0x0F0 -#define O_L2TYPE__EXTRAHDRPROTOSIZE 26 -#define W_L2TYPE__EXTRAHDRPROTOSIZE 5 -#define O_L2TYPE__EXTRAHDRPROTOOFFSET 20 -#define W_L2TYPE__EXTRAHDRPROTOOFFSET 6 -#define O_L2TYPE__EXTRAHEADERSIZE 14 -#define W_L2TYPE__EXTRAHEADERSIZE 6 -#define O_L2TYPE__PROTOOFFSET 8 -#define W_L2TYPE__PROTOOFFSET 6 -#define O_L2TYPE__L2HDROFFSET 2 -#define W_L2TYPE__L2HDROFFSET 6 -#define O_L2TYPE__L2PROTO 0 -#define W_L2TYPE__L2PROTO 2 -#define R_L2TYPE_1 0xF0 -#define R_L2TYPE_2 0xF0 -#define R_L2TYPE_3 0xF0 -#define R_PARSERCONFIGREG 0x100 -#define O_PARSERCONFIGREG__CRCHASHPOLY 8 -#define W_PARSERCONFIGREG__CRCHASHPOLY 7 -#define O_PARSERCONFIGREG__PREPADOFFSET 4 -#define W_PARSERCONFIGREG__PREPADOFFSET 4 -#define O_PARSERCONFIGREG__USECAM 2 -#define O_PARSERCONFIGREG__USEHASH 1 -#define O_PARSERCONFIGREG__USEPROTO 0 -#define R_L3CTABLE 0x140 -#define O_L3CTABLE__OFFSET0 25 -#define W_L3CTABLE__OFFSET0 7 -#define O_L3CTABLE__LEN0 21 -#define W_L3CTABLE__LEN0 4 -#define O_L3CTABLE__OFFSET1 14 -#define W_L3CTABLE__OFFSET1 7 -#define O_L3CTABLE__LEN1 10 -#define W_L3CTABLE__LEN1 4 -#define O_L3CTABLE__OFFSET2 4 -#define W_L3CTABLE__OFFSET2 6 -#define O_L3CTABLE__LEN2 0 -#define W_L3CTABLE__LEN2 4 -#define O_L3CTABLE__L3HDROFFSET 26 -#define W_L3CTABLE__L3HDROFFSET 6 -#define O_L3CTABLE__L4PROTOOFFSET 20 -#define W_L3CTABLE__L4PROTOOFFSET 6 -#define O_L3CTABLE__IPCHKSUMCOMPUTE 19 -#define O_L3CTABLE__L4CLASSIFY 18 -#define O_L3CTABLE__L2PROTO 16 -#define W_L3CTABLE__L2PROTO 2 -#define O_L3CTABLE__L3PROTOKEY 0 -#define W_L3CTABLE__L3PROTOKEY 16 -#define R_L4CTABLE 0x160 -#define O_L4CTABLE__OFFSET0 21 -#define W_L4CTABLE__OFFSET0 6 -#define O_L4CTABLE__LEN0 17 -#define W_L4CTABLE__LEN0 4 -#define O_L4CTABLE__OFFSET1 11 -#define W_L4CTABLE__OFFSET1 6 -#define O_L4CTABLE__LEN1 7 -#define W_L4CTABLE__LEN1 4 -#define O_L4CTABLE__TCPCHKSUMENABLE 0 -#define R_CAM4X128TABLE 0x172 -#define O_CAM4X128TABLE__CLASSID 7 -#define W_CAM4X128TABLE__CLASSID 2 -#define O_CAM4X128TABLE__BUCKETID 1 -#define W_CAM4X128TABLE__BUCKETID 6 -#define O_CAM4X128TABLE__USEBUCKET 0 -#define R_CAM4X128KEY 0x180 -#define R_TRANSLATETABLE 0x1A0 -#define 
R_DMACR0 0x200 -#define O_DMACR0__DATA0WRMAXCR 27 -#define W_DMACR0__DATA0WRMAXCR 3 -#define O_DMACR0__DATA0RDMAXCR 24 -#define W_DMACR0__DATA0RDMAXCR 3 -#define O_DMACR0__DATA1WRMAXCR 21 -#define W_DMACR0__DATA1WRMAXCR 3 -#define O_DMACR0__DATA1RDMAXCR 18 -#define W_DMACR0__DATA1RDMAXCR 3 -#define O_DMACR0__DATA2WRMAXCR 15 -#define W_DMACR0__DATA2WRMAXCR 3 -#define O_DMACR0__DATA2RDMAXCR 12 -#define W_DMACR0__DATA2RDMAXCR 3 -#define O_DMACR0__DATA3WRMAXCR 9 -#define W_DMACR0__DATA3WRMAXCR 3 -#define O_DMACR0__DATA3RDMAXCR 6 -#define W_DMACR0__DATA3RDMAXCR 3 -#define O_DMACR0__DATA4WRMAXCR 3 -#define W_DMACR0__DATA4WRMAXCR 3 -#define O_DMACR0__DATA4RDMAXCR 0 -#define W_DMACR0__DATA4RDMAXCR 3 -#define R_DMACR1 0x201 -#define O_DMACR1__DATA5WRMAXCR 27 -#define W_DMACR1__DATA5WRMAXCR 3 -#define O_DMACR1__DATA5RDMAXCR 24 -#define W_DMACR1__DATA5RDMAXCR 3 -#define O_DMACR1__DATA6WRMAXCR 21 -#define W_DMACR1__DATA6WRMAXCR 3 -#define O_DMACR1__DATA6RDMAXCR 18 -#define W_DMACR1__DATA6RDMAXCR 3 -#define O_DMACR1__DATA7WRMAXCR 15 -#define W_DMACR1__DATA7WRMAXCR 3 -#define O_DMACR1__DATA7RDMAXCR 12 -#define W_DMACR1__DATA7RDMAXCR 3 -#define O_DMACR1__DATA8WRMAXCR 9 -#define W_DMACR1__DATA8WRMAXCR 3 -#define O_DMACR1__DATA8RDMAXCR 6 -#define W_DMACR1__DATA8RDMAXCR 3 -#define O_DMACR1__DATA9WRMAXCR 3 -#define W_DMACR1__DATA9WRMAXCR 3 -#define O_DMACR1__DATA9RDMAXCR 0 -#define W_DMACR1__DATA9RDMAXCR 3 -#define R_DMACR2 0x202 -#define O_DMACR2__DATA10WRMAXCR 27 -#define W_DMACR2__DATA10WRMAXCR 3 -#define O_DMACR2__DATA10RDMAXCR 24 -#define W_DMACR2__DATA10RDMAXCR 3 -#define O_DMACR2__DATA11WRMAXCR 21 -#define W_DMACR2__DATA11WRMAXCR 3 -#define O_DMACR2__DATA11RDMAXCR 18 -#define W_DMACR2__DATA11RDMAXCR 3 -#define O_DMACR2__DATA12WRMAXCR 15 -#define W_DMACR2__DATA12WRMAXCR 3 -#define O_DMACR2__DATA12RDMAXCR 12 -#define W_DMACR2__DATA12RDMAXCR 3 -#define O_DMACR2__DATA13WRMAXCR 9 -#define W_DMACR2__DATA13WRMAXCR 3 -#define O_DMACR2__DATA13RDMAXCR 6 -#define W_DMACR2__DATA13RDMAXCR 3 -#define O_DMACR2__DATA14WRMAXCR 3 -#define W_DMACR2__DATA14WRMAXCR 3 -#define O_DMACR2__DATA14RDMAXCR 0 -#define W_DMACR2__DATA14RDMAXCR 3 -#define R_DMACR3 0x203 -#define O_DMACR3__DATA15WRMAXCR 27 -#define W_DMACR3__DATA15WRMAXCR 3 -#define O_DMACR3__DATA15RDMAXCR 24 -#define W_DMACR3__DATA15RDMAXCR 3 -#define O_DMACR3__SPCLASSWRMAXCR 21 -#define W_DMACR3__SPCLASSWRMAXCR 3 -#define O_DMACR3__SPCLASSRDMAXCR 18 -#define W_DMACR3__SPCLASSRDMAXCR 3 -#define O_DMACR3__JUMFRINWRMAXCR 15 -#define W_DMACR3__JUMFRINWRMAXCR 3 -#define O_DMACR3__JUMFRINRDMAXCR 12 -#define W_DMACR3__JUMFRINRDMAXCR 3 -#define O_DMACR3__REGFRINWRMAXCR 9 -#define W_DMACR3__REGFRINWRMAXCR 3 -#define O_DMACR3__REGFRINRDMAXCR 6 -#define W_DMACR3__REGFRINRDMAXCR 3 -#define O_DMACR3__FROUTWRMAXCR 3 -#define W_DMACR3__FROUTWRMAXCR 3 -#define O_DMACR3__FROUTRDMAXCR 0 -#define W_DMACR3__FROUTRDMAXCR 3 -#define R_REG_FRIN_SPILL_MEM_START_0 0x204 -#define O_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0 0 -#define W_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0 32 -#define R_REG_FRIN_SPILL_MEM_START_1 0x205 -#define O_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1 0 -#define W_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1 3 -#define R_REG_FRIN_SPILL_MEM_SIZE 0x206 -#define O_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE 0 -#define W_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE 32 -#define R_FROUT_SPILL_MEM_START_0 0x207 -#define O_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0 0 -#define W_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0 32 -#define 
R_FROUT_SPILL_MEM_START_1 0x208 -#define O_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1 0 -#define W_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1 3 -#define R_FROUT_SPILL_MEM_SIZE 0x209 -#define O_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE 0 -#define W_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE 32 -#define R_CLASS0_SPILL_MEM_START_0 0x20A -#define O_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0 0 -#define W_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0 32 -#define R_CLASS0_SPILL_MEM_START_1 0x20B -#define O_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1 0 -#define W_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1 3 -#define R_CLASS0_SPILL_MEM_SIZE 0x20C -#define O_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE 0 -#define W_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE 32 -#define R_JUMFRIN_SPILL_MEM_START_0 0x20D -#define O_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0 0 -#define W_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0 32 -#define R_JUMFRIN_SPILL_MEM_START_1 0x20E -#define O_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1 0 -#define W_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1 3 -#define R_JUMFRIN_SPILL_MEM_SIZE 0x20F -#define O_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE 0 -#define W_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE 32 -#define R_CLASS1_SPILL_MEM_START_0 0x210 -#define O_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0 0 -#define W_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0 32 -#define R_CLASS1_SPILL_MEM_START_1 0x211 -#define O_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1 0 -#define W_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1 3 -#define R_CLASS1_SPILL_MEM_SIZE 0x212 -#define O_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE 0 -#define W_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE 32 -#define R_CLASS2_SPILL_MEM_START_0 0x213 -#define O_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0 0 -#define W_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0 32 -#define R_CLASS2_SPILL_MEM_START_1 0x214 -#define O_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1 0 -#define W_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1 3 -#define R_CLASS2_SPILL_MEM_SIZE 0x215 -#define O_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE 0 -#define W_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE 32 -#define R_CLASS3_SPILL_MEM_START_0 0x216 -#define O_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0 0 -#define W_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0 32 -#define R_CLASS3_SPILL_MEM_START_1 0x217 -#define O_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1 0 -#define W_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1 3 -#define R_CLASS3_SPILL_MEM_SIZE 0x218 -#define O_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE 0 -#define W_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE 32 -#define R_REG_FRIN1_SPILL_MEM_START_0 0x219 -#define R_REG_FRIN1_SPILL_MEM_START_1 0x21a -#define R_REG_FRIN1_SPILL_MEM_SIZE 0x21b -#define R_SPIHNGY0 0x219 -#define O_SPIHNGY0__EG_HNGY_THRESH_0 24 -#define W_SPIHNGY0__EG_HNGY_THRESH_0 7 -#define O_SPIHNGY0__EG_HNGY_THRESH_1 16 -#define W_SPIHNGY0__EG_HNGY_THRESH_1 7 -#define O_SPIHNGY0__EG_HNGY_THRESH_2 8 -#define W_SPIHNGY0__EG_HNGY_THRESH_2 7 -#define O_SPIHNGY0__EG_HNGY_THRESH_3 0 -#define W_SPIHNGY0__EG_HNGY_THRESH_3 7 -#define R_SPIHNGY1 0x21A -#define O_SPIHNGY1__EG_HNGY_THRESH_4 24 -#define W_SPIHNGY1__EG_HNGY_THRESH_4 7 -#define O_SPIHNGY1__EG_HNGY_THRESH_5 16 -#define W_SPIHNGY1__EG_HNGY_THRESH_5 7 -#define O_SPIHNGY1__EG_HNGY_THRESH_6 8 -#define W_SPIHNGY1__EG_HNGY_THRESH_6 7 -#define O_SPIHNGY1__EG_HNGY_THRESH_7 0 -#define W_SPIHNGY1__EG_HNGY_THRESH_7 7 -#define R_SPIHNGY2 0x21B -#define 
O_SPIHNGY2__EG_HNGY_THRESH_8 24 -#define W_SPIHNGY2__EG_HNGY_THRESH_8 7 -#define O_SPIHNGY2__EG_HNGY_THRESH_9 16 -#define W_SPIHNGY2__EG_HNGY_THRESH_9 7 -#define O_SPIHNGY2__EG_HNGY_THRESH_10 8 -#define W_SPIHNGY2__EG_HNGY_THRESH_10 7 -#define O_SPIHNGY2__EG_HNGY_THRESH_11 0 -#define W_SPIHNGY2__EG_HNGY_THRESH_11 7 -#define R_SPIHNGY3 0x21C -#define O_SPIHNGY3__EG_HNGY_THRESH_12 24 -#define W_SPIHNGY3__EG_HNGY_THRESH_12 7 -#define O_SPIHNGY3__EG_HNGY_THRESH_13 16 -#define W_SPIHNGY3__EG_HNGY_THRESH_13 7 -#define O_SPIHNGY3__EG_HNGY_THRESH_14 8 -#define W_SPIHNGY3__EG_HNGY_THRESH_14 7 -#define O_SPIHNGY3__EG_HNGY_THRESH_15 0 -#define W_SPIHNGY3__EG_HNGY_THRESH_15 7 -#define R_SPISTRV0 0x21D -#define O_SPISTRV0__EG_STRV_THRESH_0 24 -#define W_SPISTRV0__EG_STRV_THRESH_0 7 -#define O_SPISTRV0__EG_STRV_THRESH_1 16 -#define W_SPISTRV0__EG_STRV_THRESH_1 7 -#define O_SPISTRV0__EG_STRV_THRESH_2 8 -#define W_SPISTRV0__EG_STRV_THRESH_2 7 -#define O_SPISTRV0__EG_STRV_THRESH_3 0 -#define W_SPISTRV0__EG_STRV_THRESH_3 7 -#define R_SPISTRV1 0x21E -#define O_SPISTRV1__EG_STRV_THRESH_4 24 -#define W_SPISTRV1__EG_STRV_THRESH_4 7 -#define O_SPISTRV1__EG_STRV_THRESH_5 16 -#define W_SPISTRV1__EG_STRV_THRESH_5 7 -#define O_SPISTRV1__EG_STRV_THRESH_6 8 -#define W_SPISTRV1__EG_STRV_THRESH_6 7 -#define O_SPISTRV1__EG_STRV_THRESH_7 0 -#define W_SPISTRV1__EG_STRV_THRESH_7 7 -#define R_SPISTRV2 0x21F -#define O_SPISTRV2__EG_STRV_THRESH_8 24 -#define W_SPISTRV2__EG_STRV_THRESH_8 7 -#define O_SPISTRV2__EG_STRV_THRESH_9 16 -#define W_SPISTRV2__EG_STRV_THRESH_9 7 -#define O_SPISTRV2__EG_STRV_THRESH_10 8 -#define W_SPISTRV2__EG_STRV_THRESH_10 7 -#define O_SPISTRV2__EG_STRV_THRESH_11 0 -#define W_SPISTRV2__EG_STRV_THRESH_11 7 -#define R_SPISTRV3 0x220 -#define O_SPISTRV3__EG_STRV_THRESH_12 24 -#define W_SPISTRV3__EG_STRV_THRESH_12 7 -#define O_SPISTRV3__EG_STRV_THRESH_13 16 -#define W_SPISTRV3__EG_STRV_THRESH_13 7 -#define O_SPISTRV3__EG_STRV_THRESH_14 8 -#define W_SPISTRV3__EG_STRV_THRESH_14 7 -#define O_SPISTRV3__EG_STRV_THRESH_15 0 -#define W_SPISTRV3__EG_STRV_THRESH_15 7 -#define R_TXDATAFIFO0 0x221 -#define O_TXDATAFIFO0__TX0DATAFIFOSTART 24 -#define W_TXDATAFIFO0__TX0DATAFIFOSTART 7 -#define O_TXDATAFIFO0__TX0DATAFIFOSIZE 16 -#define W_TXDATAFIFO0__TX0DATAFIFOSIZE 7 -#define O_TXDATAFIFO0__TX1DATAFIFOSTART 8 -#define W_TXDATAFIFO0__TX1DATAFIFOSTART 7 -#define O_TXDATAFIFO0__TX1DATAFIFOSIZE 0 -#define W_TXDATAFIFO0__TX1DATAFIFOSIZE 7 -#define R_TXDATAFIFO1 0x222 -#define O_TXDATAFIFO1__TX2DATAFIFOSTART 24 -#define W_TXDATAFIFO1__TX2DATAFIFOSTART 7 -#define O_TXDATAFIFO1__TX2DATAFIFOSIZE 16 -#define W_TXDATAFIFO1__TX2DATAFIFOSIZE 7 -#define O_TXDATAFIFO1__TX3DATAFIFOSTART 8 -#define W_TXDATAFIFO1__TX3DATAFIFOSTART 7 -#define O_TXDATAFIFO1__TX3DATAFIFOSIZE 0 -#define W_TXDATAFIFO1__TX3DATAFIFOSIZE 7 -#define R_TXDATAFIFO2 0x223 -#define O_TXDATAFIFO2__TX4DATAFIFOSTART 24 -#define W_TXDATAFIFO2__TX4DATAFIFOSTART 7 -#define O_TXDATAFIFO2__TX4DATAFIFOSIZE 16 -#define W_TXDATAFIFO2__TX4DATAFIFOSIZE 7 -#define O_TXDATAFIFO2__TX5DATAFIFOSTART 8 -#define W_TXDATAFIFO2__TX5DATAFIFOSTART 7 -#define O_TXDATAFIFO2__TX5DATAFIFOSIZE 0 -#define W_TXDATAFIFO2__TX5DATAFIFOSIZE 7 -#define R_TXDATAFIFO3 0x224 -#define O_TXDATAFIFO3__TX6DATAFIFOSTART 24 -#define W_TXDATAFIFO3__TX6DATAFIFOSTART 7 -#define O_TXDATAFIFO3__TX6DATAFIFOSIZE 16 -#define W_TXDATAFIFO3__TX6DATAFIFOSIZE 7 -#define O_TXDATAFIFO3__TX7DATAFIFOSTART 8 -#define W_TXDATAFIFO3__TX7DATAFIFOSTART 7 -#define O_TXDATAFIFO3__TX7DATAFIFOSIZE 0 -#define 
W_TXDATAFIFO3__TX7DATAFIFOSIZE 7 -#define R_TXDATAFIFO4 0x225 -#define O_TXDATAFIFO4__TX8DATAFIFOSTART 24 -#define W_TXDATAFIFO4__TX8DATAFIFOSTART 7 -#define O_TXDATAFIFO4__TX8DATAFIFOSIZE 16 -#define W_TXDATAFIFO4__TX8DATAFIFOSIZE 7 -#define O_TXDATAFIFO4__TX9DATAFIFOSTART 8 -#define W_TXDATAFIFO4__TX9DATAFIFOSTART 7 -#define O_TXDATAFIFO4__TX9DATAFIFOSIZE 0 -#define W_TXDATAFIFO4__TX9DATAFIFOSIZE 7 -#define R_TXDATAFIFO5 0x226 -#define O_TXDATAFIFO5__TX10DATAFIFOSTART 24 -#define W_TXDATAFIFO5__TX10DATAFIFOSTART 7 -#define O_TXDATAFIFO5__TX10DATAFIFOSIZE 16 -#define W_TXDATAFIFO5__TX10DATAFIFOSIZE 7 -#define O_TXDATAFIFO5__TX11DATAFIFOSTART 8 -#define W_TXDATAFIFO5__TX11DATAFIFOSTART 7 -#define O_TXDATAFIFO5__TX11DATAFIFOSIZE 0 -#define W_TXDATAFIFO5__TX11DATAFIFOSIZE 7 -#define R_TXDATAFIFO6 0x227 -#define O_TXDATAFIFO6__TX12DATAFIFOSTART 24 -#define W_TXDATAFIFO6__TX12DATAFIFOSTART 7 -#define O_TXDATAFIFO6__TX12DATAFIFOSIZE 16 -#define W_TXDATAFIFO6__TX12DATAFIFOSIZE 7 -#define O_TXDATAFIFO6__TX13DATAFIFOSTART 8 -#define W_TXDATAFIFO6__TX13DATAFIFOSTART 7 -#define O_TXDATAFIFO6__TX13DATAFIFOSIZE 0 -#define W_TXDATAFIFO6__TX13DATAFIFOSIZE 7 -#define R_TXDATAFIFO7 0x228 -#define O_TXDATAFIFO7__TX14DATAFIFOSTART 24 -#define W_TXDATAFIFO7__TX14DATAFIFOSTART 7 -#define O_TXDATAFIFO7__TX14DATAFIFOSIZE 16 -#define W_TXDATAFIFO7__TX14DATAFIFOSIZE 7 -#define O_TXDATAFIFO7__TX15DATAFIFOSTART 8 -#define W_TXDATAFIFO7__TX15DATAFIFOSTART 7 -#define O_TXDATAFIFO7__TX15DATAFIFOSIZE 0 -#define W_TXDATAFIFO7__TX15DATAFIFOSIZE 7 -#define R_RXDATAFIFO0 0x229 -#define O_RXDATAFIFO0__RX0DATAFIFOSTART 24 -#define W_RXDATAFIFO0__RX0DATAFIFOSTART 7 -#define O_RXDATAFIFO0__RX0DATAFIFOSIZE 16 -#define W_RXDATAFIFO0__RX0DATAFIFOSIZE 7 -#define O_RXDATAFIFO0__RX1DATAFIFOSTART 8 -#define W_RXDATAFIFO0__RX1DATAFIFOSTART 7 -#define O_RXDATAFIFO0__RX1DATAFIFOSIZE 0 -#define W_RXDATAFIFO0__RX1DATAFIFOSIZE 7 -#define R_RXDATAFIFO1 0x22A -#define O_RXDATAFIFO1__RX2DATAFIFOSTART 24 -#define W_RXDATAFIFO1__RX2DATAFIFOSTART 7 -#define O_RXDATAFIFO1__RX2DATAFIFOSIZE 16 -#define W_RXDATAFIFO1__RX2DATAFIFOSIZE 7 -#define O_RXDATAFIFO1__RX3DATAFIFOSTART 8 -#define W_RXDATAFIFO1__RX3DATAFIFOSTART 7 -#define O_RXDATAFIFO1__RX3DATAFIFOSIZE 0 -#define W_RXDATAFIFO1__RX3DATAFIFOSIZE 7 -#define R_RXDATAFIFO2 0x22B -#define O_RXDATAFIFO2__RX4DATAFIFOSTART 24 -#define W_RXDATAFIFO2__RX4DATAFIFOSTART 7 -#define O_RXDATAFIFO2__RX4DATAFIFOSIZE 16 -#define W_RXDATAFIFO2__RX4DATAFIFOSIZE 7 -#define O_RXDATAFIFO2__RX5DATAFIFOSTART 8 -#define W_RXDATAFIFO2__RX5DATAFIFOSTART 7 -#define O_RXDATAFIFO2__RX5DATAFIFOSIZE 0 -#define W_RXDATAFIFO2__RX5DATAFIFOSIZE 7 -#define R_RXDATAFIFO3 0x22C -#define O_RXDATAFIFO3__RX6DATAFIFOSTART 24 -#define W_RXDATAFIFO3__RX6DATAFIFOSTART 7 -#define O_RXDATAFIFO3__RX6DATAFIFOSIZE 16 -#define W_RXDATAFIFO3__RX6DATAFIFOSIZE 7 -#define O_RXDATAFIFO3__RX7DATAFIFOSTART 8 -#define W_RXDATAFIFO3__RX7DATAFIFOSTART 7 -#define O_RXDATAFIFO3__RX7DATAFIFOSIZE 0 -#define W_RXDATAFIFO3__RX7DATAFIFOSIZE 7 -#define R_RXDATAFIFO4 0x22D -#define O_RXDATAFIFO4__RX8DATAFIFOSTART 24 -#define W_RXDATAFIFO4__RX8DATAFIFOSTART 7 -#define O_RXDATAFIFO4__RX8DATAFIFOSIZE 16 -#define W_RXDATAFIFO4__RX8DATAFIFOSIZE 7 -#define O_RXDATAFIFO4__RX9DATAFIFOSTART 8 -#define W_RXDATAFIFO4__RX9DATAFIFOSTART 7 -#define O_RXDATAFIFO4__RX9DATAFIFOSIZE 0 -#define W_RXDATAFIFO4__RX9DATAFIFOSIZE 7 -#define R_RXDATAFIFO5 0x22E -#define O_RXDATAFIFO5__RX10DATAFIFOSTART 24 -#define W_RXDATAFIFO5__RX10DATAFIFOSTART 7 -#define O_RXDATAFIFO5__RX10DATAFIFOSIZE 
16 -#define W_RXDATAFIFO5__RX10DATAFIFOSIZE 7 -#define O_RXDATAFIFO5__RX11DATAFIFOSTART 8 -#define W_RXDATAFIFO5__RX11DATAFIFOSTART 7 -#define O_RXDATAFIFO5__RX11DATAFIFOSIZE 0 -#define W_RXDATAFIFO5__RX11DATAFIFOSIZE 7 -#define R_RXDATAFIFO6 0x22F -#define O_RXDATAFIFO6__RX12DATAFIFOSTART 24 -#define W_RXDATAFIFO6__RX12DATAFIFOSTART 7 -#define O_RXDATAFIFO6__RX12DATAFIFOSIZE 16 -#define W_RXDATAFIFO6__RX12DATAFIFOSIZE 7 -#define O_RXDATAFIFO6__RX13DATAFIFOSTART 8 -#define W_RXDATAFIFO6__RX13DATAFIFOSTART 7 -#define O_RXDATAFIFO6__RX13DATAFIFOSIZE 0 -#define W_RXDATAFIFO6__RX13DATAFIFOSIZE 7 -#define R_RXDATAFIFO7 0x230 -#define O_RXDATAFIFO7__RX14DATAFIFOSTART 24 -#define W_RXDATAFIFO7__RX14DATAFIFOSTART 7 -#define O_RXDATAFIFO7__RX14DATAFIFOSIZE 16 -#define W_RXDATAFIFO7__RX14DATAFIFOSIZE 7 -#define O_RXDATAFIFO7__RX15DATAFIFOSTART 8 -#define W_RXDATAFIFO7__RX15DATAFIFOSTART 7 -#define O_RXDATAFIFO7__RX15DATAFIFOSIZE 0 -#define W_RXDATAFIFO7__RX15DATAFIFOSIZE 7 -#define R_XGMACPADCALIBRATION 0x231 -#define R_FREEQCARVE 0x233 -#define R_SPI4STATICDELAY0 0x240 -#define O_SPI4STATICDELAY0__DATALINE7 28 -#define W_SPI4STATICDELAY0__DATALINE7 4 -#define O_SPI4STATICDELAY0__DATALINE6 24 -#define W_SPI4STATICDELAY0__DATALINE6 4 -#define O_SPI4STATICDELAY0__DATALINE5 20 -#define W_SPI4STATICDELAY0__DATALINE5 4 -#define O_SPI4STATICDELAY0__DATALINE4 16 -#define W_SPI4STATICDELAY0__DATALINE4 4 -#define O_SPI4STATICDELAY0__DATALINE3 12 -#define W_SPI4STATICDELAY0__DATALINE3 4 -#define O_SPI4STATICDELAY0__DATALINE2 8 -#define W_SPI4STATICDELAY0__DATALINE2 4 -#define O_SPI4STATICDELAY0__DATALINE1 4 -#define W_SPI4STATICDELAY0__DATALINE1 4 -#define O_SPI4STATICDELAY0__DATALINE0 0 -#define W_SPI4STATICDELAY0__DATALINE0 4 -#define R_SPI4STATICDELAY1 0x241 -#define O_SPI4STATICDELAY1__DATALINE15 28 -#define W_SPI4STATICDELAY1__DATALINE15 4 -#define O_SPI4STATICDELAY1__DATALINE14 24 -#define W_SPI4STATICDELAY1__DATALINE14 4 -#define O_SPI4STATICDELAY1__DATALINE13 20 -#define W_SPI4STATICDELAY1__DATALINE13 4 -#define O_SPI4STATICDELAY1__DATALINE12 16 -#define W_SPI4STATICDELAY1__DATALINE12 4 -#define O_SPI4STATICDELAY1__DATALINE11 12 -#define W_SPI4STATICDELAY1__DATALINE11 4 -#define O_SPI4STATICDELAY1__DATALINE10 8 -#define W_SPI4STATICDELAY1__DATALINE10 4 -#define O_SPI4STATICDELAY1__DATALINE9 4 -#define W_SPI4STATICDELAY1__DATALINE9 4 -#define O_SPI4STATICDELAY1__DATALINE8 0 -#define W_SPI4STATICDELAY1__DATALINE8 4 -#define R_SPI4STATICDELAY2 0x242 -#define O_SPI4STATICDELAY0__TXSTAT1 8 -#define W_SPI4STATICDELAY0__TXSTAT1 4 -#define O_SPI4STATICDELAY0__TXSTAT0 4 -#define W_SPI4STATICDELAY0__TXSTAT0 4 -#define O_SPI4STATICDELAY0__RXCONTROL 0 -#define W_SPI4STATICDELAY0__RXCONTROL 4 -#define R_SPI4CONTROL 0x243 -#define O_SPI4CONTROL__STATICDELAY 2 -#define O_SPI4CONTROL__LVDS_LVTTL 1 -#define O_SPI4CONTROL__SPI4ENABLE 0 -#define R_CLASSWATERMARKS 0x244 -#define O_CLASSWATERMARKS__CLASS0WATERMARK 24 -#define W_CLASSWATERMARKS__CLASS0WATERMARK 5 -#define O_CLASSWATERMARKS__CLASS1WATERMARK 16 -#define W_CLASSWATERMARKS__CLASS1WATERMARK 5 -#define O_CLASSWATERMARKS__CLASS3WATERMARK 0 -#define W_CLASSWATERMARKS__CLASS3WATERMARK 5 -#define R_RXWATERMARKS1 0x245 -#define O_RXWATERMARKS__RX0DATAWATERMARK 24 -#define W_RXWATERMARKS__RX0DATAWATERMARK 7 -#define O_RXWATERMARKS__RX1DATAWATERMARK 16 -#define W_RXWATERMARKS__RX1DATAWATERMARK 7 -#define O_RXWATERMARKS__RX3DATAWATERMARK 0 -#define W_RXWATERMARKS__RX3DATAWATERMARK 7 -#define R_RXWATERMARKS2 0x246 -#define O_RXWATERMARKS__RX4DATAWATERMARK 24 -#define 
W_RXWATERMARKS__RX4DATAWATERMARK 7 -#define O_RXWATERMARKS__RX5DATAWATERMARK 16 -#define W_RXWATERMARKS__RX5DATAWATERMARK 7 -#define O_RXWATERMARKS__RX6DATAWATERMARK 8 -#define W_RXWATERMARKS__RX6DATAWATERMARK 7 -#define O_RXWATERMARKS__RX7DATAWATERMARK 0 -#define W_RXWATERMARKS__RX7DATAWATERMARK 7 -#define R_RXWATERMARKS3 0x247 -#define O_RXWATERMARKS__RX8DATAWATERMARK 24 -#define W_RXWATERMARKS__RX8DATAWATERMARK 7 -#define O_RXWATERMARKS__RX9DATAWATERMARK 16 -#define W_RXWATERMARKS__RX9DATAWATERMARK 7 -#define O_RXWATERMARKS__RX10DATAWATERMARK 8 -#define W_RXWATERMARKS__RX10DATAWATERMARK 7 -#define O_RXWATERMARKS__RX11DATAWATERMARK 0 -#define W_RXWATERMARKS__RX11DATAWATERMARK 7 -#define R_RXWATERMARKS4 0x248 -#define O_RXWATERMARKS__RX12DATAWATERMARK 24 -#define W_RXWATERMARKS__RX12DATAWATERMARK 7 -#define O_RXWATERMARKS__RX13DATAWATERMARK 16 -#define W_RXWATERMARKS__RX13DATAWATERMARK 7 -#define O_RXWATERMARKS__RX14DATAWATERMARK 8 -#define W_RXWATERMARKS__RX14DATAWATERMARK 7 -#define O_RXWATERMARKS__RX15DATAWATERMARK 0 -#define W_RXWATERMARKS__RX15DATAWATERMARK 7 -#define R_FREEWATERMARKS 0x249 -#define O_FREEWATERMARKS__FREEOUTWATERMARK 16 -#define W_FREEWATERMARKS__FREEOUTWATERMARK 16 -#define O_FREEWATERMARKS__JUMFRWATERMARK 8 -#define W_FREEWATERMARKS__JUMFRWATERMARK 7 -#define O_FREEWATERMARKS__REGFRWATERMARK 0 -#define W_FREEWATERMARKS__REGFRWATERMARK 7 -#define R_EGRESSFIFOCARVINGSLOTS 0x24a - -#define CTRL_RES0 0 -#define CTRL_RES1 1 -#define CTRL_REG_FREE 2 -#define CTRL_JUMBO_FREE 3 -#define CTRL_CONT 4 -#define CTRL_EOP 5 -#define CTRL_START 6 -#define CTRL_SNGL 7 - -#define CTRL_B0_NOT_EOP 0 -#define CTRL_B0_EOP 1 - -#define R_ROUND_ROBIN_TABLE 0 -#define R_PDE_CLASS_0 0x300 -#define R_PDE_CLASS_1 0x302 -#define R_PDE_CLASS_2 0x304 -#define R_PDE_CLASS_3 0x306 - -#define R_MSG_TX_THRESHOLD 0x308 - -#define R_GMAC_JFR0_BUCKET_SIZE 0x320 -#define R_GMAC_RFR0_BUCKET_SIZE 0x321 -#define R_GMAC_TX0_BUCKET_SIZE 0x322 -#define R_GMAC_TX1_BUCKET_SIZE 0x323 -#define R_GMAC_TX2_BUCKET_SIZE 0x324 -#define R_GMAC_TX3_BUCKET_SIZE 0x325 -#define R_GMAC_JFR1_BUCKET_SIZE 0x326 -#define R_GMAC_RFR1_BUCKET_SIZE 0x327 - -#define R_XGS_TX0_BUCKET_SIZE 0x320 -#define R_XGS_TX1_BUCKET_SIZE 0x321 -#define R_XGS_TX2_BUCKET_SIZE 0x322 -#define R_XGS_TX3_BUCKET_SIZE 0x323 -#define R_XGS_TX4_BUCKET_SIZE 0x324 -#define R_XGS_TX5_BUCKET_SIZE 0x325 -#define R_XGS_TX6_BUCKET_SIZE 0x326 -#define R_XGS_TX7_BUCKET_SIZE 0x327 -#define R_XGS_TX8_BUCKET_SIZE 0x328 -#define R_XGS_TX9_BUCKET_SIZE 0x329 -#define R_XGS_TX10_BUCKET_SIZE 0x32A -#define R_XGS_TX11_BUCKET_SIZE 0x32B -#define R_XGS_TX12_BUCKET_SIZE 0x32C -#define R_XGS_TX13_BUCKET_SIZE 0x32D -#define R_XGS_TX14_BUCKET_SIZE 0x32E -#define R_XGS_TX15_BUCKET_SIZE 0x32F -#define R_XGS_JFR_BUCKET_SIZE 0x330 -#define R_XGS_RFR_BUCKET_SIZE 0x331 - -#define R_CC_CPU0_0 0x380 -#define R_CC_CPU1_0 0x388 -#define R_CC_CPU2_0 0x390 -#define R_CC_CPU3_0 0x398 -#define R_CC_CPU4_0 0x3a0 -#define R_CC_CPU5_0 0x3a8 -#define R_CC_CPU6_0 0x3b0 -#define R_CC_CPU7_0 0x3b8 - -#define XLR_GMAC_BLK_SZ (XLR_IO_GMAC_1_OFFSET - \ - XLR_IO_GMAC_0_OFFSET) - -/* Constants used for configuring the devices */ - -#define XLR_FB_STN 6 /* Bucket used for Tx freeback */ - -#define MAC_B2B_IPG 88 - -#define XLR_NET_PREPAD_LEN 32 - -/* frame sizes need to be cacheline aligned */ -#define MAX_FRAME_SIZE (1536 + XLR_NET_PREPAD_LEN) -#define MAX_FRAME_SIZE_JUMBO 9216 - -#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES -#define MAC_PREPAD 0 -#define BYTE_OFFSET 2 -#define XLR_RX_BUF_SIZE 
(MAX_FRAME_SIZE + BYTE_OFFSET + \ - MAC_PREPAD + MAC_SKB_BACK_PTR_SIZE + SMP_CACHE_BYTES) -#define MAC_CRC_LEN 4 -#define MAX_NUM_MSGRNG_STN_CC 128 -#define MAX_MSG_SND_ATTEMPTS 100 /* 13 stns x 4 entry msg/stn + - * headroom - */ - -#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16 - -#define MAX_NUM_DESC_SPILL 1024 -#define MAX_FRIN_SPILL (MAX_NUM_DESC_SPILL << 2) -#define MAX_FROUT_SPILL (MAX_NUM_DESC_SPILL << 2) -#define MAX_CLASS_0_SPILL (MAX_NUM_DESC_SPILL << 2) -#define MAX_CLASS_1_SPILL (MAX_NUM_DESC_SPILL << 2) -#define MAX_CLASS_2_SPILL (MAX_NUM_DESC_SPILL << 2) -#define MAX_CLASS_3_SPILL (MAX_NUM_DESC_SPILL << 2) - -enum { - SGMII_SPEED_10 = 0x00000000, - SGMII_SPEED_100 = 0x02000000, - SGMII_SPEED_1000 = 0x04000000, -}; - -enum tsv_rsv_reg { - TX_RX_64_BYTE_FRAME = 0x20, - TX_RX_64_127_BYTE_FRAME, - TX_RX_128_255_BYTE_FRAME, - TX_RX_256_511_BYTE_FRAME, - TX_RX_512_1023_BYTE_FRAME, - TX_RX_1024_1518_BYTE_FRAME, - TX_RX_1519_1522_VLAN_BYTE_FRAME, - - RX_BYTE_COUNTER = 0x27, - RX_PACKET_COUNTER, - RX_FCS_ERROR_COUNTER, - RX_MULTICAST_PACKET_COUNTER, - RX_BROADCAST_PACKET_COUNTER, - RX_CONTROL_FRAME_PACKET_COUNTER, - RX_PAUSE_FRAME_PACKET_COUNTER, - RX_UNKNOWN_OP_CODE_COUNTER, - RX_ALIGNMENT_ERROR_COUNTER, - RX_FRAME_LENGTH_ERROR_COUNTER, - RX_CODE_ERROR_COUNTER, - RX_CARRIER_SENSE_ERROR_COUNTER, - RX_UNDERSIZE_PACKET_COUNTER, - RX_OVERSIZE_PACKET_COUNTER, - RX_FRAGMENTS_COUNTER, - RX_JABBER_COUNTER, - RX_DROP_PACKET_COUNTER, - - TX_BYTE_COUNTER = 0x38, - TX_PACKET_COUNTER, - TX_MULTICAST_PACKET_COUNTER, - TX_BROADCAST_PACKET_COUNTER, - TX_PAUSE_CONTROL_FRAME_COUNTER, - TX_DEFERRAL_PACKET_COUNTER, - TX_EXCESSIVE_DEFERRAL_PACKET_COUNTER, - TX_SINGLE_COLLISION_PACKET_COUNTER, - TX_MULTI_COLLISION_PACKET_COUNTER, - TX_LATE_COLLISION_PACKET_COUNTER, - TX_EXCESSIVE_COLLISION_PACKET_COUNTER, - TX_TOTAL_COLLISION_COUNTER, - TX_PAUSE_FRAME_HONERED_COUNTER, - TX_DROP_FRAME_COUNTER, - TX_JABBER_FRAME_COUNTER, - TX_FCS_ERROR_COUNTER, - TX_CONTROL_FRAME_COUNTER, - TX_OVERSIZE_FRAME_COUNTER, - TX_UNDERSIZE_FRAME_COUNTER, - TX_FRAGMENT_FRAME_COUNTER, - - CARRY_REG_1 = 0x4c, - CARRY_REG_2 = 0x4d, -}; - -struct xlr_adapter { - struct net_device *netdev[4]; -}; - -struct xlr_net_priv { - u32 __iomem *base_addr; - struct net_device *ndev; - struct xlr_adapter *adapter; - struct mii_bus *mii_bus; - int num_rx_desc; - int phy_addr; /* PHY addr on MDIO bus */ - int pcs_id; /* PCS id on MDIO bus */ - int port_id; /* Port(gmac/xgmac) number, i.e 0-7 */ - int tx_stnid; - u32 __iomem *mii_addr; - u32 __iomem *serdes_addr; - u32 __iomem *pcs_addr; - u32 __iomem *gpio_addr; - int phy_speed; - int port_type; - struct timer_list queue_timer; - int wakeup_q; - struct platform_device *pdev; - struct xlr_net_data *nd; - - u64 *frin_spill; - u64 *frout_spill; - u64 *class_0_spill; - u64 *class_1_spill; - u64 *class_2_spill; - u64 *class_3_spill; -}; - -void xlr_set_gmac_speed(struct xlr_net_priv *priv); diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c index 55c3d4a6faeb..b4820ad2cee7 100644 --- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c +++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c @@ -107,6 +107,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = { {0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */ {0x02}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */ {0x01}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */ + {0x00}, /* 0x13 */ {0x02}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */ {0x00}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */ {0x00}, /* 0x16, 
RT_CHANNEL_DOMAIN_KOREA_NO_DFS */ @@ -118,6 +119,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = { {0x00}, /* 0x1C, */ {0x00}, /* 0x1D, */ {0x00}, /* 0x1E, */ + {0x00}, /* 0x1F, */ /* 0x20 ~ 0x7F , New Define ===== */ {0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */ {0x01}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */ @@ -6845,12 +6847,12 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC); if (!pcmd_obj) return; cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header)); - pevtcmd = kzalloc(cmdsz, GFP_KERNEL); + pevtcmd = kzalloc(cmdsz, GFP_ATOMIC); if (!pevtcmd) { kfree(pcmd_obj); return; diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c index 52d42e576443..9404355726d0 100644 --- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c @@ -1980,6 +1980,7 @@ static int rtw_wx_read32(struct net_device *dev, u32 data32; u32 bytes; u8 *ptmp; + int ret; padapter = (struct adapter *)rtw_netdev_priv(dev); p = &wrqu->data; @@ -2007,12 +2008,17 @@ static int rtw_wx_read32(struct net_device *dev, break; default: DBG_88E(KERN_INFO "%s: usage> read [bytes],[address(hex)]\n", __func__); - return -EINVAL; + ret = -EINVAL; + goto err_free_ptmp; } DBG_88E(KERN_INFO "%s: addr = 0x%08X data =%s\n", __func__, addr, extra); kfree(ptmp); return 0; + +err_free_ptmp: + kfree(ptmp); + return ret; } static int rtw_wx_write32(struct net_device *dev, diff --git a/drivers/staging/r8188eu/os_dep/mlme_linux.c b/drivers/staging/r8188eu/os_dep/mlme_linux.c index a9b6ffdbf31a..f7ce724ebf87 100644 --- a/drivers/staging/r8188eu/os_dep/mlme_linux.c +++ b/drivers/staging/r8188eu/os_dep/mlme_linux.c @@ -112,7 +112,7 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie) buff = NULL; if (authmode == _WPA_IE_ID_) { - buff = kzalloc(IW_CUSTOM_MAX, GFP_KERNEL); + buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC); if (!buff) return; p = buff; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index d2e9df60e9ba..b9ce71848023 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -2549,13 +2549,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev) free_irq(dev->irq, dev); priv->irq = 0; } - free_rtllib(dev); if (dev->mem_start != 0) { iounmap((void __iomem *)dev->mem_start); release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); } + + free_rtllib(dev); } pci_disable_device(pdev); diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 0b65de9f2df1..95a88f6224cd 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -520,7 +520,7 @@ static ssize_t target_fabric_port_alua_tg_pt_gp_show(struct config_item *item, { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_show_tg_pt_gp_info(lun, page); @@ -531,7 +531,7 @@ static ssize_t target_fabric_port_alua_tg_pt_gp_store(struct config_item *item, { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return 
core_alua_store_tg_pt_gp_info(lun, page, count); @@ -542,7 +542,7 @@ static ssize_t target_fabric_port_alua_tg_pt_offline_show( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_show_offline_bit(lun, page); @@ -553,7 +553,7 @@ static ssize_t target_fabric_port_alua_tg_pt_offline_store( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_store_offline_bit(lun, page, count); @@ -564,7 +564,7 @@ static ssize_t target_fabric_port_alua_tg_pt_status_show( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_show_secondary_status(lun, page); @@ -575,7 +575,7 @@ static ssize_t target_fabric_port_alua_tg_pt_status_store( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_store_secondary_status(lun, page, count); @@ -586,7 +586,7 @@ static ssize_t target_fabric_port_alua_tg_pt_write_md_show( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_show_secondary_write_metadata(lun, page); @@ -597,7 +597,7 @@ static ssize_t target_fabric_port_alua_tg_pt_write_md_store( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_store_secondary_write_metadata(lun, page, count); diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 22703a0dbd07..4c76498d3fb0 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -40,11 +40,11 @@ static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf) * * See spc4r17 section 6.4.2 Table 135 */ - spin_lock(&lun->lun_tg_pt_gp_lock); - tg_pt_gp = lun->lun_tg_pt_gp; + rcu_read_lock(); + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); if (tg_pt_gp) buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type; - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); } static u16 @@ -325,14 +325,14 @@ check_t10_vend_desc: * Get the PROTOCOL IDENTIFIER as defined by spc4r17 * section 7.5.1 Table 362 */ - spin_lock(&lun->lun_tg_pt_gp_lock); - tg_pt_gp = lun->lun_tg_pt_gp; + rcu_read_lock(); + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); if (!tg_pt_gp) { - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); goto check_lu_gp; } tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); buf[off] = tpg->proto_id << 4; buf[off++] |= 0x1; /* CODE SET == Binary */ diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index e7fcbc09f9db..bac111456fa1 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -50,15 +50,6 @@ EXPORT_SYMBOL(core_tmr_alloc_req); void core_tmr_release_req(struct se_tmr_req *tmr) { - struct se_device *dev = tmr->tmr_dev; - unsigned long flags; - - if (dev) { - spin_lock_irqsave(&dev->se_tmr_lock, flags); - list_del_init(&tmr->tmr_list); - spin_unlock_irqrestore(&dev->se_tmr_lock, flags); - } - kfree(tmr); } @@ -156,13 +147,6 @@ void core_tmr_abort_task( se_cmd->state_active = false; spin_unlock_irqrestore(&dev->queues[i].lock, flags); - /* - * Ensure that this ABORT request is visible to the LU - * RESET code. 
- */ - if (!tmr->tmr_dev) - WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) < 0); - if (dev->transport->tmr_notify) dev->transport->tmr_notify(dev, TMR_ABORT_TASK, &aborted_list); @@ -234,6 +218,7 @@ static void core_tmr_drain_tmr_list( } list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); + tmr_p->tmr_dev = NULL; } spin_unlock_irqrestore(&dev->se_tmr_lock, flags); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4a2e749eb182..7838dc20f713 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -676,6 +676,21 @@ static void target_remove_from_state_list(struct se_cmd *cmd) spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); } +static void target_remove_from_tmr_list(struct se_cmd *cmd) +{ + struct se_device *dev = NULL; + unsigned long flags; + + if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + dev = cmd->se_tmr_req->tmr_dev; + + if (dev) { + spin_lock_irqsave(&dev->se_tmr_lock, flags); + if (cmd->se_tmr_req->tmr_dev) + list_del_init(&cmd->se_tmr_req->tmr_list); + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); + } +} /* * This function is called by the target core after the target core has * finished processing a SCSI command or SCSI TMF. Both the regular command @@ -687,13 +702,6 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) { unsigned long flags; - target_remove_from_state_list(cmd); - - /* - * Clear struct se_cmd->se_lun before the handoff to FE. - */ - cmd->se_lun = NULL; - spin_lock_irqsave(&cmd->t_state_lock, flags); /* * Determine if frontend context caller is requesting the stopping of @@ -728,8 +736,16 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) if (!lun) return; + target_remove_from_state_list(cmd); + target_remove_from_tmr_list(cmd); + if (cmpxchg(&cmd->lun_ref_active, true, false)) percpu_ref_put(&lun->lun_ref); + + /* + * Clear struct se_cmd->se_lun before the handoff to FE. 
+ */ + cmd->se_lun = NULL; } static void target_complete_failure_work(struct work_struct *work) diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index 45424824e0f9..d8c8683863aa 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -810,10 +810,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) return -EINVAL; optee = kzalloc(sizeof(*optee), GFP_KERNEL); - if (!optee) { - rc = -ENOMEM; - goto err; - } + if (!optee) + return -ENOMEM; + optee->pool = optee_ffa_config_dyn_shm(); if (IS_ERR(optee->pool)) { rc = PTR_ERR(optee->pool); diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig index 45c31f3d6054..5d046de96a5d 100644 --- a/drivers/thermal/intel/int340x_thermal/Kconfig +++ b/drivers/thermal/intel/int340x_thermal/Kconfig @@ -5,12 +5,12 @@ config INT340X_THERMAL tristate "ACPI INT340X thermal drivers" - depends on X86 && ACPI && PCI + depends on X86_64 && ACPI && PCI select THERMAL_GOV_USER_SPACE select ACPI_THERMAL_REL select ACPI_FAN select INTEL_SOC_DTS_IOSF_CORE - select PROC_THERMAL_MMIO_RAPL if X86_64 && POWERCAP + select PROC_THERMAL_MMIO_RAPL if POWERCAP help Newer laptops and tablets that use ACPI may have thermal sensors and other devices with thermal control capabilities outside the core diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c index a86521973dad..01008ae00e7f 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c @@ -7,6 +7,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include "processor_thermal_device.h" #define MBOX_CMD_WORKLOAD_TYPE_READ 0x0E diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 648829ab79ff..82654dc8382b 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -421,6 +421,8 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz) { struct thermal_instance *pos; tz->temperature = THERMAL_TEMP_INVALID; + tz->prev_low_trip = -INT_MAX; + tz->prev_high_trip = INT_MAX; list_for_each_entry(pos, &tz->thermal_instances, tz_node) pos->initialized = false; } diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index f0bf01ea069a..71e0dd2c0ce5 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -522,6 +522,7 @@ static struct xenbus_driver xencons_driver = { .remove = xencons_remove, .resume = xencons_resume, .otherend_changed = xencons_backend_changed, + .not_essential = true, }; #endif /* CONFIG_HVC_XEN_FRONTEND */ diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index f1d100671ee6..097142ffb184 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -420,15 +420,15 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0); if (IS_ERR(data->phy)) { ret = PTR_ERR(data->phy); - if (ret == -ENODEV) { - data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0); - if (IS_ERR(data->phy)) { - ret = PTR_ERR(data->phy); - if (ret == -ENODEV) - data->phy = NULL; - else - goto err_clk; - } + if (ret != -ENODEV) + goto err_clk; + data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0); + if (IS_ERR(data->phy)) { + ret = PTR_ERR(data->phy); + if (ret == -ENODEV) + data->phy 
= NULL; + else + goto err_clk; } } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 86658a81d284..00070a8a6507 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4700,8 +4700,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, if (oldspeed == USB_SPEED_LOW) delay = HUB_LONG_RESET_TIME; - mutex_lock(hcd->address0_mutex); - /* Reset the device; full speed may morph to high speed */ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ retval = hub_port_reset(hub, port1, udev, delay, false); @@ -5016,7 +5014,6 @@ fail: hub_port_disable(hub, port1, 0); update_devnum(udev, devnum); /* for disconnect processing */ } - mutex_unlock(hcd->address0_mutex); return retval; } @@ -5191,6 +5188,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *udev = port_dev->child; static int unreliable_port = -1; + bool retry_locked; /* Disconnect any existing devices under this port */ if (udev) { @@ -5246,8 +5244,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, unit_load = 100; status = 0; - for (i = 0; i < PORT_INIT_TRIES; i++) { + for (i = 0; i < PORT_INIT_TRIES; i++) { + usb_lock_port(port_dev); + mutex_lock(hcd->address0_mutex); + retry_locked = true; /* reallocate for each attempt, since references * to the previous one can escape in various ways */ @@ -5255,6 +5256,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, if (!udev) { dev_err(&port_dev->dev, "couldn't allocate usb_device\n"); + mutex_unlock(hcd->address0_mutex); + usb_unlock_port(port_dev); goto done; } @@ -5276,12 +5279,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, } /* reset (non-USB 3.0 devices) and get descriptor */ - usb_lock_port(port_dev); status = hub_port_init(hub, udev, port1, i); - usb_unlock_port(port_dev); if (status < 0) goto loop; + mutex_unlock(hcd->address0_mutex); + usb_unlock_port(port_dev); + retry_locked = false; + if (udev->quirks & USB_QUIRK_DELAY_INIT) msleep(2000); @@ -5374,6 +5379,10 @@ loop: usb_ep0_reinit(udev); release_devnum(udev); hub_free_dev(udev); + if (retry_locked) { + mutex_unlock(hcd->address0_mutex); + usb_unlock_port(port_dev); + } usb_put_dev(udev); if ((status == -ENOTCONN) || (status == -ENOTSUPP)) break; @@ -5915,6 +5924,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev) bos = udev->bos; udev->bos = NULL; + mutex_lock(hcd->address0_mutex); + for (i = 0; i < PORT_INIT_TRIES; ++i) { /* ep0 maxpacket size may change; let the HCD know about it. 
@@ -5924,6 +5935,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV) break; } + mutex_unlock(hcd->address0_mutex); if (ret < 0) goto re_enumerate; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 4ab4a1d5062b..ab8d7dad9f56 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1198,6 +1198,8 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, } ctrl |= DXEPCTL_CNAK; } else { + hs_req->req.frame_number = hs_ep->target_frame; + hs_req->req.actual = 0; dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA); return; } @@ -2857,9 +2859,12 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep) do { hs_req = get_ep_head(hs_ep); - if (hs_req) + if (hs_req) { + hs_req->req.frame_number = hs_ep->target_frame; + hs_req->req.actual = 0; dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA); + } dwc2_gadget_incr_frame_num(hs_ep); /* Update current frame number value. */ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg); @@ -2912,8 +2917,11 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep) while (dwc2_gadget_target_frame_elapsed(ep)) { hs_req = get_ep_head(ep); - if (hs_req) + if (hs_req) { + hs_req->req.frame_number = ep->target_frame; + hs_req->req.actual = 0; dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA); + } dwc2_gadget_incr_frame_num(ep); /* Update current frame number value. */ @@ -3002,8 +3010,11 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep) while (dwc2_gadget_target_frame_elapsed(hs_ep)) { hs_req = get_ep_head(hs_ep); - if (hs_req) + if (hs_req) { + hs_req->req.frame_number = hs_ep->target_frame; + hs_req->req.actual = 0; dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA); + } dwc2_gadget_incr_frame_num(hs_ep); /* Update current frame number value. 
*/ diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 89a788326c56..24beff610cf2 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -59,7 +59,7 @@ #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5)) /* If we get a NAK, wait this long before retrying */ -#define DWC2_RETRY_WAIT_DELAY (1 * 1E6L) +#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC) /** * dwc2_periodic_channel_available() - Checks that a channel is available for a diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 643239d7d370..f4c09951b517 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1594,9 +1594,11 @@ static int dwc3_probe(struct platform_device *pdev) dwc3_get_properties(dwc); - ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64)); - if (ret) - return ret; + if (!dwc->sysdev_is_parent) { + ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + } dwc->reset = devm_reset_control_array_get_optional_shared(dev); if (IS_ERR(dwc->reset)) diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 620c8d3914d7..5c491d0a19d7 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -143,7 +143,7 @@ #define DWC3_GHWPARAMS8 0xc600 #define DWC3_GUCTL3 0xc60c #define DWC3_GFLADJ 0xc630 -#define DWC3_GHWPARAMS9 0xc680 +#define DWC3_GHWPARAMS9 0xc6e0 /* Device Registers */ #define DWC3_DCFG 0xc700 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 23de2a5a40d6..7e3db00e9759 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -310,13 +310,24 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { int link_state; + /* + * Initiate remote wakeup if the link state is in U3 when + * operating in SS/SSP or L1/L2 when operating in HS/FS. If the + * link state is in U1/U2, no remote wakeup is needed. The Start + * Transfer command will initiate the link recovery. + */ link_state = dwc3_gadget_get_link_state(dwc); - if (link_state == DWC3_LINK_STATE_U1 || - link_state == DWC3_LINK_STATE_U2 || - link_state == DWC3_LINK_STATE_U3) { + switch (link_state) { + case DWC3_LINK_STATE_U2: + if (dwc->gadget->speed >= USB_SPEED_SUPER) + break; + + fallthrough; + case DWC3_LINK_STATE_U3: ret = __dwc3_gadget_wakeup(dwc); dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", ret); + break; } } @@ -3252,6 +3263,9 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep, struct dwc3 *dwc = dep->dwc; bool no_started_trb = true; + if (!dep->endpoint.desc) + return no_started_trb; + dwc3_gadget_ep_cleanup_completed_requests(dep, event, status); if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) @@ -3299,6 +3313,9 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep, { int status = 0; + if (!dep->endpoint.desc) + return; + if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) dwc3_gadget_endpoint_frame_from_event(dep, event); @@ -3352,6 +3369,14 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep, if (cmd != DWC3_DEPCMD_ENDTRANSFER) return; + /* + * The END_TRANSFER command will cause the controller to generate a + * NoStream Event, and it's not due to the host DP NoStream rejection. + * Ignore the next NoStream event. 
+ */ + if (dep->stream_capable) + dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM; + dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; dep->flags &= ~DWC3_EP_TRANSFER_STARTED; dwc3_gadget_ep_cleanup_cancelled_requests(dep); @@ -3574,14 +3599,6 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, WARN_ON_ONCE(ret); dep->resource_index = 0; - /* - * The END_TRANSFER command will cause the controller to generate a - * NoStream Event, and it's not due to the host DP NoStream rejection. - * Ignore the next NoStream event. - */ - if (dep->stream_capable) - dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM; - if (!interrupt) dep->flags &= ~DWC3_EP_TRANSFER_STARTED; else diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index f5ca670776a3..857159dd5ae0 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -2136,7 +2136,7 @@ static int xudc_probe(struct platform_device *pdev) ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget); if (ret) - goto fail; + goto err_disable_unprepare_clk; udc->dev = &udc->gadget.dev; @@ -2155,6 +2155,9 @@ static int xudc_probe(struct platform_device *pdev) udc->dma_enabled ? "with DMA" : "without DMA"); return 0; + +err_disable_unprepare_clk: + clk_disable_unprepare(udc->clk); fail: dev_err(&pdev->dev, "probe failed, %d\n", ret); return ret; diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 1bf494b649bd..c8af2cd2216d 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1400,6 +1400,7 @@ static void tegra_xusb_deinit_usb_phy(struct tegra_xusb *tegra) static int tegra_xusb_probe(struct platform_device *pdev) { + struct of_phandle_args args; struct tegra_xusb *tegra; struct device_node *np; struct resource *regs; @@ -1454,10 +1455,17 @@ static int tegra_xusb_probe(struct platform_device *pdev) goto put_padctl; } - tegra->padctl_irq = of_irq_get(np, 0); - if (tegra->padctl_irq <= 0) { - err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq; - goto put_padctl; + /* Older device-trees don't have padctrl interrupt */ + err = of_irq_parse_one(np, 0, &args); + if (!err) { + tegra->padctl_irq = of_irq_get(np, 0); + if (tegra->padctl_irq <= 0) { + err = (tegra->padctl_irq == 0) ? 
-ENODEV : tegra->padctl_irq; + goto put_padctl; + } + } else { + dev_dbg(&pdev->dev, + "%pOF is missing an interrupt, disabling PM support\n", np); } tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host"); @@ -1696,11 +1704,15 @@ static int tegra_xusb_probe(struct platform_device *pdev) goto remove_usb3; } - err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq, NULL, tegra_xusb_padctl_irq, - IRQF_ONESHOT, dev_name(&pdev->dev), tegra); - if (err < 0) { - dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err); - goto remove_usb3; + if (tegra->padctl_irq) { + err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq, + NULL, tegra_xusb_padctl_irq, + IRQF_ONESHOT, dev_name(&pdev->dev), + tegra); + if (err < 0) { + dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err); + goto remove_usb3; + } } err = tegra_xusb_enable_firmware_messages(tegra); @@ -1718,13 +1730,16 @@ static int tegra_xusb_probe(struct platform_device *pdev) /* Enable wake for both USB 2.0 and USB 3.0 roothubs */ device_init_wakeup(&tegra->hcd->self.root_hub->dev, true); device_init_wakeup(&xhci->shared_hcd->self.root_hub->dev, true); - device_init_wakeup(tegra->dev, true); pm_runtime_use_autosuspend(tegra->dev); pm_runtime_set_autosuspend_delay(tegra->dev, 2000); pm_runtime_mark_last_busy(tegra->dev); pm_runtime_set_active(tegra->dev); - pm_runtime_enable(tegra->dev); + + if (tegra->padctl_irq) { + device_init_wakeup(tegra->dev, true); + pm_runtime_enable(tegra->dev); + } return 0; @@ -1772,7 +1787,9 @@ static int tegra_xusb_remove(struct platform_device *pdev) dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt, tegra->fw.phys); - pm_runtime_disable(&pdev->dev); + if (tegra->padctl_irq) + pm_runtime_disable(&pdev->dev); + pm_runtime_put(&pdev->dev); tegra_xusb_powergate_partitions(tegra); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index a484ff5e4ebf..546fce4617a8 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1267,6 +1267,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -2094,6 +2096,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */ + .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index f45ca7ddf78e..a70fd86f735c 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -432,6 +432,7 @@ static int 
pl2303_detect_type(struct usb_serial *serial) case 0x200: switch (bcdDevice) { case 0x100: + case 0x105: case 0x305: case 0x405: /* diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c index 7a2a17866a82..72f9001b0792 100644 --- a/drivers/usb/typec/tcpm/fusb302.c +++ b/drivers/usb/typec/tcpm/fusb302.c @@ -669,25 +669,27 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc) ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK, FUSB_REG_MASK_BC_LVL | FUSB_REG_MASK_COMP_CHNG, - FUSB_REG_MASK_COMP_CHNG); + FUSB_REG_MASK_BC_LVL); if (ret < 0) { fusb302_log(chip, "cannot set SRC interrupt, ret=%d", ret); goto done; } chip->intr_comp_chng = true; + chip->intr_bc_lvl = false; break; case TYPEC_CC_RD: ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK, FUSB_REG_MASK_BC_LVL | FUSB_REG_MASK_COMP_CHNG, - FUSB_REG_MASK_BC_LVL); + FUSB_REG_MASK_COMP_CHNG); if (ret < 0) { fusb302_log(chip, "cannot set SRC interrupt, ret=%d", ret); goto done; } chip->intr_bc_lvl = true; + chip->intr_comp_chng = false; break; default: break; diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index fb8ef12bbe9c..6d27a5b5e3ca 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -653,7 +653,7 @@ static int cd321x_switch_power_state(struct tps6598x *tps, u8 target_state) if (state == target_state) return 0; - ret = tps6598x_exec_cmd(tps, "SPSS", sizeof(u8), &target_state, 0, NULL); + ret = tps6598x_exec_cmd(tps, "SSPS", sizeof(u8), &target_state, 0, NULL); if (ret) return ret; @@ -707,6 +707,7 @@ static int tps6598x_probe(struct i2c_client *client) u32 conf; u32 vid; int ret; + u64 mask1; tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL); if (!tps) @@ -730,11 +731,6 @@ static int tps6598x_probe(struct i2c_client *client) if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) tps->i2c_protocol = true; - /* Make sure the controller has application firmware running */ - ret = tps6598x_check_mode(tps); - if (ret) - return ret; - if (np && of_device_is_compatible(np, "apple,cd321x")) { /* Switch CD321X chips to the correct system power state */ ret = cd321x_switch_power_state(tps, TPS_SYSTEM_POWER_STATE_S0); @@ -742,24 +738,27 @@ static int tps6598x_probe(struct i2c_client *client) return ret; /* CD321X chips have all interrupts masked initially */ - ret = tps6598x_write64(tps, TPS_REG_INT_MASK1, - APPLE_CD_REG_INT_POWER_STATUS_UPDATE | - APPLE_CD_REG_INT_DATA_STATUS_UPDATE | - APPLE_CD_REG_INT_PLUG_EVENT); - if (ret) - return ret; + mask1 = APPLE_CD_REG_INT_POWER_STATUS_UPDATE | + APPLE_CD_REG_INT_DATA_STATUS_UPDATE | + APPLE_CD_REG_INT_PLUG_EVENT; irq_handler = cd321x_interrupt; } else { /* Enable power status, data status and plug event interrupts */ - ret = tps6598x_write64(tps, TPS_REG_INT_MASK1, - TPS_REG_INT_POWER_STATUS_UPDATE | - TPS_REG_INT_DATA_STATUS_UPDATE | - TPS_REG_INT_PLUG_EVENT); - if (ret) - return ret; + mask1 = TPS_REG_INT_POWER_STATUS_UPDATE | + TPS_REG_INT_DATA_STATUS_UPDATE | + TPS_REG_INT_PLUG_EVENT; } + /* Make sure the controller has application firmware running */ + ret = tps6598x_check_mode(tps); + if (ret) + return ret; + + ret = tps6598x_write64(tps, TPS_REG_INT_MASK1, mask1); + if (ret) + return ret; + ret = tps6598x_read32(tps, TPS_REG_STATUS, &status); if (ret < 0) return ret; diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 5f484fff8dbe..41b0cd17fcba 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -591,8 +591,11 @@ 
static void vdpasim_free(struct vdpa_device *vdpa) vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov); } - put_iova_domain(&vdpasim->iova); - iova_cache_put(); + if (vdpa_get_dma_dev(vdpa)) { + put_iova_domain(&vdpasim->iova); + iova_cache_put(); + } + kvfree(vdpasim->buffer); if (vdpasim->iommu) vhost_iotlb_free(vdpasim->iommu); diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 01c59ce7e250..29cced1cd277 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -1014,12 +1014,12 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep) mutex_lock(&d->mutex); filep->private_data = NULL; + vhost_vdpa_clean_irq(v); vhost_vdpa_reset(v); vhost_dev_stop(&v->vdev); vhost_vdpa_iotlb_free(v); vhost_vdpa_free_domain(v); vhost_vdpa_config_put(v); - vhost_vdpa_clean_irq(v); vhost_dev_cleanup(&v->vdev); kfree(v->vdev.vqs); mutex_unlock(&d->mutex); diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 938aefbc75ec..d6ca1c7ad513 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -511,8 +511,6 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) vhost_disable_notify(&vsock->dev, vq); do { - u32 len; - if (!vhost_vsock_more_replies(vsock)) { /* Stop tx until the device processes already * pending replies. Leave tx virtqueue @@ -540,7 +538,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) continue; } - len = pkt->len; + total_len += sizeof(pkt->hdr) + pkt->len; /* Deliver to monitoring devices all received packets */ virtio_transport_deliver_tap_pkt(pkt); @@ -553,9 +551,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) else virtio_transport_free_pkt(pkt); - len += sizeof(pkt->hdr); - vhost_add_used(vq, head, len); - total_len += len; + vhost_add_used(vq, head, 0); added = true; } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len))); diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index 1b451165311c..40496e9e9b43 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c @@ -332,13 +332,13 @@ static u8 sticon_build_attr(struct vc_data *conp, u8 color, bool blink, bool underline, bool reverse, bool italic) { - u8 attr = ((color & 0x70) >> 1) | ((color & 7)); + u8 fg = color & 7; + u8 bg = (color & 0x70) >> 4; - if (reverse) { - color = ((color >> 3) & 0x7) | ((color & 0x7) << 3); - } - - return attr; + if (reverse) + return (fg << 3) | bg; + else + return (bg << 3) | fg; } static void sticon_invert_region(struct vc_data *conp, u16 *p, int count) diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index f98e8f298bc1..01fae2c96965 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -43,21 +43,6 @@ static void update_attr(u8 *dst, u8 *src, int attribute, } } -static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int dy, int dx, int height, int width) -{ - struct fb_copyarea area; - - area.sx = sx * vc->vc_font.width; - area.sy = sy * vc->vc_font.height; - area.dx = dx * vc->vc_font.width; - area.dy = dy * vc->vc_font.height; - area.height = height * vc->vc_font.height; - area.width = width * vc->vc_font.width; - - info->fbops->fb_copyarea(info, &area); -} - static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { @@ -393,7 +378,6 @@ static int bit_update_start(struct fb_info *info) void fbcon_set_bitops(struct fbcon_ops *ops) { - ops->bmove = bit_bmove; ops->clear = bit_clear; ops->putcs = bit_putcs; 
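As an aside on the sticon_build_attr() rewrite above: the old code computed a swapped colour in the reverse branch but then returned the untouched attr, so reverse video had no effect; the new code swaps the two fields in the returned value. Below is a minimal userspace restatement of the new packing, where build_attr() is a stand-in that drops the vc_data pointer and the blink/underline/italic arguments the function does not use:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same layout as the new sticon_build_attr(): bg in bits 5:3, fg in bits 2:0. */
static uint8_t build_attr(uint8_t color, int reverse)
{
	uint8_t fg = color & 7;
	uint8_t bg = (color & 0x70) >> 4;

	return reverse ? (uint8_t)((fg << 3) | bg) : (uint8_t)((bg << 3) | fg);
}

int main(void)
{
	uint8_t color = 0x17;	/* bg = 1, fg = 7 in the usual console numbering */

	assert(build_attr(color, 0) == ((1 << 3) | 7));
	assert(build_attr(color, 1) == ((7 << 3) | 1));	/* fields swapped */
	printf("normal 0x%02x, reverse 0x%02x\n",
	       build_attr(color, 0), build_attr(color, 1));
	return 0;
}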
ops->clear_margins = bit_clear_margins; diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 22bb3892f6bd..99ecd9a6d844 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -173,8 +173,6 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, int count, int ypos, int xpos); static void fbcon_clear_margins(struct vc_data *vc, int bottom_only); static void fbcon_cursor(struct vc_data *vc, int mode); -static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, - int height, int width); static int fbcon_switch(struct vc_data *vc); static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch); static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table); @@ -182,16 +180,8 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table); /* * Internal routines */ -static __inline__ void ywrap_up(struct vc_data *vc, int count); -static __inline__ void ywrap_down(struct vc_data *vc, int count); -static __inline__ void ypan_up(struct vc_data *vc, int count); -static __inline__ void ypan_down(struct vc_data *vc, int count); -static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, - int dy, int dx, int height, int width, u_int y_break); static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, int unit); -static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, - int line, int count, int dy); static void fbcon_modechanged(struct fb_info *info); static void fbcon_set_all_vcs(struct fb_info *info); static void fbcon_start(void); @@ -1136,14 +1126,6 @@ static void fbcon_init(struct vc_data *vc, int init) ops->graphics = 0; /* - * No more hw acceleration for fbcon. - * - * FIXME: Garbage collect all the now dead code after sufficient time - * has passed. - */ - p->scrollmode = SCROLL_REDRAW; - - /* * ++guenther: console.c:vc_allocate() relies on initializing * vc_{cols,rows}, but we must not set those if we are only * resizing the console. @@ -1229,14 +1211,13 @@ finished: * This system is now divided into two levels because of complications * caused by hardware scrolling. Top level functions: * - * fbcon_bmove(), fbcon_clear(), fbcon_putc(), fbcon_clear_margins() + * fbcon_clear(), fbcon_putc(), fbcon_clear_margins() * * handles y values in range [0, scr_height-1] that correspond to real * screen positions. y_wrap shift means that first line of bitmap may be * anywhere on this display. These functions convert lineoffsets to * bitmap offsets and deal with the wrap-around case by splitting blits. * - * fbcon_bmove_physical_8() -- These functions fast implementations * fbcon_clear_physical_8() -- of original fbcon_XXX fns. 
* fbcon_putc_physical_8() -- (font width != 8) may be added later * @@ -1409,224 +1390,6 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, } } -static __inline__ void ywrap_up(struct vc_data *vc, int count) -{ - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct fbcon_ops *ops = info->fbcon_par; - struct fbcon_display *p = &fb_display[vc->vc_num]; - - p->yscroll += count; - if (p->yscroll >= p->vrows) /* Deal with wrap */ - p->yscroll -= p->vrows; - ops->var.xoffset = 0; - ops->var.yoffset = p->yscroll * vc->vc_font.height; - ops->var.vmode |= FB_VMODE_YWRAP; - ops->update_start(info); - scrollback_max += count; - if (scrollback_max > scrollback_phys_max) - scrollback_max = scrollback_phys_max; - scrollback_current = 0; -} - -static __inline__ void ywrap_down(struct vc_data *vc, int count) -{ - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct fbcon_ops *ops = info->fbcon_par; - struct fbcon_display *p = &fb_display[vc->vc_num]; - - p->yscroll -= count; - if (p->yscroll < 0) /* Deal with wrap */ - p->yscroll += p->vrows; - ops->var.xoffset = 0; - ops->var.yoffset = p->yscroll * vc->vc_font.height; - ops->var.vmode |= FB_VMODE_YWRAP; - ops->update_start(info); - scrollback_max -= count; - if (scrollback_max < 0) - scrollback_max = 0; - scrollback_current = 0; -} - -static __inline__ void ypan_up(struct vc_data *vc, int count) -{ - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct fbcon_display *p = &fb_display[vc->vc_num]; - struct fbcon_ops *ops = info->fbcon_par; - - p->yscroll += count; - if (p->yscroll > p->vrows - vc->vc_rows) { - ops->bmove(vc, info, p->vrows - vc->vc_rows, - 0, 0, 0, vc->vc_rows, vc->vc_cols); - p->yscroll -= p->vrows - vc->vc_rows; - } - - ops->var.xoffset = 0; - ops->var.yoffset = p->yscroll * vc->vc_font.height; - ops->var.vmode &= ~FB_VMODE_YWRAP; - ops->update_start(info); - fbcon_clear_margins(vc, 1); - scrollback_max += count; - if (scrollback_max > scrollback_phys_max) - scrollback_max = scrollback_phys_max; - scrollback_current = 0; -} - -static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count) -{ - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct fbcon_ops *ops = info->fbcon_par; - struct fbcon_display *p = &fb_display[vc->vc_num]; - - p->yscroll += count; - - if (p->yscroll > p->vrows - vc->vc_rows) { - p->yscroll -= p->vrows - vc->vc_rows; - fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t); - } - - ops->var.xoffset = 0; - ops->var.yoffset = p->yscroll * vc->vc_font.height; - ops->var.vmode &= ~FB_VMODE_YWRAP; - ops->update_start(info); - fbcon_clear_margins(vc, 1); - scrollback_max += count; - if (scrollback_max > scrollback_phys_max) - scrollback_max = scrollback_phys_max; - scrollback_current = 0; -} - -static __inline__ void ypan_down(struct vc_data *vc, int count) -{ - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct fbcon_display *p = &fb_display[vc->vc_num]; - struct fbcon_ops *ops = info->fbcon_par; - - p->yscroll -= count; - if (p->yscroll < 0) { - ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows, - 0, vc->vc_rows, vc->vc_cols); - p->yscroll += p->vrows - vc->vc_rows; - } - - ops->var.xoffset = 0; - ops->var.yoffset = p->yscroll * vc->vc_font.height; - ops->var.vmode &= ~FB_VMODE_YWRAP; - ops->update_start(info); - fbcon_clear_margins(vc, 1); - scrollback_max -= count; - if (scrollback_max < 0) - scrollback_max = 0; - scrollback_current = 0; -} - -static __inline__ void 
ypan_down_redraw(struct vc_data *vc, int t, int count) -{ - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct fbcon_ops *ops = info->fbcon_par; - struct fbcon_display *p = &fb_display[vc->vc_num]; - - p->yscroll -= count; - - if (p->yscroll < 0) { - p->yscroll += p->vrows - vc->vc_rows; - fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count); - } - - ops->var.xoffset = 0; - ops->var.yoffset = p->yscroll * vc->vc_font.height; - ops->var.vmode &= ~FB_VMODE_YWRAP; - ops->update_start(info); - fbcon_clear_margins(vc, 1); - scrollback_max -= count; - if (scrollback_max < 0) - scrollback_max = 0; - scrollback_current = 0; -} - -static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, - int line, int count, int dy) -{ - unsigned short *s = (unsigned short *) - (vc->vc_origin + vc->vc_size_row * line); - - while (count--) { - unsigned short *start = s; - unsigned short *le = advance_row(s, 1); - unsigned short c; - int x = 0; - unsigned short attr = 1; - - do { - c = scr_readw(s); - if (attr != (c & 0xff00)) { - attr = c & 0xff00; - if (s > start) { - fbcon_putcs(vc, start, s - start, - dy, x); - x += s - start; - start = s; - } - } - console_conditional_schedule(); - s++; - } while (s < le); - if (s > start) - fbcon_putcs(vc, start, s - start, dy, x); - console_conditional_schedule(); - dy++; - } -} - -static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info, - struct fbcon_display *p, int line, int count, int ycount) -{ - int offset = ycount * vc->vc_cols; - unsigned short *d = (unsigned short *) - (vc->vc_origin + vc->vc_size_row * line); - unsigned short *s = d + offset; - struct fbcon_ops *ops = info->fbcon_par; - - while (count--) { - unsigned short *start = s; - unsigned short *le = advance_row(s, 1); - unsigned short c; - int x = 0; - - do { - c = scr_readw(s); - - if (c == scr_readw(d)) { - if (s > start) { - ops->bmove(vc, info, line + ycount, x, - line, x, 1, s-start); - x += s - start + 1; - start = s + 1; - } else { - x++; - start++; - } - } - - scr_writew(c, d); - console_conditional_schedule(); - s++; - d++; - } while (s < le); - if (s > start) - ops->bmove(vc, info, line + ycount, x, line, x, 1, - s-start); - console_conditional_schedule(); - if (ycount > 0) - line++; - else { - line--; - /* NOTE: We subtract two lines from these pointers */ - s -= vc->vc_size_row; - d -= vc->vc_size_row; - } - } -} - static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p, int line, int count, int offset) { @@ -1687,7 +1450,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_display *p = &fb_display[vc->vc_num]; - int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK; if (fbcon_is_inactive(vc, info)) return true; @@ -1704,249 +1466,32 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, case SM_UP: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - if (logo_shown >= 0) - goto redraw_up; - switch (p->scrollmode) { - case SCROLL_MOVE: - fbcon_redraw_blit(vc, info, p, t, b - t - count, - count); - fbcon_clear(vc, b - count, 0, count, vc->vc_cols); - scr_memsetw((unsigned short *) (vc->vc_origin + - vc->vc_size_row * - (b - count)), - vc->vc_video_erase_char, - vc->vc_size_row * count); - return true; - - case SCROLL_WRAP_MOVE: - if (b - t - count > 3 * vc->vc_rows >> 2) { - if (t > 0) - fbcon_bmove(vc, 0, 0, count, 0, t, - vc->vc_cols); - ywrap_up(vc, count); - if 
(vc->vc_rows - b > 0) - fbcon_bmove(vc, b - count, 0, b, 0, - vc->vc_rows - b, - vc->vc_cols); - } else if (info->flags & FBINFO_READS_FAST) - fbcon_bmove(vc, t + count, 0, t, 0, - b - t - count, vc->vc_cols); - else - goto redraw_up; - fbcon_clear(vc, b - count, 0, count, vc->vc_cols); - break; - - case SCROLL_PAN_REDRAW: - if ((p->yscroll + count <= - 2 * (p->vrows - vc->vc_rows)) - && ((!scroll_partial && (b - t == vc->vc_rows)) - || (scroll_partial - && (b - t - count > - 3 * vc->vc_rows >> 2)))) { - if (t > 0) - fbcon_redraw_move(vc, p, 0, t, count); - ypan_up_redraw(vc, t, count); - if (vc->vc_rows - b > 0) - fbcon_redraw_move(vc, p, b, - vc->vc_rows - b, b); - } else - fbcon_redraw_move(vc, p, t + count, b - t - count, t); - fbcon_clear(vc, b - count, 0, count, vc->vc_cols); - break; - - case SCROLL_PAN_MOVE: - if ((p->yscroll + count <= - 2 * (p->vrows - vc->vc_rows)) - && ((!scroll_partial && (b - t == vc->vc_rows)) - || (scroll_partial - && (b - t - count > - 3 * vc->vc_rows >> 2)))) { - if (t > 0) - fbcon_bmove(vc, 0, 0, count, 0, t, - vc->vc_cols); - ypan_up(vc, count); - if (vc->vc_rows - b > 0) - fbcon_bmove(vc, b - count, 0, b, 0, - vc->vc_rows - b, - vc->vc_cols); - } else if (info->flags & FBINFO_READS_FAST) - fbcon_bmove(vc, t + count, 0, t, 0, - b - t - count, vc->vc_cols); - else - goto redraw_up; - fbcon_clear(vc, b - count, 0, count, vc->vc_cols); - break; - - case SCROLL_REDRAW: - redraw_up: - fbcon_redraw(vc, p, t, b - t - count, - count * vc->vc_cols); - fbcon_clear(vc, b - count, 0, count, vc->vc_cols); - scr_memsetw((unsigned short *) (vc->vc_origin + - vc->vc_size_row * - (b - count)), - vc->vc_video_erase_char, - vc->vc_size_row * count); - return true; - } - break; + fbcon_redraw(vc, p, t, b - t - count, + count * vc->vc_cols); + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + (b - count)), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; case SM_DOWN: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - if (logo_shown >= 0) - goto redraw_down; - switch (p->scrollmode) { - case SCROLL_MOVE: - fbcon_redraw_blit(vc, info, p, b - 1, b - t - count, - -count); - fbcon_clear(vc, t, 0, count, vc->vc_cols); - scr_memsetw((unsigned short *) (vc->vc_origin + - vc->vc_size_row * - t), - vc->vc_video_erase_char, - vc->vc_size_row * count); - return true; - - case SCROLL_WRAP_MOVE: - if (b - t - count > 3 * vc->vc_rows >> 2) { - if (vc->vc_rows - b > 0) - fbcon_bmove(vc, b, 0, b - count, 0, - vc->vc_rows - b, - vc->vc_cols); - ywrap_down(vc, count); - if (t > 0) - fbcon_bmove(vc, count, 0, 0, 0, t, - vc->vc_cols); - } else if (info->flags & FBINFO_READS_FAST) - fbcon_bmove(vc, t, 0, t + count, 0, - b - t - count, vc->vc_cols); - else - goto redraw_down; - fbcon_clear(vc, t, 0, count, vc->vc_cols); - break; - - case SCROLL_PAN_MOVE: - if ((count - p->yscroll <= p->vrows - vc->vc_rows) - && ((!scroll_partial && (b - t == vc->vc_rows)) - || (scroll_partial - && (b - t - count > - 3 * vc->vc_rows >> 2)))) { - if (vc->vc_rows - b > 0) - fbcon_bmove(vc, b, 0, b - count, 0, - vc->vc_rows - b, - vc->vc_cols); - ypan_down(vc, count); - if (t > 0) - fbcon_bmove(vc, count, 0, 0, 0, t, - vc->vc_cols); - } else if (info->flags & FBINFO_READS_FAST) - fbcon_bmove(vc, t, 0, t + count, 0, - b - t - count, vc->vc_cols); - else - goto redraw_down; - fbcon_clear(vc, t, 0, count, vc->vc_cols); - break; - - case SCROLL_PAN_REDRAW: - if ((count - p->yscroll <= p->vrows - 
vc->vc_rows) - && ((!scroll_partial && (b - t == vc->vc_rows)) - || (scroll_partial - && (b - t - count > - 3 * vc->vc_rows >> 2)))) { - if (vc->vc_rows - b > 0) - fbcon_redraw_move(vc, p, b, vc->vc_rows - b, - b - count); - ypan_down_redraw(vc, t, count); - if (t > 0) - fbcon_redraw_move(vc, p, count, t, 0); - } else - fbcon_redraw_move(vc, p, t, b - t - count, t + count); - fbcon_clear(vc, t, 0, count, vc->vc_cols); - break; - - case SCROLL_REDRAW: - redraw_down: - fbcon_redraw(vc, p, b - 1, b - t - count, - -count * vc->vc_cols); - fbcon_clear(vc, t, 0, count, vc->vc_cols); - scr_memsetw((unsigned short *) (vc->vc_origin + - vc->vc_size_row * - t), - vc->vc_video_erase_char, - vc->vc_size_row * count); - return true; - } + fbcon_redraw(vc, p, b - 1, b - t - count, + -count * vc->vc_cols); + fbcon_clear(vc, t, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + t), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; } return false; } - -static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, - int height, int width) -{ - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct fbcon_display *p = &fb_display[vc->vc_num]; - - if (fbcon_is_inactive(vc, info)) - return; - - if (!width || !height) - return; - - /* Split blits that cross physical y_wrap case. - * Pathological case involves 4 blits, better to use recursive - * code rather than unrolled case - * - * Recursive invocations don't need to erase the cursor over and - * over again, so we use fbcon_bmove_rec() - */ - fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width, - p->vrows - p->yscroll); -} - -static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, - int dy, int dx, int height, int width, u_int y_break) -{ - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct fbcon_ops *ops = info->fbcon_par; - u_int b; - - if (sy < y_break && sy + height > y_break) { - b = y_break - sy; - if (dy < sy) { /* Avoid trashing self */ - fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, - y_break); - fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, - height - b, width, y_break); - } else { - fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, - height - b, width, y_break); - fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, - y_break); - } - return; - } - - if (dy < y_break && dy + height > y_break) { - b = y_break - dy; - if (dy < sy) { /* Avoid trashing self */ - fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, - y_break); - fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, - height - b, width, y_break); - } else { - fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, - height - b, width, y_break); - fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, - y_break); - } - return; - } - ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, - height, width); -} - static void updatescrollmode(struct fbcon_display *p, struct fb_info *info, struct vc_data *vc) @@ -2119,21 +1664,7 @@ static int fbcon_switch(struct vc_data *vc) updatescrollmode(p, info, vc); - switch (p->scrollmode) { - case SCROLL_WRAP_MOVE: - scrollback_phys_max = p->vrows - vc->vc_rows; - break; - case SCROLL_PAN_MOVE: - case SCROLL_PAN_REDRAW: - scrollback_phys_max = p->vrows - 2 * vc->vc_rows; - if (scrollback_phys_max < 0) - scrollback_phys_max = 0; - break; - default: - scrollback_phys_max = 0; - break; - } - + scrollback_phys_max = 0; scrollback_max = 0; scrollback_current = 0; diff --git a/drivers/video/fbdev/core/fbcon.h 
b/drivers/video/fbdev/core/fbcon.h index 9315b360c898..a00603b4451a 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -29,7 +29,6 @@ struct fbcon_display { /* Filled in by the low-level console driver */ const u_char *fontdata; int userfont; /* != 0 if fontdata kmalloc()ed */ - u_short scrollmode; /* Scroll Method */ u_short inverse; /* != 0 text black on white as default */ short yscroll; /* Hardware scrolling */ int vrows; /* number of virtual rows */ @@ -52,8 +51,6 @@ struct fbcon_display { }; struct fbcon_ops { - void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int dy, int dx, int height, int width); void (*clear)(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width); void (*putcs)(struct vc_data *vc, struct fb_info *info, @@ -152,62 +149,6 @@ static inline int attr_col_ec(int shift, struct vc_data *vc, #define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0) #define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1) - /* - * Scroll Method - */ - -/* There are several methods fbcon can use to move text around the screen: - * - * Operation Pan Wrap - *--------------------------------------------- - * SCROLL_MOVE copyarea No No - * SCROLL_PAN_MOVE copyarea Yes No - * SCROLL_WRAP_MOVE copyarea No Yes - * SCROLL_REDRAW imageblit No No - * SCROLL_PAN_REDRAW imageblit Yes No - * SCROLL_WRAP_REDRAW imageblit No Yes - * - * (SCROLL_WRAP_REDRAW is not implemented yet) - * - * In general, fbcon will choose the best scrolling - * method based on the rule below: - * - * Pan/Wrap > accel imageblit > accel copyarea > - * soft imageblit > (soft copyarea) - * - * Exception to the rule: Pan + accel copyarea is - * preferred over Pan + accel imageblit. - * - * The above is typical for PCI/AGP cards. Unless - * overridden, fbcon will never use soft copyarea. - * - * If you need to override the above rule, set the - * appropriate flags in fb_info->flags. For example, - * to prefer copyarea over imageblit, set - * FBINFO_READS_FAST. - * - * Other notes: - * + use the hardware engine to move the text - * (hw-accelerated copyarea() and fillrect()) - * + use hardware-supported panning on a large virtual screen - * + amifb can not only pan, but also wrap the display by N lines - * (i.e. visible line i = physical line (i+N) % yres). 
- * + read what's already rendered on the screen and - * write it in a different place (this is cfb_copyarea()) - * + re-render the text to the screen - * - * Whether to use wrapping or panning can only be figured out at - * runtime (when we know whether our font height is a multiple - * of the pan/wrap step) - * - */ - -#define SCROLL_MOVE 0x001 -#define SCROLL_PAN_MOVE 0x002 -#define SCROLL_WRAP_MOVE 0x003 -#define SCROLL_REDRAW 0x004 -#define SCROLL_PAN_REDRAW 0x005 - #ifdef CONFIG_FB_TILEBLITTING extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info); #endif diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index 9cd2c4b05c32..ffa78936eaab 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -59,31 +59,12 @@ static void ccw_update_attr(u8 *dst, u8 *src, int attribute, } } - -static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int dy, int dx, int height, int width) -{ - struct fbcon_ops *ops = info->fbcon_par; - struct fb_copyarea area; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - - area.sx = sy * vc->vc_font.height; - area.sy = vyres - ((sx + width) * vc->vc_font.width); - area.dx = dy * vc->vc_font.height; - area.dy = vyres - ((dx + width) * vc->vc_font.width); - area.width = height * vc->vc_font.height; - area.height = width * vc->vc_font.width; - - info->fbops->fb_copyarea(info, &area); -} - static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { - struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = info->var.yres; region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = sy * vc->vc_font.height; @@ -140,7 +121,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = info->var.yres; if (!ops->fontbuffer) return; @@ -229,7 +210,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = info->var.yres; if (!ops->fontbuffer) return; @@ -387,7 +368,7 @@ static int ccw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; u32 yoffset; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = info->var.yres; int err; yoffset = (vyres - info->var.yres) - ops->var.xoffset; @@ -402,7 +383,6 @@ static int ccw_update_start(struct fb_info *info) void fbcon_rotate_ccw(struct fbcon_ops *ops) { - ops->bmove = ccw_bmove; ops->clear = ccw_clear; ops->putcs = ccw_putcs; ops->clear_margins = ccw_clear_margins; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index 88d89fad3f05..92e5b7fb51ee 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -44,31 +44,12 @@ static void cw_update_attr(u8 *dst, u8 *src, int attribute, } } - -static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int dy, int dx, int height, int width) -{ - struct fbcon_ops *ops = info->fbcon_par; - struct fb_copyarea area; - u32 vxres = GETVXRES(ops->p->scrollmode, info); - - area.sx = vxres - ((sy + height) * vc->vc_font.height); - area.sy = sx * 
vc->vc_font.width; - area.dx = vxres - ((dy + height) * vc->vc_font.height); - area.dy = dx * vc->vc_font.width; - area.width = height * vc->vc_font.height; - area.height = width * vc->vc_font.width; - - info->fbops->fb_copyarea(info, &area); -} - static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { - struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = info->var.xres; region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = vxres - ((sy + height) * vc->vc_font.height); @@ -125,7 +106,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = info->var.xres; if (!ops->fontbuffer) return; @@ -212,7 +193,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = info->var.xres; if (!ops->fontbuffer) return; @@ -369,7 +350,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, static int cw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = info->var.xres; u32 xoffset; int err; @@ -385,7 +366,6 @@ static int cw_update_start(struct fb_info *info) void fbcon_rotate_cw(struct fbcon_ops *ops) { - ops->bmove = cw_bmove; ops->clear = cw_clear; ops->putcs = cw_putcs; ops->clear_margins = cw_clear_margins; diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h index e233444cda66..b528b2e54283 100644 --- a/drivers/video/fbdev/core/fbcon_rotate.h +++ b/drivers/video/fbdev/core/fbcon_rotate.h @@ -11,15 +11,6 @@ #ifndef _FBCON_ROTATE_H #define _FBCON_ROTATE_H -#define GETVYRES(s,i) ({ \ - (s == SCROLL_REDRAW || s == SCROLL_MOVE) ? \ - (i)->var.yres : (i)->var.yres_virtual; }) - -#define GETVXRES(s,i) ({ \ - (s == SCROLL_REDRAW || s == SCROLL_MOVE || !(i)->fix.xpanstep) ? 
\ - (i)->var.xres : (i)->var.xres_virtual; }) - - static inline int pattern_test_bit(u32 x, u32 y, u32 pitch, const char *pat) { u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8; diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 8d5e66b1bdfb..09619bd8e021 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -44,33 +44,13 @@ static void ud_update_attr(u8 *dst, u8 *src, int attribute, } } - -static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int dy, int dx, int height, int width) -{ - struct fbcon_ops *ops = info->fbcon_par; - struct fb_copyarea area; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); - - area.sy = vyres - ((sy + height) * vc->vc_font.height); - area.sx = vxres - ((sx + width) * vc->vc_font.width); - area.dy = vyres - ((dy + height) * vc->vc_font.height); - area.dx = vxres - ((dx + width) * vc->vc_font.width); - area.height = height * vc->vc_font.height; - area.width = width * vc->vc_font.width; - - info->fbops->fb_copyarea(info, &area); -} - static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { - struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = info->var.yres; + u32 vxres = info->var.xres; region.color = attr_bgcol_ec(bgshift,vc,info); region.dy = vyres - ((sy + height) * vc->vc_font.height); @@ -162,8 +142,8 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info, u32 mod = vc->vc_font.width % 8, cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = info->var.yres; + u32 vxres = info->var.xres; if (!ops->fontbuffer) return; @@ -259,8 +239,8 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = info->var.yres; + u32 vxres = info->var.xres; if (!ops->fontbuffer) return; @@ -410,8 +390,8 @@ static int ud_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; int xoffset, yoffset; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = info->var.yres; + u32 vxres = info->var.xres; int err; xoffset = vxres - info->var.xres - ops->var.xoffset; @@ -429,7 +409,6 @@ static int ud_update_start(struct fb_info *info) void fbcon_rotate_ud(struct fbcon_ops *ops) { - ops->bmove = ud_bmove; ops->clear = ud_clear; ops->putcs = ud_putcs; ops->clear_margins = ud_clear_margins; diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 7420d2c16e47..826175ad88a2 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1702,8 +1702,11 @@ static void do_unregister_framebuffer(struct fb_info *fb_info) { unlink_framebuffer(fb_info); if (fb_info->pixmap.addr && - (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) + (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) { kfree(fb_info->pixmap.addr); + fb_info->pixmap.addr = NULL; + } + fb_destroy_modelist(&fb_info->modelist); registered_fb[fb_info->node] = 
NULL; num_registered_fb--; diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index 2768eff247ba..72af95053bcb 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -16,21 +16,6 @@ #include <asm/types.h> #include "fbcon.h" -static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int dy, int dx, int height, int width) -{ - struct fb_tilearea area; - - area.sx = sx; - area.sy = sy; - area.dx = dx; - area.dy = dy; - area.height = height; - area.width = width; - - info->tileops->fb_tilecopy(info, &area); -} - static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { @@ -133,7 +118,6 @@ void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info) struct fb_tilemap map; struct fbcon_ops *ops = info->fbcon_par; - ops->bmove = tile_bmove; ops->clear = tile_clear; ops->putcs = tile_putcs; ops->clear_margins = tile_clear_margins; diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index edca3703b964..ea42ba6445b2 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -351,6 +351,17 @@ static int efifb_probe(struct platform_device *dev) char *option = NULL; efi_memory_desc_t md; + /* + * Generic drivers must not be registered if a framebuffer exists. + * If a native driver was probed, the display hardware was already + * taken and attempting to use the system framebuffer is dangerous. + */ + if (num_registered_fb > 0) { + dev_err(&dev->dev, + "efifb: a framebuffer is already registered\n"); + return -EINVAL; + } + if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled) return -ENODEV; diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c index 62f0ded70681..b63074fd892e 100644 --- a/drivers/video/fbdev/simplefb.c +++ b/drivers/video/fbdev/simplefb.c @@ -407,6 +407,17 @@ static int simplefb_probe(struct platform_device *pdev) struct simplefb_par *par; struct resource *mem; + /* + * Generic drivers must not be registered if a framebuffer exists. + * If a native driver was probed, the display hardware was already + * taken and attempting to use the system framebuffer is dangerous. + */ + if (num_registered_fb > 0) { + dev_err(&pdev->dev, + "simplefb: a framebuffer is already registered\n"); + return -EINVAL; + } + if (fb_get_options("simplefb", NULL)) return -ENODEV; diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c index bcacfb6934fa..0fe922f726e9 100644 --- a/drivers/video/fbdev/skeletonfb.c +++ b/drivers/video/fbdev/skeletonfb.c @@ -505,15 +505,15 @@ void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region) } /** - * xxxfb_copyarea - REQUIRED function. Can use generic routines if - * non acclerated hardware and packed pixel based. + * xxxfb_copyarea - OBSOLETE function. * Copies one area of the screen to another area. + * Will be deleted in a future version * * @info: frame buffer structure that represents a single frame buffer * @area: Structure providing the data to copy the framebuffer contents * from one region to another. * - * This drawing operation copies a rectangular area from one area of the + * This drawing operation copied a rectangular area from one area of the * screen to another area. 
*/ void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) @@ -645,9 +645,9 @@ static const struct fb_ops xxxfb_ops = { .fb_setcolreg = xxxfb_setcolreg, .fb_blank = xxxfb_blank, .fb_pan_display = xxxfb_pan_display, - .fb_fillrect = xxxfb_fillrect, /* Needed !!! */ - .fb_copyarea = xxxfb_copyarea, /* Needed !!! */ - .fb_imageblit = xxxfb_imageblit, /* Needed !!! */ + .fb_fillrect = xxxfb_fillrect, /* Needed !!! */ + .fb_copyarea = xxxfb_copyarea, /* Obsolete */ + .fb_imageblit = xxxfb_imageblit, /* Needed !!! */ .fb_cursor = xxxfb_cursor, /* Optional !!! */ .fb_sync = xxxfb_sync, .fb_ioctl = xxxfb_ioctl, diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c index 5ec51445bee8..6826f986da43 100644 --- a/drivers/video/fbdev/xen-fbfront.c +++ b/drivers/video/fbdev/xen-fbfront.c @@ -695,6 +695,7 @@ static struct xenbus_driver xenfb_driver = { .remove = xenfb_remove, .resume = xenfb_resume, .otherend_changed = xenfb_backend_changed, + .not_essential = true, }; static int __init xenfb_init(void) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 0da0af251c73..96e5a8782769 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -2889,6 +2889,7 @@ static unsigned int virtio_mem_features[] = { #if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA) VIRTIO_MEM_F_ACPI_PXM, #endif + VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, }; static const struct virtio_device_id virtio_mem_id_table[] = { diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 00f64f2f8b72..6d2614e34470 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -14,9 +14,6 @@ #include <linux/spinlock.h> #include <xen/xen.h> -static bool force_used_validation = false; -module_param(force_used_validation, bool, 0444); - #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ #define BAD_RING(_vq, fmt, args...) \ @@ -185,9 +182,6 @@ struct vring_virtqueue { } packed; }; - /* Per-descriptor in buffer length */ - u32 *buflen; - /* How to notify other side. FIXME: commonalize hcalls! */ bool (*notify)(struct virtqueue *vq); @@ -496,7 +490,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, unsigned int i, n, avail, descs_used, prev, err_idx; int head; bool indirect; - u32 buflen = 0; START_USE(vq); @@ -578,7 +571,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, indirect); - buflen += sg->length; } } /* Last one doesn't continue. */ @@ -618,10 +610,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, else vq->split.desc_state[head].indir_desc = ctx; - /* Store in buffer length if necessary */ - if (vq->buflen) - vq->buflen[head] = buflen; - /* Put entry in available array (but don't update avail->idx until they * do sync). */ avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); @@ -796,11 +784,6 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, BAD_RING(vq, "id %u is not a head!\n", i); return NULL; } - if (vq->buflen && unlikely(*len > vq->buflen[i])) { - BAD_RING(vq, "used len %d is larger than in buflen %u\n", - *len, vq->buflen[i]); - return NULL; - } /* detach_buf_split clears data, so grab it now. 
*/ ret = vq->split.desc_state[i].data; @@ -1079,7 +1062,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, unsigned int i, n, err_idx; u16 head, id; dma_addr_t addr; - u32 buflen = 0; head = vq->packed.next_avail_idx; desc = alloc_indirect_packed(total_sg, gfp); @@ -1109,8 +1091,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, desc[i].addr = cpu_to_le64(addr); desc[i].len = cpu_to_le32(sg->length); i++; - if (n >= out_sgs) - buflen += sg->length; } } @@ -1164,10 +1144,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, vq->packed.desc_state[id].indir_desc = desc; vq->packed.desc_state[id].last = id; - /* Store in buffer length if necessary */ - if (vq->buflen) - vq->buflen[id] = buflen; - vq->num_added += 1; pr_debug("Added buffer head %i to %p\n", head, vq); @@ -1203,7 +1179,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, __le16 head_flags, flags; u16 head, id, prev, curr, avail_used_flags; int err; - u32 buflen = 0; START_USE(vq); @@ -1283,8 +1258,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, 1 << VRING_PACKED_DESC_F_AVAIL | 1 << VRING_PACKED_DESC_F_USED; } - if (n >= out_sgs) - buflen += sg->length; } } @@ -1304,10 +1277,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, vq->packed.desc_state[id].indir_desc = ctx; vq->packed.desc_state[id].last = prev; - /* Store in buffer length if necessary */ - if (vq->buflen) - vq->buflen[id] = buflen; - /* * A driver MUST NOT make the first descriptor in the list * available before all subsequent descriptors comprising @@ -1494,11 +1463,6 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, BAD_RING(vq, "id %u is not a head!\n", id); return NULL; } - if (vq->buflen && unlikely(*len > vq->buflen[id])) { - BAD_RING(vq, "used len %d is larger than in buflen %u\n", - *len, vq->buflen[id]); - return NULL; - } /* detach_buf_packed clears data, so grab it now. */ ret = vq->packed.desc_state[id].data; @@ -1704,7 +1668,6 @@ static struct virtqueue *vring_create_virtqueue_packed( struct vring_virtqueue *vq; struct vring_packed_desc *ring; struct vring_packed_desc_event *driver, *device; - struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr; size_t ring_size_in_bytes, event_size_in_bytes; @@ -1794,15 +1757,6 @@ static struct virtqueue *vring_create_virtqueue_packed( if (!vq->packed.desc_extra) goto err_desc_extra; - if (!drv->suppress_used_validation || force_used_validation) { - vq->buflen = kmalloc_array(num, sizeof(*vq->buflen), - GFP_KERNEL); - if (!vq->buflen) - goto err_buflen; - } else { - vq->buflen = NULL; - } - /* No callback? Tell other side not to bother us. 
*/ if (!callback) { vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; @@ -1815,8 +1769,6 @@ static struct virtqueue *vring_create_virtqueue_packed( spin_unlock(&vdev->vqs_list_lock); return &vq->vq; -err_buflen: - kfree(vq->packed.desc_extra); err_desc_extra: kfree(vq->packed.desc_state); err_desc_state: @@ -2224,7 +2176,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, void (*callback)(struct virtqueue *), const char *name) { - struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); struct vring_virtqueue *vq; if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) @@ -2284,15 +2235,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, if (!vq->split.desc_extra) goto err_extra; - if (!drv->suppress_used_validation || force_used_validation) { - vq->buflen = kmalloc_array(vring.num, sizeof(*vq->buflen), - GFP_KERNEL); - if (!vq->buflen) - goto err_buflen; - } else { - vq->buflen = NULL; - } - /* Put everything in free lists. */ vq->free_head = 0; memset(vq->split.desc_state, 0, vring.num * @@ -2303,8 +2245,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, spin_unlock(&vdev->vqs_list_lock); return &vq->vq; -err_buflen: - kfree(vq->split.desc_extra); err_extra: kfree(vq->split.desc_state); err_state: diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index a1b11c62da9e..33e941e40082 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -259,9 +259,15 @@ config XEN_SCSI_BACKEND if guests need generic access to SCSI devices. config XEN_PRIVCMD - tristate + tristate "Xen hypercall passthrough driver" depends on XEN default m + help + The hypercall passthrough driver allows privileged user programs to + perform Xen hypercalls. This driver is normally required for systems + running as Dom0 to perform privileged operations, but in some + disaggregated Xen setups this driver might be needed for other + domains, too. config XEN_ACPI_PROCESSOR tristate "Xen ACPI processor" diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 7984645b5956..3c9ae156b597 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -1275,6 +1275,7 @@ static struct xenbus_driver pvcalls_front_driver = { .probe = pvcalls_front_probe, .remove = pvcalls_front_remove, .otherend_changed = pvcalls_front_changed, + .not_essential = true, }; static int __init pvcalls_frontend_init(void) diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index bd003ca8acbe..fe360c33ce71 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -909,7 +909,7 @@ static struct notifier_block xenbus_resume_nb = { static int __init xenbus_init(void) { - int err = 0; + int err; uint64_t v = 0; xen_store_domain_type = XS_UNKNOWN; @@ -949,6 +949,29 @@ static int __init xenbus_init(void) err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto out_error; + /* + * Uninitialized hvm_params are zero and return no error. + * Although it is theoretically possible to have + * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is + * not zero when valid. If zero, it means that Xenstore hasn't + * been properly initialized. Instead of attempting to map a + * wrong guest physical address return error. + * + * Also recognize all bits set as an invalid value. + */ + if (!v || !~v) { + err = -ENOENT; + goto out_error; + } + /* Avoid truncation on 32-bit. 
*/ +#if BITS_PER_LONG == 32 + if (v > ULONG_MAX) { + pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n", + __func__, v); + err = -EINVAL; + goto out_error; + } +#endif xen_store_gfn = (unsigned long)v; xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT, @@ -983,8 +1006,10 @@ static int __init xenbus_init(void) */ proc_create_mount_point("xen"); #endif + return 0; out_error: + xen_store_domain_type = XS_UNKNOWN; return err; } diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index 480944606a3c..07b010a68fcf 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c @@ -211,19 +211,11 @@ static int is_device_connecting(struct device *dev, void *data, bool ignore_none if (drv && (dev->driver != drv)) return 0; - if (ignore_nonessential) { - /* With older QEMU, for PVonHVM guests the guest config files - * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0'] - * which is nonsensical as there is no PV FB (there can be - * a PVKB) running as HVM guest. */ + xendrv = to_xenbus_driver(dev->driver); - if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0)) - return 0; + if (ignore_nonessential && xendrv->not_essential) + return 0; - if ((strncmp(xendev->nodename, "device/vfb", 10) == 0)) - return 0; - } - xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); |
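A standalone restatement of the HVM_PARAM_STORE_PFN sanity check that the xenbus_init() hunk above introduces: zero and all-ones are treated as "Xenstore not initialised", and on 32-bit a value that will not fit in unsigned long is rejected rather than silently truncated. check_store_pfn() below is a stand-in for illustration, not a kernel function, and it compares limits where the kernel version tests BITS_PER_LONG:

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int check_store_pfn(uint64_t v)
{
	/* Uninitialised params read back as 0; all bits set is also invalid. */
	if (v == 0 || v == UINT64_MAX)
		return -ENOENT;
#if ULONG_MAX < UINT64_MAX	/* 32-bit unsigned long */
	if (v > ULONG_MAX)
		return -EINVAL;	/* the cast below would truncate */
#endif
	(void)(unsigned long)v;	/* mirrors: xen_store_gfn = (unsigned long)v; */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", check_store_pfn(0),
	       check_store_pfn(UINT64_MAX), check_store_pfn(0xfeffcULL));
	return 0;
}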