| author | Wolfram Sang <wsa@kernel.org> | 2022-09-27 22:33:37 +0300 |
|---|---|---|
| committer | Wolfram Sang <wsa@kernel.org> | 2022-09-27 22:33:37 +0300 |
| commit | 85f17d677f6c40069287617630f202eb20fcfe36 (patch) | |
| tree | a0a7de12ddf19068b14124d7a64d79cf697e0710 /drivers | |
| parent | 2dec3a7a7beb23ec11b23986e0e331913d621ff1 (diff) | |
| parent | f76349cf41451c5c42a99f18a9163377e4b364ff (diff) | |
| download | linux-85f17d677f6c40069287617630f202eb20fcfe36.tar.xz | |
Merge branch 'master' into i2c/for-mergewindow
Diffstat (limited to 'drivers')
204 files changed, 1900 insertions, 2245 deletions
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 753e7cca0f40..5fb4bc51dd8b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1625,7 +1625,7 @@ static int __init fw_devlink_setup(char *arg)
 }
 early_param("fw_devlink", fw_devlink_setup);
 
-static bool fw_devlink_strict = true;
+static bool fw_devlink_strict;
 static int __init fw_devlink_strict_setup(char *arg)
 {
 	return strtobool(arg, &fw_devlink_strict);
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 62c2b7ac4339..4407203e0c9b 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -449,6 +449,9 @@ static int quad8_events_configure(struct counter_device *counter)
 			return -EINVAL;
 		}
 
+		/* Enable IRQ line */
+		irq_enabled |= BIT(event_node->channel);
+
 		/* Skip configuration if it is the same as previously set */
 		if (priv->irq_trigger[event_node->channel] == next_irq_trigger)
 			continue;
@@ -462,9 +465,6 @@ static int quad8_events_configure(struct counter_device *counter)
 			   priv->irq_trigger[event_node->channel] << 3;
 		iowrite8(QUAD8_CTR_IOR | ior_cfg,
 			 &priv->reg->channel[event_node->channel].control);
-
-		/* Enable IRQ line */
-		irq_enabled |= BIT(event_node->channel);
 	}
 
 	iowrite8(irq_enabled, &priv->reg->index_interrupt);
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index cb6401c9e9a4..acf31cc1dbcc 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r)
 		.start = r->start,
 		.end = r->end,
 		.flags = IORESOURCE_MEM,
+		.desc = IORES_DESC_SOFT_RESERVED,
 	};
 	struct platform_device *pdev;
 	struct memregion_info info;
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index d4f1e4e9603a..85e00701473c 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
 	}
 
 	pdev = of_find_device_by_node(udma_node);
+	if (np != udma_node)
+		of_node_put(udma_node);
+
 	if (!pdev) {
 		pr_debug("UDMA device not found\n");
 		return ERR_PTR(-EPROBE_DEFER);
 	}
 
-	if (np != udma_node)
-		of_node_put(udma_node);
-
 	ud = platform_get_drvdata(pdev);
 	if (!ud) {
 		pr_debug("UDMA has not been probed\n");
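The k3-udma-private.c change above moves of_node_put() ahead of the early error return, so the node reference is dropped on every path. A minimal userspace sketch of that release-before-early-return shape (the refcount and the node_get()/node_put()/lookup() names are invented stand-ins, not driver code):

```c
#include <stdio.h>

static int refcount;

static void node_get(void) { refcount++; } /* stand-in for of_node_get() */
static void node_put(void) { refcount--; } /* stand-in for of_node_put() */

/* Fixed control flow: the reference is dropped as soon as it is no
 * longer needed, before any early return can skip the cleanup. */
static int lookup(int found)
{
	node_get();        /* reference taken while resolving the node */
	node_put();        /* released on every path from here on */

	if (!found)
		return -1; /* this early return no longer leaks */
	return 0;
}

int main(void)
{
	lookup(0);
	lookup(1);
	printf("leaked references: %d\n", refcount); /* prints 0 */
	return 0;
}
```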
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 6276934d4d2b..8cd4e69dc7b4 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -3040,9 +3040,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 
 	/* Request and map I/O memory */
 	xdev->regs = devm_platform_ioremap_resource(pdev, 0);
-	if (IS_ERR(xdev->regs))
-		return PTR_ERR(xdev->regs);
-
+	if (IS_ERR(xdev->regs)) {
+		err = PTR_ERR(xdev->regs);
+		goto disable_clks;
+	}
 	/* Retrieve the DMA engine properties from the device tree */
 	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
 	xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
@@ -3070,7 +3071,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		if (err < 0) {
 			dev_err(xdev->dev,
 				"missing xlnx,num-fstores property\n");
-			return err;
+			goto disable_clks;
 		}
 
 		err = of_property_read_u32(node, "xlnx,flush-fsync",
@@ -3090,7 +3091,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		xdev->ext_addr = false;
 
 	/* Set the dma mask bits */
-	dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+	err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+	if (err < 0) {
+		dev_err(xdev->dev, "DMA mask error %d\n", err);
+		goto disable_clks;
+	}
 
 	/* Initialize the DMA engine */
 	xdev->common.dev = &pdev->dev;
@@ -3137,7 +3142,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	for_each_child_of_node(node, child) {
 		err = xilinx_dma_child_probe(xdev, child);
 		if (err < 0)
-			goto disable_clks;
+			goto error;
 	}
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -3172,12 +3177,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 
 	return 0;
 
-disable_clks:
-	xdma_disable_allclks(xdev);
 error:
 	for (i = 0; i < xdev->dma_config->max_channels; i++)
 		if (xdev->chan[i])
 			xilinx_dma_chan_remove(xdev->chan[i]);
+disable_clks:
+	xdma_disable_allclks(xdev);
 
 	return err;
 }
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index dc299ab36818..3f4ee3954384 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -849,7 +849,7 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
 	zynqmp_dma_desc_config_eod(chan, desc);
 	async_tx_ack(&first->async_tx);
-	first->async_tx.flags = flags;
+	first->async_tx.flags = (enum dma_ctrl_flags)flags;
 	return &first->async_tx;
 }
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 3ed7ae0d6781..96060bf90a24 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -450,9 +450,13 @@ static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
 static const struct scmi_clock_info *
 scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
 {
+	struct scmi_clock_info *clk;
 	struct clock_info *ci = ph->get_priv(ph);
-	struct scmi_clock_info *clk = ci->clk + clk_id;
 
+	if (clk_id >= ci->num_clocks)
+		return NULL;
+
+	clk = ci->clk + clk_id;
 	if (!clk->name[0])
 		return NULL;
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 8abace56b958..f42dad997ac9 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -106,6 +106,7 @@ enum scmi_optee_pta_cmd {
  * @channel_id: OP-TEE channel ID used for this transport
  * @tee_session: TEE session identifier
  * @caps: OP-TEE SCMI channel capabilities
+ * @rx_len: Response size
  * @mu: Mutex protection on channel access
  * @cinfo: SCMI channel information
  * @shmem: Virtual base address of the shared memory
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index 673f3eb498f4..e9afa8cab730 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -166,9 +166,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
 	struct scmi_xfer *t;
 	struct scmi_msg_reset_domain_reset *dom;
 	struct scmi_reset_info *pi = ph->get_priv(ph);
-	struct reset_dom_info *rdom = pi->dom_info + domain;
+	struct reset_dom_info *rdom;
 
-	if (rdom->async_reset)
+	if (domain >= pi->num_domains)
+		return -EINVAL;
+
+	rdom = pi->dom_info + domain;
+	if (rdom->async_reset && flags & AUTONOMOUS_RESET)
 		flags |= ASYNCHRONOUS_RESET;
 
 	ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
@@ -180,7 +184,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
 	dom->flags = cpu_to_le32(flags);
 	dom->reset_state = cpu_to_le32(state);
 
-	if (rdom->async_reset)
+	if (flags & ASYNCHRONOUS_RESET)
 		ret = ph->xops->do_xfer_with_response(ph, t);
 	else
 		ret = ph->xops->do_xfer(ph, t);
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 581d34c95769..4e27c3d66a83 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -138,9 +138,28 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
 	scmi_pd_data->domains = domains;
 	scmi_pd_data->num_domains = num_domains;
 
+	dev_set_drvdata(dev, scmi_pd_data);
+
 	return of_genpd_add_provider_onecell(np, scmi_pd_data);
 }
 
+static void scmi_pm_domain_remove(struct scmi_device *sdev)
+{
+	int i;
+	struct genpd_onecell_data *scmi_pd_data;
+	struct device *dev = &sdev->dev;
+	struct device_node *np = dev->of_node;
+
+	of_genpd_del_provider(np);
+
+	scmi_pd_data = dev_get_drvdata(dev);
+	for (i = 0; i < scmi_pd_data->num_domains; i++) {
+		if (!scmi_pd_data->domains[i])
+			continue;
+		pm_genpd_remove(scmi_pd_data->domains[i]);
+	}
+}
+
 static const struct scmi_device_id scmi_id_table[] = {
 	{ SCMI_PROTOCOL_POWER, "genpd" },
 	{ },
@@ -150,6 +169,7 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table);
 static struct scmi_driver scmi_power_domain_driver = {
 	.name = "scmi-power-domain",
 	.probe = scmi_pm_domain_probe,
+	.remove = scmi_pm_domain_remove,
 	.id_table = scmi_id_table,
 };
 module_scmi_driver(scmi_power_domain_driver);
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 7288c6117838..0b5853fa9d87 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -762,6 +762,10 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
 {
 	int ret;
 	struct scmi_xfer *t;
+	struct sensors_info *si = ph->get_priv(ph);
+
+	if (sensor_id >= si->num_sensors)
+		return -EINVAL;
 
 	ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET, sizeof(__le32),
 				      sizeof(__le32), &t);
@@ -771,7 +775,6 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
 	put_unaligned_le32(sensor_id, t->tx.buf);
 	ret = ph->xops->do_xfer(ph, t);
 	if (!ret) {
-		struct sensors_info *si = ph->get_priv(ph);
 		struct scmi_sensor_info *s = si->sensors + sensor_id;
 
 		*sensor_config = get_unaligned_le64(t->rx.buf);
@@ -788,6 +791,10 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
 	int ret;
 	struct scmi_xfer *t;
 	struct scmi_msg_sensor_config_set *msg;
+	struct sensors_info *si = ph->get_priv(ph);
+
+	if (sensor_id >= si->num_sensors)
+		return -EINVAL;
 
 	ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET,
 				      sizeof(*msg), 0, &t);
@@ -800,7 +807,6 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
 
 	ret = ph->xops->do_xfer(ph, t);
 	if (!ret) {
-		struct sensors_info *si = ph->get_priv(ph);
 		struct scmi_sensor_info *s = si->sensors + sensor_id;
 
 		s->sensor_config = sensor_config;
@@ -831,8 +837,11 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
 	int ret;
 	struct scmi_xfer *t;
 	struct scmi_msg_sensor_reading_get *sensor;
+	struct scmi_sensor_info *s;
 	struct sensors_info *si = ph->get_priv(ph);
-	struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+	if (sensor_id >= si->num_sensors)
+		return -EINVAL;
 
 	ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
 				      sizeof(*sensor), 0, &t);
@@ -841,6 +850,7 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
 
 	sensor = t->tx.buf;
 	sensor->id = cpu_to_le32(sensor_id);
+	s = si->sensors + sensor_id;
 	if (s->async) {
 		sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
 		ret = ph->xops->do_xfer_with_response(ph, t);
@@ -895,9 +905,13 @@ scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph,
 	int ret;
 	struct scmi_xfer *t;
 	struct scmi_msg_sensor_reading_get *sensor;
+	struct scmi_sensor_info *s;
 	struct sensors_info *si = ph->get_priv(ph);
-	struct scmi_sensor_info *s = si->sensors + sensor_id;
 
+	if (sensor_id >= si->num_sensors)
+		return -EINVAL;
+
+	s = si->sensors + sensor_id;
 	if (!count || !readings ||
 	    (!s->num_axis && count > 1) ||
 	    (s->num_axis && count > s->num_axis))
 		return -EINVAL;
@@ -948,6 +962,9 @@ scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id)
 {
 	struct sensors_info *si = ph->get_priv(ph);
 
+	if (sensor_id >= si->num_sensors)
+		return NULL;
+
 	return si->sensors + sensor_id;
 }
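The SCMI clock, reset, and sensor changes above all apply one pattern: validate a caller-supplied index against the table size before forming a pointer into the table. A standalone sketch of the pattern (the sensor_info/sensors_table types and names are illustrative only, not the driver's):

```c
#include <stddef.h>
#include <stdio.h>

struct sensor_info { const char *name; };

struct sensors_table {
	size_t num_sensors;
	struct sensor_info *sensors;
};

/* Reject out-of-range ids up front; only then compute the pointer. */
static struct sensor_info *sensor_info_get(struct sensors_table *t, size_t id)
{
	if (id >= t->num_sensors)
		return NULL;
	return t->sensors + id;
}

int main(void)
{
	struct sensor_info s[2] = { { "temp0" }, { "volt0" } };
	struct sensors_table t = { 2, s };

	printf("%s\n", sensor_info_get(&t, 1)->name); /* "volt0" */
	printf("%p\n", (void *)sensor_info_get(&t, 7)); /* NULL, not OOB */
	return 0;
}
```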
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 8ced7af8e56d..4f9fb086eab7 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -48,6 +48,9 @@ static int efibc_reboot_notifier_call(struct notifier_block *notifier,
 		return NOTIFY_DONE;
 
 	wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+	if (!wdata)
+		return NOTIFY_DONE;
+
 	for (l = 0; l < MAX_DATA_LEN - 1 && str[l] != '\0'; l++)
 		wdata[l] = str[l];
 	wdata[l] = L'\0';
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 8a18930f3eb6..516f4f0069bd 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -14,7 +14,7 @@
 
 /* SHIM variables */
 static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
 
 static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
 			    unsigned long *data_size, void *data)
@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
 
 	/*
 	 * See if a user has put the shim into insecure mode. If so, and if the
-	 * variable doesn't have the runtime attribute set, we might as well
-	 * honor that.
+	 * variable doesn't have the non-volatile attribute set, we might as
+	 * well honor that.
 	 */
 	size = sizeof(moksbstate);
 	status = get_efi_var(shim_MokSBState_name, &shim_guid,
@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void)
 	/* If it fails, we don't care why. Default to secure */
 	if (status != EFI_SUCCESS)
 		goto secure_boot_enabled;
-	if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
+	if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
 		return efi_secureboot_mode_disabled;
 
 secure_boot_enabled:
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 43ca665af610..7a7abc8959d2 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -516,6 +516,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
 	hdr->ramdisk_image = 0;
 	hdr->ramdisk_size = 0;
 
+	/*
+	 * Disregard any setup data that was provided by the bootloader:
+	 * setup_data could be pointing anywhere, and we have no way of
+	 * authenticating or validating the payload.
+	 */
+	hdr->setup_data = 0;
+
 	efi_stub_entry(handle, sys_table_arg, boot_params);
 	/* not reached */
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 72c677c910de..133e511355c9 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -148,10 +148,6 @@ static ssize_t flash_count_show(struct device *dev,
 	stride = regmap_get_reg_stride(sec->m10bmc->regmap);
 	num_bits = FLASH_COUNT_SIZE * 8;
 
-	flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
-	if (!flash_buf)
-		return -ENOMEM;
-
 	if (FLASH_COUNT_SIZE % stride) {
 		dev_err(sec->dev,
 			"FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
@@ -160,6 +156,10 @@ static ssize_t flash_count_show(struct device *dev,
 		return -EINVAL;
 	}
 
+	flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
+	if (!flash_buf)
+		return -ENOMEM;
+
 	ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
 			       flash_buf, FLASH_COUNT_SIZE / stride);
 	if (ret) {
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index f422c3e129a0..f77a965f5780 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -41,14 +41,12 @@
 * struct ftgpio_gpio - Gemini GPIO state container
 * @dev: containing device for this instance
 * @gc: gpiochip for this instance
- * @irq: irqchip for this instance
 * @base: remapped I/O-memory base
 * @clk: silicon clock
 */
 struct ftgpio_gpio {
 	struct device *dev;
 	struct gpio_chip gc;
-	struct irq_chip irq;
 	void __iomem *base;
 	struct clk *clk;
 };
@@ -70,6 +68,7 @@ static void ftgpio_gpio_mask_irq(struct irq_data *d)
 	val = readl(g->base + GPIO_INT_EN);
 	val &= ~BIT(irqd_to_hwirq(d));
 	writel(val, g->base + GPIO_INT_EN);
+	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
 }
 
 static void ftgpio_gpio_unmask_irq(struct irq_data *d)
@@ -78,6 +77,7 @@ static void ftgpio_gpio_unmask_irq(struct irq_data *d)
 	struct ftgpio_gpio *g = gpiochip_get_data(gc);
 	u32 val;
 
+	gpiochip_enable_irq(gc, irqd_to_hwirq(d));
 	val = readl(g->base + GPIO_INT_EN);
 	val |= BIT(irqd_to_hwirq(d));
 	writel(val, g->base + GPIO_INT_EN);
@@ -221,6 +221,16 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
 	return 0;
 }
 
+static const struct irq_chip ftgpio_irq_chip = {
+	.name = "FTGPIO010",
+	.irq_ack = ftgpio_gpio_ack_irq,
+	.irq_mask = ftgpio_gpio_mask_irq,
+	.irq_unmask = ftgpio_gpio_unmask_irq,
+	.irq_set_type = ftgpio_gpio_set_irq_type,
+	.flags = IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
 static int ftgpio_gpio_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -277,14 +287,8 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
 	if (!IS_ERR(g->clk))
 		g->gc.set_config = ftgpio_gpio_set_config;
 
-	g->irq.name = "FTGPIO010";
-	g->irq.irq_ack = ftgpio_gpio_ack_irq;
-	g->irq.irq_mask = ftgpio_gpio_mask_irq;
-	g->irq.irq_unmask = ftgpio_gpio_unmask_irq;
-	g->irq.irq_set_type = ftgpio_gpio_set_irq_type;
-
 	girq = &g->gc.irq;
-	girq->chip = &g->irq;
+	gpio_irq_chip_set_chip(girq, &ftgpio_irq_chip);
 	girq->parent_handler = ftgpio_gpio_irq_handler;
 	girq->num_parents = 1;
 	girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index 312309be0287..56656fb519f8 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -63,6 +63,14 @@ static void ixp4xx_gpio_irq_ack(struct irq_data *d)
 	__raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
 }
 
+static void ixp4xx_gpio_mask_irq(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+	irq_chip_mask_parent(d);
+	gpiochip_disable_irq(gc, d->hwirq);
+}
+
 static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -72,6 +80,7 @@ static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
 	if (!(g->irq_edge & BIT(d->hwirq)))
 		ixp4xx_gpio_irq_ack(d);
 
+	gpiochip_enable_irq(gc, d->hwirq);
 	irq_chip_unmask_parent(d);
 }
 
@@ -149,12 +158,14 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 	return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
 }
 
-static struct irq_chip ixp4xx_gpio_irqchip = {
+static const struct irq_chip ixp4xx_gpio_irqchip = {
 	.name = "IXP4GPIO",
 	.irq_ack = ixp4xx_gpio_irq_ack,
-	.irq_mask = irq_chip_mask_parent,
+	.irq_mask = ixp4xx_gpio_mask_irq,
 	.irq_unmask = ixp4xx_gpio_irq_unmask,
 	.irq_set_type = ixp4xx_gpio_irq_set_type,
+	.flags = IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
 
 static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
@@ -263,7 +274,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
 	g->gc.owner = THIS_MODULE;
 
 	girq = &g->gc.irq;
-	girq->chip = &ixp4xx_gpio_irqchip;
+	gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
 	girq->fwnode = g->fwnode;
 	girq->parent_domain = parent;
 	girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index a2e505a7545c..523dfd17dd92 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -533,8 +533,10 @@ static int __init gpio_mockup_register_chip(int idx)
 	}
 
 	fwnode = fwnode_create_software_node(properties, NULL);
-	if (IS_ERR(fwnode))
+	if (IS_ERR(fwnode)) {
+		kfree_strarray(line_names, ngpio);
 		return PTR_ERR(fwnode);
+	}
 
 	pdevinfo.name = "gpio-mockup";
 	pdevinfo.id = idx;
@@ -597,9 +599,9 @@ static int __init gpio_mockup_init(void)
 
 static void __exit gpio_mockup_exit(void)
 {
+	gpio_mockup_unregister_pdevs();
 	debugfs_remove_recursive(gpio_mockup_dbg_dir);
 	platform_driver_unregister(&gpio_mockup_driver);
-	gpio_mockup_unregister_pdevs();
 }
 
 module_init(gpio_mockup_init);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 15049822937a..3eb08cd1fdc0 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -169,6 +169,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
 
 	switch (flow_type) {
 	case IRQ_TYPE_EDGE_FALLING:
+	case IRQ_TYPE_LEVEL_LOW:
 		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
 			gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index d8a26e503ca5..f163f5ca857b 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -112,6 +112,8 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
 	unsigned long flags;
 	u32 rise, fall, high, low;
 
+	gpiochip_enable_irq(gc, d->hwirq);
+
 	spin_lock_irqsave(&rg->lock, flags);
 	rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
 	fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
@@ -143,6 +145,8 @@ mediatek_gpio_irq_mask(struct irq_data *d)
 	mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
 	mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
 	spin_unlock_irqrestore(&rg->lock, flags);
+
+	gpiochip_disable_irq(gc, d->hwirq);
 }
 
 static int
@@ -204,6 +208,16 @@ mediatek_gpio_xlate(struct gpio_chip *chip,
 	return gpio % MTK_BANK_WIDTH;
 }
 
+static const struct irq_chip mt7621_irq_chip = {
+	.name = "mt7621-gpio",
+	.irq_mask_ack = mediatek_gpio_irq_mask,
+	.irq_mask = mediatek_gpio_irq_mask,
+	.irq_unmask = mediatek_gpio_irq_unmask,
+	.irq_set_type = mediatek_gpio_irq_type,
+	.flags = IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
 static int
 mediatek_gpio_bank_probe(struct device *dev, int bank)
 {
@@ -238,11 +252,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
 		return -ENOMEM;
 
 	rg->chip.offset = bank * MTK_BANK_WIDTH;
-	rg->irq_chip.name = dev_name(dev);
-	rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
-	rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
-	rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
-	rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
 
 	if (mtk->gpio_irq) {
 		struct gpio_irq_chip *girq;
@@ -262,7 +271,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
 		}
 
 		girq = &rg->chip.irq;
-		girq->chip = &rg->irq_chip;
+		gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
 		/* This will let us handle the parent IRQ in the driver */
 		girq->parent_handler = NULL;
 		girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index f91e876fd969..bb50335239ac 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -419,11 +419,11 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
 			goto out;
 		} else {
 			bank->toggle_edge_mode |= mask;
-			level |= mask;
+			level &= ~mask;
 
 			/*
 			 * Determine gpio state. If 1 next interrupt should be
-			 * falling otherwise rising.
+			 * low otherwise high.
 			 */
 			data = readl(bank->reg_base + bank->gpio_regs->ext_port);
 			if (data & mask)
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index fa4bc7481f9a..e739dcea61b2 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -307,6 +307,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
 		girq->default_type = IRQ_TYPE_NONE;
 		girq->handler = handle_simple_irq;
 		girq->init_valid_mask = tqmx86_init_irq_valid_mask;
+
+		irq_domain_set_pm_device(girq->domain, dev);
 	}
 
 	ret = devm_gpiochip_add_data(dev, chip, gpio);
@@ -315,8 +317,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
 		goto out_pm_dis;
 	}
 
-	irq_domain_set_pm_device(girq->domain, dev);
-
 	dev_info(dev, "GPIO functionality initialized with %d pins\n",
 		 chip->ngpio);
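The ftgpio010, ixp4xx, and mt7621 conversions above all move to the immutable irq_chip scheme. A condensed sketch of the shape they converge on, distilled from those three diffs (the demo_* names are invented; the flags, helper macro, and gpiolib calls are the real API used in the patches):

```c
/* mask/unmask must now bracket the gpiochip_{disable,enable}_irq()
 * calls themselves, since gpiolib no longer patches the chip. */
static void demo_mask_irq(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	/* ...mask the interrupt in hardware here... */
	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}

static void demo_unmask_irq(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	gpiochip_enable_irq(gc, irqd_to_hwirq(d));
	/* ...unmask the interrupt in hardware here... */
}

static const struct irq_chip demo_irq_chip = {
	.name = "demo-gpio",
	.irq_mask = demo_mask_irq,
	.irq_unmask = demo_unmask_irq,
	.flags = IRQCHIP_IMMUTABLE,	/* chip is const; gpiolib must not modify it */
	GPIOCHIP_IRQ_RESOURCE_HELPERS,	/* supplies request/release resources ops */
};

/* In probe, instead of girq->chip = &...: */
/*	gpio_irq_chip_set_chip(&gc->irq, &demo_irq_chip); */
```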
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index f8041d4898d1..92f185575e94 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1986,7 +1986,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 		ret = -ENODEV;
 		goto out_free_le;
 	}
-	le->irq = irq;
 
 	if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
 		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
@@ -2000,7 +1999,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 	init_waitqueue_head(&le->wait);
 
 	/* Request a thread to read the events */
-	ret = request_threaded_irq(le->irq,
+	ret = request_threaded_irq(irq,
 				   lineevent_irq_handler,
 				   lineevent_irq_thread,
 				   irqflags,
@@ -2009,6 +2008,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 	if (ret)
 		goto out_free_le;
 
+	le->irq = irq;
+
 	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
 	if (fd < 0) {
 		ret = fd;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1400abee9f40..be7aff2d4a57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2365,8 +2365,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 		}
 		adev->ip_blocks[i].status.sw = true;
 
-		/* need to do gmc hw init early so we can allocate gpu mem */
-		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+			/* need to do common hw init early so everything is set up for gmc */
+			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+			if (r) {
+				DRM_ERROR("hw_init %d failed %d\n", i, r);
+				goto init_failed;
+			}
+			adev->ip_blocks[i].status.hw = true;
+		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+			/* need to do gmc hw init early so we can allocate gpu mem */
 			/* Try to reserve bad pages early */
 			if (amdgpu_sriov_vf(adev))
 				amdgpu_virt_exchange_data(adev);
@@ -3052,8 +3060,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
 	int i, r;
 
 	static enum amd_ip_block_type ip_order[] = {
-		AMD_IP_BLOCK_TYPE_GMC,
 		AMD_IP_BLOCK_TYPE_COMMON,
+		AMD_IP_BLOCK_TYPE_GMC,
 		AMD_IP_BLOCK_TYPE_PSP,
 		AMD_IP_BLOCK_TYPE_IH,
 	};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index c20922a5af9f..23998f727c7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,6 +38,8 @@
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -498,6 +500,12 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
 	.create_handle = drm_gem_fb_create_handle,
 };
 
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+	.destroy = drm_gem_fb_destroy,
+	.create_handle = drm_gem_fb_create_handle,
+	.dirty = drm_atomic_helper_dirtyfb,
+};
+
 uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
 					  uint64_t bo_flags)
 {
@@ -1100,7 +1108,10 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
 	if (ret)
 		goto err;
 
-	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+	if (drm_drv_uses_atomic_modeset(dev))
+		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
+	else
+		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
 	if (ret)
 		goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index fe82b8b19a4e..0c546245793b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -181,6 +181,9 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
 	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
 		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
 			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
+		/* zero sdma_hqd_mask for non-existent engine */
+		else if (adev->sdma.num_instances == 1)
+			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
 		else
 			adev->mes.sdma_hqd_mask[i] = 0xfc;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 35738db12cd1..c9dec2434f37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -756,7 +756,7 @@ static int psp_tmr_init(struct psp_context *psp)
 	}
 
 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
-	ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev),
+	ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
 				      AMDGPU_GEM_DOMAIN_VRAM,
 				      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index c32b74bd970f..e593e8c2a54d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -36,6 +36,7 @@
 #define PSP_CMD_BUFFER_SIZE	0x1000
 #define PSP_1_MEG		0x100000
 #define PSP_TMR_SIZE(adev)	((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
+#define PSP_TMR_ALIGNMENT	0x100000
 #define PSP_FW_NAME_LEN		0x24
 
 enum psp_shared_mem_size {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ff5361f5c2d4..12c6f97945a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1811,7 +1811,8 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
 		amdgpu_ras_query_error_status(adev, &info);
 
 		if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
-		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
+		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
 			if (amdgpu_ras_reset_error_status(adev, info.head.block))
 				dev_warn(adev->dev, "Failed to reset error counter and error status");
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 59cac347baa3..690fd4f639f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2484,8 +2484,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 		/* Intentionally setting invalid PTE flag
 		 * combination to force a no-retry-fault
 		 */
-		flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
-			AMDGPU_PTE_TF;
+		flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
 		value = 0;
 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
 		/* Redirect the access to the dummy page */
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 4603653916f5..67ca16a8027c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1103,10 +1103,13 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
 			*flags |= AMDGPU_PDE_BFS(0x9);
 
 	} else if (level == AMDGPU_VM_PDB0) {
-		if (*flags & AMDGPU_PDE_PTE)
+		if (*flags & AMDGPU_PDE_PTE) {
 			*flags &= ~AMDGPU_PDE_PTE;
-		else
+			if (!(*flags & AMDGPU_PTE_VALID))
+				*addr |= 1 << PAGE_SHIFT;
+		} else {
 			*flags |= AMDGPU_PTE_TF;
+		}
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index b465baa26762..aa761ff3a5fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -380,6 +380,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
 	WREG32_PCIE(smnPCIE_LC_CNTL, data);
 }
 
+#ifdef CONFIG_PCIEASPM
 static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
 {
 	uint32_t def, data;
@@ -401,9 +402,11 @@ static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
 	if (def != data)
 		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
 }
+#endif
 
 static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
 {
+#ifdef CONFIG_PCIEASPM
 	uint32_t def, data;
 
 	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -459,7 +462,10 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
 	if (def != data)
 		WREG32_PCIE(smnPCIE_LC_CNTL6, data);
 
-	nbio_v2_3_program_ltr(adev);
+	/* Don't bother about LTR if LTR is not enabled
+	 * in the path */
+	if (adev->pdev->ltr_path)
+		nbio_v2_3_program_ltr(adev);
 
 	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
 	data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -483,6 +489,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
 	data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
 	if (def != data)
 		WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
 }
 
 static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index f7f6ddebd3e4..37615a77287b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -282,6 +282,7 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
 			mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
 }
 
+#ifdef CONFIG_PCIEASPM
 static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
 {
 	uint32_t def, data;
@@ -303,9 +304,11 @@ static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
 	if (def != data)
 		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
 }
+#endif
 
 static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
 {
+#ifdef CONFIG_PCIEASPM
 	uint32_t def, data;
 
 	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -361,7 +364,10 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
 	if (def != data)
 		WREG32_PCIE(smnPCIE_LC_CNTL6, data);
 
-	nbio_v6_1_program_ltr(adev);
+	/* Don't bother about LTR if LTR is not enabled
+	 * in the path */
+	if (adev->pdev->ltr_path)
+		nbio_v6_1_program_ltr(adev);
 
 	def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
 	data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -385,6 +391,7 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
 	data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
 	if (def != data)
 		WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
 }
 
 const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
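The nbio_v2_3 and nbio_v6_1 hunks above, and the nbio_v7_4 one that follows, all wrap ASPM/LTR programming the same way. A condensed sketch of that guard, under the assumption that program_hw_ltr()/program_hw_aspm() stand in for the register sequences elided here (CONFIG_PCIEASPM and pdev->ltr_path are the real kernel symbols the patches use):

```c
static void program_aspm(struct amdgpu_device *adev)
{
#ifdef CONFIG_PCIEASPM
	/* Only touch LTR registers when LTR is enabled along the PCIe path;
	 * skip everything when ASPM support is compiled out. */
	if (adev->pdev->ltr_path)
		program_hw_ltr(adev);	/* placeholder for the LTR writes */

	program_hw_aspm(adev);		/* placeholder for the ASPM writes */
#endif
}
```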
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 11848d1e238b..19455a725939 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -673,6 +673,7 @@ struct amdgpu_nbio_ras nbio_v7_4_ras = {
 };
 
+#ifdef CONFIG_PCIEASPM
 static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
 {
 	uint32_t def, data;
@@ -694,9 +695,11 @@ static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
 	if (def != data)
 		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
 }
+#endif
 
 static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
 {
+#ifdef CONFIG_PCIEASPM
 	uint32_t def, data;
 
 	if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
@@ -755,7 +758,10 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
 	if (def != data)
 		WREG32_PCIE(smnPCIE_LC_CNTL6, data);
 
-	nbio_v7_4_program_ltr(adev);
+	/* Don't bother about LTR if LTR is not enabled
+	 * in the path */
+	if (adev->pdev->ltr_path)
+		nbio_v7_4_program_ltr(adev);
 
 	def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
 	data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -779,6 +785,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
 	data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
 	if (def != data)
 		WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
 }
 
 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index f30bc826a878..def89379b51a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -28,6 +28,14 @@
 #include "nbio/nbio_7_7_0_sh_mask.h"
 #include <uapi/linux/kfd_ioctl.h>
 
+static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev)
+{
+	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
+		     adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
+		     adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
 static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev)
 {
 	u32 tmp;
@@ -336,4 +344,5 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
 	.get_clockgating_state = nbio_v7_7_get_clockgating_state,
 	.ih_control = nbio_v7_7_ih_control,
 	.init_registers = nbio_v7_7_init_registers,
+	.remap_hdp_registers = nbio_v7_7_remap_hdp_registers,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 65181efba50e..56424f75dd2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1504,6 +1504,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
 		WREG32_SDMA(i, mmSDMA0_CNTL, temp);
 
 		if (!amdgpu_sriov_vf(adev)) {
+			ring = &adev->sdma.instance[i].ring;
+			adev->nbio.funcs->sdma_doorbell_range(adev, i,
+				ring->use_doorbell, ring->doorbell_index,
+				adev->doorbell_index.sdma_doorbell_range);
+
 			/* unhalt engine */
 			temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
 			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index fde6154f2009..183024d7c184 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1211,25 +1211,6 @@ static int soc15_common_sw_fini(void *handle)
 	return 0;
 }
 
-static void soc15_doorbell_range_init(struct amdgpu_device *adev)
-{
-	int i;
-	struct amdgpu_ring *ring;
-
-	/* sdma/ih doorbell range are programed by hypervisor */
-	if (!amdgpu_sriov_vf(adev)) {
-		for (i = 0; i < adev->sdma.num_instances; i++) {
-			ring = &adev->sdma.instance[i].ring;
-			adev->nbio.funcs->sdma_doorbell_range(adev, i,
-				ring->use_doorbell, ring->doorbell_index,
-				adev->doorbell_index.sdma_doorbell_range);
-		}
-
-		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
-						adev->irq.ih.doorbell_index);
-	}
-}
-
 static int soc15_common_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1249,12 +1230,6 @@ static int soc15_common_hw_init(void *handle)
 	/* enable the doorbell aperture */
 	soc15_enable_doorbell_aperture(adev, true);
 
-	/* HW doorbell routing policy: doorbell writing not
-	 * in SDMA/IH/MM/ACV range will be routed to CP. So
-	 * we need to init SDMA/IH/MM/ACV doorbell range prior
-	 * to CP ip block init and ring test.
-	 */
-	soc15_doorbell_range_init(adev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 55284b24f113..2e50db3b761e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -421,6 +421,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
 {
 	switch (adev->ip_versions[GC_HWIP][0]) {
 	case IP_VERSION(11, 0, 0):
+		return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
 	case IP_VERSION(11, 0, 2):
 		return false;
 	default:
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 03b7066471f9..1e83db0c5438 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -289,6 +289,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 		}
 	}
 
+	if (!amdgpu_sriov_vf(adev))
+		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+						    adev->irq.ih.doorbell_index);
+
 	pci_set_master(adev->pdev);
 
 	/* enable interrupts */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 2022ffbb8dba..59dfca093155 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -340,6 +340,10 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
 		}
 	}
 
+	if (!amdgpu_sriov_vf(adev))
+		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+						    adev->irq.ih.doorbell_index);
+
 	pci_set_master(adev->pdev);
 
 	/* enable interrupts */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5140d9c2bf3b..1efe7fa5bc58 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4759,7 +4759,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
 	plane_info->visible = true;
 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
 
-	plane_info->layer_index = 0;
+	plane_info->layer_index = plane_state->normalized_zpos;
 
 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
 					  &plane_info->color_space);
@@ -4827,7 +4827,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
 	dc_plane_state->global_alpha = plane_info.global_alpha;
 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
 	dc_plane_state->dcc = plane_info.dcc;
-	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
+	dc_plane_state->layer_index = plane_info.layer_index;
 	dc_plane_state->flip_int_enabled = true;
 
 	/*
@@ -9485,6 +9485,14 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		}
 	}
 
+	/*
+	 * DC consults the zpos (layer_index in DC terminology) to determine the
+	 * hw plane on which to enable the hw cursor (see
+	 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+	 * atomic state, so call drm helper to normalize zpos.
+	 */
+	drm_atomic_normalize_zpos(dev, state);
+
 	/* Remove exiting planes if they are modified */
 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
 		ret = dm_update_plane_state(dc, state, plane,
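The amdgpu_dm change above relies on drm_atomic_normalize_zpos(), a real DRM helper that fills plane_state->normalized_zpos for every plane in the atomic state. A minimal sketch of where such a call sits in a driver's atomic_check (demo_atomic_check() is a placeholder, not the driver function):

```c
/* Normalize zpos before any per-plane validation that depends on it. */
static int demo_atomic_check(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	int ret;

	/* Assigns plane_state->normalized_zpos for all planes in @state. */
	ret = drm_atomic_normalize_zpos(dev, state);
	if (ret)
		return ret;

	/* ...per-plane checks may now read normalized_zpos safely... */
	return 0;
}
```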
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index c09be3f15fe6..23a299c929a1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -99,7 +99,7 @@ static int dcn31_get_active_display_cnt_wa(
 	return display_count;
 }
 
-static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
 {
 	struct dc *dc = clk_mgr_base->ctx->dc;
 	int i;
@@ -110,9 +110,10 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
 		if (pipe->top_pipe || pipe->prev_odm_pipe)
 			continue;
 		if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
-			if (disable)
+			if (disable) {
 				pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
-			else
+				reset_sync_context_for_pipe(dc, context, i);
+			} else
 				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
 		}
 	}
@@ -211,11 +212,11 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
 	}
 
 	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
-		dcn31_disable_otg_wa(clk_mgr_base, true);
+		dcn31_disable_otg_wa(clk_mgr_base, context, true);
 
 		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
 		dcn31_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
-		dcn31_disable_otg_wa(clk_mgr_base, false);
+		dcn31_disable_otg_wa(clk_mgr_base, context, false);
 
 		update_dispclk = true;
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index beb025cd3dc2..8559dcd80af0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -119,7 +119,7 @@ static int dcn314_get_active_display_cnt_wa(
 	return display_count;
 }
 
-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
 {
 	struct dc *dc = clk_mgr_base->ctx->dc;
 	int i;
@@ -129,11 +129,11 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
 		if (pipe->top_pipe || pipe->prev_odm_pipe)
 			continue;
-		if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
-				     dc_is_virtual_signal(pipe->stream->signal))) {
-			if (disable)
+		if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+			if (disable) {
 				pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
-			else
+				reset_sync_context_for_pipe(dc, context, i);
+			} else
 				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
 		}
 	}
@@ -233,11 +233,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
 	}
 
 	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
-		dcn314_disable_otg_wa(clk_mgr_base, true);
+		dcn314_disable_otg_wa(clk_mgr_base, context, true);
 
 		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
 		dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
-		dcn314_disable_otg_wa(clk_mgr_base, false);
+		dcn314_disable_otg_wa(clk_mgr_base, context, false);
 
 		update_dispclk = true;
 	}
@@ -670,6 +670,8 @@ static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *cl
 	}
 	ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
 	bw_params->vram_type = bios_info->memory_type;
+
+	bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
 	bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
 
 	for (i = 0; i < WM_SET_COUNT; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index cc076621f5e6..98ad8e0fd2d8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -46,6 +46,9 @@
 #define TO_CLK_MGR_DCN315(clk_mgr)\
 	container_of(clk_mgr, struct clk_mgr_dcn315, base)
 
+#define UNSUPPORTED_DCFCLK 10000000
+#define MIN_DPP_DISP_CLK 100000
+
 static int dcn315_get_active_display_cnt_wa(
 		struct dc *dc,
 		struct dc_state *context)
@@ -79,7 +82,7 @@ static int dcn315_get_active_display_cnt_wa(
 	return display_count;
 }
 
-static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
 {
 	struct dc *dc = clk_mgr_base->ctx->dc;
 	int i;
@@ -91,9 +94,10 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
 			continue;
 		if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
 				     dc_is_virtual_signal(pipe->stream->signal))) {
-			if (disable)
+			if (disable) {
 				pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
-			else
+				reset_sync_context_for_pipe(dc, context, i);
+			} else
 				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
 		}
 	}
@@ -146,6 +150,9 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
 		}
 	}
 
+	/* Lock pstate by requesting unsupported dcfclk if change is unsupported */
+	if (!new_clocks->p_state_change_support)
+		new_clocks->dcfclk_khz = UNSUPPORTED_DCFCLK;
 	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
 		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
 		dcn315_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
@@ -159,10 +166,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
 
 	// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
 	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-		if (new_clocks->dppclk_khz < 100000)
-			new_clocks->dppclk_khz = 100000;
-		if (new_clocks->dispclk_khz < 100000)
-			new_clocks->dispclk_khz = 100000;
+		if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
+			new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
+		if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
+			new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
 	}
 
 	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
@@ -175,12 +182,12 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
 	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
 		/* No need to apply the w/a if we haven't taken over from bios yet */
 		if (clk_mgr_base->clks.dispclk_khz)
-			dcn315_disable_otg_wa(clk_mgr_base, true);
+			dcn315_disable_otg_wa(clk_mgr_base, context, true);
 
 		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
 		dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
 		if (clk_mgr_base->clks.dispclk_khz)
-			dcn315_disable_otg_wa(clk_mgr_base, false);
+			dcn315_disable_otg_wa(clk_mgr_base, context, false);
 
 		update_dispclk = true;
 	}
@@ -275,7 +282,7 @@ static struct wm_table ddr5_wm_table = {
 		{
 			.wm_inst = WM_A,
 			.wm_type = WM_TYPE_PSTATE_CHG,
-			.pstate_latency_us = 64.0,
+			.pstate_latency_us = 129.0,
 			.sr_exit_time_us = 11.5,
 			.sr_enter_plus_exit_time_us = 14.5,
 			.valid = true,
@@ -283,7 +290,7 @@ static struct wm_table ddr5_wm_table = {
 		{
 			.wm_inst = WM_B,
 			.wm_type = WM_TYPE_PSTATE_CHG,
-			.pstate_latency_us = 64.0,
+			.pstate_latency_us = 129.0,
 			.sr_exit_time_us = 11.5,
 			.sr_enter_plus_exit_time_us = 14.5,
 			.valid = true,
@@ -291,7 +298,7 @@ static struct wm_table ddr5_wm_table = {
 		{
 			.wm_inst = WM_C,
 			.wm_type = WM_TYPE_PSTATE_CHG,
-			.pstate_latency_us = 64.0,
+			.pstate_latency_us = 129.0,
 			.sr_exit_time_us = 11.5,
 			.sr_enter_plus_exit_time_us = 14.5,
 			.valid = true,
@@ -299,7 +306,7 @@ static struct wm_table ddr5_wm_table = {
 		{
 			.wm_inst = WM_D,
 			.wm_type = WM_TYPE_PSTATE_CHG,
-			.pstate_latency_us = 64.0,
+			.pstate_latency_us = 129.0,
 			.sr_exit_time_us = 11.5,
 			.sr_enter_plus_exit_time_us = 14.5,
 			.valid = true,
@@ -556,8 +563,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
 	ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
 	bw_params->vram_type = bios_info->memory_type;
 	bw_params->num_channels = bios_info->ma_channel_number;
-	if (!bw_params->num_channels)
-		bw_params->num_channels = 2;
+	bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
 
 	for (i = 0; i < WM_SET_COUNT; i++) {
 		bw_params->wm_table.entries[i].wm_inst = i;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 0cd3d2eb7ac7..187f5b27fdc8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -112,7 +112,7 @@ static int dcn316_get_active_display_cnt_wa(
 	return display_count;
 }
 
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
 {
 	struct dc *dc = clk_mgr_base->ctx->dc;
 	int i;
@@ -124,9 +124,10 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
 			continue;
 		if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
 				     dc_is_virtual_signal(pipe->stream->signal))) {
-			if (disable)
+			if (disable) {
 				pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
-			else
+				reset_sync_context_for_pipe(dc, context, i);
+			} else
 				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
 		}
 	}
@@ -221,11 +222,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
 	}
 
 	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
-		dcn316_disable_otg_wa(clk_mgr_base, true);
+		dcn316_disable_otg_wa(clk_mgr_base, context, true);
 
 		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
 		dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
-		dcn316_disable_otg_wa(clk_mgr_base, false);
+		dcn316_disable_otg_wa(clk_mgr_base, context, false);
 
 		update_dispclk = true;
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 48dad093ae8b..780f7f4c28b6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2758,8 +2758,14 @@ bool perform_link_training_with_retries(
 					skip_video_pattern);
 
 		/* Transmit idle pattern once training successful. */
-		if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
+		if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
 			dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+			/* Update verified link settings to current one
+			 * Because DPIA LT might fallback to lower link setting.
+			 */
+			link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+			link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+		}
 	} else {
 		status = dc_link_dp_perform_link_training(link,
 							  &pipe_ctx->link_res,
@@ -5121,6 +5127,14 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
 			lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
 					DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
 
+	/* If this chip cap is set, at least one retimer must exist in the chain
+	 * Override count to 1 if we receive a known bad count (0 or an invalid value) */
+	if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
+	    (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+		ASSERT(0);
+		link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+	}
+
 	/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
 	is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
 			link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 7dbab15bfa68..ccf7bd3d90fe 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -3584,6 +3584,23 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
 	}
 }
 
+void reset_sync_context_for_pipe(const struct dc *dc,
+	struct dc_state *context,
+	uint8_t pipe_idx)
+{
+	int i;
+	struct pipe_ctx *pipe_ctx_reset;
+
+	/* reset the otg sync context for the pipe and its slave pipes if any */
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		pipe_ctx_reset = &context->res_ctx.pipe_ctx[i];
+
+		if (((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_reset) == pipe_idx) &&
+			IS_PIPE_SYNCD_VALID(pipe_ctx_reset)) || (i == pipe_idx))
+			SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_reset, i);
+	}
+}
+
 uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
 {
 	/* TODO - get transmitter to phy idx mapping from DMUB */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index f62d50901d92..0c85ab5933b4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -329,7 +329,7 @@ bool dc_stream_set_cursor_attributes(
 
 	dc = stream->ctx->dc;
 
-	if (attributes->height * attributes->width * 4 > 16384)
+	if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384)
 		if (stream->mall_stream_config.type == SUBVP_MAIN)
 			return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 5908b60db313..dbf8158b832e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -745,6 +745,7 @@ struct dc_debug_options {
 	bool disable_fixed_vs_aux_timeout_wa;
 	bool force_disable_subvp;
 	bool force_subvp_mclk_switch;
+	bool allow_sw_cursor_fallback;
 	bool force_usr_allow;
 	/* uses value at boot and disables switch */
 	bool disable_dtb_ref_clk_switch;
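The dc_dmub_srv.c diff that follows converts the SubVP/DRR timing math from int16_t and div64_s64 to uint16_t and div64_u64, widening every multiply to 64 bits before dividing. A standalone sketch of why that widening matters (the timing numbers below are illustrative, not taken from the patch):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t v_total = 2250, h_total = 4400; /* ~4K timing, made up */
	uint32_t pix_clk_100hz = 5940000;        /* 594 MHz in 100 Hz units */

	/* 32-bit product: 2250 * 4400 * 1000000 wraps around. */
	uint32_t wrapped = v_total * h_total * 1000000u;

	/* Widen first, divide last - the shape div64_u64() enforces in-kernel. */
	uint64_t frame_us = ((uint64_t)v_total * h_total * 1000000u) /
			    ((uint64_t)pix_clk_100hz * 100u);

	printf("wrapped 32-bit product: %u\n", wrapped);
	printf("frame time: %llu us\n", (unsigned long long)frame_us); /* ~16666, i.e. 60 Hz */
	return 0;
}
```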
pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now - drr_frame_us = div64_s64(drr_timing->v_total * drr_timing->h_total, - (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000); + drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000), + (((uint64_t)drr_timing->pix_clk_100hz * 100))); // P-State allow width and FW delays already included phantom_timing->v_addressable - mall_region_us = div64_s64(phantom_timing->v_addressable * phantom_timing->h_total, - (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000); + mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000), + (((uint64_t)phantom_timing->pix_clk_100hz * 100))); min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US; - min_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 * - (div64_s64((int64_t)min_drr_supported_us, 1000000)), - (int64_t)drr_timing->h_total); - - prefetch_us = div64_s64((phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total, - (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000 + - dc->caps.subvp_prefetch_end_to_mall_start_us); - subvp_active_us = div64_s64(main_timing->v_addressable * main_timing->h_total, - (int64_t)(main_timing->pix_clk_100hz * 100) * 1000000); - drr_active_us = div64_s64(drr_timing->v_addressable * drr_timing->h_total, - (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000); - max_drr_vblank_us = div64_s64((int64_t)(subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us; + min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us), + (((uint64_t)drr_timing->h_total * 1000000))); + + prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000), + (((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us)); + subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000), + (((uint64_t)main_timing->pix_clk_100hz * 100))); + drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000), + (((uint64_t)drr_timing->pix_clk_100hz * 100))); + max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us; max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us; max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? 
max_drr_vblank_us : max_drr_mallregion_us; - max_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 * (div64_s64((int64_t)max_drr_supported_us, 1000000)), - (int64_t)drr_timing->h_total); + max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us), + (((uint64_t)drr_timing->h_total * 1000000))); pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported; pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported; @@ -548,10 +546,12 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc, struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL; - subvp0_prefetch_us = div64_s64((phantom_timing0->v_total - phantom_timing0->v_front_porch) * phantom_timing0->h_total, - (int64_t)(phantom_timing0->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us); - subvp1_prefetch_us = div64_s64((phantom_timing1->v_total - phantom_timing1->v_front_porch) * phantom_timing1->h_total, - (int64_t)(phantom_timing1->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us); + subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) * + (uint64_t)phantom_timing0->h_total * 1000000), + (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us)); + subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) * + (uint64_t)phantom_timing1->h_total * 1000000), + (((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us)); // Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time) // should increase its prefetch time to match the other @@ -559,16 +559,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc, pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1]; prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us; pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines = - div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) * - (phantom_timing1->pix_clk_100hz * 100) + phantom_timing1->h_total - 1), - (int64_t)phantom_timing1->h_total); + div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) * + ((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)), + ((uint64_t)phantom_timing1->h_total * 1000000)); + } else if (subvp1_prefetch_us > subvp0_prefetch_us) { pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0]; prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us; pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines = - div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) * - (phantom_timing0->pix_clk_100hz * 100) + phantom_timing0->h_total - 1), - (int64_t)phantom_timing0->h_total); + div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) * + ((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)), + ((uint64_t)phantom_timing0->h_total * 1000000)); } } @@ -630,13 +631,11 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc, // Round up
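The "Round up" conversions that follow use the standard integer ceiling idiom, (numerator + denominator - 1) / denominator, with the microsecond scale folded into the denominator instead of pre-divided away (the removed code divided the microsecond count by 1000000 first, discarding everything below one second). A short sketch of the same pattern, with a hypothetical helper name rather than anything from the patch:

#include <stdint.h>
#include <assert.h>

/*
 * ceil(time_us / line_time_us) in pure integer math, where one scanline
 * lasts h_total / (pix_clk_100hz * 100) seconds.
 */
static uint64_t us_to_lines_ceil(uint64_t time_us, uint64_t pix_clk_100hz,
				 uint64_t h_total)
{
	uint64_t num = time_us * pix_clk_100hz * 100;
	uint64_t den = h_total * 1000000ULL;

	return (num + den - 1) / den;	/* round up, as the hunk does */
}

int main(void)
{
	/* 60 us at 594 MHz with h_total 4400 spans 8.1 lines; round to 9. */
	assert(us_to_lines_ceil(60, 5940000, 4400) == 9);
	return 0;
}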
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines = - div64_s64(((div64_s64((int64_t)dc->caps.subvp_prefetch_end_to_mall_start_us, 1000000)) * - (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1), - (int64_t)phantom_timing->h_total); + div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) + + ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000)); pipe_data->pipe_config.subvp_data.processing_delay_lines = - div64_s64(((div64_s64((int64_t)dc->caps.subvp_fw_processing_delay_us, 1000000)) * - (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1), - (int64_t)phantom_timing->h_total); + div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) + + ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000)); // Find phantom pipe index based on phantom stream for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j]; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 38a67051d470..aea49334021c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2164,7 +2164,8 @@ static void dce110_setup_audio_dto( continue; if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A) continue; - if (pipe_ctx->stream_res.audio != NULL) { + if (pipe_ctx->stream_res.audio != NULL && + pipe_ctx->stream_res.audio->enabled == false) { struct audio_output audio_output; build_audio_output(context, pipe_ctx, &audio_output); @@ -2204,7 +2205,8 @@ static void dce110_setup_audio_dto( if (!dc_is_dp_signal(pipe_ctx->stream->signal)) continue; - if (pipe_ctx->stream_res.audio != NULL) { + if (pipe_ctx->stream_res.audio != NULL && + pipe_ctx->stream_res.audio->enabled == false) { struct audio_output audio_output; build_audio_output(context, pipe_ctx, &audio_output); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h index cd2671161ef1..7ce64a3c1b02 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h @@ -445,226 +445,6 @@ type DSCRM_DSC_FORWARD_EN; \ type DSCRM_DSC_OPP_PIPE_SOURCE -#define DSC_REG_LIST_DCN314(id) \ - SRI(DSC_TOP_CONTROL, DSC_TOP, id),\ - SRI(DSC_DEBUG_CONTROL, DSC_TOP, id),\ - SRI(DSCC_CONFIG0, DSCC, id),\ - SRI(DSCC_CONFIG1, DSCC, id),\ - SRI(DSCC_STATUS, DSCC, id),\ - SRI(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id),\ - SRI(DSCC_PPS_CONFIG0, DSCC, id),\ - SRI(DSCC_PPS_CONFIG1, DSCC, id),\ - SRI(DSCC_PPS_CONFIG2, DSCC, id),\ - SRI(DSCC_PPS_CONFIG3, DSCC, id),\ - SRI(DSCC_PPS_CONFIG4, DSCC, id),\ - SRI(DSCC_PPS_CONFIG5, DSCC, id),\ - SRI(DSCC_PPS_CONFIG6, DSCC, id),\ - SRI(DSCC_PPS_CONFIG7, DSCC, id),\ - SRI(DSCC_PPS_CONFIG8, DSCC, id),\ - SRI(DSCC_PPS_CONFIG9, DSCC, id),\ - SRI(DSCC_PPS_CONFIG10, DSCC, id),\ - SRI(DSCC_PPS_CONFIG11, DSCC, id),\ - SRI(DSCC_PPS_CONFIG12, DSCC, id),\ - SRI(DSCC_PPS_CONFIG13, DSCC, id),\ - SRI(DSCC_PPS_CONFIG14, DSCC, id),\ - SRI(DSCC_PPS_CONFIG15, DSCC, id),\ - SRI(DSCC_PPS_CONFIG16, DSCC, id),\ - SRI(DSCC_PPS_CONFIG17, DSCC, id),\ - SRI(DSCC_PPS_CONFIG18, DSCC, id),\ - SRI(DSCC_PPS_CONFIG19, DSCC, id),\ - SRI(DSCC_PPS_CONFIG20, DSCC, id),\ - SRI(DSCC_PPS_CONFIG21, DSCC, id),\ - 
SRI(DSCC_PPS_CONFIG22, DSCC, id),\ - SRI(DSCC_MEM_POWER_CONTROL, DSCC, id),\ - SRI(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id),\ - SRI(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id),\ - SRI(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id),\ - SRI(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id),\ - SRI(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id),\ - SRI(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id),\ - SRI(DSCC_MAX_ABS_ERROR0, DSCC, id),\ - SRI(DSCC_MAX_ABS_ERROR1, DSCC, id),\ - SRI(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCCIF_CONFIG0, DSCCIF, id),\ - SRI(DSCCIF_CONFIG1, DSCCIF, id),\ - SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id) - -#define DSC_REG_LIST_SH_MASK_DCN314(mask_sh)\ - DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \ - DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \ - DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \ - DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \ - DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_TEST_CLOCK_MUX_SEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \ - DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \ - DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \ - /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \ - DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, 
DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MAJOR, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG0, PPS_IDENTIFIER, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG0, LINEBUF_DEPTH, mask_sh), \ - DSC2_SF(DSCC0, DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BITS_PER_PIXEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, VBR_ENABLE, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, SIMPLE_422, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CONVERT_RGB, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_422, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_420, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CHUNK_SIZE, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_WIDTH, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_HEIGHT, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_WIDTH, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_HEIGHT, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_XMIT_DELAY, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_DEC_DELAY, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG5, INITIAL_SCALE_VALUE, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG5, SCALE_INCREMENT_INTERVAL, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SCALE_DECREMENT_INTERVAL, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG6, FIRST_LINE_BPG_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SECOND_LINE_BPG_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG7, NFL_BPG_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG8, NSL_BPG_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG8, SECOND_LINE_OFFSET_ADJ, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG9, INITIAL_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG9, FINAL_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MIN_QP, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MAX_QP, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG10, RC_MODEL_SIZE, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_EDGE_FACTOR, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT0, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT1, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_LO, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_HI, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH0, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH1, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH2, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH3, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH4, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG13, 
RC_BUF_THRESH5, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH6, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH7, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH8, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH9, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH10, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH11, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH12, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH13, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MIN_QP0, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MAX_QP0, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_BPG_OFFSET0, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP1, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP1, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET1, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP2, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP2, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET2, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP3, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP3, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET3, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP4, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP4, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET4, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP5, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP5, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET5, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP6, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP6, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET6, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP7, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP7, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET7, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP8, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP8, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET8, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP9, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP9, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET9, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP10, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP10, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET10, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP11, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP11, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET11, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP12, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP12, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET12, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP13, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP13, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET13, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP14, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP14, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET14, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_FORCE, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, 
DSCC_MEM_PWR_DIS, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_STATE, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_FORCE, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_DIS, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_STATE, mask_sh), \ - DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC_R_Y_SQUARED_ERROR_LOWER, mask_sh), \ - DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC_R_Y_SQUARED_ERROR_UPPER, mask_sh), \ - DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC_G_CB_SQUARED_ERROR_LOWER, mask_sh), \ - DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC_G_CB_SQUARED_ERROR_UPPER, mask_sh), \ - DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC_B_CR_SQUARED_ERROR_LOWER, mask_sh), \ - DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC_B_CR_SQUARED_ERROR_UPPER, mask_sh), \ - DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_R_Y_MAX_ABS_ERROR, mask_sh), \ - DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_G_CB_MAX_ABS_ERROR, mask_sh), \ - DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR1, DSCC_B_CR_MAX_ABS_ERROR, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_PIXEL_FORMAT, mask_sh), \ - DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_WIDTH, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_HEIGHT, mask_sh), \ - DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \ - DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh) - - struct dcn20_dsc_registers { uint32_t DSC_TOP_CONTROL; uint32_t DSC_DEBUG_CONTROL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 884fa060f375..598ce872a8d7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1565,6 +1565,7 @@ static void dcn20_update_dchubp_dpp( /* Any updates are handled in dc interface, just need * to apply existing for plane enable / opp change */ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed + || pipe_ctx->update_flags.bits.plane_changed || pipe_ctx->stream->update_flags.bits.gamut_remap || pipe_ctx->stream->update_flags.bits.out_csc) { /* dpp/cm gamut remap*/ diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c index e3351ddc566c..06d8638db696 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c @@ -67,8 +67,7 @@ static void enc314_disable_fifo(struct stream_encoder *enc) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0, - DIG_FIFO_READ_START_LEVEL, 0); + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0); } static void enc314_dp_set_odm_combine( diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index 39931d48f385..f4d1b83979fe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -343,7 +343,6 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig { struct dc_stream_state *stream = pipe_ctx->stream; unsigned int odm_combine_factor = 0; - struct dc *dc = pipe_ctx->stream->ctx->dc; bool two_pix_per_container = false; two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing); @@ -364,7 +363,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig } else { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_4; - if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy) + if (odm_combine_factor == 2) *k2_div = PIXEL_RATE_DIV_BY_2; } } @@ -384,21 +383,10 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx) return; odm_combine_factor = get_odm_config(pipe_ctx, NULL); - if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1 - || dcn314_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) + if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1) pix_per_cycle = 2; if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode) pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc, pix_per_cycle); } - -bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx) -{ - struct dc *dc = pipe_ctx->stream->ctx->dc; - - if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) && - dc->debug.enable_dp_dig_pixel_rate_div_policy) - return true; - return false; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h index d014580592ac..244280298212 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h @@ -41,6 +41,4 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx); -bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx); - #endif /* __DC_HWSS_DCN314_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index fcf67eb3478f..72a563a4c3e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -146,7 +146,6 @@ static const struct hwseq_private_funcs dcn314_private_funcs = { .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, .calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values, .set_pixels_per_cycle = dcn314_set_pixels_per_cycle, - 
.is_dp_dig_pixel_rate_div_policy = dcn314_is_dp_dig_pixel_rate_div_policy, }; void dcn314_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 2a2a4a9cc117..44ac1c2aabf5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -87,6 +87,9 @@ #define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10 #define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L +#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0 +#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL + #include "reg_helper.h" #include "dce/dmub_abm.h" #include "dce/dmub_psr.h" @@ -579,7 +582,7 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { #define dsc_regsDCN314(id)\ [id] = {\ - DSC_REG_LIST_DCN314(id)\ + DSC_REG_LIST_DCN20(id)\ } static const struct dcn20_dsc_registers dsc_regs[] = { @@ -590,11 +593,11 @@ static const struct dcn20_dsc_registers dsc_regs[] = { }; static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN314(__SHIFT) + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) }; static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN314(_MASK) + DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; static const struct dcn30_mpc_registers mpc_regs = { @@ -844,7 +847,7 @@ static const struct resource_caps res_cap_dcn314 = { .num_ddc = 5, .num_vmid = 16, .num_mpc_3dlut = 2, - .num_dsc = 4, + .num_dsc = 3, }; static const struct dc_plane_cap plane_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c index 6ec1c52535b9..2038cbda33f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c @@ -103,6 +103,11 @@ void hubp32_cursor_set_attributes( enum cursor_lines_per_chunk lpc = hubp2_get_lines_per_chunk( attr->width, attr->color_format); + //Round cursor width up to next multiple of 64 + uint32_t cursor_width = ((attr->width + 63) / 64) * 64; + uint32_t cursor_height = attr->height; + uint32_t cursor_size = cursor_width * cursor_height; + hubp->curs_attr = *attr; REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, @@ -126,7 +131,24 @@ void hubp32_cursor_set_attributes( /* used to shift the cursor chunk request deadline */ CURSOR0_CHUNK_HDL_ADJUST, 3); - if (attr->width * attr->height * 4 > 16384) + switch (attr->color_format) { + case CURSOR_MODE_MONO: + cursor_size /= 2; + break; + case CURSOR_MODE_COLOR_1BIT_AND: + case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: + case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: + cursor_size *= 4; + break; + + case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: + case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: + default: + cursor_size *= 8; + break; + } + + if (cursor_size > 16384) REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true); else REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 8d9d96c39808..344fe7535df5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -741,7 +741,29 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context) struct hubp *hubp = pipe->plane_res.hubp; if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) { - if (hubp->curs_attr.width * hubp->curs_attr.height * 4 > 
16384) + //Round cursor width up to next multiple of 64 + int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64; + int cursor_height = hubp->curs_attr.height; + int cursor_size = cursor_width * cursor_height; + + switch (hubp->curs_attr.color_format) { + case CURSOR_MODE_MONO: + cursor_size /= 2; + break; + case CURSOR_MODE_COLOR_1BIT_AND: + case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: + case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: + cursor_size *= 4; + break; + + case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: + case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: + default: + cursor_size *= 8; + break; + } + + if (cursor_size > 16384) cache_cursor = true; if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 8b887b552f2c..c3b783cea8a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -871,6 +871,7 @@ static const struct dc_debug_options debug_defaults_drv = { .exit_idle_opt_for_cursor_updates = true, .enable_single_display_2to1_odm_policy = true, .enable_dp_dig_pixel_rate_div_policy = 1, + .allow_sw_cursor_fallback = false, }; static const struct dc_debug_options debug_defaults_diags = { @@ -2039,7 +2040,8 @@ static bool dcn32_resource_construct( dc->caps.max_downscale_ratio = 600; dc->caps.i2c_speed_in_khz = 100; dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/ - dc->caps.max_cursor_size = 256; + /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/ + dc->caps.max_cursor_size = 64; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h index 1e7e6201c880..cf15d0e5e9b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h @@ -30,6 +30,9 @@ #define DCN3_2_DET_SEG_SIZE 64 #define DCN3_2_MALL_MBLK_SIZE_BYTES 65536 // 64 * 1024 +#define DCN3_2_MBLK_WIDTH 128 +#define DCN3_2_MBLK_HEIGHT_4BPE 128 +#define DCN3_2_MBLK_HEIGHT_8BPE 64 #define TO_DCN32_RES_POOL(pool)\ container_of(pool, struct dcn32_resource_pool, base) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index ab918fe38f6a..1f195c5b3377 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -46,7 +46,6 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context) { uint32_t num_ways = 0; - uint32_t mall_region_pixels = 0; uint32_t bytes_per_pixel = 0; uint32_t cache_lines_used = 0; uint32_t lines_per_way = 0; @@ -54,20 +53,64 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat uint32_t bytes_in_mall = 0; uint32_t num_mblks = 0; uint32_t cache_lines_per_plane = 0; - uint32_t i = 0; + uint32_t i = 0, j = 0; + uint32_t mblk_width = 0; + uint32_t mblk_height = 0; + uint32_t full_vp_width_blk_aligned = 0; + uint32_t full_vp_height_blk_aligned = 0; + uint32_t mall_alloc_width_blk_aligned = 0; + uint32_t mall_alloc_height_blk_aligned = 0; + uint32_t full_vp_height = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // 
Find the phantom pipes - if (pipe->stream && pipe->plane_state && !pipe->top_pipe && + if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; - mall_region_pixels = pipe->plane_state->plane_size.surface_pitch * pipe->stream->timing.v_addressable; + struct pipe_ctx *main_pipe = NULL; + + /* Get full viewport height from main pipe (required for MBLK calculation) */ + for (j = 0; j < dc->res_pool->pipe_count; j++) { + main_pipe = &context->res_ctx.pipe_ctx[j]; + if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) { + full_vp_height = main_pipe->plane_res.scl_data.viewport.height; + break; + } + } - // For bytes required in MALL, calculate based on number of MBlks required - num_mblks = (mall_region_pixels * bytes_per_pixel + - DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES; + bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; + mblk_width = DCN3_2_MBLK_WIDTH; + mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE; + + /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) - + * FLOOR(vp_x_start, blk_width) + */ + full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x + + pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) + + (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width); + + /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) - + * FLOOR(vp_y_start, blk_height) + */ + full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y + + full_vp_height + mblk_height - 1) / mblk_height * mblk_height) + + (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height); + + /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */ + mall_alloc_width_blk_aligned = full_vp_width_blk_aligned; + + /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */ + mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) / + mblk_height * mblk_height + mblk_height; + + /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c; + * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c; + * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c); + * (Should be divisible, but round up if not) + */ + num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) * + ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height); bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES; // cache lines used is total bytes / cache_line size. 
Add +2 for worst case alignment // (MALL is 64-byte aligned) diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index c8b7d6ff38f4..7309eed33a61 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -872,6 +872,7 @@ static const struct dc_debug_options debug_defaults_drv = { .exit_idle_opt_for_cursor_updates = true, .enable_single_display_2to1_odm_policy = true, .enable_dp_dig_pixel_rate_div_policy = 1, + .allow_sw_cursor_fallback = false, }; static const struct dc_debug_options debug_defaults_diags = { @@ -1651,7 +1652,8 @@ static bool dcn321_resource_construct( dc->caps.max_downscale_ratio = 600; dc->caps.i2c_speed_in_khz = 100; dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/ - dc->caps.max_cursor_size = 256; + /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/ + dc->caps.max_cursor_size = 64; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 86a3b5bfd699..cb81ed2fbd53 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -70,6 +70,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags) @@ -123,6 +125,7 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o +DML += dcn314/display_mode_vba_314.o dcn314/display_rq_dlg_calc_314.o DML += dcn32/display_mode_vba_32.o dcn32/display_rq_dlg_calc_32.o dcn32/display_mode_vba_util_32.o DML += dcn31/dcn31_fpu.o DML += dcn32/dcn32_fpu.o diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 876b321b30ca..1cb858dd6ea0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -6610,8 +6610,7 @@ static double CalculateUrgentLatency( return ret; } - -static void UseMinimumDCFCLK( +static noinline_for_stack void UseMinimumDCFCLK( struct display_mode_lib *mode_lib, int MaxInterDCNTileRepeaters, int MaxPrefetchMode, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index 149a1b17cdf3..fa7b0291ce4d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = { 
.do_urgent_latency_adjustment = false, .urgent_latency_adjustment_fabric_clock_component_us = 0, .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, + .num_chans = 4, }; struct _vcs_dpi_ip_params_st dcn3_16_ip = { @@ -680,7 +681,11 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count; - dcn3_15_soc.num_chans = bw_params->num_channels; + + if (bw_params->num_channels > 0) + dcn3_15_soc.num_chans = bw_params->num_channels; + if (bw_params->dram_channel_width_bytes > 0) + dcn3_15_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes; ASSERT(clk_table->num_entries); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index d63b4209b14c..8ca66f1644dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -251,33 +251,13 @@ static void CalculateRowBandwidth( static void CalculateFlipSchedule( struct display_mode_lib *mode_lib, + unsigned int k, double HostVMInefficiencyFactor, double UrgentExtraLatency, double UrgentLatency, - unsigned int GPUVMMaxPageTableLevels, - bool HostVMEnable, - unsigned int HostVMMaxNonCachedPageTableLevels, - bool GPUVMEnable, - double HostVMMinPageSize, double PDEAndMetaPTEBytesPerFrame, double MetaRowBytes, - double DPTEBytesPerRow, - double BandwidthAvailableForImmediateFlip, - unsigned int TotImmediateFlipBytes, - enum source_format_class SourcePixelFormat, - double LineTime, - double VRatio, - double VRatioChroma, - double Tno_bw, - bool DCCEnable, - unsigned int dpte_row_height, - unsigned int meta_row_height, - unsigned int dpte_row_height_chroma, - unsigned int meta_row_height_chroma, - double *DestinationLinesToRequestVMInImmediateFlip, - double *DestinationLinesToRequestRowInImmediateFlip, - double *final_flip_bw, - bool *ImmediateFlipSupportedForPipe); + double DPTEBytesPerRow); static double CalculateWriteBackDelay( enum source_format_class WritebackPixelFormat, double WritebackHRatio, @@ -311,64 +291,28 @@ static void CalculateVupdateAndDynamicMetadataParameters( static void CalculateWatermarksAndDRAMSpeedChangeSupport( struct display_mode_lib *mode_lib, unsigned int PrefetchMode, - unsigned int NumberOfActivePlanes, - unsigned int MaxLineBufferLines, - unsigned int LineBufferSize, - unsigned int WritebackInterfaceBufferSize, double DCFCLK, double ReturnBW, - bool SynchronizedVBlank, - unsigned int dpte_group_bytes[], - unsigned int MetaChunkSize, double UrgentLatency, double ExtraLatency, - double WritebackLatency, - double WritebackChunkSize, double SOCCLK, - double DRAMClockChangeLatency, - double SRExitTime, - double SREnterPlusExitTime, - double SRExitZ8Time, - double SREnterPlusExitZ8Time, double DCFCLKDeepSleep, unsigned int DETBufferSizeY[], unsigned int DETBufferSizeC[], unsigned int SwathHeightY[], unsigned int SwathHeightC[], - unsigned int LBBitPerPixel[], double SwathWidthY[], double SwathWidthC[], - double HRatio[], - double HRatioChroma[], - unsigned int vtaps[], - unsigned int VTAPsChroma[], - double VRatio[], - double VRatioChroma[], - unsigned int HTotal[], - double PixelClock[], - unsigned int BlendingAndTiming[], unsigned int DPPPerPlane[], double BytePerPixelDETY[], double BytePerPixelDETC[], - double DSTXAfterScaler[], - double DSTYAfterScaler[], - bool WritebackEnable[], - enum 
source_format_class WritebackPixelFormat[], - double WritebackDestinationWidth[], - double WritebackDestinationHeight[], - double WritebackSourceHeight[], bool UnboundedRequestEnabled, int unsigned CompressedBufferSizeInkByte, enum clock_change_support *DRAMClockChangeSupport, - double *UrgentWatermark, - double *WritebackUrgentWatermark, - double *DRAMClockChangeWatermark, - double *WritebackDRAMClockChangeWatermark, double *StutterExitWatermark, double *StutterEnterPlusExitWatermark, double *Z8StutterExitWatermark, - double *Z8StutterEnterPlusExitWatermark, - double *MinActiveDRAMClockChangeLatencySupported); + double *Z8StutterEnterPlusExitWatermark); static void CalculateDCFCLKDeepSleep( struct display_mode_lib *mode_lib, @@ -2904,33 +2848,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman for (k = 0; k < v->NumberOfActivePlanes; ++k) { CalculateFlipSchedule( mode_lib, + k, HostVMInefficiencyFactor, v->UrgentExtraLatency, v->UrgentLatency, - v->GPUVMMaxPageTableLevels, - v->HostVMEnable, - v->HostVMMaxNonCachedPageTableLevels, - v->GPUVMEnable, - v->HostVMMinPageSize, v->PDEAndMetaPTEBytesFrame[k], v->MetaRowByte[k], - v->PixelPTEBytesPerRow[k], - v->BandwidthAvailableForImmediateFlip, - v->TotImmediateFlipBytes, - v->SourcePixelFormat[k], - v->HTotal[k] / v->PixelClock[k], - v->VRatio[k], - v->VRatioChroma[k], - v->Tno_bw[k], - v->DCCEnable[k], - v->dpte_row_height[k], - v->meta_row_height[k], - v->dpte_row_height_chroma[k], - v->meta_row_height_chroma[k], - &v->DestinationLinesToRequestVMInImmediateFlip[k], - &v->DestinationLinesToRequestRowInImmediateFlip[k], - &v->final_flip_bw[k], - &v->ImmediateFlipSupportedForPipe[k]); + v->PixelPTEBytesPerRow[k]); } v->total_dcn_read_bw_with_flip = 0.0; @@ -3017,64 +2941,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman CalculateWatermarksAndDRAMSpeedChangeSupport( mode_lib, PrefetchMode, - v->NumberOfActivePlanes, - v->MaxLineBufferLines, - v->LineBufferSize, - v->WritebackInterfaceBufferSize, v->DCFCLK, v->ReturnBW, - v->SynchronizedVBlank, - v->dpte_group_bytes, - v->MetaChunkSize, v->UrgentLatency, v->UrgentExtraLatency, - v->WritebackLatency, - v->WritebackChunkSize, v->SOCCLK, - v->DRAMClockChangeLatency, - v->SRExitTime, - v->SREnterPlusExitTime, - v->SRExitZ8Time, - v->SREnterPlusExitZ8Time, v->DCFCLKDeepSleep, v->DETBufferSizeY, v->DETBufferSizeC, v->SwathHeightY, v->SwathHeightC, - v->LBBitPerPixel, v->SwathWidthY, v->SwathWidthC, - v->HRatio, - v->HRatioChroma, - v->vtaps, - v->VTAPsChroma, - v->VRatio, - v->VRatioChroma, - v->HTotal, - v->PixelClock, - v->BlendingAndTiming, v->DPPPerPlane, v->BytePerPixelDETY, v->BytePerPixelDETC, - v->DSTXAfterScaler, - v->DSTYAfterScaler, - v->WritebackEnable, - v->WritebackPixelFormat, - v->WritebackDestinationWidth, - v->WritebackDestinationHeight, - v->WritebackSourceHeight, v->UnboundedRequestEnabled, v->CompressedBufferSizeInkByte, &DRAMClockChangeSupport, - &v->UrgentWatermark, - &v->WritebackUrgentWatermark, - &v->DRAMClockChangeWatermark, - &v->WritebackDRAMClockChangeWatermark, &v->StutterExitWatermark, &v->StutterEnterPlusExitWatermark, &v->Z8StutterExitWatermark, - &v->Z8StutterEnterPlusExitWatermark, - &v->MinActiveDRAMClockChangeLatencySupported); + &v->Z8StutterEnterPlusExitWatermark); for (k = 0; k < v->NumberOfActivePlanes; ++k) { if (v->WritebackEnable[k] == true) { @@ -3598,61 +3486,43 @@ static void CalculateRowBandwidth( static void CalculateFlipSchedule( struct display_mode_lib *mode_lib, + unsigned int 
k, double HostVMInefficiencyFactor, double UrgentExtraLatency, double UrgentLatency, - unsigned int GPUVMMaxPageTableLevels, - bool HostVMEnable, - unsigned int HostVMMaxNonCachedPageTableLevels, - bool GPUVMEnable, - double HostVMMinPageSize, double PDEAndMetaPTEBytesPerFrame, double MetaRowBytes, - double DPTEBytesPerRow, - double BandwidthAvailableForImmediateFlip, - unsigned int TotImmediateFlipBytes, - enum source_format_class SourcePixelFormat, - double LineTime, - double VRatio, - double VRatioChroma, - double Tno_bw, - bool DCCEnable, - unsigned int dpte_row_height, - unsigned int meta_row_height, - unsigned int dpte_row_height_chroma, - unsigned int meta_row_height_chroma, - double *DestinationLinesToRequestVMInImmediateFlip, - double *DestinationLinesToRequestRowInImmediateFlip, - double *final_flip_bw, - bool *ImmediateFlipSupportedForPipe) + double DPTEBytesPerRow) { + struct vba_vars_st *v = &mode_lib->vba; double min_row_time = 0.0; unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; double ImmediateFlipBW; + double LineTime = v->HTotal[k] / v->PixelClock[k]; - if (GPUVMEnable == true && HostVMEnable == true) { - HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels; + if (v->GPUVMEnable == true && v->HostVMEnable == true) { + HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels; } else { HostVMDynamicLevelsTrips = 0; } - if (GPUVMEnable == true || DCCEnable == true) { - ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes; + if (v->GPUVMEnable == true || v->DCCEnable[k] == true) { + ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes; } - if (GPUVMEnable == true) { + if (v->GPUVMEnable == true) { TimeForFetchingMetaPTEImmediateFlip = dml_max3( - Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW, - UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1), + v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW, + UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1), LineTime / 4.0); } else { TimeForFetchingMetaPTEImmediateFlip = 0; } - *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0; - if ((GPUVMEnable == true || DCCEnable == true)) { + v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0; + if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) { TimeForFetchingRowInVBlankImmediateFlip = dml_max3( (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW, UrgentLatency * (HostVMDynamicLevelsTrips + 1), @@ -3661,54 +3531,54 @@ static void CalculateFlipSchedule( TimeForFetchingRowInVBlankImmediateFlip = 0; } - *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0; + v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0; - if (GPUVMEnable == true) { - *final_flip_bw = dml_max( - PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime), - (MetaRowBytes + DPTEBytesPerRow * 
HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime)); - } else if ((GPUVMEnable == true || DCCEnable == true)) { - *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime); + if (v->GPUVMEnable == true) { + v->final_flip_bw[k] = dml_max( + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime), + (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime)); + } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) { + v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime); } else { - *final_flip_bw = 0; + v->final_flip_bw[k] = 0; } - if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) { - if (GPUVMEnable == true && DCCEnable != true) { - min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma); - } else if (GPUVMEnable != true && DCCEnable == true) { - min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma); + if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) { + if (v->GPUVMEnable == true && v->DCCEnable[k] != true) { + min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]); + } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) { + min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]); } else { min_row_time = dml_min4( - dpte_row_height * LineTime / VRatio, - meta_row_height * LineTime / VRatio, - dpte_row_height_chroma * LineTime / VRatioChroma, - meta_row_height_chroma * LineTime / VRatioChroma); + v->dpte_row_height[k] * LineTime / v->VRatio[k], + v->meta_row_height[k] * LineTime / v->VRatio[k], + v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k], + v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]); } } else { - if (GPUVMEnable == true && DCCEnable != true) { - min_row_time = dpte_row_height * LineTime / VRatio; - } else if (GPUVMEnable != true && DCCEnable == true) { - min_row_time = meta_row_height * LineTime / VRatio; + if (v->GPUVMEnable == true && v->DCCEnable[k] != true) { + min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k]; + } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) { + min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k]; } else { - min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio); + min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]); } } - if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16 + if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16 || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) { - *ImmediateFlipSupportedForPipe = false; + v->ImmediateFlipSupportedForPipe[k] = false; } else { - *ImmediateFlipSupportedForPipe = true; + v->ImmediateFlipSupportedForPipe[k] = true; } #ifdef 
__DML_VBA_DEBUG__ - dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip); - dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip); + dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]); + dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]); dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip); dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip); dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time); - dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe); + dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]); #endif } @@ -5300,33 +5170,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l for (k = 0; k < v->NumberOfActivePlanes; k++) { CalculateFlipSchedule( mode_lib, + k, HostVMInefficiencyFactor, v->ExtraLatency, v->UrgLatency[i], - v->GPUVMMaxPageTableLevels, - v->HostVMEnable, - v->HostVMMaxNonCachedPageTableLevels, - v->GPUVMEnable, - v->HostVMMinPageSize, v->PDEAndMetaPTEBytesPerFrame[i][j][k], v->MetaRowBytes[i][j][k], - v->DPTEBytesPerRow[i][j][k], - v->BandwidthAvailableForImmediateFlip, - v->TotImmediateFlipBytes, - v->SourcePixelFormat[k], - v->HTotal[k] / v->PixelClock[k], - v->VRatio[k], - v->VRatioChroma[k], - v->Tno_bw[k], - v->DCCEnable[k], - v->dpte_row_height[k], - v->meta_row_height[k], - v->dpte_row_height_chroma[k], - v->meta_row_height_chroma[k], - &v->DestinationLinesToRequestVMInImmediateFlip[k], - &v->DestinationLinesToRequestRowInImmediateFlip[k], - &v->final_flip_bw[k], - &v->ImmediateFlipSupportedForPipe[k]); + v->DPTEBytesPerRow[i][j][k]); } v->total_dcn_read_bw_with_flip = 0.0; for (k = 0; k < v->NumberOfActivePlanes; k++) { @@ -5384,64 +5234,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l CalculateWatermarksAndDRAMSpeedChangeSupport( mode_lib, v->PrefetchModePerState[i][j], - v->NumberOfActivePlanes, - v->MaxLineBufferLines, - v->LineBufferSize, - v->WritebackInterfaceBufferSize, v->DCFCLKState[i][j], v->ReturnBWPerState[i][j], - v->SynchronizedVBlank, - v->dpte_group_bytes, - v->MetaChunkSize, v->UrgLatency[i], v->ExtraLatency, - v->WritebackLatency, - v->WritebackChunkSize, v->SOCCLKPerState[i], - v->DRAMClockChangeLatency, - v->SRExitTime, - v->SREnterPlusExitTime, - v->SRExitZ8Time, - v->SREnterPlusExitZ8Time, v->ProjectedDCFCLKDeepSleep[i][j], v->DETBufferSizeYThisState, v->DETBufferSizeCThisState, v->SwathHeightYThisState, v->SwathHeightCThisState, - v->LBBitPerPixel, v->SwathWidthYThisState, v->SwathWidthCThisState, - v->HRatio, - v->HRatioChroma, - v->vtaps, - v->VTAPsChroma, - v->VRatio, - v->VRatioChroma, - v->HTotal, - v->PixelClock, - v->BlendingAndTiming, v->NoOfDPPThisState, v->BytePerPixelInDETY, v->BytePerPixelInDETC, - v->DSTXAfterScaler, - v->DSTYAfterScaler, - v->WritebackEnable, - v->WritebackPixelFormat, - v->WritebackDestinationWidth, - v->WritebackDestinationHeight, - v->WritebackSourceHeight, UnboundedRequestEnabledThisState, CompressedBufferSizeInkByteThisState, &v->DRAMClockChangeSupport[i][j], - &v->UrgentWatermark, - 
&v->WritebackUrgentWatermark, - &v->DRAMClockChangeWatermark, - &v->WritebackDRAMClockChangeWatermark, - &dummy, &dummy, &dummy, &dummy, - &v->MinActiveDRAMClockChangeLatencySupported); + &dummy); } } @@ -5566,64 +5380,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l static void CalculateWatermarksAndDRAMSpeedChangeSupport( struct display_mode_lib *mode_lib, unsigned int PrefetchMode, - unsigned int NumberOfActivePlanes, - unsigned int MaxLineBufferLines, - unsigned int LineBufferSize, - unsigned int WritebackInterfaceBufferSize, double DCFCLK, double ReturnBW, - bool SynchronizedVBlank, - unsigned int dpte_group_bytes[], - unsigned int MetaChunkSize, double UrgentLatency, double ExtraLatency, - double WritebackLatency, - double WritebackChunkSize, double SOCCLK, - double DRAMClockChangeLatency, - double SRExitTime, - double SREnterPlusExitTime, - double SRExitZ8Time, - double SREnterPlusExitZ8Time, double DCFCLKDeepSleep, unsigned int DETBufferSizeY[], unsigned int DETBufferSizeC[], unsigned int SwathHeightY[], unsigned int SwathHeightC[], - unsigned int LBBitPerPixel[], double SwathWidthY[], double SwathWidthC[], - double HRatio[], - double HRatioChroma[], - unsigned int vtaps[], - unsigned int VTAPsChroma[], - double VRatio[], - double VRatioChroma[], - unsigned int HTotal[], - double PixelClock[], - unsigned int BlendingAndTiming[], unsigned int DPPPerPlane[], double BytePerPixelDETY[], double BytePerPixelDETC[], - double DSTXAfterScaler[], - double DSTYAfterScaler[], - bool WritebackEnable[], - enum source_format_class WritebackPixelFormat[], - double WritebackDestinationWidth[], - double WritebackDestinationHeight[], - double WritebackSourceHeight[], bool UnboundedRequestEnabled, int unsigned CompressedBufferSizeInkByte, enum clock_change_support *DRAMClockChangeSupport, - double *UrgentWatermark, - double *WritebackUrgentWatermark, - double *DRAMClockChangeWatermark, - double *WritebackDRAMClockChangeWatermark, double *StutterExitWatermark, double *StutterEnterPlusExitWatermark, double *Z8StutterExitWatermark, - double *Z8StutterEnterPlusExitWatermark, - double *MinActiveDRAMClockChangeLatencySupported) + double *Z8StutterEnterPlusExitWatermark) { struct vba_vars_st *v = &mode_lib->vba; double EffectiveLBLatencyHidingY; @@ -5643,103 +5421,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( double TotalPixelBW = 0.0; int k, j; - *UrgentWatermark = UrgentLatency + ExtraLatency; + v->UrgentWatermark = UrgentLatency + ExtraLatency; #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency); dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency); - dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark); + dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark); #endif - *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark; + v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark; #ifdef __DML_VBA_DEBUG__ - dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency); - dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark); + dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency); + dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark); #endif v->TotalActiveWriteback = 0; - for (k = 0; k < NumberOfActivePlanes; ++k) { - if (WritebackEnable[k] == true) { + for (k = 0; k < 
v->NumberOfActivePlanes; ++k) { + if (v->WritebackEnable[k] == true) { v->TotalActiveWriteback = v->TotalActiveWriteback + 1; } } if (v->TotalActiveWriteback <= 1) { - *WritebackUrgentWatermark = WritebackLatency; + v->WritebackUrgentWatermark = v->WritebackLatency; } else { - *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; + v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; } if (v->TotalActiveWriteback <= 1) { - *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency; + v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency; } else { - *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; + v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; } - for (k = 0; k < NumberOfActivePlanes; ++k) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { TotalPixelBW = TotalPixelBW - + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) - / (HTotal[k] / PixelClock[k]); + + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) + / (v->HTotal[k] / v->PixelClock[k]); } - for (k = 0; k < NumberOfActivePlanes; ++k) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { double EffectiveDETBufferSizeY = DETBufferSizeY[k]; v->LBLatencyHidingSourceLinesY = dml_min( - (double) MaxLineBufferLines, - dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1); + (double) v->MaxLineBufferLines, + dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1); v->LBLatencyHidingSourceLinesC = dml_min( - (double) MaxLineBufferLines, - dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1); + (double) v->MaxLineBufferLines, + dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1); - EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]); + EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]); - EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]); + EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]); if (UnboundedRequestEnabled) { EffectiveDETBufferSizeY = EffectiveDETBufferSizeY - + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW; + + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW; } LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k]; LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]); - FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k]; + FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k]; if (BytePerPixelDETC[k] > 0) { LinesInDETC = v->DETBufferSizeC[k] / 
BytePerPixelDETC[k] / SwathWidthC[k]; LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]); - FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k]; + FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k]; } else { LinesInDETC = 0; FullDETBufferingTimeC = 999999; } ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY - - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark; + - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark; - if (NumberOfActivePlanes > 1) { + if (v->NumberOfActivePlanes > 1) { ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k]; + - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k]; } if (BytePerPixelDETC[k] > 0) { ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark; + - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark; - if (NumberOfActivePlanes > 1) { + if (v->NumberOfActivePlanes > 1) { ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k]; + - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k]; } v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC); } else { v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY; } - if (WritebackEnable[k] == true) { - WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024 - / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4); - if (WritebackPixelFormat[k] == dm_444_64) { + if (v->WritebackEnable[k] == true) { + WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024 + / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4); + if (v->WritebackPixelFormat[k] == dm_444_64) { WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2; } WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark; @@ -5749,14 +5527,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( v->MinActiveDRAMClockChangeMargin = 999999; PlaneWithMinActiveDRAMClockChangeMargin = 0; - for (k = 0; k < NumberOfActivePlanes; ++k) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) { v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k]; - if (BlendingAndTiming[k] == k) { + if (v->BlendingAndTiming[k] == k) { PlaneWithMinActiveDRAMClockChangeMargin = k; } else { - for (j = 0; j < NumberOfActivePlanes; ++j) { - if 
(BlendingAndTiming[k] == j) { + for (j = 0; j < v->NumberOfActivePlanes; ++j) { + if (v->BlendingAndTiming[k] == j) { PlaneWithMinActiveDRAMClockChangeMargin = j; } } @@ -5764,11 +5542,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( } } - *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency; + v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency ; SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999; - for (k = 0; k < NumberOfActivePlanes; ++k) { - if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) + for (k = 0; k < v->NumberOfActivePlanes; ++k) { + if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) { SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k]; } @@ -5776,25 +5554,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( v->TotalNumberOfActiveOTG = 0; - for (k = 0; k < NumberOfActivePlanes; ++k) { - if (BlendingAndTiming[k] == k) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { + if (v->BlendingAndTiming[k] == k) { v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1; } } if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) { *DRAMClockChangeSupport = dm_dram_clock_change_vactive; - } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 + } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) { *DRAMClockChangeSupport = dm_dram_clock_change_vblank; } else { *DRAMClockChangeSupport = dm_dram_clock_change_unsupported; } - *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep; - *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep); - *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; - *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; + *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep; + *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep); + *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; + *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 34a5d0f87b5f..4bb3b31ea7e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -194,6 +194,9 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count; + if (bw_params->dram_channel_width_bytes > 0) + dcn3_14_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes; + if (bw_params->num_channels > 0) dcn3_14_soc.num_chans = bw_params->num_channels; @@ -262,7 +265,7 @@ 
void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p } if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31); + dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314); else dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index fc4d7474c111..ee821c4fb5dd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -61,7 +61,7 @@ // fudge factor for min dcfclk calculation #define __DML_MIN_DCFCLK_FACTOR__ 1.15 -struct { +typedef struct { double DPPCLK; double DISPCLK; double PixelClock; @@ -265,33 +265,13 @@ static void CalculateRowBandwidth( static void CalculateFlipSchedule( struct display_mode_lib *mode_lib, + unsigned int k, double HostVMInefficiencyFactor, double UrgentExtraLatency, double UrgentLatency, - unsigned int GPUVMMaxPageTableLevels, - bool HostVMEnable, - unsigned int HostVMMaxNonCachedPageTableLevels, - bool GPUVMEnable, - double HostVMMinPageSize, double PDEAndMetaPTEBytesPerFrame, double MetaRowBytes, - double DPTEBytesPerRow, - double BandwidthAvailableForImmediateFlip, - unsigned int TotImmediateFlipBytes, - enum source_format_class SourcePixelFormat, - double LineTime, - double VRatio, - double VRatioChroma, - double Tno_bw, - bool DCCEnable, - unsigned int dpte_row_height, - unsigned int meta_row_height, - unsigned int dpte_row_height_chroma, - unsigned int meta_row_height_chroma, - double *DestinationLinesToRequestVMInImmediateFlip, - double *DestinationLinesToRequestRowInImmediateFlip, - double *final_flip_bw, - bool *ImmediateFlipSupportedForPipe); + double DPTEBytesPerRow); static double CalculateWriteBackDelay( enum source_format_class WritebackPixelFormat, double WritebackHRatio, @@ -325,64 +305,28 @@ static void CalculateVupdateAndDynamicMetadataParameters( static void CalculateWatermarksAndDRAMSpeedChangeSupport( struct display_mode_lib *mode_lib, unsigned int PrefetchMode, - unsigned int NumberOfActivePlanes, - unsigned int MaxLineBufferLines, - unsigned int LineBufferSize, - unsigned int WritebackInterfaceBufferSize, double DCFCLK, double ReturnBW, - bool SynchronizedVBlank, - unsigned int dpte_group_bytes[], - unsigned int MetaChunkSize, double UrgentLatency, double ExtraLatency, - double WritebackLatency, - double WritebackChunkSize, double SOCCLK, - double DRAMClockChangeLatency, - double SRExitTime, - double SREnterPlusExitTime, - double SRExitZ8Time, - double SREnterPlusExitZ8Time, double DCFCLKDeepSleep, unsigned int DETBufferSizeY[], unsigned int DETBufferSizeC[], unsigned int SwathHeightY[], unsigned int SwathHeightC[], - unsigned int LBBitPerPixel[], double SwathWidthY[], double SwathWidthC[], - double HRatio[], - double HRatioChroma[], - unsigned int vtaps[], - unsigned int VTAPsChroma[], - double VRatio[], - double VRatioChroma[], - unsigned int HTotal[], - double PixelClock[], - unsigned int BlendingAndTiming[], unsigned int DPPPerPlane[], double BytePerPixelDETY[], double BytePerPixelDETC[], - double DSTXAfterScaler[], - double DSTYAfterScaler[], - bool WritebackEnable[], - enum source_format_class WritebackPixelFormat[], - double WritebackDestinationWidth[], - double WritebackDestinationHeight[], - double WritebackSourceHeight[], bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte, enum clock_change_support *DRAMClockChangeSupport, - double *UrgentWatermark, - double *WritebackUrgentWatermark, - double *DRAMClockChangeWatermark, - double *WritebackDRAMClockChangeWatermark, double *StutterExitWatermark, double *StutterEnterPlusExitWatermark, double *Z8StutterExitWatermark, - double *Z8StutterEnterPlusExitWatermark, - double *MinActiveDRAMClockChangeLatencySupported); + double *Z8StutterEnterPlusExitWatermark); static void CalculateDCFCLKDeepSleep( struct display_mode_lib *mode_lib, @@ -1599,7 +1543,7 @@ static void CalculateDCCConfiguration( int segment_order_vert_contiguous_luma; int segment_order_vert_contiguous_chroma; - enum { + typedef enum { REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA } RequestType; RequestType RequestLuma; @@ -2928,33 +2872,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman for (k = 0; k < v->NumberOfActivePlanes; ++k) { CalculateFlipSchedule( mode_lib, + k, HostVMInefficiencyFactor, v->UrgentExtraLatency, v->UrgentLatency, - v->GPUVMMaxPageTableLevels, - v->HostVMEnable, - v->HostVMMaxNonCachedPageTableLevels, - v->GPUVMEnable, - v->HostVMMinPageSize, v->PDEAndMetaPTEBytesFrame[k], v->MetaRowByte[k], - v->PixelPTEBytesPerRow[k], - v->BandwidthAvailableForImmediateFlip, - v->TotImmediateFlipBytes, - v->SourcePixelFormat[k], - v->HTotal[k] / v->PixelClock[k], - v->VRatio[k], - v->VRatioChroma[k], - v->Tno_bw[k], - v->DCCEnable[k], - v->dpte_row_height[k], - v->meta_row_height[k], - v->dpte_row_height_chroma[k], - v->meta_row_height_chroma[k], - &v->DestinationLinesToRequestVMInImmediateFlip[k], - &v->DestinationLinesToRequestRowInImmediateFlip[k], - &v->final_flip_bw[k], - &v->ImmediateFlipSupportedForPipe[k]); + v->PixelPTEBytesPerRow[k]); } v->total_dcn_read_bw_with_flip = 0.0; @@ -3041,64 +2965,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman CalculateWatermarksAndDRAMSpeedChangeSupport( mode_lib, PrefetchMode, - v->NumberOfActivePlanes, - v->MaxLineBufferLines, - v->LineBufferSize, - v->WritebackInterfaceBufferSize, v->DCFCLK, v->ReturnBW, - v->SynchronizedVBlank, - v->dpte_group_bytes, - v->MetaChunkSize, v->UrgentLatency, v->UrgentExtraLatency, - v->WritebackLatency, - v->WritebackChunkSize, v->SOCCLK, - v->DRAMClockChangeLatency, - v->SRExitTime, - v->SREnterPlusExitTime, - v->SRExitZ8Time, - v->SREnterPlusExitZ8Time, v->DCFCLKDeepSleep, v->DETBufferSizeY, v->DETBufferSizeC, v->SwathHeightY, v->SwathHeightC, - v->LBBitPerPixel, v->SwathWidthY, v->SwathWidthC, - v->HRatio, - v->HRatioChroma, - v->vtaps, - v->VTAPsChroma, - v->VRatio, - v->VRatioChroma, - v->HTotal, - v->PixelClock, - v->BlendingAndTiming, v->DPPPerPlane, v->BytePerPixelDETY, v->BytePerPixelDETC, - v->DSTXAfterScaler, - v->DSTYAfterScaler, - v->WritebackEnable, - v->WritebackPixelFormat, - v->WritebackDestinationWidth, - v->WritebackDestinationHeight, - v->WritebackSourceHeight, v->UnboundedRequestEnabled, v->CompressedBufferSizeInkByte, &DRAMClockChangeSupport, - &v->UrgentWatermark, - &v->WritebackUrgentWatermark, - &v->DRAMClockChangeWatermark, - &v->WritebackDRAMClockChangeWatermark, &v->StutterExitWatermark, &v->StutterEnterPlusExitWatermark, &v->Z8StutterExitWatermark, - &v->Z8StutterEnterPlusExitWatermark, - &v->MinActiveDRAMClockChangeLatencySupported); + &v->Z8StutterEnterPlusExitWatermark); for (k = 0; k < v->NumberOfActivePlanes; ++k) { if (v->WritebackEnable[k] == true) { @@ -3710,61 +3598,43 @@ static void 
CalculateRowBandwidth( static void CalculateFlipSchedule( struct display_mode_lib *mode_lib, + unsigned int k, double HostVMInefficiencyFactor, double UrgentExtraLatency, double UrgentLatency, - unsigned int GPUVMMaxPageTableLevels, - bool HostVMEnable, - unsigned int HostVMMaxNonCachedPageTableLevels, - bool GPUVMEnable, - double HostVMMinPageSize, double PDEAndMetaPTEBytesPerFrame, double MetaRowBytes, - double DPTEBytesPerRow, - double BandwidthAvailableForImmediateFlip, - unsigned int TotImmediateFlipBytes, - enum source_format_class SourcePixelFormat, - double LineTime, - double VRatio, - double VRatioChroma, - double Tno_bw, - bool DCCEnable, - unsigned int dpte_row_height, - unsigned int meta_row_height, - unsigned int dpte_row_height_chroma, - unsigned int meta_row_height_chroma, - double *DestinationLinesToRequestVMInImmediateFlip, - double *DestinationLinesToRequestRowInImmediateFlip, - double *final_flip_bw, - bool *ImmediateFlipSupportedForPipe) + double DPTEBytesPerRow) { + struct vba_vars_st *v = &mode_lib->vba; double min_row_time = 0.0; unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; double ImmediateFlipBW; + double LineTime = v->HTotal[k] / v->PixelClock[k]; - if (GPUVMEnable == true && HostVMEnable == true) { - HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels; + if (v->GPUVMEnable == true && v->HostVMEnable == true) { + HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels; } else { HostVMDynamicLevelsTrips = 0; } - if (GPUVMEnable == true || DCCEnable == true) { - ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes; + if (v->GPUVMEnable == true || v->DCCEnable[k] == true) { + ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes; } - if (GPUVMEnable == true) { + if (v->GPUVMEnable == true) { TimeForFetchingMetaPTEImmediateFlip = dml_max3( - Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW, - UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1), + v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW, + UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1), LineTime / 4.0); } else { TimeForFetchingMetaPTEImmediateFlip = 0; } - *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0; - if ((GPUVMEnable == true || DCCEnable == true)) { + v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0; + if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) { TimeForFetchingRowInVBlankImmediateFlip = dml_max3( (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW, UrgentLatency * (HostVMDynamicLevelsTrips + 1), @@ -3773,54 +3643,54 @@ static void CalculateFlipSchedule( TimeForFetchingRowInVBlankImmediateFlip = 0; } - *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0; + v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0; - if (GPUVMEnable == true) { - *final_flip_bw = dml_max( - PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / 
(*DestinationLinesToRequestVMInImmediateFlip * LineTime), - (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime)); - } else if ((GPUVMEnable == true || DCCEnable == true)) { - *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime); + if (v->GPUVMEnable == true) { + v->final_flip_bw[k] = dml_max( + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime), + (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime)); + } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) { + v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime); } else { - *final_flip_bw = 0; + v->final_flip_bw[k] = 0; } - if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) { - if (GPUVMEnable == true && DCCEnable != true) { - min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma); - } else if (GPUVMEnable != true && DCCEnable == true) { - min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma); + if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) { + if (v->GPUVMEnable == true && v->DCCEnable[k] != true) { + min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]); + } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) { + min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]); } else { min_row_time = dml_min4( - dpte_row_height * LineTime / VRatio, - meta_row_height * LineTime / VRatio, - dpte_row_height_chroma * LineTime / VRatioChroma, - meta_row_height_chroma * LineTime / VRatioChroma); + v->dpte_row_height[k] * LineTime / v->VRatio[k], + v->meta_row_height[k] * LineTime / v->VRatio[k], + v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k], + v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]); } } else { - if (GPUVMEnable == true && DCCEnable != true) { - min_row_time = dpte_row_height * LineTime / VRatio; - } else if (GPUVMEnable != true && DCCEnable == true) { - min_row_time = meta_row_height * LineTime / VRatio; + if (v->GPUVMEnable == true && v->DCCEnable[k] != true) { + min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k]; + } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) { + min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k]; } else { - min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio); + min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]); } } - if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16 + if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16 || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) { - *ImmediateFlipSupportedForPipe = false; + v->ImmediateFlipSupportedForPipe[k] = false; } else { - 
*ImmediateFlipSupportedForPipe = true; + v->ImmediateFlipSupportedForPipe[k] = true; } #ifdef __DML_VBA_DEBUG__ - dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip); - dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip); + dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]); + dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]); dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip); dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip); dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time); - dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe); + dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]); #endif } @@ -4071,9 +3941,7 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_ v->SourceFormatPixelAndScanSupport = true; for (k = 0; k < v->NumberOfActivePlanes; k++) { - if ((v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true)) - || ((v->SurfaceTiling[k] == dm_sw_64kb_d || v->SurfaceTiling[k] == dm_sw_64kb_d_t - || v->SurfaceTiling[k] == dm_sw_64kb_d_x) && !(v->SourcePixelFormat[k] == dm_444_64))) { + if (v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true)) { v->SourceFormatPixelAndScanSupport = false; } } @@ -5414,33 +5282,13 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_ for (k = 0; k < v->NumberOfActivePlanes; k++) { CalculateFlipSchedule( mode_lib, + k, HostVMInefficiencyFactor, v->ExtraLatency, v->UrgLatency[i], - v->GPUVMMaxPageTableLevels, - v->HostVMEnable, - v->HostVMMaxNonCachedPageTableLevels, - v->GPUVMEnable, - v->HostVMMinPageSize, v->PDEAndMetaPTEBytesPerFrame[i][j][k], v->MetaRowBytes[i][j][k], - v->DPTEBytesPerRow[i][j][k], - v->BandwidthAvailableForImmediateFlip, - v->TotImmediateFlipBytes, - v->SourcePixelFormat[k], - v->HTotal[k] / v->PixelClock[k], - v->VRatio[k], - v->VRatioChroma[k], - v->Tno_bw[k], - v->DCCEnable[k], - v->dpte_row_height[k], - v->meta_row_height[k], - v->dpte_row_height_chroma[k], - v->meta_row_height_chroma[k], - &v->DestinationLinesToRequestVMInImmediateFlip[k], - &v->DestinationLinesToRequestRowInImmediateFlip[k], - &v->final_flip_bw[k], - &v->ImmediateFlipSupportedForPipe[k]); + v->DPTEBytesPerRow[i][j][k]); } v->total_dcn_read_bw_with_flip = 0.0; for (k = 0; k < v->NumberOfActivePlanes; k++) { @@ -5498,64 +5346,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_ CalculateWatermarksAndDRAMSpeedChangeSupport( mode_lib, v->PrefetchModePerState[i][j], - v->NumberOfActivePlanes, - v->MaxLineBufferLines, - v->LineBufferSize, - v->WritebackInterfaceBufferSize, v->DCFCLKState[i][j], v->ReturnBWPerState[i][j], - v->SynchronizedVBlank, - v->dpte_group_bytes, - v->MetaChunkSize, v->UrgLatency[i], v->ExtraLatency, - v->WritebackLatency, - v->WritebackChunkSize, v->SOCCLKPerState[i], - v->DRAMClockChangeLatency, - v->SRExitTime, - v->SREnterPlusExitTime, - v->SRExitZ8Time, - v->SREnterPlusExitZ8Time, 
v->ProjectedDCFCLKDeepSleep[i][j], v->DETBufferSizeYThisState, v->DETBufferSizeCThisState, v->SwathHeightYThisState, v->SwathHeightCThisState, - v->LBBitPerPixel, v->SwathWidthYThisState, v->SwathWidthCThisState, - v->HRatio, - v->HRatioChroma, - v->vtaps, - v->VTAPsChroma, - v->VRatio, - v->VRatioChroma, - v->HTotal, - v->PixelClock, - v->BlendingAndTiming, v->NoOfDPPThisState, v->BytePerPixelInDETY, v->BytePerPixelInDETC, - v->DSTXAfterScaler, - v->DSTYAfterScaler, - v->WritebackEnable, - v->WritebackPixelFormat, - v->WritebackDestinationWidth, - v->WritebackDestinationHeight, - v->WritebackSourceHeight, UnboundedRequestEnabledThisState, CompressedBufferSizeInkByteThisState, &v->DRAMClockChangeSupport[i][j], - &v->UrgentWatermark, - &v->WritebackUrgentWatermark, - &v->DRAMClockChangeWatermark, - &v->WritebackDRAMClockChangeWatermark, - &dummy, &dummy, &dummy, &dummy, - &v->MinActiveDRAMClockChangeLatencySupported); + &dummy); } } @@ -5681,64 +5493,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_ static void CalculateWatermarksAndDRAMSpeedChangeSupport( struct display_mode_lib *mode_lib, unsigned int PrefetchMode, - unsigned int NumberOfActivePlanes, - unsigned int MaxLineBufferLines, - unsigned int LineBufferSize, - unsigned int WritebackInterfaceBufferSize, double DCFCLK, double ReturnBW, - bool SynchronizedVBlank, - unsigned int dpte_group_bytes[], - unsigned int MetaChunkSize, double UrgentLatency, double ExtraLatency, - double WritebackLatency, - double WritebackChunkSize, double SOCCLK, - double DRAMClockChangeLatency, - double SRExitTime, - double SREnterPlusExitTime, - double SRExitZ8Time, - double SREnterPlusExitZ8Time, double DCFCLKDeepSleep, unsigned int DETBufferSizeY[], unsigned int DETBufferSizeC[], unsigned int SwathHeightY[], unsigned int SwathHeightC[], - unsigned int LBBitPerPixel[], double SwathWidthY[], double SwathWidthC[], - double HRatio[], - double HRatioChroma[], - unsigned int vtaps[], - unsigned int VTAPsChroma[], - double VRatio[], - double VRatioChroma[], - unsigned int HTotal[], - double PixelClock[], - unsigned int BlendingAndTiming[], unsigned int DPPPerPlane[], double BytePerPixelDETY[], double BytePerPixelDETC[], - double DSTXAfterScaler[], - double DSTYAfterScaler[], - bool WritebackEnable[], - enum source_format_class WritebackPixelFormat[], - double WritebackDestinationWidth[], - double WritebackDestinationHeight[], - double WritebackSourceHeight[], bool UnboundedRequestEnabled, unsigned int CompressedBufferSizeInkByte, enum clock_change_support *DRAMClockChangeSupport, - double *UrgentWatermark, - double *WritebackUrgentWatermark, - double *DRAMClockChangeWatermark, - double *WritebackDRAMClockChangeWatermark, double *StutterExitWatermark, double *StutterEnterPlusExitWatermark, double *Z8StutterExitWatermark, - double *Z8StutterEnterPlusExitWatermark, - double *MinActiveDRAMClockChangeLatencySupported) + double *Z8StutterEnterPlusExitWatermark) { struct vba_vars_st *v = &mode_lib->vba; double EffectiveLBLatencyHidingY; @@ -5758,103 +5534,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( double TotalPixelBW = 0.0; int k, j; - *UrgentWatermark = UrgentLatency + ExtraLatency; + v->UrgentWatermark = UrgentLatency + ExtraLatency; #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency); dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency); - dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark); + dml_print("DML::%s: UrgentWatermark = 
%f\n", __func__, v->UrgentWatermark); #endif - *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark; + v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark; #ifdef __DML_VBA_DEBUG__ - dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency); - dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark); + dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency); + dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark); #endif v->TotalActiveWriteback = 0; - for (k = 0; k < NumberOfActivePlanes; ++k) { - if (WritebackEnable[k] == true) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { + if (v->WritebackEnable[k] == true) { v->TotalActiveWriteback = v->TotalActiveWriteback + 1; } } if (v->TotalActiveWriteback <= 1) { - *WritebackUrgentWatermark = WritebackLatency; + v->WritebackUrgentWatermark = v->WritebackLatency; } else { - *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; + v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; } if (v->TotalActiveWriteback <= 1) { - *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency; + v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency; } else { - *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; + v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; } - for (k = 0; k < NumberOfActivePlanes; ++k) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { TotalPixelBW = TotalPixelBW - + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) - / (HTotal[k] / PixelClock[k]); + + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) + / (v->HTotal[k] / v->PixelClock[k]); } - for (k = 0; k < NumberOfActivePlanes; ++k) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { double EffectiveDETBufferSizeY = DETBufferSizeY[k]; v->LBLatencyHidingSourceLinesY = dml_min( - (double) MaxLineBufferLines, - dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1); + (double) v->MaxLineBufferLines, + dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1); v->LBLatencyHidingSourceLinesC = dml_min( - (double) MaxLineBufferLines, - dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1); + (double) v->MaxLineBufferLines, + dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1); - EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]); + EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]); - EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]); + EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]); if (UnboundedRequestEnabled) { EffectiveDETBufferSizeY = EffectiveDETBufferSizeY - + 
CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW; + + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW; } LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k]; LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]); - FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k]; + FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k]; if (BytePerPixelDETC[k] > 0) { LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k]; LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]); - FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k]; + FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k]; } else { LinesInDETC = 0; FullDETBufferingTimeC = 999999; } ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY - - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark; + - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark; - if (NumberOfActivePlanes > 1) { + if (v->NumberOfActivePlanes > 1) { ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k]; + - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k]; } if (BytePerPixelDETC[k] > 0) { ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark; + - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark; - if (NumberOfActivePlanes > 1) { + if (v->NumberOfActivePlanes > 1) { ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k]; + - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k]; } v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC); } else { v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY; } - if (WritebackEnable[k] == true) { - WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024 - / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4); - if (WritebackPixelFormat[k] == dm_444_64) { + if (v->WritebackEnable[k] == true) { + WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024 + / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4); + if (v->WritebackPixelFormat[k] == dm_444_64) { WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2; } 
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark; @@ -5864,14 +5640,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( v->MinActiveDRAMClockChangeMargin = 999999; PlaneWithMinActiveDRAMClockChangeMargin = 0; - for (k = 0; k < NumberOfActivePlanes; ++k) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) { v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k]; - if (BlendingAndTiming[k] == k) { + if (v->BlendingAndTiming[k] == k) { PlaneWithMinActiveDRAMClockChangeMargin = k; } else { - for (j = 0; j < NumberOfActivePlanes; ++j) { - if (BlendingAndTiming[k] == j) { + for (j = 0; j < v->NumberOfActivePlanes; ++j) { + if (v->BlendingAndTiming[k] == j) { PlaneWithMinActiveDRAMClockChangeMargin = j; } } @@ -5879,11 +5655,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( } } - *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency; + v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency ; SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999; - for (k = 0; k < NumberOfActivePlanes; ++k) { - if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) + for (k = 0; k < v->NumberOfActivePlanes; ++k) { + if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) { SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k]; } @@ -5891,25 +5667,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( v->TotalNumberOfActiveOTG = 0; - for (k = 0; k < NumberOfActivePlanes; ++k) { - if (BlendingAndTiming[k] == k) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { + if (v->BlendingAndTiming[k] == k) { v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1; } } if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) { *DRAMClockChangeSupport = dm_dram_clock_change_vactive; - } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 + } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) { *DRAMClockChangeSupport = dm_dram_clock_change_vblank; } else { *DRAMClockChangeSupport = dm_dram_clock_change_unsupported; } - *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep; - *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep); - *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; - *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; + *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep; + *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep); + *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; + *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark); @@ -7157,12 
+6933,13 @@ static double CalculateExtraLatencyBytes( HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1); else HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2); - else + } else { HostVMDynamicLevels = 0; + } ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0; - if (GPUVMEnable == true) + if (GPUVMEnable == true) { for (k = 0; k < NumberOfActivePlanes; ++k) ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 8e4c9d0887ce..f43686997917 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -244,6 +244,50 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr) } /** + * dcn32_find_dummy_latency_index_for_fw_based_mclk_switch() - Finds + * dummy_latency_index when MCLK switching using firmware based vblank + * stretch is enabled. This function iterates through the table of dummy + * pstate latencies until it finds the lowest value that allows + * dm_allow_self_refresh_and_mclk_switch to happen. + */ +int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + const int max_latency_table_entries = 4; + const struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + int dummy_latency_index = 0; + + dc_assert_fp_enabled(); + + while (dummy_latency_index < max_latency_table_entries) { + context->bw_ctx.dml.soc.dram_clock_change_latency_us = + dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + + if (vlevel < context->bw_ctx.dml.vba.soc.num_states && + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) + break; + + dummy_latency_index++; + } + + if (dummy_latency_index == max_latency_table_entries) { + ASSERT(dummy_latency_index != max_latency_table_entries); + /* If the execution gets here, it means dummy p_states are + * not possible. This should never happen and would mean + * something is severely wrong. + * Here we reset dummy_latency_index to 3, because it is + * better to have underflows than system crashes. + */ + dummy_latency_index = max_latency_table_entries - 1; + } + + return dummy_latency_index; +} + +/** * dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes and populate pipe_ctx with those params.
* @@ -1646,7 +1690,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context); if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - dummy_latency_index = dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(dc, + dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc, context, pipes, pipe_cnt, vlevel); /* After calling dcn30_find_dummy_latency_index_for_fw_based_mclk_switch diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h index 3ed06ab855be..6ce221098979 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h @@ -71,4 +71,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); +int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index cb2025771646..6980f698eb23 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -755,30 +755,18 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelY = v->BytePerPixelY[k]; v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k]; v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP; - v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor, - &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k], - mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater, - mode_lib->vba.DPPCLKDelaySCL, - mode_lib->vba.DPPCLKDelaySCLLBOnly, - mode_lib->vba.DPPCLKDelayCNVCCursor, - mode_lib->vba.DISPCLKDelaySubtotal, - (unsigned int) (v->SwathWidthY[k] / mode_lib->vba.HRatio[k]), - mode_lib->vba.OutputFormat[k], - mode_lib->vba.MaxInterDCNTileRepeaters, + v->ErrorResult[k] = dml32_CalculatePrefetchSchedule( + v, + k, + v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor, + &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, + v->DSCDelay[k], + (unsigned int) (v->SwathWidthY[k] / v->HRatio[k]), dml_min(v->VStartupLines, v->MaxVStartupLines[k]), v->MaxVStartupLines[k], - mode_lib->vba.GPUVMMaxPageTableLevels, - mode_lib->vba.GPUVMEnable, - mode_lib->vba.HostVMEnable, - mode_lib->vba.HostVMMaxNonCachedPageTableLevels, - mode_lib->vba.HostVMMinPageSize, - mode_lib->vba.DynamicMetadataEnable[k], - mode_lib->vba.DynamicMetadataVMEnabled, - mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k], - mode_lib->vba.DynamicMetadataTransmittedBytes[k], v->UrgentLatency, 
v->UrgentExtraLatency, - mode_lib->vba.TCalc, + v->TCalc, v->PDEAndMetaPTEBytesFrame[k], v->MetaRowByte[k], v->PixelPTEBytesPerRow[k], @@ -792,8 +780,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->MaxNumSwathC[k], v->swath_width_luma_ub[k], v->swath_width_chroma_ub[k], - mode_lib->vba.SwathHeightY[k], - mode_lib->vba.SwathHeightC[k], + v->SwathHeightY[k], + v->SwathHeightC[k], TWait, /* Output */ &v->DSTXAfterScaler[k], @@ -1163,58 +1151,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency; dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( - mode_lib->vba.USRRetrainingRequiredFinal, - mode_lib->vba.UsesMALLForPStateChange, - mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb], - mode_lib->vba.NumberOfActiveSurfaces, - mode_lib->vba.MaxLineBufferLines, - mode_lib->vba.LineBufferSizeFinal, - mode_lib->vba.WritebackInterfaceBufferSize, - mode_lib->vba.DCFCLK, - mode_lib->vba.ReturnBW, - mode_lib->vba.SynchronizeTimingsFinal, - mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal, - mode_lib->vba.DRRDisplay, - v->dpte_group_bytes, - v->meta_row_height, - v->meta_row_height_chroma, + v, + v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb], + v->DCFCLK, + v->ReturnBW, v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters, - mode_lib->vba.WritebackChunkSize, - mode_lib->vba.SOCCLK, + v->SOCCLK, v->DCFCLKDeepSleep, - mode_lib->vba.DETBufferSizeY, - mode_lib->vba.DETBufferSizeC, - mode_lib->vba.SwathHeightY, - mode_lib->vba.SwathHeightC, - mode_lib->vba.LBBitPerPixel, + v->DETBufferSizeY, + v->DETBufferSizeC, + v->SwathHeightY, + v->SwathHeightC, v->SwathWidthY, v->SwathWidthC, - mode_lib->vba.HRatio, - mode_lib->vba.HRatioChroma, - mode_lib->vba.vtaps, - mode_lib->vba.VTAPsChroma, - mode_lib->vba.VRatio, - mode_lib->vba.VRatioChroma, - mode_lib->vba.HTotal, - mode_lib->vba.VTotal, - mode_lib->vba.VActive, - mode_lib->vba.PixelClock, - mode_lib->vba.BlendingAndTiming, - mode_lib->vba.DPPPerPlane, + v->DPPPerPlane, v->BytePerPixelDETY, v->BytePerPixelDETC, v->DSTXAfterScaler, v->DSTYAfterScaler, - mode_lib->vba.WritebackEnable, - mode_lib->vba.WritebackPixelFormat, - mode_lib->vba.WritebackDestinationWidth, - mode_lib->vba.WritebackDestinationHeight, - mode_lib->vba.WritebackSourceHeight, v->UnboundedRequestEnabled, v->CompressedBufferSizeInkByte, /* Output */ - &v->Watermark, &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_dramchange_support, v->MaxActiveDRAMClockChangeLatencySupported, v->SubViewportLinesNeededInMALL, @@ -1806,10 +1764,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l &mode_lib->vba.Read256BlockHeightC[k], &mode_lib->vba.Read256BlockWidthY[k], &mode_lib->vba.Read256BlockWidthC[k], - &mode_lib->vba.MicroTileHeightY[k], - &mode_lib->vba.MicroTileHeightC[k], - &mode_lib->vba.MicroTileWidthY[k], - &mode_lib->vba.MicroTileWidthC[k]); + &mode_lib->vba.MacroTileHeightY[k], + &mode_lib->vba.MacroTileHeightC[k], + &mode_lib->vba.MacroTileWidthY[k], + &mode_lib->vba.MacroTileWidthC[k]); } /*Bandwidth Support Check*/ @@ -2034,6 +1992,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l dml32_CalculateODMMode( 
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit, mode_lib->vba.HActive[k], + mode_lib->vba.OutputFormat[k], mode_lib->vba.Output[k], mode_lib->vba.ODMUse[k], mode_lib->vba.MaxDispclk[i], @@ -2055,6 +2014,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l dml32_CalculateODMMode( mode_lib->vba.MaximumPixelsPerLinePerDSCUnit, mode_lib->vba.HActive[k], + mode_lib->vba.OutputFormat[k], mode_lib->vba.Output[k], mode_lib->vba.ODMUse[k], mode_lib->vba.MaxDispclk[i], @@ -2659,10 +2619,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.Read256BlockWidthC, mode_lib->vba.Read256BlockHeightY, mode_lib->vba.Read256BlockHeightC, - mode_lib->vba.MicroTileWidthY, - mode_lib->vba.MicroTileWidthC, - mode_lib->vba.MicroTileHeightY, - mode_lib->vba.MicroTileHeightC, + mode_lib->vba.MacroTileWidthY, + mode_lib->vba.MacroTileWidthC, + mode_lib->vba.MacroTileHeightY, + mode_lib->vba.MacroTileHeightC, /* Output */ mode_lib->vba.SurfaceSizeInMALL, @@ -2709,10 +2669,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesY = mode_lib->vba.Read256BlockHeightY[k]; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidth256BytesC = mode_lib->vba.Read256BlockWidthC[k]; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesC = mode_lib->vba.Read256BlockHeightC[k]; - v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MicroTileWidthY[k]; - v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MicroTileHeightY[k]; - v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MicroTileWidthC[k]; - v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MicroTileHeightC[k]; + v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MacroTileWidthY[k]; + v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MacroTileHeightY[k]; + v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MacroTileWidthC[k]; + v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MacroTileHeightC[k]; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].InterlaceEnable = mode_lib->vba.Interlace[k]; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].HTotal = mode_lib->vba.HTotal[k]; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].DCCEnable = mode_lib->vba.DCCEnable[k]; @@ -3258,63 +3218,47 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.NoTimeForPrefetch[i][j][k] = dml32_CalculatePrefetchSchedule( + v, + k, v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor, &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe, - mode_lib->vba.DSCDelayPerState[i][k], - mode_lib->vba.DPPCLKDelaySubtotal + - mode_lib->vba.DPPCLKDelayCNVCFormater, - mode_lib->vba.DPPCLKDelaySCL, - mode_lib->vba.DPPCLKDelaySCLLBOnly, - mode_lib->vba.DPPCLKDelayCNVCCursor, - mode_lib->vba.DISPCLKDelaySubtotal, - mode_lib->vba.SwathWidthYThisState[k] / - 
mode_lib->vba.HRatio[k], - mode_lib->vba.OutputFormat[k], - mode_lib->vba.MaxInterDCNTileRepeaters, - dml_min(mode_lib->vba.MaxVStartup, - mode_lib->vba.MaximumVStartup[i][j][k]), - mode_lib->vba.MaximumVStartup[i][j][k], - mode_lib->vba.GPUVMMaxPageTableLevels, - mode_lib->vba.GPUVMEnable, mode_lib->vba.HostVMEnable, - mode_lib->vba.HostVMMaxNonCachedPageTableLevels, - mode_lib->vba.HostVMMinPageSize, - mode_lib->vba.DynamicMetadataEnable[k], - mode_lib->vba.DynamicMetadataVMEnabled, - mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k], - mode_lib->vba.DynamicMetadataTransmittedBytes[k], - mode_lib->vba.UrgLatency[i], - mode_lib->vba.ExtraLatency, - mode_lib->vba.TimeCalc, - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[i][j][k], - mode_lib->vba.MetaRowBytes[i][j][k], - mode_lib->vba.DPTEBytesPerRow[i][j][k], - mode_lib->vba.PrefetchLinesY[i][j][k], - mode_lib->vba.SwathWidthYThisState[k], - mode_lib->vba.PrefillY[k], - mode_lib->vba.MaxNumSwY[k], - mode_lib->vba.PrefetchLinesC[i][j][k], - mode_lib->vba.SwathWidthCThisState[k], - mode_lib->vba.PrefillC[k], - mode_lib->vba.MaxNumSwC[k], - mode_lib->vba.swath_width_luma_ub_this_state[k], - mode_lib->vba.swath_width_chroma_ub_this_state[k], - mode_lib->vba.SwathHeightYThisState[k], - mode_lib->vba.SwathHeightCThisState[k], mode_lib->vba.TWait, + v->DSCDelayPerState[i][k], + v->SwathWidthYThisState[k] / v->HRatio[k], + dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]), + v->MaximumVStartup[i][j][k], + v->UrgLatency[i], + v->ExtraLatency, + v->TimeCalc, + v->PDEAndMetaPTEBytesPerFrame[i][j][k], + v->MetaRowBytes[i][j][k], + v->DPTEBytesPerRow[i][j][k], + v->PrefetchLinesY[i][j][k], + v->SwathWidthYThisState[k], + v->PrefillY[k], + v->MaxNumSwY[k], + v->PrefetchLinesC[i][j][k], + v->SwathWidthCThisState[k], + v->PrefillC[k], + v->MaxNumSwC[k], + v->swath_width_luma_ub_this_state[k], + v->swath_width_chroma_ub_this_state[k], + v->SwathHeightYThisState[k], + v->SwathHeightCThisState[k], v->TWait, /* Output */ &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k], &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler[k], - &mode_lib->vba.LineTimesForPrefetch[k], - &mode_lib->vba.PrefetchBW[k], - &mode_lib->vba.LinesForMetaPTE[k], - &mode_lib->vba.LinesForMetaAndDPTERow[k], - &mode_lib->vba.VRatioPreY[i][j][k], - &mode_lib->vba.VRatioPreC[i][j][k], - &mode_lib->vba.RequiredPrefetchPixelDataBWLuma[0][0][k], - &mode_lib->vba.RequiredPrefetchPixelDataBWChroma[0][0][k], - &mode_lib->vba.NoTimeForDynamicMetadata[i][j][k], - &mode_lib->vba.Tno_bw[k], - &mode_lib->vba.prefetch_vmrow_bw[k], + &v->LineTimesForPrefetch[k], + &v->PrefetchBW[k], + &v->LinesForMetaPTE[k], + &v->LinesForMetaAndDPTERow[k], + &v->VRatioPreY[i][j][k], + &v->VRatioPreC[i][j][k], + &v->RequiredPrefetchPixelDataBWLuma[0][0][k], + &v->RequiredPrefetchPixelDataBWChroma[0][0][k], + &v->NoTimeForDynamicMetadata[i][j][k], + &v->Tno_bw[k], + &v->prefetch_vmrow_bw[k], &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0], // double *Tdmdl_vm &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[1], // double *Tdmdl &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[2], // double *TSetup @@ -3557,62 +3501,32 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l { dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( - mode_lib->vba.USRRetrainingRequiredFinal, - mode_lib->vba.UsesMALLForPStateChange, - mode_lib->vba.PrefetchModePerState[i][j], - 
mode_lib->vba.NumberOfActiveSurfaces, - mode_lib->vba.MaxLineBufferLines, - mode_lib->vba.LineBufferSizeFinal, - mode_lib->vba.WritebackInterfaceBufferSize, - mode_lib->vba.DCFCLKState[i][j], - mode_lib->vba.ReturnBWPerState[i][j], - mode_lib->vba.SynchronizeTimingsFinal, - mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal, - mode_lib->vba.DRRDisplay, - mode_lib->vba.dpte_group_bytes, - mode_lib->vba.meta_row_height, - mode_lib->vba.meta_row_height_chroma, + v, + v->PrefetchModePerState[i][j], + v->DCFCLKState[i][j], + v->ReturnBWPerState[i][j], v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters, - mode_lib->vba.WritebackChunkSize, - mode_lib->vba.SOCCLKPerState[i], - mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j], - mode_lib->vba.DETBufferSizeYThisState, - mode_lib->vba.DETBufferSizeCThisState, - mode_lib->vba.SwathHeightYThisState, - mode_lib->vba.SwathHeightCThisState, - mode_lib->vba.LBBitPerPixel, - mode_lib->vba.SwathWidthYThisState, // 24 - mode_lib->vba.SwathWidthCThisState, - mode_lib->vba.HRatio, - mode_lib->vba.HRatioChroma, - mode_lib->vba.vtaps, - mode_lib->vba.VTAPsChroma, - mode_lib->vba.VRatio, - mode_lib->vba.VRatioChroma, - mode_lib->vba.HTotal, - mode_lib->vba.VTotal, - mode_lib->vba.VActive, - mode_lib->vba.PixelClock, - mode_lib->vba.BlendingAndTiming, - mode_lib->vba.NoOfDPPThisState, - mode_lib->vba.BytePerPixelInDETY, - mode_lib->vba.BytePerPixelInDETC, + v->SOCCLKPerState[i], + v->ProjectedDCFCLKDeepSleep[i][j], + v->DETBufferSizeYThisState, + v->DETBufferSizeCThisState, + v->SwathHeightYThisState, + v->SwathHeightCThisState, + v->SwathWidthYThisState, // 24 + v->SwathWidthCThisState, + v->NoOfDPPThisState, + v->BytePerPixelInDETY, + v->BytePerPixelInDETC, v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler, v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler, - mode_lib->vba.WritebackEnable, - mode_lib->vba.WritebackPixelFormat, - mode_lib->vba.WritebackDestinationWidth, - mode_lib->vba.WritebackDestinationHeight, - mode_lib->vba.WritebackSourceHeight, - mode_lib->vba.UnboundedRequestEnabledThisState, - mode_lib->vba.CompressedBufferSizeInkByteThisState, + v->UnboundedRequestEnabledThisState, + v->CompressedBufferSizeInkByteThisState, /* Output */ - &mode_lib->vba.Watermark, // Store the values in vba - &mode_lib->vba.DRAMClockChangeSupport[i][j], + &v->DRAMClockChangeSupport[i][j], &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[0], // double *MaxActiveDRAMClockChangeLatencySupported &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer[0], // Long SubViewportLinesNeededInMALL[] - &mode_lib->vba.FCLKChangeSupport[i][j], + &v->FCLKChangeSupport[i][j], &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[1], // double *MinActiveFCLKChangeLatencySupported &mode_lib->vba.USRRetrainingSupport[i][j], mode_lib->vba.ActiveDRAMClockChangeLatencyMargin); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index 05fc14a47fba..365d290bba99 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -27,6 +27,8 @@ #include "display_mode_vba_32.h" #include "../display_mode_lib.h" +#define DCN32_MAX_FMT_420_BUFFER_WIDTH 4096 + unsigned int dml32_dscceComputeDelay( unsigned int bpc, double BPP, @@ -1182,6 +1184,7 @@ void 
dml32_CalculateDETBufferSize( void dml32_CalculateODMMode( unsigned int MaximumPixelsPerLinePerDSCUnit, unsigned int HActive, + enum output_format_class OutFormat, enum output_encoder_class Output, enum odm_combine_policy ODMUse, double StateDispclk, @@ -1253,6 +1256,29 @@ void dml32_CalculateODMMode( else *TotalAvailablePipesSupport = false; } + if (OutFormat == dm_420 && HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH && + ODMUse != dm_odm_combine_policy_4to1) { + if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 4) { + *ODMMode = dm_odm_combine_mode_disabled; + *NumberOfDPP = 0; + *TotalAvailablePipesSupport = false; + } else if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 2 || + *ODMMode == dm_odm_combine_mode_4to1) { + *ODMMode = dm_odm_combine_mode_4to1; + *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne; + *NumberOfDPP = 4; + } else { + *ODMMode = dm_odm_combine_mode_2to1; + *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne; + *NumberOfDPP = 2; + } + } + if (Output == dm_hdmi && OutFormat == dm_420 && + HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH) { + *ODMMode = dm_odm_combine_mode_disabled; + *NumberOfDPP = 0; + *TotalAvailablePipesSupport = false; + } } double dml32_CalculateRequiredDispclk( @@ -3363,28 +3389,14 @@ double dml32_CalculateExtraLatency( } // CalculateExtraLatency bool dml32_CalculatePrefetchSchedule( + struct vba_vars_st *v, + unsigned int k, double HostVMInefficiencyFactor, DmlPipe *myPipe, unsigned int DSCDelay, - double DPPCLKDelaySubtotalPlusCNVCFormater, - double DPPCLKDelaySCL, - double DPPCLKDelaySCLLBOnly, - double DPPCLKDelayCNVCCursor, - double DISPCLKDelaySubtotal, unsigned int DPP_RECOUT_WIDTH, - enum output_format_class OutputFormat, - unsigned int MaxInterDCNTileRepeaters, unsigned int VStartup, unsigned int MaxVStartup, - unsigned int GPUVMPageTableLevels, - bool GPUVMEnable, - bool HostVMEnable, - unsigned int HostVMMaxNonCachedPageTableLevels, - double HostVMMinPageSize, - bool DynamicMetadataEnable, - bool DynamicMetadataVMEnabled, - int DynamicMetadataLinesBeforeActiveRequired, - unsigned int DynamicMetadataTransmittedBytes, double UrgentLatency, double UrgentExtraLatency, double TCalc, @@ -3425,6 +3437,7 @@ bool dml32_CalculatePrefetchSchedule( double *VUpdateWidthPix, double *VReadyOffsetPix) { + double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater; bool MyError = false; unsigned int DPPCycles, DISPCLKCycles; double DSTTotalPixelsAfterScaler; @@ -3461,27 +3474,27 @@ bool dml32_CalculatePrefetchSchedule( double Tsw_est1 = 0; double Tsw_est3 = 0; - if (GPUVMEnable == true && HostVMEnable == true) - HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels; + if (v->GPUVMEnable == true && v->HostVMEnable == true) + HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels; else HostVMDynamicLevelsTrips = 0; #ifdef __DML_VBA_DEBUG__ - dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable); - dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels); + dml_print("DML::%s: v->GPUVMEnable = %d\n", __func__, v->GPUVMEnable); + dml_print("DML::%s: v->GPUVMMaxPageTableLevels = %d\n", __func__, v->GPUVMMaxPageTableLevels); dml_print("DML::%s: DCCEnable = %d\n", __func__, myPipe->DCCEnable); - dml_print("DML::%s: HostVMEnable=%d HostVMInefficiencyFactor=%f\n", - __func__, HostVMEnable, HostVMInefficiencyFactor); + dml_print("DML::%s: v->HostVMEnable=%d HostVMInefficiencyFactor=%f\n", + __func__, v->HostVMEnable, HostVMInefficiencyFactor); 
#endif dml32_CalculateVUpdateAndDynamicMetadataParameters( - MaxInterDCNTileRepeaters, + v->MaxInterDCNTileRepeaters, myPipe->Dppclk, myPipe->Dispclk, myPipe->DCFClkDeepSleep, myPipe->PixelClock, myPipe->HTotal, myPipe->VBlank, - DynamicMetadataTransmittedBytes, - DynamicMetadataLinesBeforeActiveRequired, + v->DynamicMetadataTransmittedBytes[k], + v->DynamicMetadataLinesBeforeActiveRequired[k], myPipe->InterlaceEnable, myPipe->ProgressiveToInterlaceUnitInOPP, TSetup, @@ -3496,19 +3509,19 @@ bool dml32_CalculatePrefetchSchedule( LineTime = myPipe->HTotal / myPipe->PixelClock; trip_to_mem = UrgentLatency; - Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1); + Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1); - if (DynamicMetadataVMEnabled == true) + if (v->DynamicMetadataVMEnabled == true) *Tdmdl = TWait + Tvm_trips + trip_to_mem; else *Tdmdl = TWait + UrgentExtraLatency; #ifdef __DML_VBA_ALLOW_DELTA__ - if (DynamicMetadataEnable == false) + if (v->DynamicMetadataEnable[k] == false) *Tdmdl = 0.0; #endif - if (DynamicMetadataEnable == true) { + if (v->DynamicMetadataEnable[k] == true) { if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) { *NotEnoughTimeForDynamicMetadata = true; #ifdef __DML_VBA_DEBUG__ @@ -3528,17 +3541,17 @@ bool dml32_CalculatePrefetchSchedule( *NotEnoughTimeForDynamicMetadata = false; } - *Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true && - GPUVMEnable == true ? TWait + Tvm_trips : 0); + *Tdmdl_vm = (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true && + v->GPUVMEnable == true ? TWait + Tvm_trips : 0); if (myPipe->ScalerEnabled) - DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL; + DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL; else - DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly; + DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly; - DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor; + DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor; - DISPCLKCycles = DISPCLKDelaySubtotal; + DISPCLKCycles = v->DISPCLKDelaySubtotal; if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0) return true; @@ -3564,7 +3577,7 @@ bool dml32_CalculatePrefetchSchedule( dml_print("DML::%s: DSTXAfterScaler: %d\n", __func__, *DSTXAfterScaler); #endif - if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP)) + if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP)) *DSTYAfterScaler = 1; else *DSTYAfterScaler = 0; @@ -3581,13 +3594,13 @@ bool dml32_CalculatePrefetchSchedule( Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1); - if (GPUVMEnable == true) { + if (v->GPUVMEnable == true) { Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime; Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime; - if (GPUVMPageTableLevels >= 3) { + if (v->GPUVMMaxPageTableLevels >= 3) { *Tno_bw = UrgentExtraLatency + trip_to_mem * - (double) ((GPUVMPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1); - } else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) { + (double) ((v->GPUVMMaxPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1); + } else if (v->GPUVMMaxPageTableLevels == 1 && myPipe->DCCEnable != true) { Tr0_trips_rounded 
= dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) / 4.0 * LineTime; // VBA_ERROR *Tno_bw = UrgentExtraLatency; @@ -3622,7 +3635,7 @@ bool dml32_CalculatePrefetchSchedule( min_Lsw = dml_max(min_Lsw, 1.0); Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0; - if (GPUVMEnable == true) { + if (v->GPUVMEnable == true) { Tvm_oto = dml_max3( Tvm_trips, *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto, @@ -3630,7 +3643,7 @@ bool dml32_CalculatePrefetchSchedule( } else Tvm_oto = LineTime / 4.0; - if ((GPUVMEnable == true || myPipe->DCCEnable == true)) { + if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) { Tr0_oto = dml_max4( Tr0_trips, (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto, @@ -3833,7 +3846,7 @@ bool dml32_CalculatePrefetchSchedule( #endif if (prefetch_bw_equ > 0) { - if (GPUVMEnable == true) { + if (v->GPUVMEnable == true) { Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4); @@ -3841,7 +3854,7 @@ bool dml32_CalculatePrefetchSchedule( Tvm_equ = LineTime / 4; } - if ((GPUVMEnable == true || myPipe->DCCEnable == true)) { + if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) { Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips, (LineTime - Tvm_equ) / 2, LineTime / 4); @@ -4206,58 +4219,28 @@ void dml32_CalculateFlipSchedule( } // CalculateFlipSchedule void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( - bool USRRetrainingRequiredFinal, - enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[], + struct vba_vars_st *v, unsigned int PrefetchMode, - unsigned int NumberOfActiveSurfaces, - unsigned int MaxLineBufferLines, - unsigned int LineBufferSize, - unsigned int WritebackInterfaceBufferSize, double DCFCLK, double ReturnBW, - bool SynchronizeTimingsFinal, - bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal, - bool DRRDisplay[], - unsigned int dpte_group_bytes[], - unsigned int meta_row_height[], - unsigned int meta_row_height_chroma[], SOCParametersList mmSOCParameters, - unsigned int WritebackChunkSize, double SOCCLK, double DCFClkDeepSleep, unsigned int DETBufferSizeY[], unsigned int DETBufferSizeC[], unsigned int SwathHeightY[], unsigned int SwathHeightC[], - unsigned int LBBitPerPixel[], double SwathWidthY[], double SwathWidthC[], - double HRatio[], - double HRatioChroma[], - unsigned int VTaps[], - unsigned int VTapsChroma[], - double VRatio[], - double VRatioChroma[], - unsigned int HTotal[], - unsigned int VTotal[], - unsigned int VActive[], - double PixelClock[], - unsigned int BlendingAndTiming[], unsigned int DPPPerSurface[], double BytePerPixelDETY[], double BytePerPixelDETC[], double DSTXAfterScaler[], double DSTYAfterScaler[], - bool WritebackEnable[], - enum source_format_class WritebackPixelFormat[], - double WritebackDestinationWidth[], - double WritebackDestinationHeight[], - double WritebackSourceHeight[], bool UnboundedRequestEnabled, unsigned int CompressedBufferSizeInkByte, /* Output */ - Watermarks *Watermark, enum clock_change_support *DRAMClockChangeSupport, double MaxActiveDRAMClockChangeLatencySupported[], unsigned int SubViewportLinesNeededInMALL[], @@ -4299,136 +4282,136 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX]; unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX]; - 
Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency; - Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency + v->Watermark.UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency; + v->Watermark.USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency + mmSOCParameters.USRRetrainingLatency + mmSOCParameters.SMNLatency; - Watermark->DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + Watermark->UrgentWatermark; - Watermark->FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + Watermark->UrgentWatermark; - Watermark->StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency + v->Watermark.DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + v->Watermark.UrgentWatermark; + v->Watermark.FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + v->Watermark.UrgentWatermark; + v->Watermark.StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency + 10 / DCFClkDeepSleep; - Watermark->StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency + v->Watermark.StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency + 10 / DCFClkDeepSleep; - Watermark->Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency + v->Watermark.Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency + 10 / DCFClkDeepSleep; - Watermark->Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time + v->Watermark.Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time + mmSOCParameters.ExtraLatency + 10 / DCFClkDeepSleep; #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: UrgentLatency = %f\n", __func__, mmSOCParameters.UrgentLatency); dml_print("DML::%s: ExtraLatency = %f\n", __func__, mmSOCParameters.ExtraLatency); dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, mmSOCParameters.DRAMClockChangeLatency); - dml_print("DML::%s: UrgentWatermark = %f\n", __func__, Watermark->UrgentWatermark); - dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, Watermark->USRRetrainingWatermark); - dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, Watermark->DRAMClockChangeWatermark); - dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, Watermark->FCLKChangeWatermark); - dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, Watermark->StutterExitWatermark); - dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, Watermark->StutterEnterPlusExitWatermark); - dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, Watermark->Z8StutterExitWatermark); + dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->Watermark.UrgentWatermark); + dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, v->Watermark.USRRetrainingWatermark); + dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->Watermark.DRAMClockChangeWatermark); + dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, v->Watermark.FCLKChangeWatermark); + dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, v->Watermark.StutterExitWatermark); + dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, v->Watermark.StutterEnterPlusExitWatermark); + dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, v->Watermark.Z8StutterExitWatermark); dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", - 
__func__, Watermark->Z8StutterEnterPlusExitWatermark); + __func__, v->Watermark.Z8StutterEnterPlusExitWatermark); #endif TotalActiveWriteback = 0; - for (k = 0; k < NumberOfActiveSurfaces; ++k) { - if (WritebackEnable[k] == true) + for (k = 0; k < v->NumberOfActiveSurfaces; ++k) { + if (v->WritebackEnable[k] == true) TotalActiveWriteback = TotalActiveWriteback + 1; } if (TotalActiveWriteback <= 1) { - Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency; + v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency; } else { - Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency - + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; + v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency + + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; } - if (USRRetrainingRequiredFinal) - Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark + if (v->USRRetrainingRequiredFinal) + v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark + mmSOCParameters.USRRetrainingLatency; if (TotalActiveWriteback <= 1) { - Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.WritebackLatency; - Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + mmSOCParameters.WritebackLatency; } else { - Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency - + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; - Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency - + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024 / 32 / SOCCLK; + v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; + v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024 / 32 / SOCCLK; } - if (USRRetrainingRequiredFinal) - Watermark->WritebackDRAMClockChangeWatermark = Watermark->WritebackDRAMClockChangeWatermark + if (v->USRRetrainingRequiredFinal) + v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark + mmSOCParameters.USRRetrainingLatency; - if (USRRetrainingRequiredFinal) - Watermark->WritebackFCLKChangeWatermark = Watermark->WritebackFCLKChangeWatermark + if (v->USRRetrainingRequiredFinal) + v->Watermark.WritebackFCLKChangeWatermark = v->Watermark.WritebackFCLKChangeWatermark + mmSOCParameters.USRRetrainingLatency; #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", - __func__, Watermark->WritebackDRAMClockChangeWatermark); - dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, Watermark->WritebackFCLKChangeWatermark); - dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, Watermark->WritebackUrgentWatermark); - dml_print("DML::%s: USRRetrainingRequiredFinal = %d\n", __func__, USRRetrainingRequiredFinal); + __func__, v->Watermark.WritebackDRAMClockChangeWatermark); + dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, v->Watermark.WritebackFCLKChangeWatermark); + dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, v->Watermark.WritebackUrgentWatermark); + dml_print("DML::%s: 
v->USRRetrainingRequiredFinal = %d\n", __func__, v->USRRetrainingRequiredFinal); dml_print("DML::%s: USRRetrainingLatency = %f\n", __func__, mmSOCParameters.USRRetrainingLatency); #endif - for (k = 0; k < NumberOfActiveSurfaces; ++k) { - TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + - SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]); + for (k = 0; k < v->NumberOfActiveSurfaces; ++k) { + TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) / (v->HTotal[k] / v->PixelClock[k]); } - for (k = 0; k < NumberOfActiveSurfaces; ++k) { + for (k = 0; k < v->NumberOfActiveSurfaces; ++k) { - LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1); - LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1); + LBLatencyHidingSourceLinesY[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1); + LBLatencyHidingSourceLinesC[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1); #ifdef __DML_VBA_DEBUG__ - dml_print("DML::%s: k=%d, MaxLineBufferLines = %d\n", __func__, k, MaxLineBufferLines); - dml_print("DML::%s: k=%d, LineBufferSize = %d\n", __func__, k, LineBufferSize); - dml_print("DML::%s: k=%d, LBBitPerPixel = %d\n", __func__, k, LBBitPerPixel[k]); - dml_print("DML::%s: k=%d, HRatio = %f\n", __func__, k, HRatio[k]); - dml_print("DML::%s: k=%d, VTaps = %d\n", __func__, k, VTaps[k]); + dml_print("DML::%s: k=%d, v->MaxLineBufferLines = %d\n", __func__, k, v->MaxLineBufferLines); + dml_print("DML::%s: k=%d, v->LineBufferSizeFinal = %d\n", __func__, k, v->LineBufferSizeFinal); + dml_print("DML::%s: k=%d, v->LBBitPerPixel = %d\n", __func__, k, v->LBBitPerPixel[k]); + dml_print("DML::%s: k=%d, v->HRatio = %f\n", __func__, k, v->HRatio[k]); + dml_print("DML::%s: k=%d, v->vtaps = %d\n", __func__, k, v->vtaps[k]); #endif - EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]); - EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]); + EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]); + EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]); EffectiveDETBufferSizeY = DETBufferSizeY[k]; if (UnboundedRequestEnabled) { EffectiveDETBufferSizeY = EffectiveDETBufferSizeY + CompressedBufferSizeInkByte * 1024 - * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k]) - / (HTotal[k] / PixelClock[k]) / TotalPixelBW; + * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k]) + / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW; } LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k]; LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]); - FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k]; + 
FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k]; ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY - - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k]; + - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k]; - if (NumberOfActiveSurfaces > 1) { + if (v->NumberOfActiveSurfaces > 1) { ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY - - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k] - / PixelClock[k] / VRatio[k]; + - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k] + / v->PixelClock[k] / v->VRatio[k]; } if (BytePerPixelDETC[k] > 0) { LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k]; LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]); - FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) - / VRatioChroma[k]; + FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) + / v->VRatioChroma[k]; ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] - / PixelClock[k]; - if (NumberOfActiveSurfaces > 1) { + - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k] + / v->PixelClock[k]; + if (v->NumberOfActiveSurfaces > 1) { ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC - - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k] - / PixelClock[k] / VRatioChroma[k]; + - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightC[k] * v->HTotal[k] + / v->PixelClock[k] / v->VRatioChroma[k]; } ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY, ActiveClockChangeLatencyHidingC); @@ -4436,24 +4419,24 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY; } - ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark - - Watermark->DRAMClockChangeWatermark; - ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark - - Watermark->FCLKChangeWatermark; - USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark; - - if (WritebackEnable[k]) { - WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024 - / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] - / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4); - if (WritebackPixelFormat[k] == dm_444_64) + ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark + - v->Watermark.DRAMClockChangeWatermark; + ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark + - v->Watermark.FCLKChangeWatermark; + USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.USRRetrainingWatermark; + + if (v->WritebackEnable[k]) { + WritebackLatencyHiding = v->WritebackInterfaceBufferSize * 1024 + / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] + / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4); + if (v->WritebackPixelFormat[k] == dm_444_64) WritebackLatencyHiding = WritebackLatencyHiding / 2; WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding - - Watermark->WritebackDRAMClockChangeWatermark; + - 
v->Watermark.WritebackDRAMClockChangeWatermark; WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding - - Watermark->WritebackFCLKChangeWatermark; + - v->Watermark.WritebackFCLKChangeWatermark; ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k], WritebackFCLKChangeLatencyMargin); @@ -4461,22 +4444,22 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( WritebackDRAMClockChangeLatencyMargin); } MaxActiveDRAMClockChangeLatencySupported[k] = - (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ? + (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ? 0 : (ActiveDRAMClockChangeLatencyMargin[k] + mmSOCParameters.DRAMClockChangeLatency); } - for (i = 0; i < NumberOfActiveSurfaces; ++i) { - for (j = 0; j < NumberOfActiveSurfaces; ++j) { + for (i = 0; i < v->NumberOfActiveSurfaces; ++i) { + for (j = 0; j < v->NumberOfActiveSurfaces; ++j) { if (i == j || - (BlendingAndTiming[i] == i && BlendingAndTiming[j] == i) || - (BlendingAndTiming[j] == j && BlendingAndTiming[i] == j) || - (BlendingAndTiming[i] == BlendingAndTiming[j] && BlendingAndTiming[i] != i) || - (SynchronizeTimingsFinal && PixelClock[i] == PixelClock[j] && - HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] && - VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal && - (DRRDisplay[i] || DRRDisplay[j]))) { + (v->BlendingAndTiming[i] == i && v->BlendingAndTiming[j] == i) || + (v->BlendingAndTiming[j] == j && v->BlendingAndTiming[i] == j) || + (v->BlendingAndTiming[i] == v->BlendingAndTiming[j] && v->BlendingAndTiming[i] != i) || + (v->SynchronizeTimingsFinal && v->PixelClock[i] == v->PixelClock[j] && + v->HTotal[i] == v->HTotal[j] && v->VTotal[i] == v->VTotal[j] && + v->VActive[i] == v->VActive[j]) || (v->SynchronizeDRRDisplaysForUCLKPStateChangeFinal && + (v->DRRDisplay[i] || v->DRRDisplay[j]))) { SynchronizedSurfaces[i][j] = true; } else { SynchronizedSurfaces[i][j] = false; @@ -4484,8 +4467,8 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( } } - for (k = 0; k < NumberOfActiveSurfaces; ++k) { - if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) && + for (k = 0; k < v->NumberOfActiveSurfaces; ++k) { + if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) && (!FoundFirstSurfaceWithMinActiveFCLKChangeMargin || ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) { FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true; @@ -4497,9 +4480,9 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( *MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency; SameTimingForFCLKChange = true; - for (k = 0; k < NumberOfActiveSurfaces; ++k) { + for (k = 0; k < v->NumberOfActiveSurfaces; ++k) { if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) { - if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) && + if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) && (SameTimingForFCLKChange || ActiveFCLKChangeLatencyMargin[k] < SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) { @@ -4519,17 +4502,17 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( } *USRRetrainingSupport = true; - for (k = 0; k < NumberOfActiveSurfaces; ++k) { - if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) && + for (k = 0; k < v->NumberOfActiveSurfaces; ++k) { + if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) && 
(USRRetrainingLatencyMargin[k] < 0)) { *USRRetrainingSupport = false; } } - for (k = 0; k < NumberOfActiveSurfaces; ++k) { - if (UseMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame && - UseMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport && - UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe && + for (k = 0; k < v->NumberOfActiveSurfaces; ++k) { + if (v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame && + v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport && + v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe && ActiveDRAMClockChangeLatencyMargin[k] < 0) { if (PrefetchMode > 0) { DRAMClockChangeSupportNumber = 2; @@ -4543,10 +4526,10 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( } } - for (k = 0; k < NumberOfActiveSurfaces; ++k) { - if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame) + for (k = 0; k < v->NumberOfActiveSurfaces; ++k) { + if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame) DRAMClockChangeMethod = 1; - else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport) + else if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport) DRAMClockChangeMethod = 2; } @@ -4573,16 +4556,16 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( *DRAMClockChangeSupport = dm_dram_clock_change_unsupported; } - for (k = 0; k < NumberOfActiveSurfaces; ++k) { + for (k = 0; k < v->NumberOfActiveSurfaces; ++k) { unsigned int dst_y_pstate; unsigned int src_y_pstate_l; unsigned int src_y_pstate_c; unsigned int src_y_ahead_l, src_y_ahead_c, sub_vp_lines_l, sub_vp_lines_c; - dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1); - src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]); + dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (v->HTotal[k] / v->PixelClock[k]), 1); + src_y_pstate_l = dml_ceil(dst_y_pstate * v->VRatio[k], SwathHeightY[k]); src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k]; - sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k]; + sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + v->meta_row_height[k]; #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: k=%d, DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]); @@ -4593,21 +4576,21 @@ dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, LBL dml_print("DML::%s: k=%d, dst_y_pstate = %d\n", __func__, k, dst_y_pstate); dml_print("DML::%s: k=%d, src_y_pstate_l = %d\n", __func__, k, src_y_pstate_l); dml_print("DML::%s: k=%d, src_y_ahead_l = %d\n", __func__, k, src_y_ahead_l); -dml_print("DML::%s: k=%d, meta_row_height = %d\n", __func__, k, meta_row_height[k]); +dml_print("DML::%s: k=%d, v->meta_row_height = %d\n", __func__, k, v->meta_row_height[k]); dml_print("DML::%s: k=%d, sub_vp_lines_l = %d\n", __func__, k, sub_vp_lines_l); #endif SubViewportLinesNeededInMALL[k] = sub_vp_lines_l; if (BytePerPixelDETC[k] > 0) { - src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]); + src_y_pstate_c = dml_ceil(dst_y_pstate * v->VRatioChroma[k], SwathHeightC[k]); src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k]; - sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + 
meta_row_height_chroma[k]; + sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + v->meta_row_height_chroma[k]; SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c); #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: k=%d, src_y_pstate_c = %d\n", __func__, k, src_y_pstate_c); dml_print("DML::%s: k=%d, src_y_ahead_c = %d\n", __func__, k, src_y_ahead_c); -dml_print("DML::%s: k=%d, meta_row_height_chroma = %d\n", __func__, k, meta_row_height_chroma[k]); +dml_print("DML::%s: k=%d, v->meta_row_height_chroma = %d\n", __func__, k, v->meta_row_height_chroma[k]); dml_print("DML::%s: k=%d, sub_vp_lines_c = %d\n", __func__, k, sub_vp_lines_c); #endif } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h index d293856ba906..0b427d89b3c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h @@ -30,6 +30,7 @@ #include "os_types.h" #include "../dc_features.h" #include "../display_mode_structs.h" +#include "dml/display_mode_vba.h" unsigned int dml32_dscceComputeDelay( unsigned int bpc, @@ -215,6 +216,7 @@ void dml32_CalculateDETBufferSize( void dml32_CalculateODMMode( unsigned int MaximumPixelsPerLinePerDSCUnit, unsigned int HActive, + enum output_format_class OutFormat, enum output_encoder_class Output, enum odm_combine_policy ODMUse, double StateDispclk, @@ -712,28 +714,14 @@ double dml32_CalculateExtraLatency( unsigned int HostVMMaxNonCachedPageTableLevels); bool dml32_CalculatePrefetchSchedule( + struct vba_vars_st *v, + unsigned int k, double HostVMInefficiencyFactor, DmlPipe *myPipe, unsigned int DSCDelay, - double DPPCLKDelaySubtotalPlusCNVCFormater, - double DPPCLKDelaySCL, - double DPPCLKDelaySCLLBOnly, - double DPPCLKDelayCNVCCursor, - double DISPCLKDelaySubtotal, unsigned int DPP_RECOUT_WIDTH, - enum output_format_class OutputFormat, - unsigned int MaxInterDCNTileRepeaters, unsigned int VStartup, unsigned int MaxVStartup, - unsigned int GPUVMPageTableLevels, - bool GPUVMEnable, - bool HostVMEnable, - unsigned int HostVMMaxNonCachedPageTableLevels, - double HostVMMinPageSize, - bool DynamicMetadataEnable, - bool DynamicMetadataVMEnabled, - int DynamicMetadataLinesBeforeActiveRequired, - unsigned int DynamicMetadataTransmittedBytes, double UrgentLatency, double UrgentExtraLatency, double TCalc, @@ -807,58 +795,28 @@ void dml32_CalculateFlipSchedule( bool *ImmediateFlipSupportedForPipe); void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( - bool USRRetrainingRequiredFinal, - enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[], + struct vba_vars_st *v, unsigned int PrefetchMode, - unsigned int NumberOfActiveSurfaces, - unsigned int MaxLineBufferLines, - unsigned int LineBufferSize, - unsigned int WritebackInterfaceBufferSize, double DCFCLK, double ReturnBW, - bool SynchronizeTimingsFinal, - bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal, - bool DRRDisplay[], - unsigned int dpte_group_bytes[], - unsigned int meta_row_height[], - unsigned int meta_row_height_chroma[], SOCParametersList mmSOCParameters, - unsigned int WritebackChunkSize, double SOCCLK, double DCFClkDeepSleep, unsigned int DETBufferSizeY[], unsigned int DETBufferSizeC[], unsigned int SwathHeightY[], unsigned int SwathHeightC[], - unsigned int LBBitPerPixel[], double SwathWidthY[], double SwathWidthC[], - double HRatio[], - double HRatioChroma[], - unsigned int VTaps[], - unsigned int VTapsChroma[], 
- double VRatio[], - double VRatioChroma[], - unsigned int HTotal[], - unsigned int VTotal[], - unsigned int VActive[], - double PixelClock[], - unsigned int BlendingAndTiming[], unsigned int DPPPerSurface[], double BytePerPixelDETY[], double BytePerPixelDETC[], double DSTXAfterScaler[], double DSTYAfterScaler[], - bool WritebackEnable[], - enum source_format_class WritebackPixelFormat[], - double WritebackDestinationWidth[], - double WritebackDestinationHeight[], - double WritebackSourceHeight[], bool UnboundedRequestEnabled, unsigned int CompressedBufferSizeInkByte, /* Output */ - Watermarks *Watermark, enum clock_change_support *DRAMClockChangeSupport, double MaxActiveDRAMClockChangeLatencySupported[], unsigned int SubViewportLinesNeededInMALL[], diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index 5d27ff0ebb5f..f5400eda07a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -35,6 +35,8 @@ #include "dcn30/display_rq_dlg_calc_30.h" #include "dcn31/display_mode_vba_31.h" #include "dcn31/display_rq_dlg_calc_31.h" +#include "dcn314/display_mode_vba_314.h" +#include "dcn314/display_rq_dlg_calc_314.h" #include "dcn32/display_mode_vba_32.h" #include "dcn32/display_rq_dlg_calc_32.h" #include "dml_logger.h" @@ -74,6 +76,13 @@ const struct dml_funcs dml31_funcs = { .rq_dlg_get_rq_reg = dml31_rq_dlg_get_rq_reg }; +const struct dml_funcs dml314_funcs = { + .validate = dml314_ModeSupportAndSystemConfigurationFull, + .recalculate = dml314_recalculate, + .rq_dlg_get_dlg_reg = dml314_rq_dlg_get_dlg_reg, + .rq_dlg_get_rq_reg = dml314_rq_dlg_get_rq_reg +}; + const struct dml_funcs dml32_funcs = { .validate = dml32_ModeSupportAndSystemConfigurationFull, .recalculate = dml32_recalculate, @@ -107,6 +116,9 @@ void dml_init_instance(struct display_mode_lib *lib, case DML_PROJECT_DCN31_FPGA: lib->funcs = dml31_funcs; break; + case DML_PROJECT_DCN314: + lib->funcs = dml314_funcs; + break; case DML_PROJECT_DCN32: lib->funcs = dml32_funcs; break; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index 2bdd6ed22611..b1878a1440e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -41,6 +41,7 @@ enum dml_project { DML_PROJECT_DCN30, DML_PROJECT_DCN31, DML_PROJECT_DCN31_FPGA, + DML_PROJECT_DCN314, DML_PROJECT_DCN32, }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 492aec634b68..2051ddaa641a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -651,10 +651,10 @@ struct vba_vars_st { unsigned int OutputTypeAndRatePerState[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX]; double RequiredDISPCLKPerSurface[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX]; - unsigned int MicroTileHeightY[DC__NUM_DPP__MAX]; - unsigned int MicroTileHeightC[DC__NUM_DPP__MAX]; - unsigned int MicroTileWidthY[DC__NUM_DPP__MAX]; - unsigned int MicroTileWidthC[DC__NUM_DPP__MAX]; + unsigned int MacroTileHeightY[DC__NUM_DPP__MAX]; + unsigned int MacroTileHeightC[DC__NUM_DPP__MAX]; + unsigned int MacroTileWidthY[DC__NUM_DPP__MAX]; + unsigned int MacroTileWidthC[DC__NUM_DPP__MAX]; bool ImmediateFlipRequiredFinal; bool DCCProgrammingAssumesScanDirectionUnknownFinal; bool EnoughWritebackUnits; @@ -800,8 +800,6 @@ 
struct vba_vars_st { double PSCL_FACTOR[DC__NUM_DPP__MAX]; double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX]; double MaximumVStartup[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX]; - unsigned int MacroTileWidthY[DC__NUM_DPP__MAX]; - unsigned int MacroTileWidthC[DC__NUM_DPP__MAX]; double AlignedDCCMetaPitch[DC__NUM_DPP__MAX]; double AlignedYPitch[DC__NUM_DPP__MAX]; double AlignedCPitch[DC__NUM_DPP__MAX]; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 5d2b028e5dad..d9f1b0a4fbd4 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -214,6 +214,7 @@ struct dummy_pstate_entry { struct clk_bw_params { unsigned int vram_type; unsigned int num_channels; + unsigned int dram_channel_width_bytes; unsigned int dispclk_vco_khz; unsigned int dc_mode_softmax_memclk; struct clk_limit_table clk_table; diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 58158764adc0..7614125c92c7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -219,6 +219,10 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc, struct dc_state *context, uint8_t disabled_master_pipe_idx); +void reset_sync_context_for_pipe(const struct dc *dc, + struct dc_state *context, + uint8_t pipe_idx); + uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter); const struct link_hwss *get_link_hwss(const struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 859ffd8725c5..04f7656906ca 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1600,6 +1600,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num, struct fixed31_32 lut2; struct fixed31_32 delta_lut; struct fixed31_32 delta_index; + const struct fixed31_32 one = dc_fixpt_from_int(1); i = 0; /* fixed_pt library has problems handling too small values */ @@ -1628,6 +1629,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num, } else hw_x = coordinates_x[i].x; + if (dc_fixpt_le(one, hw_x)) + hw_x = one; + norm_x = dc_fixpt_mul(norm_factor, hw_x); index = dc_fixpt_floor(norm_x); if (index < 0 || index > 255) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 6db67f082d91..644ea150e075 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -368,6 +368,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu) smu_baco->platform_support = (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false; + + /* + * Disable BACO entry/exit completely on below SKUs to + * avoid hardware intermittent failures. 
+ */ + if (((adev->pdev->device == 0x73A1) && + (adev->pdev->revision == 0x00)) || + ((adev->pdev->device == 0x73BF) && + (adev->pdev->revision == 0xCF))) + smu_baco->platform_support = false; + } } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 24488f4cb78c..93f9b8377539 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -209,7 +209,8 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu) if (!adev->scpm_enabled) return 0; - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) + if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) || + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0))) return 0; /* override pptable_id from driver parameter */ @@ -218,27 +219,6 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu) dev_info(adev->dev, "override pptable id %d\n", pptable_id); } else { pptable_id = smu->smu_table.boot_values.pp_table_id; - - /* - * Temporary solution for SMU V13.0.0 with SCPM enabled: - * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795 - * - use 36831 soft pptable when pptable_id is 3683 - */ - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) { - switch (pptable_id) { - case 3664: - case 3715: - case 3795: - pptable_id = 0; - break; - case 3683: - pptable_id = 36831; - break; - default: - dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id); - return -EINVAL; - } - } } /* "pptable_id == 0" means vbios carries the pptable. */ @@ -471,26 +451,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu) } else { pptable_id = smu->smu_table.boot_values.pp_table_id; - /* - * Temporary solution for SMU V13.0.0 with SCPM disabled: - * - use 3664, 3683 or 3715 on request - * - use 3664 when pptable_id is 0 - * TODO: drop these when the pptable carried in vbios is ready. - */ - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) { - switch (pptable_id) { - case 0: - pptable_id = 3664; - break; - case 3664: - case 3683: - case 3715: - break; - default: - dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id); - return -EINVAL; - } - } } /* force using vbios pptable in sriov mode */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 7db2fd9ea74a..096327513dd0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -410,58 +410,11 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct amdgpu_device *adev = smu->adev; - uint32_t pptable_id; int ret = 0; - /* - * With SCPM enabled, the pptable used will be signed. It cannot - * be used directly by driver. To get the raw pptable, we need to - * rely on the combo pptable(and its revelant SMU message). 
- */ - if (adev->scpm_enabled) { - ret = smu_v13_0_0_get_pptable_from_pmfw(smu, - &smu_table->power_play_table, - &smu_table->power_play_table_size); - } else { - /* override pptable_id from driver parameter */ - if (amdgpu_smu_pptable_id >= 0) { - pptable_id = amdgpu_smu_pptable_id; - dev_info(adev->dev, "override pptable id %d\n", pptable_id); - } else { - pptable_id = smu_table->boot_values.pp_table_id; - } - - /* - * Temporary solution for SMU V13.0.0 with SCPM disabled: - * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795 - * - use soft pptable when pptable_id is 3683 - */ - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) { - switch (pptable_id) { - case 3664: - case 3715: - case 3795: - pptable_id = 0; - break; - case 3683: - break; - default: - dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id); - return -EINVAL; - } - } - - /* force using vbios pptable in sriov mode */ - if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1)) - ret = smu_v13_0_0_get_pptable_from_pmfw(smu, - &smu_table->power_play_table, - &smu_table->power_play_table_size); - else - ret = smu_v13_0_get_pptable_from_firmware(smu, - &smu_table->power_play_table, - &smu_table->power_play_table_size, - pptable_id); - } + ret = smu_v13_0_0_get_pptable_from_pmfw(smu, + &smu_table->power_play_table, + &smu_table->power_play_table_size); if (ret) return ret; diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index dd32b484dd82..ce96234f3df2 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -581,11 +581,9 @@ static const struct psb_offset cdv_regmap[2] = { static int cdv_chip_setup(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct pci_dev *pdev = to_pci_dev(dev->dev); INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func); - if (pci_enable_msi(pdev)) - dev_warn(dev->dev, "Enabling MSI failed!\n"); + dev_priv->use_msi = true; dev_priv->regmap = cdv_regmap; gma_get_core_freq(dev); psb_intel_opregion_init(dev); diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index dffe37490206..4b7627a72637 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -112,12 +112,12 @@ static void psb_gem_free_object(struct drm_gem_object *obj) { struct psb_gem_object *pobj = to_psb_gem_object(obj); - drm_gem_object_release(obj); - /* Undo the mmap pin if we are destroying the object */ if (pobj->mmapping) psb_gem_unpin(pobj); + drm_gem_object_release(obj); + WARN_ON(pobj->in_gart && !pobj->stolen); release_resource(&pobj->resource); diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index bd40c040a2c9..2f52eceda3a1 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -532,15 +532,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc, WARN_ON(drm_crtc_vblank_get(crtc) != 0); gma_crtc->page_flip_event = event; + spin_unlock_irqrestore(&dev->event_lock, flags); /* Call this locked if we want an event at vblank interrupt. 
*/ ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb); if (ret) { - gma_crtc->page_flip_event = NULL; - drm_crtc_vblank_put(crtc); + spin_lock_irqsave(&dev->event_lock, flags); + if (gma_crtc->page_flip_event) { + gma_crtc->page_flip_event = NULL; + drm_crtc_vblank_put(crtc); + } + spin_unlock_irqrestore(&dev->event_lock, flags); } - - spin_unlock_irqrestore(&dev->event_lock, flags); } else { ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb); } diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c index 5923a9c89312..f90e628cb482 100644 --- a/drivers/gpu/drm/gma500/oaktrail_device.c +++ b/drivers/gpu/drm/gma500/oaktrail_device.c @@ -501,12 +501,9 @@ static const struct psb_offset oaktrail_regmap[2] = { static int oaktrail_chip_setup(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct pci_dev *pdev = to_pci_dev(dev->dev); int ret; - if (pci_enable_msi(pdev)) - dev_warn(dev->dev, "Enabling MSI failed!\n"); - + dev_priv->use_msi = true; dev_priv->regmap = oaktrail_regmap; ret = mid_chip_setup(dev); diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c index b91de6d36e41..66873085d450 100644 --- a/drivers/gpu/drm/gma500/power.c +++ b/drivers/gpu/drm/gma500/power.c @@ -139,8 +139,6 @@ static void gma_suspend_pci(struct pci_dev *pdev) dev_priv->regs.saveBSM = bsm; pci_read_config_dword(pdev, 0xFC, &vbt); dev_priv->regs.saveVBT = vbt; - pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr); - pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); @@ -168,9 +166,6 @@ static bool gma_resume_pci(struct pci_dev *pdev) pci_restore_state(pdev); pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM); pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT); - /* restoring MSI address and data in PCIx space */ - pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr); - pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data); ret = pci_enable_device(pdev); if (ret != 0) @@ -223,8 +218,7 @@ int gma_power_resume(struct device *_dev) mutex_lock(&power_mutex); gma_resume_pci(pdev); gma_resume_display(pdev); - gma_irq_preinstall(dev); - gma_irq_postinstall(dev); + gma_irq_install(dev); mutex_unlock(&power_mutex); return 0; } diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 1d8744f3e702..54e756b48606 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -383,7 +383,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R); spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); - gma_irq_install(dev, pdev->irq); + gma_irq_install(dev); dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 0ea3d23575f3..731cc356c07a 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -490,6 +490,7 @@ struct drm_psb_private { int rpm_enabled; /* MID specific */ + bool use_msi; bool has_gct; struct oaktrail_gct_data gct_data; @@ -499,10 +500,6 @@ struct drm_psb_private { /* Register state */ struct psb_save_area regs; - /* MSI reg save */ - uint32_t msi_addr; - uint32_t msi_data; - /* Hotplug handling */ struct work_struct hotplug_work; diff --git a/drivers/gpu/drm/gma500/psb_irq.c 
b/drivers/gpu/drm/gma500/psb_irq.c index e6e6d61bbeab..038f18ed0a95 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c @@ -316,17 +316,24 @@ void gma_irq_postinstall(struct drm_device *dev) spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); } -int gma_irq_install(struct drm_device *dev, unsigned int irq) +int gma_irq_install(struct drm_device *dev) { + struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); int ret; - if (irq == IRQ_NOTCONNECTED) + if (dev_priv->use_msi && pci_enable_msi(pdev)) { + dev_warn(dev->dev, "Enabling MSI failed!\n"); + dev_priv->use_msi = false; + } + + if (pdev->irq == IRQ_NOTCONNECTED) return -ENOTCONN; gma_irq_preinstall(dev); /* PCI devices require shared interrupts. */ - ret = request_irq(irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev); + ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev); if (ret) return ret; @@ -369,6 +376,8 @@ void gma_irq_uninstall(struct drm_device *dev) spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); free_irq(pdev->irq, dev); + if (dev_priv->use_msi) + pci_disable_msi(pdev); } int gma_crtc_enable_vblank(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h index b51e395194ff..7648f69824a5 100644 --- a/drivers/gpu/drm/gma500/psb_irq.h +++ b/drivers/gpu/drm/gma500/psb_irq.h @@ -17,7 +17,7 @@ struct drm_device; void gma_irq_preinstall(struct drm_device *dev); void gma_irq_postinstall(struct drm_device *dev); -int gma_irq_install(struct drm_device *dev, unsigned int irq); +int gma_irq_install(struct drm_device *dev); void gma_irq_uninstall(struct drm_device *dev); int gma_crtc_enable_vblank(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig index 073adfe438dd..4e41c144a290 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig +++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig @@ -2,6 +2,7 @@ config DRM_HISI_HIBMC tristate "DRM Support for Hisilicon Hibmc" depends on DRM && PCI && (ARM64 || COMPILE_TEST) + depends on MMU select DRM_KMS_HELPER select DRM_VRAM_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index 6d11e7938c83..f84d39762a72 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -23,9 +23,6 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 -#define PCI_VENDOR_ID_MICROSOFT 0x1414 -#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353 - DEFINE_DRM_GEM_FOPS(hv_fops); static struct drm_driver hyperv_driver = { @@ -133,7 +130,6 @@ static int hyperv_vmbus_probe(struct hv_device *hdev, } ret = hyperv_setup_vram(hv, hdev); - if (ret) goto err_vmbus_close; @@ -150,18 +146,20 @@ static int hyperv_vmbus_probe(struct hv_device *hdev, ret = hyperv_mode_config_init(hv); if (ret) - goto err_vmbus_close; + goto err_free_mmio; ret = drm_dev_register(dev, 0); if (ret) { drm_err(dev, "Failed to register drm driver.\n"); - goto err_vmbus_close; + goto err_free_mmio; } drm_fbdev_generic_setup(dev, 0); return 0; +err_free_mmio: + vmbus_free_mmio(hv->mem->start, hv->fb_size); err_vmbus_close: vmbus_close(hdev->channel); err_hv_set_drv_data: diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 885c74f60366..1390729401a0 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1629,6 +1629,8 @@ static int 
gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, /* FIXME: initialize from VBT */ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; + vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; + ret = intel_dsc_compute_params(crtc_state); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index ac90d455a7c7..3ed7eeacc706 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -389,23 +389,13 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp) return intel_dp_is_edp(intel_dp) ? 810000 : 1350000; } -static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy) -{ - u32 voltage; - - voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK; - - return voltage == VOLTAGE_INFO_0_85V; -} - static int icl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - if (intel_phy_is_combo(dev_priv, phy) && - (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp))) + if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp)) return 540000; return 810000; @@ -413,23 +403,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp) static int ehl_max_source_rate(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - - if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy)) - return 540000; - - return 810000; -} - -static int dg1_max_source_rate(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); - - if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy)) + if (intel_dp_is_edp(intel_dp)) return 540000; return 810000; @@ -491,7 +465,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) max_rate = dg2_max_source_rate(intel_dp); else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) - max_rate = dg1_max_source_rate(intel_dp); + max_rate = 810000; else if (IS_JSL_EHL(dev_priv)) max_rate = ehl_max_source_rate(intel_dp); else @@ -1395,6 +1369,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, * DP_DSC_RC_BUF_SIZE for this. */ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; + vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; /* * Slice Height of 8 works for all currently available panels. 
So start diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 43e1bbc1e303..ca530f0733e0 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -460,7 +460,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) u8 i = 0; vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay; - vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay; vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width, pipe_config->dsc.slice_count); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index dabdfe09f5e5..0bcde53c50c6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -1269,6 +1269,10 @@ static void i915_gem_context_release_work(struct work_struct *work) trace_i915_context_free(ctx); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + spin_lock(&ctx->i915->gem.contexts.lock); + list_del(&ctx->link); + spin_unlock(&ctx->i915->gem.contexts.lock); + if (ctx->syncobj) drm_syncobj_put(ctx->syncobj); @@ -1521,10 +1525,6 @@ static void context_close(struct i915_gem_context *ctx) ctx->file_priv = ERR_PTR(-EBADF); - spin_lock(&ctx->i915->gem.contexts.lock); - list_del(&ctx->link); - spin_unlock(&ctx->i915->gem.contexts.lock); - client = ctx->client; if (client) { spin_lock(&client->ctx_lock); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 834c707d1877..3e91f44829e9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1438,7 +1438,12 @@ void intel_guc_busyness_park(struct intel_gt *gt) if (!guc_submission_initialized(guc)) return; - cancel_delayed_work(&guc->timestamp.work); + /* + * There is a race with suspend flow where the worker runs after suspend + * and causes an unclaimed register access warning. Cancel the worker + * synchronously here. + */ + cancel_delayed_work_sync(&guc->timestamp.work); /* * Before parking, we should sample engine busyness stats if we need to. diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 702e5b89be22..b605d0ceaefa 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1191,7 +1191,8 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv) intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc); - i915_gem_drain_freed_objects(dev_priv); + /* Flush any outstanding work, including i915_gem_context.release_work. 
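+	 * context_close() no longer drops ctx->link itself; the release
+	 * worker does, so the whole workqueue must be drained here, not just
+	 * the freed-object queue, before the list_empty() check below.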
*/ + i915_gem_drain_workqueue(dev_priv); drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list)); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3168d7007e10..135d04c2d41c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1857,14 +1857,14 @@ #define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8) #define GT0_PERF_LIMIT_REASONS_MASK 0xde3 -#define PROCHOT_MASK REG_BIT(1) -#define THERMAL_LIMIT_MASK REG_BIT(2) -#define RATL_MASK REG_BIT(6) -#define VR_THERMALERT_MASK REG_BIT(7) -#define VR_TDC_MASK REG_BIT(8) -#define POWER_LIMIT_4_MASK REG_BIT(9) -#define POWER_LIMIT_1_MASK REG_BIT(11) -#define POWER_LIMIT_2_MASK REG_BIT(12) +#define PROCHOT_MASK REG_BIT(0) +#define THERMAL_LIMIT_MASK REG_BIT(1) +#define RATL_MASK REG_BIT(5) +#define VR_THERMALERT_MASK REG_BIT(6) +#define VR_TDC_MASK REG_BIT(7) +#define POWER_LIMIT_4_MASK REG_BIT(8) +#define POWER_LIMIT_1_MASK REG_BIT(10) +#define POWER_LIMIT_2_MASK REG_BIT(11) #define CHV_CLK_CTL1 _MMIO(0x101100) #define VLV_CLK_CTL2 _MMIO(0x101104) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 260371716490..373582cfd8f3 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1882,12 +1882,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma, enum dma_resv_usage usage; int idx; - obj->read_domains = 0; if (flags & EXEC_OBJECT_WRITE) { usage = DMA_RESV_USAGE_WRITE; obj->write_domain = I915_GEM_DOMAIN_RENDER; + obj->read_domains = 0; } else { usage = DMA_RESV_USAGE_READ; + obj->write_domain = 0; } dma_fence_array_for_each(curr, idx, fence) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 2d72cc5ddaba..6b6d5335c834 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -157,7 +157,7 @@ static void mtk_dither_config(struct device *dev, unsigned int w, { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE); + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE); mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_CFG); mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG, diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 9cc406e1eee1..3b7d13028fb6 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -685,6 +685,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi) if (--dsi->refcount != 0) return; + /* + * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since + * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(), + * which needs irq for vblank, and mtk_dsi_stop() will disable irq. + * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(), + * after dsi is fully set. + */ + mtk_dsi_stop(dsi); + + mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500); mtk_dsi_reset_engine(dsi); mtk_dsi_lane0_ulp_mode_enter(dsi); mtk_dsi_clk_ulp_mode_enter(dsi); @@ -735,17 +745,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi) if (!dsi->enabled) return; - /* - * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since - * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(), - * which needs irq for vblank, and mtk_dsi_stop() will disable irq. 
- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(), - * after dsi is fully set. - */ - mtk_dsi_stop(dsi); - - mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500); - dsi->enabled = false; } @@ -808,10 +807,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge, static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = { .attach = mtk_dsi_bridge_attach, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_disable = mtk_dsi_bridge_atomic_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_enable = mtk_dsi_bridge_atomic_enable, .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable, .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable, + .atomic_reset = drm_atomic_helper_bridge_reset, .mode_set = mtk_dsi_bridge_mode_set, }; diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index b9ac932af8d0..03acc68abf2c 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -170,7 +170,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane, /* Enable OSD and BLK0, set max global alpha */ priv->viu.osd1_ctrl_stat = OSD_ENABLE | - (0xFF << OSD_GLOBAL_ALPHA_SHIFT) | + (0x100 << OSD_GLOBAL_ALPHA_SHIFT) | OSD_BLK0_ENABLE; priv->viu.osd1_ctrl_stat2 = readl(priv->io_base + diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index bb7e109534de..d4b907889a21 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv, priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12)); writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff), priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21)); - writel((m[11] & 0x1fff) << 16, + writel((m[11] & 0x1fff), priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22)); writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff), diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 251a1bb648cc..a222bf76804f 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -262,7 +262,11 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - drm_fbdev_generic_setup(dev, 0); + /* + * FIXME: A 24-bit color depth does not work with 24 bpp on + * G200ER. Force 32 bpp. 
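+	 * Note: the second argument of drm_fbdev_generic_setup() is the
+	 * preferred bpp; 0 would let the driver pick its own default.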
+ */ + drm_fbdev_generic_setup(dev, 32); return 0; } diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index cdb154c8b866..b75c690bb0cc 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1295,7 +1295,8 @@ static const struct panel_desc innolux_n116bca_ea1 = { }, .delay = { .hpd_absent = 200, - .prepare_to_enable = 80, + .enable = 80, + .disable = 50, .unprepare = 500, }, }; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index ff5e1a44c43a..1e716c23019a 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2257,7 +2257,7 @@ static const struct panel_desc innolux_g121i1_l01 = { .enable = 200, .disable = 20, }, - .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, .connector_type = DRM_MODE_CONNECTOR_LVDS, }; diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index c204e9b95c1f..518ee13b1d6f 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -283,8 +283,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector) return ret; } -static int cdn_dp_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +cdn_dp_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { struct cdn_dp_device *dp = connector_to_dp(connector); struct drm_display_info *display_info = &dp->connector.display_info; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index e4631f515ba4..f9aa8b96c695 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -1439,11 +1439,15 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id, die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX; die |= RK3568_SYS_DSP_INFACE_EN_HDMI | FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id); + dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL; + dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags); break; case ROCKCHIP_VOP2_EP_EDP0: die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX; die |= RK3568_SYS_DSP_INFACE_EN_EDP | FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id); + dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL; + dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags); break; case ROCKCHIP_VOP2_EP_MIPI0: die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX; diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c index 660036da7449..922d83eb7ddf 100644 --- a/drivers/hv/hv_fcopy.c +++ b/drivers/hv/hv_fcopy.c @@ -129,7 +129,7 @@ static void fcopy_send_data(struct work_struct *dummy) /* * The strings sent from the host are encoded in - * in utf16; convert it to utf8 strings. + * utf16; convert it to utf8 strings. * The host assures us that the utf16 strings will not exceed * the max lengths specified. 
We will however, reserve room * for the string terminating character - in the utf16s_utf8s() diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 23c680d1a0f5..3c833ea60db6 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -35,6 +35,7 @@ #include <linux/kernel.h> #include <linux/syscore_ops.h> #include <linux/dma-map-ops.h> +#include <linux/pci.h> #include <clocksource/hyperv_timer.h> #include "hyperv_vmbus.h" @@ -2262,26 +2263,43 @@ static int vmbus_acpi_remove(struct acpi_device *device) static void vmbus_reserve_fb(void) { - int size; + resource_size_t start = 0, size; + struct pci_dev *pdev; + + if (efi_enabled(EFI_BOOT)) { + /* Gen2 VM: get FB base from EFI framebuffer */ + start = screen_info.lfb_base; + size = max_t(__u32, screen_info.lfb_size, 0x800000); + } else { + /* Gen1 VM: get FB base from PCI */ + pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT, + PCI_DEVICE_ID_HYPERV_VIDEO, NULL); + if (!pdev) + return; + + if (pdev->resource[0].flags & IORESOURCE_MEM) { + start = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + } + + /* + * Release the PCI device so hyperv_drm or hyperv_fb driver can + * grab it later. + */ + pci_dev_put(pdev); + } + + if (!start) + return; + /* * Make a claim for the frame buffer in the resource tree under the * first node, which will be the one below 4GB. The length seems to * be underreported, particularly in a Generation 1 VM. So start out * reserving a larger area and make it smaller until it succeeds. */ - - if (screen_info.lfb_base) { - if (efi_enabled(EFI_BOOT)) - size = max_t(__u32, screen_info.lfb_size, 0x800000); - else - size = max_t(__u32, screen_info.lfb_size, 0x4000000); - - for (; !fb_mmio && (size >= 0x100000); size >>= 1) { - fb_mmio = __request_region(hyperv_mmio, - screen_info.lfb_base, size, - fb_mmio_name, 0); - } - } + for (; !fb_mmio && (size >= 0x100000); size >>= 1) + fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0); } /** @@ -2313,7 +2331,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, bool fb_overlap_ok) { struct resource *iter, *shadow; - resource_size_t range_min, range_max, start; + resource_size_t range_min, range_max, start, end; const char *dev_n = dev_name(&device_obj->device); int retval; @@ -2348,6 +2366,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, range_max = iter->end; start = (range_min + align - 1) & ~(align - 1); for (; start + size - 1 <= range_max; start += align) { + end = start + size - 1; + + /* Skip the whole fb_mmio region if not fb_overlap_ok */ + if (!fb_overlap_ok && fb_mmio && + (((start >= fb_mmio->start) && (start <= fb_mmio->end)) || + ((end >= fb_mmio->start) && (end <= fb_mmio->end)))) + continue; + shadow = __request_region(iter, start, size, NULL, IORESOURCE_BUSY); if (!shadow) diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index e47fa3465671..3082183bd66a 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -1583,7 +1583,7 @@ static int i2c_imx_remove(struct platform_device *pdev) if (i2c_imx->dma) i2c_imx_dma_free(i2c_imx); - if (ret == 0) { + if (ret >= 0) { /* setup chip registers to defaults */ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR); imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR); diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c index c9ef0dc56678..1a0fc9640c23 100644 --- a/drivers/i2c/busses/i2c-mlxbf.c +++ b/drivers/i2c/busses/i2c-mlxbf.c @@ -6,6 +6,7 @@ */ #include 
<linux/acpi.h> +#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> @@ -63,13 +64,14 @@ */ #define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000) /* Reference clock for Bluefield - 156 MHz. */ -#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000) +#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL /* Constant used to determine the PLL frequency. */ -#define MLNXBF_I2C_COREPLL_CONST 16384 +#define MLNXBF_I2C_COREPLL_CONST 16384ULL + +#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL /* PLL registers. */ -#define MLXBF_I2C_CORE_PLL_REG0 0x0 #define MLXBF_I2C_CORE_PLL_REG1 0x4 #define MLXBF_I2C_CORE_PLL_REG2 0x8 @@ -181,22 +183,15 @@ #define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ /* Core PLL TYU configuration. */ -#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0) -#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0) -#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0) - -#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3 -#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16 -#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20 +#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3) +#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16) +#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20) /* Core PLL YU configuration. */ #define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0) #define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0) -#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0) +#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26) -#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0 -#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1 -#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26 /* Core PLL frequency. */ static u64 mlxbf_i2c_corepll_frequency; @@ -479,8 +474,6 @@ static struct mutex mlxbf_i2c_bus_lock; #define MLXBF_I2C_MASK_8 GENMASK(7, 0) #define MLXBF_I2C_MASK_16 GENMASK(15, 0) -#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000 - /* * Function to poll a set of bits at a specific address; it checks whether * the bits are equal to zero when eq_zero is set to 'true', and not equal @@ -669,7 +662,7 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave, /* Clear status bits. */ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS); /* Set the cause data. */ - writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR); + writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR); /* Zero PEC byte. */ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC); /* Zero byte count. */ @@ -738,6 +731,9 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, if (flags & MLXBF_I2C_F_WRITE) { write_en = 1; write_len += operation->length; + if (data_idx + operation->length > + MLXBF_I2C_MASTER_DATA_DESC_SIZE) + return -ENOBUFS; memcpy(data_desc + data_idx, operation->buffer, operation->length); data_idx += operation->length; @@ -1407,24 +1403,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev, return 0; } -static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) +static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) { - u64 core_frequency, pad_frequency; + u64 core_frequency; u8 core_od, core_r; u32 corepll_val; u16 core_f; - pad_frequency = MLXBF_I2C_PLL_IN_FREQ; - corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); /* Get Core PLL configuration bits. 
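	 * The GENMASK() definitions above now carry the field offsets, so a
	 * single FIELD_GET() replaces each rol32()-plus-mask pair.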
*/ - core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_F_TYU_MASK; - core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK; - core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_R_TYU_MASK; + core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val); + core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val); + core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val); /* * Compute PLL output frequency as follow: @@ -1436,31 +1427,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency * and PadFrequency, respectively. */ - core_frequency = pad_frequency * (++core_f); + core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f); core_frequency /= (++core_r) * (++core_od); return core_frequency; } -static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) +static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) { u32 corepll_reg1_val, corepll_reg2_val; - u64 corepll_frequency, pad_frequency; + u64 corepll_frequency; u8 core_od, core_r; u32 core_f; - pad_frequency = MLXBF_I2C_PLL_IN_FREQ; - corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2); /* Get Core PLL configuration bits */ - core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_F_YU_MASK; - core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_R_YU_MASK; - core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_OD_YU_MASK; + core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val); + core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val); + core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val); /* * Compute PLL output frequency as follow: @@ -1472,7 +1458,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency * and PadFrequency, respectively. 
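	 * Worked example with made-up field values: core_f = 32768, core_r = 0
	 * and core_od = 0 give 156.25 MHz * 32768 / 16384 = 312.5 MHz, divided
	 * by (0 + 1) * (0 + 1), i.e. 312.5 MHz out.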
*/ - corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST; + corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST; corepll_frequency /= (++core_r) * (++core_od); return corepll_frequency; @@ -2180,14 +2166,14 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = { [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1], [2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1] }, - .calculate_freq = mlxbf_calculate_freq_from_tyu + .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu }, [MLXBF_I2C_CHIP_TYPE_2] = { .type = MLXBF_I2C_CHIP_TYPE_2, .shared_res = { [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2] }, - .calculate_freq = mlxbf_calculate_freq_from_yu + .calculate_freq = mlxbf_i2c_calculate_freq_from_yu } }; diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index 774507b54b57..313904be5f3b 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c @@ -243,9 +243,10 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent, int (*deselect)(struct i2c_mux_core *, u32)) { struct i2c_mux_core *muxc; + size_t mux_size; - muxc = devm_kzalloc(dev, struct_size(muxc, adapter, max_adapters) - + sizeof_priv, GFP_KERNEL); + mux_size = struct_size(muxc, adapter, max_adapters); + muxc = devm_kzalloc(dev, size_add(mux_size, sizeof_priv), GFP_KERNEL); if (!muxc) return NULL; if (sizeof_priv) diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 497c912ad9e1..5a8f780e7ffd 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -2349,13 +2349,6 @@ static int dmar_device_hotplug(acpi_handle handle, bool insert) if (!dmar_in_use()) return 0; - /* - * It's unlikely that any I/O board is hot added before the IOMMU - * subsystem is initialized. - */ - if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled) - return -EOPNOTSUPP; - if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { tmp = handle; } else { diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 1f2cd43cf9bc..31bc50e538a3 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -399,7 +399,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) { unsigned long fl_sagaw, sl_sagaw; - fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0); + fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0); sl_sagaw = cap_sagaw(iommu->cap); /* Second level only. */ @@ -3019,7 +3019,13 @@ static int __init init_dmars(void) #ifdef CONFIG_INTEL_IOMMU_SVM if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { + /* + * Call dmar_alloc_hwirq() with dmar_global_lock held, + * could cause possible lock race condition. + */ + up_write(&dmar_global_lock); ret = intel_svm_enable_prq(iommu); + down_write(&dmar_global_lock); if (ret) goto free_iommu; } @@ -3932,6 +3938,7 @@ int __init intel_iommu_init(void) force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) || platform_optin_force_iommu(); + down_write(&dmar_global_lock); if (dmar_table_init()) { if (force_on) panic("tboot: Failed to initialize DMAR table\n"); @@ -3944,6 +3951,16 @@ int __init intel_iommu_init(void) goto out_free_dmar; } + up_write(&dmar_global_lock); + + /* + * The bus notifier takes the dmar_global_lock, so lockdep will + * complain later when we register it under the lock. 
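+	 * Register it in this window, while the lock is dropped.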
+ */ + dmar_register_bus_notifier(); + + down_write(&dmar_global_lock); + if (!no_iommu) intel_iommu_debugfs_init(); @@ -3988,9 +4005,11 @@ int __init intel_iommu_init(void) pr_err("Initialization failed\n"); goto out_free_dmar; } + up_write(&dmar_global_lock); init_iommu_pm_ops(); + down_read(&dmar_global_lock); for_each_active_iommu(iommu, drhd) { /* * The flush queue implementation does not perform @@ -4008,11 +4027,13 @@ int __init intel_iommu_init(void) "%s", iommu->name); iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); } + up_read(&dmar_global_lock); bus_set_iommu(&pci_bus_type, &intel_iommu_ops); if (si_domain && !hw_pass_through) register_memory_notifier(&intel_iommu_memory_nb); + down_read(&dmar_global_lock); if (probe_acpi_namespace_devices()) pr_warn("ACPI name space devices didn't probe correctly\n"); @@ -4023,15 +4044,17 @@ int __init intel_iommu_init(void) iommu_disable_protect_mem_regions(iommu); } + up_read(&dmar_global_lock); - intel_iommu_enabled = 1; - dmar_register_bus_notifier(); pr_info("Intel(R) Virtualization Technology for Directed I/O\n"); + intel_iommu_enabled = 1; + return 0; out_free_dmar: intel_iommu_free_dmars(); + up_write(&dmar_global_lock); return ret; } diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index 7835bb0f32fc..e012b21c4fd7 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -511,7 +511,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb) if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1) return -ENODEV; - if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc)) + if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc)) return -ENODEV; switch (fc_usb->udev->speed) { diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 184608bd8999..e58a1e0cadd2 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -88,8 +88,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = { static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL; static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; -static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = - MULTICAST_LACPDU_ADDR; +const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = { + 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 +}; /* ================= main 802.3ad protocol functions ================== */ static int ad_lacpdu_send(struct port *port); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5c2febe94428..86d42306aa5e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -865,12 +865,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev, dev_uc_unsync(slave_dev, bond_dev); dev_mc_unsync(slave_dev, bond_dev); - if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* del lacpdu mc addr from mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_del(slave_dev, lacpdu_multicast); - } + if (BOND_MODE(bond) == BOND_MODE_8023AD) + dev_mc_del(slave_dev, lacpdu_mcast_addr); } /*--------------------------- Active slave change ---------------------------*/ @@ -890,7 +886,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1); - bond_hw_addr_flush(bond->dev, old_active->dev); + if (bond->dev->flags & IFF_UP) + bond_hw_addr_flush(bond->dev, old_active->dev); } if (new_active) { @@ 
-901,10 +898,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(new_active->dev, 1); - netif_addr_lock_bh(bond->dev); - dev_uc_sync(new_active->dev, bond->dev); - dev_mc_sync(new_active->dev, bond->dev); - netif_addr_unlock_bh(bond->dev); + if (bond->dev->flags & IFF_UP) { + netif_addr_lock_bh(bond->dev); + dev_uc_sync(new_active->dev, bond->dev); + dev_mc_sync(new_active->dev, bond->dev); + netif_addr_unlock_bh(bond->dev); + } } } @@ -2166,16 +2165,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, } } - netif_addr_lock_bh(bond_dev); - dev_mc_sync_multiple(slave_dev, bond_dev); - dev_uc_sync_multiple(slave_dev, bond_dev); - netif_addr_unlock_bh(bond_dev); - - if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* add lacpdu mc addr to mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; + if (bond_dev->flags & IFF_UP) { + netif_addr_lock_bh(bond_dev); + dev_mc_sync_multiple(slave_dev, bond_dev); + dev_uc_sync_multiple(slave_dev, bond_dev); + netif_addr_unlock_bh(bond_dev); - dev_mc_add(slave_dev, lacpdu_multicast); + if (BOND_MODE(bond) == BOND_MODE_8023AD) + dev_mc_add(slave_dev, lacpdu_mcast_addr); } } @@ -2447,7 +2444,8 @@ static int __bond_release_one(struct net_device *bond_dev, if (old_flags & IFF_ALLMULTI) dev_set_allmulti(slave_dev, -1); - bond_hw_addr_flush(bond_dev, slave_dev); + if (old_flags & IFF_UP) + bond_hw_addr_flush(bond_dev, slave_dev); } slave_disable_netpoll(slave); @@ -4184,6 +4182,12 @@ static int bond_open(struct net_device *bond_dev) struct list_head *iter; struct slave *slave; + if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) { + bond->rr_tx_counter = alloc_percpu(u32); + if (!bond->rr_tx_counter) + return -ENOMEM; + } + /* reset slave->backup and slave->inactive */ if (bond_has_slaves(bond)) { bond_for_each_slave(bond, slave, iter) { @@ -4221,6 +4225,9 @@ static int bond_open(struct net_device *bond_dev) /* register to receive LACPDUs */ bond->recv_probe = bond_3ad_lacpdu_recv; bond_3ad_initiate_agg_selection(bond, 1); + + bond_for_each_slave(bond, slave, iter) + dev_mc_add(slave->dev, lacpdu_mcast_addr); } if (bond_mode_can_use_xmit_hash(bond)) @@ -4232,6 +4239,7 @@ static int bond_open(struct net_device *bond_dev) static int bond_close(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave; bond_work_cancel_all(bond); bond->send_peer_notif = 0; @@ -4239,6 +4247,19 @@ static int bond_close(struct net_device *bond_dev) bond_alb_deinitialize(bond); bond->recv_probe = NULL; + if (bond_uses_primary(bond)) { + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (slave) + bond_hw_addr_flush(bond_dev, slave->dev); + rcu_read_unlock(); + } else { + struct list_head *iter; + + bond_for_each_slave(bond, slave, iter) + bond_hw_addr_flush(bond_dev, slave->dev); + } + return 0; } @@ -6228,15 +6249,6 @@ static int bond_init(struct net_device *bond_dev) if (!bond->wq) return -ENOMEM; - if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) { - bond->rr_tx_counter = alloc_percpu(u32); - if (!bond->rr_tx_counter) { - destroy_workqueue(bond->wq); - bond->wq = NULL; - return -ENOMEM; - } - } - spin_lock_init(&bond->stats_lock); netdev_lockdep_set_classes(bond_dev); diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index f857968efed7..ccb438eca517 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ 
b/drivers/net/can/flexcan/flexcan-core.c @@ -941,11 +941,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, u32 reg_ctrl, reg_id, reg_iflag1; int i; - if (unlikely(drop)) { - skb = ERR_PTR(-ENOBUFS); - goto mark_as_read; - } - mb = flexcan_get_mb(priv, n); if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { @@ -974,6 +969,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, reg_ctrl = priv->read(&mb->can_ctrl); } + if (unlikely(drop)) { + skb = ERR_PTR(-ENOBUFS); + goto mark_as_read; + } + if (reg_ctrl & FLEXCAN_MB_CNT_EDL) skb = alloc_canfd_skb(offload->dev, &cfd); else diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index baf749c8cda3..c1ff3c046d62 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -824,6 +824,7 @@ static int gs_can_open(struct net_device *netdev) flags |= GS_CAN_MODE_TRIPLE_SAMPLE; /* finally start device */ + dev->can.state = CAN_STATE_ERROR_ACTIVE; dm->mode = cpu_to_le32(GS_CAN_MODE_START); dm->flags = cpu_to_le32(flags); rc = usb_control_msg(interface_to_usbdev(dev->iface), @@ -835,13 +836,12 @@ static int gs_can_open(struct net_device *netdev) if (rc < 0) { netdev_err(netdev, "Couldn't start device (err=%d)\n", rc); kfree(dm); + dev->can.state = CAN_STATE_STOPPED; return rc; } kfree(dm); - dev->can.state = CAN_STATE_ERROR_ACTIVE; - parent->active_channels++; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); @@ -925,17 +925,21 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify) } /* blink LED's for finding the this interface */ -static int gs_usb_set_phys_id(struct net_device *dev, +static int gs_usb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { + const struct gs_can *dev = netdev_priv(netdev); int rc = 0; + if (!(dev->feature & GS_CAN_FEATURE_IDENTIFY)) + return -EOPNOTSUPP; + switch (state) { case ETHTOOL_ID_ACTIVE: - rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON); + rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_ON); break; case ETHTOOL_ID_INACTIVE: - rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF); + rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_OFF); break; default: break; @@ -1072,9 +1076,10 @@ static struct gs_can *gs_make_candev(unsigned int channel, dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX | GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO; - if (le32_to_cpu(dconf->sw_version) > 1) - if (feature & GS_CAN_FEATURE_IDENTIFY) - netdev->ethtool_ops = &gs_usb_ethtool_ops; + /* GS_CAN_FEATURE_IDENTIFY is only supported for sw_version > 1 */ + if (!(le32_to_cpu(dconf->sw_version) > 1 && + feature & GS_CAN_FEATURE_IDENTIFY)) + dev->feature &= ~GS_CAN_FEATURE_IDENTIFY; kfree(bt_const); diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c index daedd2bf20c1..5579644e8fde 100644 --- a/drivers/net/dsa/microchip/lan937x_main.c +++ b/drivers/net/dsa/microchip/lan937x_main.c @@ -244,10 +244,6 @@ void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port) lan937x_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE, true); - /* disable frame check length field */ - lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0, PORT_CHECK_LENGTH, - false); - /* set back pressure for half duplex */ lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 
88595863d8bc..8a0af371e7dc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -94,11 +94,8 @@ static int aq_ndev_close(struct net_device *ndev) int err = 0; err = aq_nic_stop(aq_nic); - if (err < 0) - goto err_exit; aq_nic_deinit(aq_nic, true); -err_exit: return err; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f46eefb5a029..96da0ba3d507 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -659,7 +659,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) for (i = 0; i < nr_pkts; i++) { struct bnxt_sw_tx_bd *tx_buf; - bool compl_deferred = false; struct sk_buff *skb; int j, last; @@ -668,6 +667,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) skb = tx_buf->skb; tx_buf->skb = NULL; + tx_bytes += skb->len; + if (tx_buf->is_push) { tx_buf->is_push = 0; goto next_tx_int; @@ -688,8 +689,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) } if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { if (bp->flags & BNXT_FLAG_CHIP_P5) { + /* PTP worker takes ownership of the skb */ if (!bnxt_get_tx_ts_p5(bp, skb)) - compl_deferred = true; + skb = NULL; else atomic_inc(&bp->ptp_cfg->tx_avail); } @@ -698,9 +700,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) next_tx_int: cons = NEXT_TX(cons); - tx_bytes += skb->len; - if (!compl_deferred) - dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb); } netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 7f3c0875b6f5..8e316367f6ce 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -317,9 +317,9 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp) if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters & (PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE | - PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE))) { + PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) { ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE | - PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE); + PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE); netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n"); } diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile index a139f2e9d59f..e0e8dfd13793 100644 --- a/drivers/net/ethernet/freescale/enetc/Makefile +++ b/drivers/net/ethernet/freescale/enetc/Makefile @@ -9,7 +9,6 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o fsl-enetc-vf-y := enetc_vf.o $(common-objs) -fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o fsl-enetc-ierb-y := enetc_ierb.o diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 4470a4a3e4c3..9f5b921039bd 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2432,7 +2432,7 @@ int enetc_close(struct net_device *ndev) return 0; } -static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) +int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct 
tc_mqprio_qopt *mqprio = type_data; @@ -2486,25 +2486,6 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) return 0; } -int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, - void *type_data) -{ - switch (type) { - case TC_SETUP_QDISC_MQPRIO: - return enetc_setup_tc_mqprio(ndev, type_data); - case TC_SETUP_QDISC_TAPRIO: - return enetc_setup_tc_taprio(ndev, type_data); - case TC_SETUP_QDISC_CBS: - return enetc_setup_tc_cbs(ndev, type_data); - case TC_SETUP_QDISC_ETF: - return enetc_setup_tc_txtime(ndev, type_data); - case TC_SETUP_BLOCK: - return enetc_setup_tc_psfp(ndev, type_data); - default: - return -EOPNOTSUPP; - } -} - static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { @@ -2600,29 +2581,6 @@ static int enetc_set_rss(struct net_device *ndev, int en) return 0; } -static int enetc_set_psfp(struct net_device *ndev, int en) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - int err; - - if (en) { - err = enetc_psfp_enable(priv); - if (err) - return err; - - priv->active_offloads |= ENETC_F_QCI; - return 0; - } - - err = enetc_psfp_disable(priv); - if (err) - return err; - - priv->active_offloads &= ~ENETC_F_QCI; - - return 0; -} - static void enetc_enable_rxvlan(struct net_device *ndev, bool en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -2641,11 +2599,9 @@ static void enetc_enable_txvlan(struct net_device *ndev, bool en) enetc_bdr_enable_txvlan(&priv->si->hw, i, en); } -int enetc_set_features(struct net_device *ndev, - netdev_features_t features) +void enetc_set_features(struct net_device *ndev, netdev_features_t features) { netdev_features_t changed = ndev->features ^ features; - int err = 0; if (changed & NETIF_F_RXHASH) enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); @@ -2657,11 +2613,6 @@ int enetc_set_features(struct net_device *ndev, if (changed & NETIF_F_HW_VLAN_CTAG_TX) enetc_enable_txvlan(ndev, !!(features & NETIF_F_HW_VLAN_CTAG_TX)); - - if (changed & NETIF_F_HW_TC) - err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); - - return err; } #ifdef CONFIG_FSL_ENETC_PTP_CLOCK diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 29922c20531f..2cfe6944ebd3 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -393,11 +393,9 @@ void enetc_start(struct net_device *ndev); void enetc_stop(struct net_device *ndev); netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev); struct net_device_stats *enetc_get_stats(struct net_device *ndev); -int enetc_set_features(struct net_device *ndev, - netdev_features_t features); +void enetc_set_features(struct net_device *ndev, netdev_features_t features); int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd); -int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, - void *type_data); +int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data); int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp); int enetc_xdp_xmit(struct net_device *ndev, int num_frames, struct xdp_frame **frames, u32 flags); @@ -465,6 +463,7 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data); int enetc_psfp_init(struct enetc_ndev_priv *priv); int enetc_psfp_clean(struct enetc_ndev_priv *priv); +int enetc_set_psfp(struct net_device *ndev, bool en); static inline void enetc_get_max_cap(struct 
enetc_ndev_priv *priv) { @@ -540,4 +539,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv) { return 0; } + +static inline int enetc_set_psfp(struct net_device *ndev, bool en) +{ + return 0; +} #endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index c4a0e836d4f0..bb7750222691 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -709,6 +709,13 @@ static int enetc_pf_set_features(struct net_device *ndev, { netdev_features_t changed = ndev->features ^ features; struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + if (changed & NETIF_F_HW_TC) { + err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); + if (err) + return err; + } if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { struct enetc_pf *pf = enetc_si_priv(priv->si); @@ -722,7 +729,28 @@ static int enetc_pf_set_features(struct net_device *ndev, if (changed & NETIF_F_LOOPBACK) enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK)); - return enetc_set_features(ndev, features); + enetc_set_features(ndev, features); + + return 0; +} + +static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return enetc_setup_tc_mqprio(ndev, type_data); + case TC_SETUP_QDISC_TAPRIO: + return enetc_setup_tc_taprio(ndev, type_data); + case TC_SETUP_QDISC_CBS: + return enetc_setup_tc_cbs(ndev, type_data); + case TC_SETUP_QDISC_ETF: + return enetc_setup_tc_txtime(ndev, type_data); + case TC_SETUP_BLOCK: + return enetc_setup_tc_psfp(ndev, type_data); + default: + return -EOPNOTSUPP; + } } static const struct net_device_ops enetc_ndev_ops = { @@ -739,7 +767,7 @@ static const struct net_device_ops enetc_ndev_ops = { .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk, .ndo_set_features = enetc_pf_set_features, .ndo_eth_ioctl = enetc_ioctl, - .ndo_setup_tc = enetc_setup_tc, + .ndo_setup_tc = enetc_pf_setup_tc, .ndo_bpf = enetc_setup_bpf, .ndo_xdp_xmit = enetc_xdp_xmit, }; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 582a663ed0ba..f8a2f02ce22d 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -1517,6 +1517,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } +int enetc_set_psfp(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + if (en) { + err = enetc_psfp_enable(priv); + if (err) + return err; + + priv->active_offloads |= ENETC_F_QCI; + return 0; + } + + err = enetc_psfp_disable(priv); + if (err) + return err; + + priv->active_offloads &= ~ENETC_F_QCI; + + return 0; +} + int enetc_psfp_init(struct enetc_ndev_priv *priv) { if (epsfp.psfp_sfi_bitmap) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 17924305afa2..dfcaac302e24 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr) static int enetc_vf_set_features(struct net_device *ndev, netdev_features_t features) { - return enetc_set_features(ndev, features); + enetc_set_features(ndev, features); + + return 0; +} + +static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { 
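+	/* Only MQPRIO can be offloaded on a VF; the remaining qdisc
+	 * offloads stay in enetc_pf_setup_tc().
+	 */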
+ case TC_SETUP_QDISC_MQPRIO: + return enetc_setup_tc_mqprio(ndev, type_data); + default: + return -EOPNOTSUPP; + } } /* Probing/ Init */ @@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = { .ndo_set_mac_address = enetc_vf_set_mac_addr, .ndo_set_features = enetc_vf_set_features, .ndo_eth_ioctl = enetc_ioctl, - .ndo_setup_tc = enetc_setup_tc, + .ndo_setup_tc = enetc_vf_setup_tc, }; static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index d77ee8936c6a..a5fed00cb971 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -561,6 +561,7 @@ struct fec_enet_private { struct clk *clk_2x_txclk; bool ptp_clk_on; + struct mutex ptp_clk_mutex; unsigned int num_tx_queues; unsigned int num_rx_queues; @@ -638,13 +639,6 @@ struct fec_enet_private { int pps_enable; unsigned int next_counter; - struct { - struct timespec64 ts_phc; - u64 ns_sys; - u32 at_corr; - u8 at_inc_corr; - } ptp_saved_state; - u64 ethtool_stats[]; }; @@ -655,8 +649,5 @@ void fec_ptp_disable_hwts(struct net_device *ndev); int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); -void fec_ptp_save_state(struct fec_enet_private *fep); -int fec_ptp_restore_state(struct fec_enet_private *fep); - /****************************************************************************/ #endif /* FEC_H */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 6152f6dbf1bc..92c55e1a5507 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -286,11 +286,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define FEC_MMFR_TA (2 << 16) #define FEC_MMFR_DATA(v) (v & 0xffff) /* FEC ECR bits definition */ -#define FEC_ECR_RESET BIT(0) -#define FEC_ECR_ETHEREN BIT(1) -#define FEC_ECR_MAGICEN BIT(2) -#define FEC_ECR_SLEEP BIT(3) -#define FEC_ECR_EN1588 BIT(4) +#define FEC_ECR_MAGICEN (1 << 2) +#define FEC_ECR_SLEEP (1 << 3) #define FEC_MII_TIMEOUT 30000 /* us */ @@ -986,9 +983,6 @@ fec_restart(struct net_device *ndev) u32 temp_mac[2]; u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = 0x2; /* ETHEREN */ - struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS }; - - fec_ptp_save_state(fep); /* Whack a reset. We should wait for this. * For i.MX6SX SOC, enet use AXI bus, we use disable MAC @@ -1142,7 +1136,7 @@ fec_restart(struct net_device *ndev) } if (fep->bufdesc_ex) - ecntl |= FEC_ECR_EN1588; + ecntl |= (1 << 4); if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && fep->rgmii_txc_dly) @@ -1163,14 +1157,6 @@ fec_restart(struct net_device *ndev) if (fep->bufdesc_ex) fec_ptp_start_cyclecounter(ndev); - /* Restart PPS if needed */ - if (fep->pps_enable) { - /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */ - fep->pps_enable = 0; - fec_ptp_restore_state(fep); - fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1); - } - /* Enable interrupts we wish to service */ if (fep->link) writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); @@ -1221,8 +1207,6 @@ fec_stop(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); u32 val; - struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS }; - u32 ecntl = 0; /* We cannot expect a graceful transmit stop without link !!! 
*/ if (fep->link) { @@ -1232,8 +1216,6 @@ fec_stop(struct net_device *ndev) netdev_err(ndev, "Graceful transmit stop did not complete!\n"); } - fec_ptp_save_state(fep); - /* Whack a reset. We should wait for this. * For i.MX6SX SOC, enet use AXI bus, we use disable MAC * instead of reset MAC itself. @@ -1253,28 +1235,12 @@ fec_stop(struct net_device *ndev) writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); - if (fep->bufdesc_ex) - ecntl |= FEC_ECR_EN1588; - /* We have to keep ENET enabled to have MII interrupt stay working */ if (fep->quirks & FEC_QUIRK_ENET_MAC && !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { - ecntl |= FEC_ECR_ETHEREN; + writel(2, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } - - writel(ecntl, fep->hwp + FEC_ECNTRL); - - if (fep->bufdesc_ex) - fec_ptp_start_cyclecounter(ndev); - - /* Restart PPS if needed */ - if (fep->pps_enable) { - /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */ - fep->pps_enable = 0; - fec_ptp_restore_state(fep); - fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1); - } } @@ -2029,7 +1995,6 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) static int fec_enet_clk_enable(struct net_device *ndev, bool enable) { struct fec_enet_private *fep = netdev_priv(ndev); - unsigned long flags; int ret; if (enable) { @@ -2038,15 +2003,15 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) return ret; if (fep->clk_ptp) { - spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); ret = clk_prepare_enable(fep->clk_ptp); if (ret) { - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); goto failed_clk_ptp; } else { fep->ptp_clk_on = true; } - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); } ret = clk_prepare_enable(fep->clk_ref); @@ -2061,10 +2026,10 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) } else { clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { - spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); clk_disable_unprepare(fep->clk_ptp); fep->ptp_clk_on = false; - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); } clk_disable_unprepare(fep->clk_ref); clk_disable_unprepare(fep->clk_2x_txclk); @@ -2077,10 +2042,10 @@ failed_clk_2x_txclk: clk_disable_unprepare(fep->clk_ref); failed_clk_ref: if (fep->clk_ptp) { - spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); clk_disable_unprepare(fep->clk_ptp); fep->ptp_clk_on = false; - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); } failed_clk_ptp: clk_disable_unprepare(fep->clk_enet_out); @@ -3915,7 +3880,7 @@ fec_probe(struct platform_device *pdev) } fep->ptp_clk_on = false; - spin_lock_init(&fep->tmreg_lock); + mutex_init(&fep->ptp_clk_mutex); /* clk_ref is optional, depends on board */ fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref"); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 8dd5a2615a89..3dc3c0b626c2 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -365,19 +365,21 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) */ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { - struct fec_enet_private *fep = + struct fec_enet_private *adapter = container_of(ptp, 
struct fec_enet_private, ptp_caps); u64 ns; unsigned long flags; - spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&adapter->ptp_clk_mutex); /* Check the ptp clock */ - if (!fep->ptp_clk_on) { - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + if (!adapter->ptp_clk_on) { + mutex_unlock(&adapter->ptp_clk_mutex); return -EINVAL; } - ns = timecounter_read(&fep->tc); - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + mutex_unlock(&adapter->ptp_clk_mutex); *ts = ns_to_timespec64(ns); @@ -402,10 +404,10 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp, unsigned long flags; u32 counter; - spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); /* Check the ptp clock */ if (!fep->ptp_clk_on) { - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); return -EINVAL; } @@ -415,9 +417,11 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp, */ counter = ns & fep->cc.mask; + spin_lock_irqsave(&fep->tmreg_lock, flags); writel(counter, fep->hwp + FEC_ATIME); timecounter_init(&fep->tc, &fep->cc, ns); spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); return 0; } @@ -514,11 +518,13 @@ static void fec_time_keep(struct work_struct *work) struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep); unsigned long flags; - spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); if (fep->ptp_clk_on) { + spin_lock_irqsave(&fep->tmreg_lock, flags); timecounter_read(&fep->tc); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); } - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); schedule_delayed_work(&fep->time_keep, HZ); } @@ -593,6 +599,8 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) } fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed; + spin_lock_init(&fep->tmreg_lock); + fec_ptp_start_cyclecounter(ndev); INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); @@ -625,36 +633,7 @@ void fec_ptp_stop(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - if (fep->pps_enable) - fec_ptp_enable_pps(fep, 0); - cancel_delayed_work_sync(&fep->time_keep); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); } - -void fec_ptp_save_state(struct fec_enet_private *fep) -{ - u32 atime_inc_corr; - - fec_ptp_gettime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc); - fep->ptp_saved_state.ns_sys = ktime_get_ns(); - - fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR); - atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK; - fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET); -} - -int fec_ptp_restore_state(struct fec_enet_private *fep) -{ - u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK; - u64 ns_sys; - - writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR); - atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET; - writel(atime_inc, fep->hwp + FEC_ATIME_INC); - - ns_sys = ktime_get_ns() - fep->ptp_saved_state.ns_sys; - timespec64_add_ns(&fep->ptp_saved_state.ts_phc, ns_sys); - return fec_ptp_settime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc); -} diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index 8c939628e2d8..2e6461b0ea8b 
100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv, int err; err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page, - &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL); + &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC); if (err) return err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 10c1e1ea83a1..e3d9804aeb25 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5909,6 +5909,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi) } /** + * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits + * @vsi: Pointer to vsi structure + * @max_tx_rate: max TX rate in bytes to be converted into Mbits + * + * Helper function to convert units before sending to set the BW limit + **/ +static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate) +{ + if (max_tx_rate < I40E_BW_MBPS_DIVISOR) { + dev_warn(&vsi->back->pdev->dev, + "Setting max tx rate to minimum usable value of 50Mbps.\n"); + max_tx_rate = I40E_BW_CREDIT_DIVISOR; + } else { + do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); + } + + return max_tx_rate; +} + +/** * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate * @vsi: VSI to be configured * @seid: seid of the channel/VSI @@ -5930,10 +5950,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) max_tx_rate, seid); return -EINVAL; } - if (max_tx_rate && max_tx_rate < 50) { + if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) { dev_warn(&pf->pdev->dev, "Setting max tx rate to minimum usable value of 50Mbps.\n"); - max_tx_rate = 50; + max_tx_rate = I40E_BW_CREDIT_DIVISOR; } /* Tx rate credits are in values of 50Mbps, 0 is disabled */ @@ -8224,9 +8244,9 @@ config_tc: if (i40e_is_tc_mqprio_enabled(pf)) { if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, + vsi->mqprio_qopt.max_rate[0]); - do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (!ret) { u64 credits = max_tx_rate; @@ -10971,10 +10991,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, + vsi->mqprio_qopt.max_rate[0]); u64 credits = 0; - do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (ret) goto end_unlock; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 4f184c50f6e8..7e9f6a69eb10 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2039,6 +2039,25 @@ static void i40e_del_qch(struct i40e_vf *vf) } /** + * i40e_vc_get_max_frame_size + * @vf: pointer to the VF + * + * Max frame size is determined based on the current port's max frame size and + * whether a port VLAN is configured on this VF. The VF is not aware whether + * it's in a port VLAN so the PF needs to account for this in max frame size + * checks and sending the max frame size to the VF.
+ **/ +static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf) +{ + u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size; + + if (vf->port_vlan_id) + max_frame_size -= VLAN_HLEN; + + return max_frame_size; +} + +/** * i40e_vc_get_vf_resources_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2139,6 +2158,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; + vfres->max_mtu = i40e_vc_get_max_frame_size(vf); if (vf->lan_vsi_idx) { vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 10aa99dfdcdb..0c89f16bf1e2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1077,7 +1077,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p) { struct iavf_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; - bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data); int ret; if (!is_valid_ether_addr(addr->sa_data)) @@ -1094,10 +1093,9 @@ static int iavf_set_mac(struct net_device *netdev, void *p) return 0; } - if (handle_mac) - goto done; - - ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false, msecs_to_jiffies(2500)); + ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, + iavf_is_mac_set_handled(netdev, addr->sa_data), + msecs_to_jiffies(2500)); /* If ret < 0 then it means wait was interrupted. * If ret == 0 then it means we got a timeout. @@ -1111,7 +1109,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p) if (!ret) return -EAGAIN; -done: if (!ether_addr_equal(netdev->dev_addr, addr->sa_data)) return -EACCES; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 06d18797d25a..18b6a702a1d6 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw) { u32 head, tail; + /* underlying hardware might not allow access and/or always return + * 0 for the head/tail registers so just use the cached values + */ head = ring->next_to_clean; - tail = readl(ring->tail); + tail = ring->next_to_use; if (head != tail) return (head < tail) ? 
@@ -1390,7 +1393,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, #endif struct sk_buff *skb; - if (!rx_buffer) + if (!rx_buffer || !size) return NULL; /* prefetch first cache line of first page */ va = page_address(rx_buffer->page) + rx_buffer->page_offset; @@ -1548,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; - if (rx_buffer) + if (rx_buffer && size) rx_buffer->pagecnt_bias++; break; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 15ee85dc33bd..5a9e6563923e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -269,11 +269,14 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter) void iavf_configure_queues(struct iavf_adapter *adapter) { struct virtchnl_vsi_queue_config_info *vqci; - struct virtchnl_queue_pair_info *vqpi; + int i, max_frame = adapter->vf_res->max_mtu; int pairs = adapter->num_active_queues; - int i, max_frame = IAVF_MAX_RXBUFFER; + struct virtchnl_queue_pair_info *vqpi; size_t len; + if (max_frame > IAVF_MAX_RXBUFFER || !max_frame) + max_frame = IAVF_MAX_RXBUFFER; + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 0c4ec9264071..58d483e2f539 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -914,7 +914,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) */ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) { - u16 offset = 0, qmap = 0, tx_count = 0, pow = 0; + u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0; u16 num_txq_per_tc, num_rxq_per_tc; u16 qcount_tx = vsi->alloc_txq; u16 qcount_rx = vsi->alloc_rxq; @@ -981,23 +981,25 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) * at least 1) */ if (offset) - vsi->num_rxq = offset; + rx_count = offset; else - vsi->num_rxq = num_rxq_per_tc; + rx_count = num_rxq_per_tc; - if (vsi->num_rxq > vsi->alloc_rxq) { + if (rx_count > vsi->alloc_rxq) { dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", - vsi->num_rxq, vsi->alloc_rxq); + rx_count, vsi->alloc_rxq); return -EINVAL; } - vsi->num_txq = tx_count; - if (vsi->num_txq > vsi->alloc_txq) { + if (tx_count > vsi->alloc_txq) { dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", - vsi->num_txq, vsi->alloc_txq); + tx_count, vsi->alloc_txq); return -EINVAL; } + vsi->num_txq = tx_count; + vsi->num_rxq = rx_count; + if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. 
Hence making them equal\n"); /* since there is a chance that num_rxq could have been changed @@ -3490,6 +3492,7 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap; u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; + u16 new_txq, new_rxq; u8 netdev_tc = 0; int i; @@ -3530,21 +3533,24 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, } } - /* Set actual Tx/Rx queue pairs */ - vsi->num_txq = offset + qcount_tx; - if (vsi->num_txq > vsi->alloc_txq) { + new_txq = offset + qcount_tx; + if (new_txq > vsi->alloc_txq) { dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", - vsi->num_txq, vsi->alloc_txq); + new_txq, vsi->alloc_txq); return -EINVAL; } - vsi->num_rxq = offset + qcount_rx; - if (vsi->num_rxq > vsi->alloc_rxq) { + new_rxq = offset + qcount_rx; + if (new_rxq > vsi->alloc_rxq) { dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", - vsi->num_rxq, vsi->alloc_rxq); + new_rxq, vsi->alloc_rxq); return -EINVAL; } + /* Set actual Tx/Rx queue pairs */ + vsi->num_txq = new_txq; + vsi->num_rxq = new_rxq; + /* Setup queue TC[0].qmap for given VSI context */ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); @@ -3576,6 +3582,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_pf *pf = vsi->back; + struct ice_tc_cfg old_tc_cfg; struct ice_vsi_ctx *ctx; struct device *dev; int i, ret = 0; @@ -3600,6 +3607,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) max_txqs[i] = vsi->num_txq; } + memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); vsi->tc_cfg.ena_tc = ena_tc; vsi->tc_cfg.numtc = num_tc; @@ -3616,8 +3624,10 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) else ret = ice_vsi_setup_q_map(vsi, ctx); - if (ret) + if (ret) { + memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); goto out; + } /* must indicate which sections of the VSI context are being modified */ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 8c30eea61b6d..e109cb93886b 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2399,8 +2399,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) return -EBUSY; } - ice_unplug_aux_dev(pf); - switch (reset) { case ICE_RESET_PFR: set_bit(ICE_PFR_REQ, pf->state); @@ -6651,7 +6649,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) */ int ice_down(struct ice_vsi *vsi) { - int i, tx_err, rx_err, link_err = 0, vlan_err = 0; + int i, tx_err, rx_err, vlan_err = 0; WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); @@ -6685,20 +6683,13 @@ int ice_down(struct ice_vsi *vsi) ice_napi_disable_all(vsi); - if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { - link_err = ice_force_phys_link_state(vsi, false); - if (link_err) - netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", - vsi->vsi_num, link_err); - } - ice_for_each_txq(vsi, i) ice_clean_tx_ring(vsi->tx_rings[i]); ice_for_each_rxq(vsi, i) ice_clean_rx_ring(vsi->rx_rings[i]); - if (tx_err || rx_err || link_err || vlan_err) { + if (tx_err || rx_err || vlan_err) { netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id); return -EIO; @@ -6860,6 +6851,8 @@ int ice_vsi_open(struct ice_vsi *vsi) if (err) goto err_setup_rx; + ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); + if (vsi->type == ICE_VSI_PF) { /* Notify the stack of the actual queue counts. */ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); @@ -8892,6 +8885,16 @@ int ice_stop(struct net_device *netdev) return -EBUSY; } + if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { + int link_err = ice_force_phys_link_state(vsi, false); + + if (link_err) { + netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", + vsi->vsi_num, link_err); + return -EIO; + } + } + ice_vsi_close(vsi); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 836dce840712..97453d1dfafe 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -610,7 +610,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, if (test_bit(ICE_VSI_DOWN, vsi->state)) return -ENETDOWN; - if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq) + if (!ice_is_xdp_ena_vsi(vsi)) return -ENXIO; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) @@ -621,6 +621,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, xdp_ring = vsi->xdp_rings[queue_index]; spin_lock(&xdp_ring->tx_lock); } else { + /* Generally, should not happen */ + if (unlikely(queue_index >= vsi->num_xdp_txq)) + return -ENXIO; xdp_ring = vsi->xdp_rings[queue_index]; } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index ede3e53b9790..a895862b4821 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -368,6 +368,7 @@ static int prestera_port_sfp_bind(struct prestera_port *port) if (!sw->np) return 0; + of_node_get(sw->np); ports = of_find_node_by_name(sw->np, "ports"); for_each_child_of_node(ports, node) { @@ -417,6 +418,7 @@ static int prestera_port_sfp_bind(struct prestera_port *port) } out: + of_node_put(node); of_node_put(ports); return err; } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c index f538a749ebd4..59470d99f522 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c @@ -872,6 +872,7 @@ static void prestera_pci_remove(struct pci_dev *pdev) static const struct pci_device_id prestera_pci_devices[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC80C) }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xCC1E) }, { } }; MODULE_DEVICE_TABLE(pci, prestera_pci_devices); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 5ace4609de47..b344632beadd 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1458,7 +1458,7 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) static bool mtk_page_pool_enabled(struct mtk_eth *eth) { - return !eth->hwlro; + return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2); } static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
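The prestera_port_sfp_bind() hunk above is a textbook device-tree refcount fix: of_find_node_by_name() drops a reference on the node it starts from (hence the added of_node_get()), and leaving for_each_child_of_node() via break keeps a reference held on the current child (hence the of_node_put(node) at the out label). A minimal sketch of the balanced pattern; match() is a hypothetical predicate:

	struct device_node *ports, *node;

	of_node_get(np);	/* of_find_node_by_name() consumes one ref on np */
	ports = of_find_node_by_name(np, "ports");

	for_each_child_of_node(ports, node) {
		if (match(node))
			break;	/* early exit keeps a ref on this child */
	}

	of_node_put(node);	/* NULL after a completed loop; of_node_put(NULL) is a no-op */
	of_node_put(ports);

diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c index 85155cd9405c..4aeb927c3715 100644 ---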
a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c @@ -179,6 +179,9 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg) /* Only return ad bits of the gw register */ ret &= MLXBF_GIGE_MDIO_GW_AD_MASK; + /* The MDIO lock is set on read. To release it, clear gw register */ + writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET); + return ret; } @@ -203,6 +206,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add, temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 5, 1000000); + /* The MDIO lock is set on read. To release it, clear gw register */ + writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET); + return ret; } diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 5f9240182351..a6f99b4344d9 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -397,6 +397,11 @@ static void mana_gd_process_eq_events(void *arg) break; } + /* Per GDMA spec, rmb is necessary after checking owner_bits, before + * reading eqe. + */ + rmb(); + mana_gd_process_eqe(eq); eq->head++; @@ -1134,6 +1139,11 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp) if (WARN_ON_ONCE(owner_bits != new_bits)) return -1; + /* Per GDMA spec, rmb is necessary after checking owner_bits, before + * reading completion info + */ + rmb(); + comp->wq_num = cqe->cqe_info.wq_num; comp->is_sq = cqe->cqe_info.is_sq; memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE); @@ -1465,10 +1475,6 @@ static void mana_gd_shutdown(struct pci_dev *pdev) pci_disable_device(pdev); } -#ifndef PCI_VENDOR_ID_MICROSOFT -#define PCI_VENDOR_ID_MICROSOFT 0x1414 -#endif - static const struct pci_device_id mana_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) }, { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) }, diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index b357ac4c56c5..7e32b04eb0c7 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1449,6 +1449,8 @@ static int ravb_phy_init(struct net_device *ndev) phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); } + /* Indicate that the MAC is responsible for managing PHY PM */ + phydev->mac_managed_pm = true; phy_attached_info(phydev); return 0; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 67ade78fb767..7fd8828d3a84 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2029,6 +2029,8 @@ static int sh_eth_phy_init(struct net_device *ndev) if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) phy_set_max_speed(phydev, SPEED_100); + /* Indicate that the MAC is responsible for managing PHY PM */ + phydev->mac_managed_pm = true; phy_attached_info(phydev); return 0;
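The two gdma_main.c hunks above insert a read barrier between the descriptor ownership check and the reads of the descriptor body. On weakly ordered CPUs the payload loads can otherwise be reordered ahead of the owner_bits load, so the CPU may consume an entry the device has not finished writing. The generic shape of the pattern, a sketch using names from the mana driver:

	/* the entry is written by the device and polled by the CPU */
	if (READ_ONCE(cqe->cqe_info.owner_bits) != expected_bits)
		return -EAGAIN;			/* descriptor not ours yet */

	rmb();		/* order payload loads after the ownership check */

	comp->wq_num = cqe->cqe_info.wq_num;	/* now safe to read */
	memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);

diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 032b8c0bd788..5b4d661ab986 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -319,7 +319,7 @@ int efx_probe_interrupts(struct efx_nic *efx) efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0); efx->n_rx_channels = 1; efx->n_tx_channels = 1; - efx->tx_channel_offset = 1; + efx->tx_channel_offset = efx_separate_tx_channels ?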
1 : 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; efx->legacy_irq = efx->pci_dev->irq; diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c index 017212a40df3..f54ebd007286 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.c +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -320,7 +320,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx) efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0); efx->n_rx_channels = 1; efx->n_tx_channels = 1; - efx->tx_channel_offset = 1; + efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; efx->legacy_irq = efx->pci_dev->irq; diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c index e166dcb9b99c..91e87594ed1e 100644 --- a/drivers/net/ethernet/sfc/siena/tx.c +++ b/drivers/net/ethernet/sfc/siena/tx.c @@ -336,7 +336,7 @@ netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb, * previous packets out. */ if (!netdev_xmit_more()) - efx_tx_send_pending(tx_queue->channel); + efx_tx_send_pending(efx_get_tx_channel(efx, index)); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index d12474042c84..c5f88f7a7a04 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -549,7 +549,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, * previous packets out. */ if (!netdev_xmit_more()) - efx_tx_send_pending(tx_queue->channel); + efx_tx_send_pending(efx_get_tx_channel(efx, index)); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 8594ee839628..88aa0d310aee 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2020,9 +2020,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) skb_reserve(copy_skb, 2); skb_put(copy_skb, len); - dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); - dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
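The sunhme copybreak fix just above corrects the sync window rather than the copy: the packet begins 2 bytes into the DMA mapping (the alignment pad that skb_reserve() created on the original buffer), so syncing only len bytes from dma_addr stops 2 bytes short of the packet's tail. Generalized as a sketch, with pad standing for whatever headroom precedes the payload:

	/* the device wrote 'len' bytes starting 'pad' bytes into the mapping */
	dma_sync_single_for_cpu(dev, dma_addr, pad + len, DMA_FROM_DEVICE);
	memcpy(copy, buf + pad, len);	/* reads up to buf[pad + len - 1] */
	dma_sync_single_for_device(dev, dma_addr, pad + len, DMA_FROM_DEVICE);

/* Reuse original ring buffer.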
*/ hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index ec010cf2e816..6f874f99b910 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE); req.v4_route_tbl_info_valid = 1; req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v4_route_tbl_info.count = mem->size / sizeof(__le64); + req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE); req.v6_route_tbl_info_valid = 1; req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v6_route_tbl_info.count = mem->size / sizeof(__le64); + req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER); req.v4_filter_tbl_start_valid = 1; @@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) req.v4_hash_route_tbl_info_valid = 1; req.v4_hash_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64); + req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; } mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED); @@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) req.v6_hash_route_tbl_info_valid = 1; req.v6_hash_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64); + req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; } mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED); diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c index 6838e8065072..75d3fc0092e9 100644 --- a/drivers/net/ipa/ipa_qmi_msg.c +++ b/drivers/net/ipa/ipa_qmi_msg.c @@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x12, .offset = offsetof(struct ipa_init_modem_driver_req, v4_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, @@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x13, .offset = offsetof(struct ipa_init_modem_driver_req, v6_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, @@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x1b, .offset = offsetof(struct ipa_init_modem_driver_req, v4_hash_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, @@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x1c, .offset = offsetof(struct ipa_init_modem_driver_req, v6_hash_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h index 495e85abe50b..9651aa59b596 100644 --- a/drivers/net/ipa/ipa_qmi_msg.h +++ b/drivers/net/ipa/ipa_qmi_msg.h @@ -86,9 +86,11 @@ enum ipa_platform_type { IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */ }; -/* This defines the start and end offset of a range of memory. Both - * fields are offsets relative to the start of IPA shared memory. - * The end value is the last addressable byte *within* the range. +/* This defines the start and end offset of a range of memory. The start + * value is a byte offset relative to the start of IPA shared memory. 
The + * end value is the last addressable unit *within* the range. Typically + * the end value is in units of bytes, however it can also be a maximum + * array index value. */ struct ipa_mem_bounds { u32 start; @@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req { u8 hdr_tbl_info_valid; struct ipa_mem_bounds hdr_tbl_info; - /* Routing table information. These define the location and size of - * non-hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + /* Routing table information. These define the location and maximum + * *index* (not byte) for the modem portion of non-hashable IPv4 and + * IPv6 routing tables. The start values are byte offsets relative + * to the start of IPA shared memory. */ u8 v4_route_tbl_info_valid; - struct ipa_mem_array v4_route_tbl_info; + struct ipa_mem_bounds v4_route_tbl_info; u8 v6_route_tbl_info_valid; - struct ipa_mem_array v6_route_tbl_info; + struct ipa_mem_bounds v6_route_tbl_info; /* Filter table information. These define the location of the * non-hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + * byte offsets relative to the start of IPA shared memory. */ u8 v4_filter_tbl_start_valid; u32 v4_filter_tbl_start; @@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req { u8 zip_tbl_info_valid; struct ipa_mem_bounds zip_tbl_info; - /* Routing table information. These define the location and size - * of hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + /* Routing table information. These define the location and maximum + * *index* (not byte) for the modem portion of hashable IPv4 and IPv6 + * routing tables (if supported by hardware). The start values are + * byte offsets relative to the start of IPA shared memory. */ u8 v4_hash_route_tbl_info_valid; - struct ipa_mem_array v4_hash_route_tbl_info; + struct ipa_mem_bounds v4_hash_route_tbl_info; u8 v6_hash_route_tbl_info_valid; - struct ipa_mem_array v6_hash_route_tbl_info; + struct ipa_mem_bounds v6_hash_route_tbl_info; /* Filter table information. These define the location and size - * of hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + * of hashable IPv4 and IPv6 filter tables (if supported by hardware). + * The start values are byte offsets relative to the start of IPA + * shared memory. 
*/ u8 v4_hash_filter_tbl_start_valid; u32 v4_hash_filter_tbl_start; diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index 2f5a58bfc529..69efe672ca52 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -108,8 +108,6 @@ /* Assignment of route table entries to the modem and AP */ #define IPA_ROUTE_MODEM_MIN 0 -#define IPA_ROUTE_MODEM_COUNT 8 - #define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT #define IPA_ROUTE_AP_COUNT \ (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT) diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h index b6a9a0d79d68..1538e2e1732f 100644 --- a/drivers/net/ipa/ipa_table.h +++ b/drivers/net/ipa/ipa_table.h @@ -13,6 +13,9 @@ struct ipa; /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */ #define IPA_FILTER_COUNT_MAX 14 +/* The number of route table entries allotted to the modem */ +#define IPA_ROUTE_MODEM_COUNT 8 + /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */ #define IPA_ROUTE_COUNT_MAX 15 diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index dfeb5b392e64..bb1c298c1e78 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -495,7 +495,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) static int ipvlan_process_outbound(struct sk_buff *skb) { - struct ethhdr *ethh = eth_hdr(skb); int ret = NET_XMIT_DROP; /* The ipvlan is a pseudo-L2 device, so the packets that we receive @@ -505,6 +504,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb) if (skb_mac_header_was_set(skb)) { /* In this mode we don't care about * multicast and broadcast traffic */ + struct ethhdr *ethh = eth_hdr(skb); + if (is_multicast_ether_addr(ethh->h_dest)) { pr_debug_ratelimited( "Dropped {multi|broad}cast of type=[%x]\n", @@ -589,7 +590,7 @@ out: static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) { const struct ipvl_dev *ipvlan = netdev_priv(dev); - struct ethhdr *eth = eth_hdr(skb); + struct ethhdr *eth = skb_eth_hdr(skb); struct ipvl_addr *addr; void *lyr3h; int addr_type; @@ -619,6 +620,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) return dev_forward_skb(ipvlan->phy_dev, skb); } else if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); ipvlan_skb_crossing_ns(skb, NULL); ipvlan_multicast_enqueue(ipvlan->port, skb, true); return NET_XMIT_SUCCESS; }
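The ipvlan changes above turn on the difference between two accessors that look interchangeable. Roughly, per their definitions in if_ether.h:

	static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
	{
		return (struct ethhdr *)skb_mac_header(skb);	/* needs mac_header set */
	}

	static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
	{
		return (struct ethhdr *)skb->data;	/* valid on the xmit path */
	}

On transmit, skb->mac_header may still be unset for locally generated or injected traffic, so eth_hdr() can read out of bounds; skb_eth_hdr() is safe there, and the added skb_reset_mac_header() makes eth_hdr() valid again before the multicast path requeues the skb for RX-style processing.

diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index 9e3c815a070f..796e9c7857d0 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -231,6 +231,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) return 0; unregister: + of_node_put(child); mdiobus_unregister(mdio); return rc; } diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c index 605a38e16db0..0e58aa7f0374 100644 --- a/drivers/net/netdevsim/hwstats.c +++ b/drivers/net/netdevsim/hwstats.c @@ -433,11 +433,11 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev) goto err_remove_hwstats_recursive; } - debugfs_create_file("enable_ifindex", 0600, hwstats->l3_ddir, hwstats, + debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats, &nsim_dev_hwstats_l3_enable_fops.fops); - debugfs_create_file("disable_ifindex", 0600, hwstats->l3_ddir, hwstats, + debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats, &nsim_dev_hwstats_l3_disable_fops.fops); - debugfs_create_file("fail_next_enable", 0600, hwstats->l3_ddir,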
hwstats, + debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats, &nsim_dev_hwstats_l3_fail_fops.fops); INIT_DELAYED_WORK(&hwstats->traffic_dw, diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c index 8b7a46db30e0..7111e2e958e9 100644 --- a/drivers/net/phy/aquantia_main.c +++ b/drivers/net/phy/aquantia_main.c @@ -91,6 +91,9 @@ #define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8) #define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0) +#define VEND1_GLOBAL_GEN_STAT2 0xc831 +#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15) + #define VEND1_GLOBAL_RSVD_STAT1 0xc885 #define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4) #define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0) @@ -125,6 +128,12 @@ #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1) #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0) +/* Sleep and timeout for checking if the Processor-Intensive + * MDIO operation is finished + */ +#define AQR107_OP_IN_PROG_SLEEP 1000 +#define AQR107_OP_IN_PROG_TIMEOUT 100000 + struct aqr107_hw_stat { const char *name; int reg; @@ -597,16 +606,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev) phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n"); } +static int aqr107_wait_processor_intensive_op(struct phy_device *phydev) +{ + int val, err; + + /* The datasheet notes to wait at least 1ms after issuing a + * processor intensive operation before checking. + * We cannot use the 'sleep_before_read' parameter of read_poll_timeout + * because that just determines the maximum time slept, not the minimum. + */ + usleep_range(1000, 5000); + + err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, + VEND1_GLOBAL_GEN_STAT2, val, + !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG), + AQR107_OP_IN_PROG_SLEEP, + AQR107_OP_IN_PROG_TIMEOUT, false); + if (err) { + phydev_err(phydev, "timeout: processor-intensive MDIO operation\n"); + return err; + } + + return 0; +} + static int aqr107_suspend(struct phy_device *phydev) { - return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, - MDIO_CTRL1_LPOWER); + int err; + + err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, + MDIO_CTRL1_LPOWER); + if (err) + return err; + + return aqr107_wait_processor_intensive_op(phydev); } static int aqr107_resume(struct phy_device *phydev) { - return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, - MDIO_CTRL1_LPOWER); + int err; + + err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, + MDIO_CTRL1_LPOWER); + if (err) + return err; + + return aqr107_wait_processor_intensive_op(phydev); } static int aqr107_probe(struct phy_device *phydev) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 6f52b4fb6888..38234d7e14c5 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -2679,16 +2679,19 @@ static int lan8804_config_init(struct phy_device *phydev) static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) { int irq_status, tsu_irq_status; + int ret = IRQ_NONE; irq_status = phy_read(phydev, LAN8814_INTS); - if (irq_status > 0 && (irq_status & LAN8814_INT_LINK)) - phy_trigger_machine(phydev); - if (irq_status < 0) { phy_error(phydev); return IRQ_NONE; } + if (irq_status & LAN8814_INT_LINK) { + phy_trigger_machine(phydev); + ret = IRQ_HANDLED; + } + while (1) { tsu_irq_status = lanphy_read_page_reg(phydev, 4, LAN8814_INTR_STS_REG); @@ -2697,12 +2700,15 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ | LAN8814_INTR_STS_REG_1588_TSU1_ | 
LAN8814_INTR_STS_REG_1588_TSU2_ | - LAN8814_INTR_STS_REG_1588_TSU3_))) + LAN8814_INTR_STS_REG_1588_TSU3_))) { lan8814_handle_ptp_interrupt(phydev); - else + ret = IRQ_HANDLED; + } else { break; + } } - return IRQ_HANDLED; + + return ret; } static int lan8814_ack_interrupt(struct phy_device *phydev) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index aac133a1e27a..154a3c0a6dfd 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1275,10 +1275,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev, } } - netif_addr_lock_bh(dev); - dev_uc_sync_multiple(port_dev, dev); - dev_mc_sync_multiple(port_dev, dev); - netif_addr_unlock_bh(dev); + if (dev->flags & IFF_UP) { + netif_addr_lock_bh(dev); + dev_uc_sync_multiple(port_dev, dev); + dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev); + } port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); @@ -1349,8 +1351,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev) netdev_rx_handler_unregister(port_dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); - dev_uc_unsync(port_dev, dev); - dev_mc_unsync(port_dev, dev); + if (dev->flags & IFF_UP) { + dev_uc_unsync(port_dev, dev); + dev_mc_unsync(port_dev, dev); + } dev_close(port_dev); team_port_leave(team, port); @@ -1700,6 +1704,14 @@ static int team_open(struct net_device *dev) static int team_close(struct net_device *dev) { + struct team *team = netdev_priv(dev); + struct team_port *port; + + list_for_each_entry(port, &team->port_list, list) { + dev_uc_unsync(port->dev, dev); + dev_mc_unsync(port->dev, dev); + } + return 0; } diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c index d0f3b6d7f408..5c804bcabfe6 100644 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs) if (attrs[WGPEER_A_ENDPOINT]) { struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]); size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]); + struct endpoint endpoint = { { { 0 } } }; - if ((len == sizeof(struct sockaddr_in) && - addr->sa_family == AF_INET) || - (len == sizeof(struct sockaddr_in6) && - addr->sa_family == AF_INET6)) { - struct endpoint endpoint = { { { 0 } } }; - - memcpy(&endpoint.addr, addr, len); + if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) { + endpoint.addr4 = *(struct sockaddr_in *)addr; + wg_socket_set_peer_endpoint(peer, &endpoint); + } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) { + endpoint.addr6 = *(struct sockaddr_in6 *)addr; wg_socket_set_peer_endpoint(peer, &endpoint); } } diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c index ba87d294604f..d4bb40a695ab 100644 --- a/drivers/net/wireguard/selftest/ratelimiter.c +++ b/drivers/net/wireguard/selftest/ratelimiter.c @@ -6,29 +6,28 @@ #ifdef DEBUG #include <linux/jiffies.h> -#include <linux/hrtimer.h> static const struct { bool result; - u64 nsec_to_sleep_before; + unsigned int msec_to_sleep_before; } expected_results[] __initconst = { [0 ... 
PACKETS_BURSTABLE - 1] = { true, 0 }, [PACKETS_BURSTABLE] = { false, 0 }, - [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND }, + [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND }, [PACKETS_BURSTABLE + 2] = { false, 0 }, - [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, + [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, [PACKETS_BURSTABLE + 4] = { true, 0 }, [PACKETS_BURSTABLE + 5] = { false, 0 } }; static __init unsigned int maximum_jiffies_at_index(int index) { - u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3; + unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3; int i; for (i = 0; i <= index; ++i) - total_nsecs += expected_results[i].nsec_to_sleep_before; - return nsecs_to_jiffies(total_nsecs); + total_msecs += expected_results[i].msec_to_sleep_before; + return msecs_to_jiffies(total_msecs); } static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, @@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, loop_start_time = jiffies; for (i = 0; i < ARRAY_SIZE(expected_results); ++i) { - if (expected_results[i].nsec_to_sleep_before) { - ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3), - ns_to_ktime(expected_results[i].nsec_to_sleep_before)); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME); - } + if (expected_results[i].msec_to_sleep_before) + msleep(expected_results[i].msec_to_sleep_before); if (time_is_before_jiffies(loop_start_time + maximum_jiffies_at_index(i))) @@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void) if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) return true; - BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0); + BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0); if (wg_ratelimiter_init()) goto out; @@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void) ++test; #endif - for (trials = TRIALS_BEFORE_GIVING_UP;;) { + for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) { int test_count = 0, ret; ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index a647a406b87b..b20409f8c13a 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -140,6 +140,7 @@ config IWLMEI depends on INTEL_MEI depends on PM depends on CFG80211 + depends on BROKEN help Enables the iwlmei kernel module. 
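Back in the wireguard ratelimiter selftest above, the nanosecond hrtimer sleeps become plain msleep() calls and the per-step slack budget is accumulated in milliseconds before a single conversion to jiffies; the BUILD_BUG_ON switch to MSEC_PER_SEC keeps the refill interval an exact number of milliseconds. A condensed sketch of that budget check (the slack constant follows the selftest; sleeps[] stands in for the expected_results table):

	static unsigned int max_jiffies_at(const unsigned int *sleeps, int index)
	{
		/* two thirds of one refill interval of slack */
		unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
		int i;

		for (i = 0; i <= index; ++i)
			total_msecs += sleeps[i];
		return msecs_to_jiffies(total_msecs);
	}

	/*
	 * Callers abandon the timing assertions once the budget is blown:
	 * if (time_is_before_jiffies(start + max_jiffies_at(sleeps, i)))
	 *	the trial is restarted rather than reported as a failure.
	 */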
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 5eb28f8ee87e..11536f115198 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1833,8 +1833,8 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, * If nss < MAX: we can set zeros in other streams */ if (nss > MAX_HE_SUPP_NSS) { - IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, - MAX_HE_SUPP_NSS); + IWL_DEBUG_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, + MAX_HE_SUPP_NSS); nss = MAX_HE_SUPP_NSS; } diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 253cbc1956d1..6de13d641438 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -267,7 +267,8 @@ static void mt76_init_stream_cap(struct mt76_phy *phy, } vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); - vht_cap->vht_mcs.tx_highest |= + if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW)) + vht_cap->vht_mcs.tx_highest |= cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index ad6c7d632eed..d6aae60c440d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1088,7 +1088,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid) offset %= 32; val = mt76_rr(dev, addr); - val >>= (tid % 32); + val >>= offset; if (offset > 20) { addr += 4; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index bf4f5c09d9b1..bbe5099c836d 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1712,8 +1712,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, res->flags = IORESOURCE_MEM; for (i = 0; i < nd_region->ndr_mappings; i++) { - uuid_t uuid; - nsl_get_uuid(ndd, nd_label, &uuid); if (has_uuid_at_pos(nd_region, &uuid, cookie, i)) continue; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 7e88cd242380..96e6e9a5f235 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -45,7 +45,7 @@ static struct nd_region *to_region(struct pmem_device *pmem) return to_nd_region(to_dev(pmem)->parent); } -static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset) +static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset) { return pmem->phys_addr + offset; } @@ -63,7 +63,7 @@ static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector) static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset, unsigned int len) { - phys_addr_t phys = to_phys(pmem, offset); + phys_addr_t phys = pmem_to_phys(pmem, offset); unsigned long pfn_start, pfn_end, pfn; /* only pmem in the linear map supports HWPoison */ @@ -97,7 +97,7 @@ static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks) static long __pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, unsigned int len) { - phys_addr_t phys = to_phys(pmem, offset); + phys_addr_t phys = pmem_to_phys(pmem, offset); long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len); if (cleared > 0) { diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 7bc92923104c..1c573e7a60bc 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -314,7 +314,7 @@ static int 
unflatten_dt_nodes(const void *blob, for (offset = 0; offset >= 0 && depth >= initial_depth; offset = fdt_next_node(blob, offset, &depth)) { - if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) + if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1)) continue; if (!IS_ENABLED(CONFIG_OF_KOBJ) && diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 77d1ba3a4154..e87567dbe99f 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -873,7 +873,7 @@ int dev_pm_opp_config_clks_simple(struct device *dev, } } - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple); diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index f223afe47d10..a66386043aa6 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1546,6 +1546,7 @@ static int __init ccio_probe(struct parisc_device *dev) } ccio_ioc_init(ioc); if (ccio_init_resources(ioc)) { + iounmap(ioc->ioc_regs); kfree(ioc); return -ENOMEM; } diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 3a8c98615634..bdef7a8d6ab8 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -221,16 +221,7 @@ static size_t irt_num_entry; static struct irt_entry *iosapic_alloc_irt(int num_entries) { - unsigned long a; - - /* The IRT needs to be 8-byte aligned for the PDC call. - * Normally kmalloc would guarantee larger alignment, but - * if CONFIG_DEBUG_SLAB is enabled, then we can get only - * 4-byte alignment on 32-bit kernels - */ - a = (unsigned long)kmalloc(sizeof(struct irt_entry) * num_entries + 8, GFP_KERNEL); - a = (a + 7UL) & ~7UL; - return (struct irt_entry *)a; + return kcalloc(num_entries, sizeof(struct irt_entry), GFP_KERNEL); } /** diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 80d8309652a4..b80a9b74662b 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -36,7 +36,7 @@ #define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0) #define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16) -#define CMN_CHILD_NODE_ADDR GENMASK(27, 0) +#define CMN_CHILD_NODE_ADDR GENMASK(29, 0) #define CMN_CHILD_NODE_EXTERNAL BIT(31) #define CMN_MAX_DIMENSION 12 diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c index a4d7d9bd100d..67712c77d806 100644 --- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c +++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c @@ -274,7 +274,6 @@ struct mvebu_a3700_comphy_lane { int submode; bool invert_tx; bool invert_rx; - bool needs_reset; }; struct gbe_phy_init_data_fix { @@ -1097,40 +1096,12 @@ mvebu_a3700_comphy_pcie_power_off(struct mvebu_a3700_comphy_lane *lane) 0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT); } -static int mvebu_a3700_comphy_reset(struct phy *phy) +static void mvebu_a3700_comphy_usb3_power_off(struct mvebu_a3700_comphy_lane *lane) { - struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); - u16 mask, data; - - dev_dbg(lane->dev, "resetting lane %d\n", lane->id); - - /* COMPHY reset for internal logic */ - comphy_lane_reg_set(lane, COMPHY_SFT_RESET, - SFT_RST_NO_REG, SFT_RST_NO_REG); - - /* COMPHY register reset (cleared automatically) */ - comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST); - - /* PIPE soft and register reset */ - data = PIPE_SOFT_RESET | PIPE_REG_RESET; - mask = data; - comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask); - - /* Release PIPE register reset */ - comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, - 0x0, PIPE_REG_RESET); - - /* Reset SB configuration register (only for lanes 0 and 1) */ - if (lane->id == 0 || lane->id == 1) { - 
u32 mask, data; - - data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT | - PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT; - mask = data | PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT; - comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); - } - - return 0; + /* + * The USB3 MAC sets the USB3 PHY to low state, so we do not + * need to power off USB3 PHY again. + */ } static bool mvebu_a3700_comphy_check_mode(int lane, @@ -1171,10 +1142,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode, (lane->mode != mode || lane->submode != submode)) return -EBUSY; - /* If changing mode, ensure reset is called */ - if (lane->mode != PHY_MODE_INVALID && lane->mode != mode) - lane->needs_reset = true; - /* Just remember the mode, ->power_on() will do the real setup */ lane->mode = mode; lane->submode = submode; @@ -1185,7 +1152,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode, static int mvebu_a3700_comphy_power_on(struct phy *phy) { struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); - int ret; if (!mvebu_a3700_comphy_check_mode(lane->id, lane->mode, lane->submode)) { @@ -1193,14 +1159,6 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy) return -EINVAL; } - if (lane->needs_reset) { - ret = mvebu_a3700_comphy_reset(phy); - if (ret) - return ret; - - lane->needs_reset = false; - } - switch (lane->mode) { case PHY_MODE_USB_HOST_SS: dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id); @@ -1224,38 +1182,28 @@ static int mvebu_a3700_comphy_power_off(struct phy *phy) { struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); - switch (lane->mode) { - case PHY_MODE_USB_HOST_SS: - /* - * The USB3 MAC sets the USB3 PHY to low state, so we do not - * need to power off USB3 PHY again. - */ - break; - - case PHY_MODE_SATA: - mvebu_a3700_comphy_sata_power_off(lane); - break; - - case PHY_MODE_ETHERNET: + switch (lane->id) { + case 0: + mvebu_a3700_comphy_usb3_power_off(lane); mvebu_a3700_comphy_ethernet_power_off(lane); - break; - - case PHY_MODE_PCIE: + return 0; + case 1: mvebu_a3700_comphy_pcie_power_off(lane); - break; - + mvebu_a3700_comphy_ethernet_power_off(lane); + return 0; + case 2: + mvebu_a3700_comphy_usb3_power_off(lane); + mvebu_a3700_comphy_sata_power_off(lane); + return 0; default: dev_err(lane->dev, "invalid COMPHY mode\n"); return -EINVAL; } - - return 0; } static const struct phy_ops mvebu_a3700_comphy_ops = { .power_on = mvebu_a3700_comphy_power_on, .power_off = mvebu_a3700_comphy_power_off, - .reset = mvebu_a3700_comphy_reset, .set_mode = mvebu_a3700_comphy_set_mode, .owner = THIS_MODULE, }; @@ -1393,8 +1341,7 @@ static int mvebu_a3700_comphy_probe(struct platform_device *pdev) * To avoid relying on the bootloader/firmware configuration, * power off all comphys. 
*/ - mvebu_a3700_comphy_reset(phy); - lane->needs_reset = false; + mvebu_a3700_comphy_power_off(phy); } provider = devm_of_phy_provider_register(&pdev->dev, diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index c5fd154990c8..c7df8c5fe585 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -331,6 +331,7 @@ struct ocelot_pinctrl { const struct ocelot_pincfg_data *pincfg_data; struct ocelot_pmx_func func[FUNC_MAX]; u8 stride; + struct workqueue_struct *wq; }; struct ocelot_match_data { @@ -338,6 +339,11 @@ struct ocelot_match_data { struct ocelot_pincfg_data pincfg_data; }; +struct ocelot_irq_work { + struct work_struct irq_work; + struct irq_desc *irq_desc; +}; + #define LUTON_P(p, f0, f1) \ static struct ocelot_pin_caps luton_pin_##p = { \ .pin = p, \ @@ -1813,6 +1819,75 @@ static void ocelot_irq_mask(struct irq_data *data) gpiochip_disable_irq(chip, gpio); } +static void ocelot_irq_work(struct work_struct *work) +{ + struct ocelot_irq_work *w = container_of(work, struct ocelot_irq_work, irq_work); + struct irq_chip *parent_chip = irq_desc_get_chip(w->irq_desc); + struct gpio_chip *chip = irq_desc_get_chip_data(w->irq_desc); + struct irq_data *data = irq_desc_get_irq_data(w->irq_desc); + unsigned int gpio = irqd_to_hwirq(data); + + local_irq_disable(); + chained_irq_enter(parent_chip, w->irq_desc); + generic_handle_domain_irq(chip->irq.domain, gpio); + chained_irq_exit(parent_chip, w->irq_desc); + local_irq_enable(); + + kfree(w); +} + +static void ocelot_irq_unmask_level(struct irq_data *data) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + struct ocelot_pinctrl *info = gpiochip_get_data(chip); + struct irq_desc *desc = irq_data_to_desc(data); + unsigned int gpio = irqd_to_hwirq(data); + unsigned int bit = BIT(gpio % 32); + bool ack = false, active = false; + u8 trigger_level; + int val; + + trigger_level = irqd_get_trigger_type(data); + + /* Check if the interrupt line is still active. */ + regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val); + if ((!(val & bit) && trigger_level == IRQ_TYPE_LEVEL_LOW) || + (val & bit && trigger_level == IRQ_TYPE_LEVEL_HIGH)) + active = true; + + /* + * Check if the interrupt controller has seen any changes in the + * interrupt line. + */ + regmap_read(info->map, REG(OCELOT_GPIO_INTR, info, gpio), &val); + if (val & bit) + ack = true; + + /* Enable the interrupt now */ + gpiochip_enable_irq(chip, gpio); + regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio), + bit, bit); + + /* + * If the interrupt line is still active and the interrupt controller + * has not seen any change on the line, then another interrupt happened + * while the line was active. We missed that one, so we need to kick + * the interrupt handler again.
+ */ + if (active && !ack) { + struct ocelot_irq_work *work; + + work = kmalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + work->irq_desc = desc; + INIT_WORK(&work->irq_work, ocelot_irq_work); + queue_work(info->wq, &work->irq_work); + } +} + static void ocelot_irq_unmask(struct irq_data *data) { struct gpio_chip *chip = irq_data_get_irq_chip_data(data); @@ -1836,13 +1911,12 @@ static void ocelot_irq_ack(struct irq_data *data) static int ocelot_irq_set_type(struct irq_data *data, unsigned int type); -static struct irq_chip ocelot_eoi_irqchip = { +static struct irq_chip ocelot_level_irqchip = { .name = "gpio", .irq_mask = ocelot_irq_mask, - .irq_eoi = ocelot_irq_ack, - .irq_unmask = ocelot_irq_unmask, - .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED | - IRQCHIP_IMMUTABLE, + .irq_ack = ocelot_irq_ack, + .irq_unmask = ocelot_irq_unmask_level, + .flags = IRQCHIP_IMMUTABLE, .irq_set_type = ocelot_irq_set_type, GPIOCHIP_IRQ_RESOURCE_HELPERS }; @@ -1859,14 +1933,9 @@ static struct irq_chip ocelot_irqchip = { static int ocelot_irq_set_type(struct irq_data *data, unsigned int type) { - type &= IRQ_TYPE_SENSE_MASK; - - if (!(type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_HIGH))) - return -EINVAL; - - if (type & IRQ_TYPE_LEVEL_HIGH) - irq_set_chip_handler_name_locked(data, &ocelot_eoi_irqchip, - handle_fasteoi_irq, NULL); + if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) + irq_set_chip_handler_name_locked(data, &ocelot_level_irqchip, + handle_level_irq, NULL); if (type & IRQ_TYPE_EDGE_BOTH) irq_set_chip_handler_name_locked(data, &ocelot_irqchip, handle_edge_irq, NULL); @@ -1996,6 +2065,10 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) if (!info->desc) return -ENOMEM; + info->wq = alloc_ordered_workqueue("ocelot_ordered", 0); + if (!info->wq) + return -ENOMEM; + info->pincfg_data = &data->pincfg_data; reset = devm_reset_control_get_optional_shared(dev, "switch"); @@ -2018,7 +2091,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) dev_err(dev, "Failed to create regmap\n"); return PTR_ERR(info->map); } - dev_set_drvdata(dev, info->map); + dev_set_drvdata(dev, info); info->dev = dev; /* Pinconf registers */ @@ -2043,6 +2116,15 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) return 0; } +static int ocelot_pinctrl_remove(struct platform_device *pdev) +{ + struct ocelot_pinctrl *info = platform_get_drvdata(pdev); + + destroy_workqueue(info->wq); + + return 0; +} + static struct platform_driver ocelot_pinctrl_driver = { .driver = { .name = "pinctrl-ocelot", @@ -2050,6 +2132,7 @@ static struct platform_driver ocelot_pinctrl_driver = { .suppress_bind_attrs = true, }, .probe = ocelot_pinctrl_probe, + .remove = ocelot_pinctrl_remove, }; module_platform_driver(ocelot_pinctrl_driver); MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c index 6bec7f143134..704a99d2f93c 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c +++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c @@ -530,10 +530,10 @@ DECLARE_MSM_GPIO_PINS(187); DECLARE_MSM_GPIO_PINS(188); DECLARE_MSM_GPIO_PINS(189); -static const unsigned int sdc2_clk_pins[] = { 190 }; -static const unsigned int sdc2_cmd_pins[] = { 191 }; -static const unsigned int sdc2_data_pins[] = { 192 }; -static const unsigned int ufs_reset_pins[] = { 193 }; +static const unsigned int ufs_reset_pins[] = { 190 }; +static const unsigned int sdc2_clk_pins[] = { 191 }; +static const unsigned int sdc2_cmd_pins[] = { 192 }; +static const unsigned int 
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index 6bec7f143134..704a99d2f93c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -530,10 +530,10 @@ DECLARE_MSM_GPIO_PINS(187);
 DECLARE_MSM_GPIO_PINS(188);
 DECLARE_MSM_GPIO_PINS(189);
 
-static const unsigned int sdc2_clk_pins[] = { 190 };
-static const unsigned int sdc2_cmd_pins[] = { 191 };
-static const unsigned int sdc2_data_pins[] = { 192 };
-static const unsigned int ufs_reset_pins[] = { 193 };
+static const unsigned int ufs_reset_pins[] = { 190 };
+static const unsigned int sdc2_clk_pins[] = { 191 };
+static const unsigned int sdc2_cmd_pins[] = { 192 };
+static const unsigned int sdc2_data_pins[] = { 193 };
 
 enum sc8180x_functions {
 	msm_mux_adsp_ext,
@@ -1582,7 +1582,7 @@ static const int sc8180x_acpi_reserved_gpios[] = {
 static const struct msm_gpio_wakeirq_map sc8180x_pdc_map[] = {
 	{ 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, { 12, 104 },
 	{ 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, { 30, 39 }, { 36, 43 },
-	{ 37, 43 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
+	{ 37, 44 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
 	{ 42, 48 }, { 46, 50 }, { 47, 49 }, { 48, 51 }, { 49, 53 }, { 50, 52 },
 	{ 51, 116 }, { 51, 123 }, { 53, 54 }, { 54, 55 }, { 55, 56 }, { 56, 57 },
 	{ 58, 58 }, { 60, 60 }, { 68, 62 }, { 70, 63 }, { 76, 86 },
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
index afc1f5df7545..b82ad135bf2a 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(of, a100_r_pinctrl_match);
 static struct platform_driver a100_r_pinctrl_driver = {
 	.probe	= a100_r_pinctrl_probe,
 	.driver	= {
-		.name		= "sun50iw10p1-r-pinctrl",
+		.name		= "sun50i-a100-r-pinctrl",
 		.of_match_table	= a100_r_pinctrl_match,
 	},
 };
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index dc78a523a69f..b6b938aa6615 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device)
 struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
 {
 	struct dasd_eckd_private *alias_priv, *private = base_device->private;
-	struct alias_pav_group *group = private->pavgroup;
 	struct alias_lcu *lcu = private->lcu;
 	struct dasd_device *alias_device;
+	struct alias_pav_group *group;
 	unsigned long flags;
 
-	if (!group || !lcu)
+	if (!lcu)
 		return NULL;
 	if (lcu->pav == NO_PAV ||
 	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
@@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
 	}
 
 	spin_lock_irqsave(&lcu->lock, flags);
+	group = private->pavgroup;
+	if (!group) {
+		spin_unlock_irqrestore(&lcu->lock, flags);
+		return NULL;
+	}
 	alias_device = group->next;
 	if (!alias_device) {
 		if (list_empty(&group->aliaslist)) {
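The dasd_alias change above enforces a standard locking rule: private->pavgroup is torn down under lcu->lock, so sampling it before taking the lock can yield a pointer that is stale by the time it is dereferenced. A compact illustration of the pattern, using pthreads and illustrative names rather than the driver's types:

    #include <pthread.h>
    #include <stddef.h>

    struct lcu_like {
            pthread_mutex_t lock;
            void *pavgroup;         /* cleared under ->lock by other threads */
    };

    /* Read and use the pointer inside the same critical section. */
    static int use_pavgroup(struct lcu_like *lcu)
    {
            int ret = -1;

            pthread_mutex_lock(&lcu->lock);
            if (lcu->pavgroup)      /* re-checked under the lock, as in the fix */
                    ret = 0;        /* ...safe to dereference pavgroup here... */
            pthread_mutex_unlock(&lcu->lock);
            return ret;
    }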
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 6c8c41fac4e1..ee82207b4e60 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -984,6 +984,11 @@ static ssize_t assign_adapter_store(struct device *dev,
 		goto done;
 	}
 
+	if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
+		ret = count;
+		goto done;
+	}
+
 	set_bit_inv(apid, matrix_mdev->matrix.apm);
 
 	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
@@ -1109,6 +1114,11 @@ static ssize_t unassign_adapter_store(struct device *dev,
 		goto done;
 	}
 
+	if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
+		ret = count;
+		goto done;
+	}
+
 	clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
 	vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
 	ret = count;
@@ -1183,6 +1193,11 @@ static ssize_t assign_domain_store(struct device *dev,
 		goto done;
 	}
 
+	if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
+		ret = count;
+		goto done;
+	}
+
 	set_bit_inv(apqi, matrix_mdev->matrix.aqm);
 
 	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
@@ -1286,6 +1301,11 @@ static ssize_t unassign_domain_store(struct device *dev,
 		goto done;
 	}
 
+	if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
+		ret = count;
+		goto done;
+	}
+
 	clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
 	vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
 	ret = count;
@@ -1329,6 +1349,11 @@ static ssize_t assign_control_domain_store(struct device *dev,
 		goto done;
 	}
 
+	if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
+		ret = count;
+		goto done;
+	}
+
 	/* Set the bit in the ADM (bitmask) corresponding to the AP control
 	 * domain number (id). The bits in the mask, from most significant to
 	 * least significant, correspond to IDs 0 up to the one less than the
@@ -1378,6 +1403,11 @@ static ssize_t unassign_control_domain_store(struct device *dev,
 		goto done;
 	}
 
+	if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
+		ret = count;
+		goto done;
+	}
+
 	clear_bit_inv(domid, matrix_mdev->matrix.adm);
 
 	if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 565339a0811d..331e896d8225 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2993,7 +2993,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 
 	if (ioc->is_mcpu_endpoint ||
 	    sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
-	    dma_get_required_mask(&pdev->dev) <= 32)
+	    dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
 		ioc->dma_mask = 32;
 	/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
 	else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
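The mpt3sas one-liner above fixes a units mismatch: dma_get_required_mask() returns a bit mask, while the old code compared it against the bit count 32, a condition no real mask can satisfy, so 32-bit mode was never selected through that test. A standalone demonstration, with DMA_BIT_MASK() copied from include/linux/dma-mapping.h:

    #include <stdio.h>
    #include <stdint.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
            /* A device whose addressing needs fit in 32 bits. */
            uint64_t required = DMA_BIT_MASK(32);   /* 0xffffffff */

            printf("mask <= 32               -> %d (old test, never true)\n",
                   required <= 32);
            printf("mask <= DMA_BIT_MASK(32) -> %d (fixed test)\n",
                   required <= DMA_BIT_MASK(32));
            return 0;
    }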
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 3d6b137314f3..bbc4d5890ae6 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3686,11 +3686,6 @@ err2:
 err1:
 	scsi_host_put(lport->host);
 err0:
-	if (qedf) {
-		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
-
-		clear_bit(QEDF_PROBING, &qedf->flags);
-	}
 	return rc;
 }
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 62666df1a59e..4acff4e84b90 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2151,8 +2151,10 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 
 	abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
 				le32_to_cpu(abts->exchange_addr_to_abort));
-	if (!abort_cmd)
+	if (!abort_cmd) {
+		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
 		return -EIO;
+	}
 	mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
 
 	if (abort_cmd->qpair) {
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index ae38f0d25a8d..572b5896caa3 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2529,6 +2529,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
 		tb->cm_ops = &icm_icl_ops;
 		break;
 
+	case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
 	case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
 		icm->is_supported = icm_tgl_is_supported;
 		icm->get_mode = icm_ar_get_mode;
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index f09da5b62233..01190d9ced16 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
  * need for the PCI quirk anymore as we will use ICM also on Apple
  * hardware.
  */
+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI		0x1134
 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI		0x1137
 #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI		0x157d
 #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE		0x157e
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 0dcecbbc3967..f7fbef83583c 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1334,6 +1334,7 @@ static int omap8250_probe(struct platform_device *pdev)
 	up.port.throttle = omap_8250_throttle;
 	up.port.unthrottle = omap_8250_unthrottle;
 	up.port.rs485_config = serial8250_em485_config;
+	up.port.rs485_supported = serial8250_em485_supported;
 	up.rs485_start_tx = serial8250_em485_start_tx;
 	up.rs485_stop_tx = serial8250_em485_stop_tx;
 	up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index b20f6f2fa51c..fbc4b071b330 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2724,14 +2724,15 @@ static int lpuart_probe(struct platform_device *pdev)
 		lpuart_reg.cons = LPUART_CONSOLE;
 		handler = lpuart_int;
 	}
-	ret = uart_add_one_port(&lpuart_reg, &sport->port);
-	if (ret)
-		goto failed_attach_port;
 
 	ret = lpuart_global_reset(sport);
 	if (ret)
 		goto failed_reset;
 
+	ret = uart_add_one_port(&lpuart_reg, &sport->port);
+	if (ret)
+		goto failed_attach_port;
+
 	ret = uart_get_rs485_mode(&sport->port);
 	if (ret)
 		goto failed_get_rs485;
@@ -2747,9 +2748,9 @@ static int lpuart_probe(struct platform_device *pdev)
 
 failed_irq_request:
 failed_get_rs485:
-failed_reset:
 	uart_remove_one_port(&lpuart_reg, &sport->port);
 failed_attach_port:
+failed_reset:
 	lpuart_disable_clks(sport);
 	return ret;
 }
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index ad4f3567ff90..a5748e41483b 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -525,7 +525,7 @@ static void tegra_uart_tx_dma_complete(void *args)
 	count = tup->tx_bytes_requested - state.residue;
 	async_tx_ack(tup->tx_dma_desc);
 	spin_lock_irqsave(&tup->uport.lock, flags);
-	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+	uart_xmit_advance(&tup->uport, count);
 	tup->tx_in_progress = 0;
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(&tup->uport);
@@ -613,7 +613,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
 static void tegra_uart_stop_tx(struct uart_port *u)
 {
 	struct tegra_uart_port *tup = to_tegra_uport(u);
-	struct circ_buf *xmit = &tup->uport.state->xmit;
 	struct dma_tx_state state;
 	unsigned int count;
 
@@ -624,7 +623,7 @@ static void tegra_uart_stop_tx(struct uart_port *u)
 	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
 	count = tup->tx_bytes_requested - state.residue;
 	async_tx_ack(tup->tx_dma_desc);
-	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+	uart_xmit_advance(&tup->uport, count);
 	tup->tx_in_progress = 0;
 }
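The serial-tegra hunks above (and the tegra-tcu hunk below) replace open-coded circular-buffer tail arithmetic with uart_xmit_advance(). Besides being shorter, the helper also updates the port's TX byte count, which the open-coded version silently skipped. At the time of this merge it reads roughly as follows, paraphrased from include/linux/serial_core.h:

    static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
    {
            struct circ_buf *xmit = &up->state->xmit;

            /* Advance the tail and keep icount.tx accurate in one place. */
            xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
            up->icount.tx += chars;
    }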
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 5c3a07546a58..4b1d4fe8458e 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -945,7 +945,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
 		return PTR_ERR(base);
 	}
 
-	clk = devm_clk_get(&pdev->dev, NULL);
+	clk = devm_clk_get_enabled(&pdev->dev, NULL);
 	if (IS_ERR(clk)) {
 		dev_err(&pdev->dev, "unable to find controller clock\n");
 		return PTR_ERR(clk);
diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
index 4877c54c613d..889b701ba7c6 100644
--- a/drivers/tty/serial/tegra-tcu.c
+++ b/drivers/tty/serial/tegra-tcu.c
@@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port)
 			break;
 
 		tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
-		xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+		uart_xmit_advance(port, count);
 	}
 
 	uart_write_wakeup(port);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d4b1e70d1498..bbab424b0d55 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -6039,7 +6039,7 @@ re_enumerate:
  *
  * Return: The same as for usb_reset_and_verify_device().
  * However, if a reset is already in progress (for instance, if a
- * driver doesn't have pre_ or post_reset() callbacks, and while
+ * driver doesn't have pre_reset() or post_reset() callbacks, and while
  * being unbound or re-bound during the ongoing reset its disconnect()
 * or probe() routine tries to perform a second, nested reset), the
 * routine returns -EINPROGRESS.
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 8c8e32651473..d0237b30c9be 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1752,12 +1752,6 @@ static int dwc3_probe(struct platform_device *pdev)
 
 	dwc3_get_properties(dwc);
 
-	if (!dwc->sysdev_is_parent) {
-		ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
-		if (ret)
-			return ret;
-	}
-
 	dwc->reset = devm_reset_control_array_get_optional_shared(dev);
 	if (IS_ERR(dwc->reset))
 		return PTR_ERR(dwc->reset);
@@ -1823,6 +1817,13 @@ static int dwc3_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, dwc);
 	dwc3_cache_hwparams(dwc);
 
+	if (!dwc->sysdev_is_parent &&
+	    DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
+		ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+		if (ret)
+			goto disable_clks;
+	}
+
 	spin_lock_init(&dwc->lock);
 	mutex_init(&dwc->mutex);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index a5e8374a8d71..697683e3fbff 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -256,6 +256,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EM060K			0x030b
 #define QUECTEL_PRODUCT_EM12			0x0512
 #define QUECTEL_PRODUCT_RM500Q			0x0800
+#define QUECTEL_PRODUCT_RM520N			0x0801
 #define QUECTEL_PRODUCT_EC200S_CN		0x6002
 #define QUECTEL_PRODUCT_EC200T			0x6026
 #define QUECTEL_PRODUCT_RM500K			0x7001
@@ -1138,6 +1139,8 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
 	  .driver_info = NUMEP2 },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
+	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
+	  .driver_info = ZLP },
 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
@@ -1159,6 +1162,9 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
 	  .driver_info = ZLP },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 5defdfead653..831e7049977d 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -56,6 +56,7 @@ config TYPEC_ANX7411
 	tristate "Analogix ANX7411 Type-C DRP Port controller driver"
 	depends on I2C
 	depends on USB_ROLE_SWITCH
+	depends on POWER_SUPPLY
 	help
 	  Say Y or M here if your system has Analogix ANX7411 Type-C
 	  DRP Port controller driver.
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 886c564787f1..b58b445bb529 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -74,10 +74,6 @@
 #define SYNTHVID_DEPTH_WIN8	32
 #define SYNTHVID_FB_SIZE_WIN8	(8 * 1024 * 1024)
 
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
-
 enum pipe_msg_type {
 	PIPE_MSG_INVALID,
 	PIPE_MSG_DATA,
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index d5f3f763717e..d4b251925796 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -382,9 +382,10 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
 	unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
 	grant_ref_t gref_head;
 	unsigned int i;
+	void *addr;
 	int ret;
 
-	*vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
+	addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
 	if (!*vaddr) {
 		ret = -ENOMEM;
 		goto err;
@@ -401,13 +402,15 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
 		unsigned long gfn;
 
 		if (is_vmalloc_addr(*vaddr))
-			gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr[i]));
+			gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
 		else
-			gfn = virt_to_gfn(vaddr[i]);
+			gfn = virt_to_gfn(addr);
 
 		grefs[i] = gnttab_claim_grant_reference(&gref_head);
 		gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
 						gfn, 0);
+
+		addr += XEN_PAGE_SIZE;
 	}
 
 	return 0;
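For context on the final hunk: vaddr is a void ** out-parameter that points at a single base pointer, so the old vaddr[i] indexed past that pointer for every page after the first instead of stepping through the ring's pages. The fix walks a byte cursor in XEN_PAGE_SIZE steps. In miniature, as illustrative user-space C rather than the driver code:

    #include <stdio.h>

    #define XEN_PAGE_SIZE 4096

    static void grant_pages(void **vaddr, unsigned int nr_pages)
    {
            unsigned char *addr = *vaddr;   /* page 0 of the ring */
            unsigned int i;

            for (i = 0; i < nr_pages; i++) {
                    /* old: vaddr[i] read neighbouring memory for i > 0 */
                    printf("granting page %u at %p\n", i, (void *)addr);
                    addr += XEN_PAGE_SIZE;  /* new: one page at a time */
            }
    }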