Diffstat (limited to 'drivers')
194 files changed, 4440 insertions, 1217 deletions
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c index e9626bf6ca29..d6c1b10f6c25 100644 --- a/drivers/acpi/nfit/mce.c +++ b/drivers/acpi/nfit/mce.c @@ -25,8 +25,12 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, struct acpi_nfit_desc *acpi_desc; struct nfit_spa *nfit_spa; - /* We only care about memory errors */ - if (!mce_is_memory_error(mce)) + /* We only care about uncorrectable memory errors */ + if (!mce_is_memory_error(mce) || mce_is_correctable(mce)) + return NOTIFY_DONE; + + /* Verify the address reported in the MCE is valid. */ + if (!mce_usable_address(mce)) return NOTIFY_DONE; /* diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 10ecb232245d..4b1ff5bc256a 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Renesas R-Car SATA driver * * Author: Vladimir Barinov <source@cogentembedded.com> * Copyright (C) 2013-2015 Cogent Embedded, Inc. * Copyright (C) 2013-2015 Renesas Solutions Corp. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. */ #include <linux/kernel.h> diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 56452cabce5b..0ed4b200fa58 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1919,6 +1919,7 @@ static int negotiate_mq(struct blkfront_info *info) GFP_KERNEL); if (!info->rinfo) { xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure"); + info->nr_rings = 0; return -ENOMEM; } diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c index 68ac3e93b600..f58ff67e97ac 100644 --- a/drivers/bus/brcmstb_gisb.c +++ b/drivers/bus/brcmstb_gisb.c @@ -150,8 +150,7 @@ static ssize_t gisb_arb_get_timeout(struct device *dev, struct device_attribute *attr, char *buf) { - struct platform_device *pdev = to_platform_device(dev); - struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev); + struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev); u32 timeout; mutex_lock(&gdev->lock); @@ -165,8 +164,7 @@ static ssize_t gisb_arb_set_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct platform_device *pdev = to_platform_device(dev); - struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev); + struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev); int val, ret; ret = kstrtoint(buf, 10, &val); @@ -418,8 +416,7 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int brcmstb_gisb_arb_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev); + struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev); gdev->saved_timeout = gisb_read(gdev, ARB_TIMER); @@ -431,8 +428,7 @@ static int brcmstb_gisb_arb_suspend(struct device *dev) */ static int brcmstb_gisb_arb_resume_noirq(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev); + struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev); gisb_write(gdev, gdev->saved_timeout, ARB_TIMER); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index a3a2d39280d9..f94d33525771 100644 --- 
a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -91,6 +91,9 @@ struct sysc { struct delayed_work idle_work; }; +static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, + bool is_child); + void sysc_write(struct sysc *ddata, int offset, u32 value) { writel_relaxed(value, ddata->module_va + offset); @@ -214,8 +217,13 @@ static int sysc_get_clocks(struct sysc *ddata) if (!ddata->clocks) return -ENOMEM; - for (i = 0; i < ddata->nr_clocks; i++) { - error = sysc_get_one_clock(ddata, ddata->clock_roles[i]); + for (i = 0; i < SYSC_MAX_CLOCKS; i++) { + const char *name = ddata->clock_roles[i]; + + if (!name) + continue; + + error = sysc_get_one_clock(ddata, name); if (error && error != -ENOENT) return error; } @@ -374,6 +382,7 @@ static int sysc_check_one_child(struct sysc *ddata, dev_warn(ddata->dev, "really a child ti,hwmods property?"); sysc_check_quirk_stdout(ddata, np); + sysc_parse_dts_quirks(ddata, np, true); return 0; } @@ -815,6 +824,7 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0), SYSC_QUIRK("ocp2scp", 0, 0, -1, -1, 0x50060007, 0xffffffff, 0), SYSC_QUIRK("padconf", 0, 0, 0x10, -1, 0x4fff0800, 0xffffffff, 0), + SYSC_QUIRK("padconf", 0, 0, -1, -1, 0x40001100, 0xffffffff, 0), SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000100, 0xffffffff, 0), SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x00004102, 0xffffffff, 0), SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000400, 0xffffffff, 0), @@ -833,7 +843,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("rtc", 0, 0x74, 0x78, -1, 0x4eb01908, 0xffff00f0, 0), SYSC_QUIRK("timer32k", 0, 0, 0x4, -1, 0x00000060, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0), + SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0), SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0), + SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0), SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 0xffffffff, 0), SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, 0), @@ -1271,23 +1283,37 @@ static const struct sysc_dts_quirk sysc_dts_quirks[] = { .mask = SYSC_QUIRK_NO_RESET_ON_INIT, }, }; -static int sysc_init_dts_quirks(struct sysc *ddata) +static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, + bool is_child) { - struct device_node *np = ddata->dev->of_node; const struct property *prop; - int i, len, error; - u32 val; - - ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL); + int i, len; for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) { - prop = of_get_property(np, sysc_dts_quirks[i].name, &len); + const char *name = sysc_dts_quirks[i].name; + + prop = of_get_property(np, name, &len); if (!prop) continue; ddata->cfg.quirks |= sysc_dts_quirks[i].mask; + if (is_child) { + dev_warn(ddata->dev, + "dts flag should be at module level for %s\n", + name); + } } +} +static int sysc_init_dts_quirks(struct sysc *ddata) +{ + struct device_node *np = ddata->dev->of_node; + int error; + u32 val; + + ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL); + + sysc_parse_dts_quirks(ddata, np, false); error = of_property_read_u32(np, "ti,sysc-delay-us", &val); if (!error) { if (val > 255) { @@ -1498,6 +1524,16 @@ static const struct sysc_regbits sysc_regbits_omap4_mcasp = { static const struct sysc_capabilities sysc_omap4_mcasp = { .type = TI_SYSC_OMAP4_MCASP, .regbits = &sysc_regbits_omap4_mcasp, + .mod_quirks = 
SYSC_QUIRK_OPT_CLKS_NEEDED, +}; + +/* + * McASP found on dra7 and later + */ +static const struct sysc_capabilities sysc_dra7_mcasp = { + .type = TI_SYSC_OMAP4_SIMPLE, + .regbits = &sysc_regbits_omap4_simple, + .mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED, }; /* @@ -1726,6 +1762,7 @@ static const struct of_device_id sysc_match[] = { { .compatible = "ti,sysc-omap3-sham", .data = &sysc_omap3_sham, }, { .compatible = "ti,sysc-omap-aes", .data = &sysc_omap3_aes, }, { .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, }, + { .compatible = "ti,sysc-dra7-mcasp", .data = &sysc_dra7_mcasp, }, { .compatible = "ti,sysc-usb-host-fs", .data = &sysc_omap4_usb_host_fs, }, { .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, }, diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index ef0ca9414f37..ff83e899df71 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -210,6 +210,7 @@ static int of_fixed_factor_clk_remove(struct platform_device *pdev) { struct clk *clk = platform_get_drvdata(pdev); + of_clk_del_provider(pdev->dev.of_node); clk_unregister_fixed_factor(clk); return 0; diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c index c981159b02c0..792735d7e46e 100644 --- a/drivers/clk/meson/axg.c +++ b/drivers/clk/meson/axg.c @@ -325,6 +325,7 @@ static struct clk_regmap axg_fclk_div2 = { .ops = &clk_regmap_gate_ops, .parent_names = (const char *[]){ "fclk_div2_div" }, .num_parents = 1, + .flags = CLK_IS_CRITICAL, }, }; @@ -349,6 +350,18 @@ static struct clk_regmap axg_fclk_div3 = { .ops = &clk_regmap_gate_ops, .parent_names = (const char *[]){ "fclk_div3_div" }, .num_parents = 1, + /* + * FIXME: + * This clock, as fdiv2, is used by the SCPI FW and is required + * by the platform to operate correctly. + * Until the following condition are met, we need this clock to + * be marked as critical: + * a) The SCPI generic driver claims and enable all the clocks + * it needs + * b) CCF has a clock hand-off mechanism to make the sure the + * clock stays on until the proper driver comes along + */ + .flags = CLK_IS_CRITICAL, }, }; diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 9309cfaaa464..4ada9668fd49 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -506,6 +506,18 @@ static struct clk_regmap gxbb_fclk_div3 = { .ops = &clk_regmap_gate_ops, .parent_names = (const char *[]){ "fclk_div3_div" }, .num_parents = 1, + /* + * FIXME: + * This clock, as fdiv2, is used by the SCPI FW and is required + * by the platform to operate correctly. 
+ * Until the following condition are met, we need this clock to + * be marked as critical: + * a) The SCPI generic driver claims and enable all the clocks + * it needs + * b) CCF has a clock hand-off mechanism to make the sure the + * clock stays on until the proper driver comes along + */ + .flags = CLK_IS_CRITICAL, }, }; diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c index e4ca6a45f313..ef1b267cb058 100644 --- a/drivers/clk/qcom/gcc-qcs404.c +++ b/drivers/clk/qcom/gcc-qcs404.c @@ -265,7 +265,7 @@ static struct clk_fixed_factor cxo = { .div = 1, .hw.init = &(struct clk_init_data){ .name = "cxo", - .parent_names = (const char *[]){ "xo_board" }, + .parent_names = (const char *[]){ "xo-board" }, .num_parents = 1, .ops = &clk_fixed_factor_ops, }, diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c index 9c38895542f4..d4350bb10b83 100644 --- a/drivers/clocksource/i8253.c +++ b/drivers/clocksource/i8253.c @@ -20,6 +20,13 @@ DEFINE_RAW_SPINLOCK(i8253_lock); EXPORT_SYMBOL(i8253_lock); +/* + * Handle PIT quirk in pit_shutdown() where zeroing the counter register + * restarts the PIT, negating the shutdown. On platforms with the quirk, + * platform specific code can set this to false. + */ +bool i8253_clear_counter_on_shutdown __ro_after_init = true; + #ifdef CONFIG_CLKSRC_I8253 /* * Since the PIT overflows every tick, its not very useful @@ -109,8 +116,11 @@ static int pit_shutdown(struct clock_event_device *evt) raw_spin_lock(&i8253_lock); outb_p(0x30, PIT_MODE); - outb_p(0, PIT_CH0); - outb_p(0, PIT_CH0); + + if (i8253_clear_counter_on_shutdown) { + outb_p(0, PIT_CH0); + outb_p(0, PIT_CH0); + } raw_spin_unlock(&i8253_lock); return 0; diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 825725057e00..c7a328f81485 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -179,7 +179,7 @@ static unsigned int pxad_drcmr(unsigned int line) return 0x1000 + line * 4; } -bool pxad_filter_fn(struct dma_chan *chan, void *param); +static bool pxad_filter_fn(struct dma_chan *chan, void *param); /* * Debug fs @@ -1500,7 +1500,7 @@ static struct platform_driver pxad_driver = { .remove = pxad_remove, }; -bool pxad_filter_fn(struct dma_chan *chan, void *param) +static bool pxad_filter_fn(struct dma_chan *chan, void *param) { struct pxad_chan *c = to_pxad_chan(chan); struct pxad_param *p = param; @@ -1513,7 +1513,6 @@ bool pxad_filter_fn(struct dma_chan *chan, void *param) return true; } -EXPORT_SYMBOL_GPL(pxad_filter_fn); module_platform_driver(pxad_driver); diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index b170c2851e48..6a7a7c2c5b5f 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -9,3 +9,9 @@ config IMX_SCU This driver manages the IPC interface between host CPU and the SCU firmware running on M4. + +config IMX_SCU_PD + bool "IMX SCU Power Domain driver" + depends on IMX_SCU + help + The System Controller Firmware (SCFW) based power domain driver. 
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile index 0ac04dfda8d4..1b2e15b3c9ca 100644 --- a/drivers/firmware/imx/Makefile +++ b/drivers/firmware/imx/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o +obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o +obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c new file mode 100644 index 000000000000..407245f2efd0 --- /dev/null +++ b/drivers/firmware/imx/scu-pd.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2018 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + * + * Implementation of the SCU based Power Domains + * + * NOTE: a better implementation suggested by Ulf Hansson is using a + * single global power domain and implement the ->attach|detach_dev() + * callback for the genpd and use the regular of_genpd_add_provider_simple(). + * From within the ->attach_dev(), we could get the OF node for + * the device that is being attached and then parse the power-domain + * cell containing the "resource id" and store that in the per device + * struct generic_pm_domain_data (we have void pointer there for + * storing these kind of things). + * + * Additionally, we need to implement the ->stop() and ->start() + * callbacks of genpd, which is where you "power on/off" devices, + * rather than using the above ->power_on|off() callbacks. + * + * However, there're two known issues: + * 1. The ->attach_dev() of power domain infrastructure still does + * not support multi domains case as the struct device *dev passed + * in is a virtual PD device, it does not help for parsing the real + * device resource id from device tree, so it's unware of which + * real sub power domain of device should be attached. + * + * The framework needs some proper extension to support multi power + * domain cases. + * + * 2. It also breaks most of current drivers as the driver probe sequence + * behavior changed if removing ->power_on|off() callback and use + * ->start() and ->stop() instead. genpd_dev_pm_attach will only power + * up the domain and attach device, but will not call .start() which + * relies on device runtime pm. That means the device power is still + * not up before running driver probe function. For SCU enabled + * platforms, all device drivers accessing registers/clock without power + * domain enabled will trigger a HW access error. That means we need fix + * most drivers probe sequence with proper runtime pm. + * + * In summary, we need fix above two issue before being able to switch to + * the "single global power domain" way. 
+ * + */ + +#include <dt-bindings/firmware/imx/rsrc.h> +#include <linux/firmware/imx/sci.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/pm_domain.h> +#include <linux/slab.h> + +/* SCU Power Mode Protocol definition */ +struct imx_sc_msg_req_set_resource_power_mode { + struct imx_sc_rpc_msg hdr; + u16 resource; + u8 mode; +} __packed; + +#define IMX_SCU_PD_NAME_SIZE 20 +struct imx_sc_pm_domain { + struct generic_pm_domain pd; + char name[IMX_SCU_PD_NAME_SIZE]; + u32 rsrc; +}; + +struct imx_sc_pd_range { + char *name; + u32 rsrc; + u8 num; + bool postfix; +}; + +struct imx_sc_pd_soc { + const struct imx_sc_pd_range *pd_ranges; + u8 num_ranges; +}; + +static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { + /* LSIO SS */ + { "lsio-pwm", IMX_SC_R_PWM_0, 8, 1 }, + { "lsio-gpio", IMX_SC_R_GPIO_0, 8, 1 }, + { "lsio-gpt", IMX_SC_R_GPT_0, 5, 1 }, + { "lsio-kpp", IMX_SC_R_KPP, 1, 0 }, + { "lsio-fspi", IMX_SC_R_FSPI_0, 2, 1 }, + { "lsio-mu", IMX_SC_R_MU_0A, 14, 1 }, + + /* CONN SS */ + { "con-usb", IMX_SC_R_USB_0, 2, 1 }, + { "con-usb0phy", IMX_SC_R_USB_0_PHY, 1, 0 }, + { "con-usb2", IMX_SC_R_USB_2, 1, 0 }, + { "con-usb2phy", IMX_SC_R_USB_2_PHY, 1, 0 }, + { "con-sdhc", IMX_SC_R_SDHC_0, 3, 1 }, + { "con-enet", IMX_SC_R_ENET_0, 2, 1 }, + { "con-nand", IMX_SC_R_NAND, 1, 0 }, + { "con-mlb", IMX_SC_R_MLB_0, 1, 1 }, + + /* Audio DMA SS */ + { "adma-audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, 0 }, + { "adma-audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, 0 }, + { "adma-audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, 0 }, + { "adma-dma0-ch", IMX_SC_R_DMA_0_CH0, 16, 1 }, + { "adma-dma1-ch", IMX_SC_R_DMA_1_CH0, 16, 1 }, + { "adma-dma2-ch", IMX_SC_R_DMA_2_CH0, 5, 1 }, + { "adma-asrc0", IMX_SC_R_ASRC_0, 1, 0 }, + { "adma-asrc1", IMX_SC_R_ASRC_1, 1, 0 }, + { "adma-esai0", IMX_SC_R_ESAI_0, 1, 0 }, + { "adma-spdif0", IMX_SC_R_SPDIF_0, 1, 0 }, + { "adma-sai", IMX_SC_R_SAI_0, 3, 1 }, + { "adma-amix", IMX_SC_R_AMIX, 1, 0 }, + { "adma-mqs0", IMX_SC_R_MQS_0, 1, 0 }, + { "adma-dsp", IMX_SC_R_DSP, 1, 0 }, + { "adma-dsp-ram", IMX_SC_R_DSP_RAM, 1, 0 }, + { "adma-can", IMX_SC_R_CAN_0, 3, 1 }, + { "adma-ftm", IMX_SC_R_FTM_0, 2, 1 }, + { "adma-lpi2c", IMX_SC_R_I2C_0, 4, 1 }, + { "adma-adc", IMX_SC_R_ADC_0, 1, 1 }, + { "adma-lcd", IMX_SC_R_LCD_0, 1, 1 }, + { "adma-lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, 1 }, + { "adma-lpuart", IMX_SC_R_UART_0, 4, 1 }, + { "adma-lpspi", IMX_SC_R_SPI_0, 4, 1 }, + + /* VPU SS */ + { "vpu", IMX_SC_R_VPU, 1, 0 }, + { "vpu-pid", IMX_SC_R_VPU_PID0, 8, 1 }, + { "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, 0 }, + { "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, 0 }, + + /* GPU SS */ + { "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, 1 }, + + /* HSIO SS */ + { "hsio-pcie-b", IMX_SC_R_PCIE_B, 1, 0 }, + { "hsio-serdes-1", IMX_SC_R_SERDES_1, 1, 0 }, + { "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, 0 }, + + /* MIPI/LVDS SS */ + { "mipi0", IMX_SC_R_MIPI_0, 1, 0 }, + { "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, 0 }, + { "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, 1 }, + { "lvds0", IMX_SC_R_LVDS_0, 1, 0 }, + + /* DC SS */ + { "dc0", IMX_SC_R_DC_0, 1, 0 }, + { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, 1 }, +}; + +static const struct imx_sc_pd_soc imx8qxp_scu_pd = { + .pd_ranges = imx8qxp_scu_pd_ranges, + .num_ranges = ARRAY_SIZE(imx8qxp_scu_pd_ranges), +}; + +static struct imx_sc_ipc *pm_ipc_handle; + +static inline struct imx_sc_pm_domain * +to_imx_sc_pd(struct generic_pm_domain *genpd) +{ + return container_of(genpd, 
struct imx_sc_pm_domain, pd); +} + +static int imx_sc_pd_power(struct generic_pm_domain *domain, bool power_on) +{ + struct imx_sc_msg_req_set_resource_power_mode msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + struct imx_sc_pm_domain *pd; + int ret; + + pd = to_imx_sc_pd(domain); + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_PM; + hdr->func = IMX_SC_PM_FUNC_SET_RESOURCE_POWER_MODE; + hdr->size = 2; + + msg.resource = pd->rsrc; + msg.mode = power_on ? IMX_SC_PM_PW_MODE_ON : IMX_SC_PM_PW_MODE_LP; + + ret = imx_scu_call_rpc(pm_ipc_handle, &msg, true); + if (ret) + dev_err(&domain->dev, "failed to power %s resource %d ret %d\n", + power_on ? "up" : "off", pd->rsrc, ret); + + return ret; +} + +static int imx_sc_pd_power_on(struct generic_pm_domain *domain) +{ + return imx_sc_pd_power(domain, true); +} + +static int imx_sc_pd_power_off(struct generic_pm_domain *domain) +{ + return imx_sc_pd_power(domain, false); +} + +static struct generic_pm_domain *imx_scu_pd_xlate(struct of_phandle_args *spec, + void *data) +{ + struct generic_pm_domain *domain = ERR_PTR(-ENOENT); + struct genpd_onecell_data *pd_data = data; + unsigned int i; + + for (i = 0; i < pd_data->num_domains; i++) { + struct imx_sc_pm_domain *sc_pd; + + sc_pd = to_imx_sc_pd(pd_data->domains[i]); + if (sc_pd->rsrc == spec->args[0]) { + domain = &sc_pd->pd; + break; + } + } + + return domain; +} + +static struct imx_sc_pm_domain * +imx_scu_add_pm_domain(struct device *dev, int idx, + const struct imx_sc_pd_range *pd_ranges) +{ + struct imx_sc_pm_domain *sc_pd; + int ret; + + sc_pd = devm_kzalloc(dev, sizeof(*sc_pd), GFP_KERNEL); + if (!sc_pd) + return ERR_PTR(-ENOMEM); + + sc_pd->rsrc = pd_ranges->rsrc + idx; + sc_pd->pd.power_off = imx_sc_pd_power_off; + sc_pd->pd.power_on = imx_sc_pd_power_on; + + if (pd_ranges->postfix) + snprintf(sc_pd->name, sizeof(sc_pd->name), + "%s%i", pd_ranges->name, idx); + else + snprintf(sc_pd->name, sizeof(sc_pd->name), + "%s", pd_ranges->name); + + sc_pd->pd.name = sc_pd->name; + + if (sc_pd->rsrc >= IMX_SC_R_LAST) { + dev_warn(dev, "invalid pd %s rsrc id %d found", + sc_pd->name, sc_pd->rsrc); + + devm_kfree(dev, sc_pd); + return NULL; + } + + ret = pm_genpd_init(&sc_pd->pd, NULL, true); + if (ret) { + dev_warn(dev, "failed to init pd %s rsrc id %d", + sc_pd->name, sc_pd->rsrc); + devm_kfree(dev, sc_pd); + return NULL; + } + + return sc_pd; +} + +static int imx_scu_init_pm_domains(struct device *dev, + const struct imx_sc_pd_soc *pd_soc) +{ + const struct imx_sc_pd_range *pd_ranges = pd_soc->pd_ranges; + struct generic_pm_domain **domains; + struct genpd_onecell_data *pd_data; + struct imx_sc_pm_domain *sc_pd; + u32 count = 0; + int i, j; + + for (i = 0; i < pd_soc->num_ranges; i++) + count += pd_ranges[i].num; + + domains = devm_kcalloc(dev, count, sizeof(*domains), GFP_KERNEL); + if (!domains) + return -ENOMEM; + + pd_data = devm_kzalloc(dev, sizeof(*pd_data), GFP_KERNEL); + if (!pd_data) + return -ENOMEM; + + count = 0; + for (i = 0; i < pd_soc->num_ranges; i++) { + for (j = 0; j < pd_ranges[i].num; j++) { + sc_pd = imx_scu_add_pm_domain(dev, j, &pd_ranges[i]); + if (IS_ERR_OR_NULL(sc_pd)) + continue; + + domains[count++] = &sc_pd->pd; + dev_dbg(dev, "added power domain %s\n", sc_pd->pd.name); + } + } + + pd_data->domains = domains; + pd_data->num_domains = count; + pd_data->xlate = imx_scu_pd_xlate; + + of_genpd_add_provider_onecell(dev->of_node, pd_data); + + return 0; +} + +static int imx_sc_pd_probe(struct platform_device *pdev) +{ + const struct imx_sc_pd_soc *pd_soc; + int ret; 
+ + ret = imx_scu_get_handle(&pm_ipc_handle); + if (ret) + return ret; + + pd_soc = of_device_get_match_data(&pdev->dev); + if (!pd_soc) + return -ENODEV; + + return imx_scu_init_pm_domains(&pdev->dev, pd_soc); +} + +static const struct of_device_id imx_sc_pd_match[] = { + { .compatible = "fsl,imx8qxp-scu-pd", &imx8qxp_scu_pd}, + { /* sentinel */ } +}; + +static struct platform_driver imx_sc_pd_driver = { + .driver = { + .name = "imx-scu-pd", + .of_match_table = imx_sc_pd_match, + }, + .probe = imx_sc_pd_probe, +}; +builtin_platform_driver(imx_sc_pd_driver); + +MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>"); +MODULE_DESCRIPTION("IMX SCU Power Domain driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index a200a2174611..a13558154ac3 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Defines interfaces for interacting wtih the Raspberry Pi firmware's * property channel. * * Copyright © 2015 Broadcom - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/dma-mapping.h> @@ -14,6 +11,7 @@ #include <linux/module.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include <soc/bcm2835/raspberrypi-firmware.h> #define MBOX_MSG(chan, data28) (((data28) & ~0xf) | ((chan) & 0xf)) @@ -21,8 +19,6 @@ #define MBOX_DATA28(msg) ((msg) & ~0xf) #define MBOX_CHAN_PROPERTY 8 -#define MAX_RPI_FW_PROP_BUF_SIZE 32 - static struct platform_device *rpi_hwmon; struct rpi_firmware { @@ -56,8 +52,12 @@ rpi_firmware_transaction(struct rpi_firmware *fw, u32 chan, u32 data) reinit_completion(&fw->c); ret = mbox_send_message(fw->chan, &message); if (ret >= 0) { - wait_for_completion(&fw->c); - ret = 0; + if (wait_for_completion_timeout(&fw->c, HZ)) { + ret = 0; + } else { + ret = -ETIMEDOUT; + WARN_ONCE(1, "Firmware transaction timeout"); + } } else { dev_err(fw->cl.dev, "mbox_send_message returned %d\n", ret); } @@ -144,28 +144,30 @@ EXPORT_SYMBOL_GPL(rpi_firmware_property_list); int rpi_firmware_property(struct rpi_firmware *fw, u32 tag, void *tag_data, size_t buf_size) { - /* Single tags are very small (generally 8 bytes), so the - * stack should be safe. - */ - u8 data[sizeof(struct rpi_firmware_property_tag_header) + - MAX_RPI_FW_PROP_BUF_SIZE]; - struct rpi_firmware_property_tag_header *header = - (struct rpi_firmware_property_tag_header *)data; + struct rpi_firmware_property_tag_header *header; int ret; - if (WARN_ON(buf_size > sizeof(data) - sizeof(*header))) - return -EINVAL; + /* Some mailboxes can use over 1k bytes. Rather than checking + * size and using stack or kmalloc depending on requirements, + * just use kmalloc. Mailboxes don't get called enough to worry + * too much about the time taken in the allocation. 
+ */ + void *data = kmalloc(sizeof(*header) + buf_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + header = data; header->tag = tag; header->buf_size = buf_size; header->req_resp_size = 0; - memcpy(data + sizeof(struct rpi_firmware_property_tag_header), - tag_data, buf_size); + memcpy(data + sizeof(*header), tag_data, buf_size); + + ret = rpi_firmware_property_list(fw, data, buf_size + sizeof(*header)); + + memcpy(tag_data, data + sizeof(*header), buf_size); - ret = rpi_firmware_property_list(fw, &data, buf_size + sizeof(*header)); - memcpy(tag_data, - data + sizeof(struct rpi_firmware_property_tag_header), - buf_size); + kfree(data); return ret; } diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c index f7f6a0a5cb07..a84df1a8ca2b 100644 --- a/drivers/firmware/tegra/bpmp-debugfs.c +++ b/drivers/firmware/tegra/bpmp-debugfs.c @@ -379,33 +379,6 @@ static int create_debugfs_mirror(struct tegra_bpmp *bpmp, void *buf, return err; } -static int mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq) -{ - struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) }; - struct mrq_query_abi_response resp; - struct tegra_bpmp_message msg = { - .mrq = MRQ_QUERY_ABI, - .tx = { - .data = &req, - .size = sizeof(req), - }, - .rx = { - .data = &resp, - .size = sizeof(resp), - }, - }; - int ret; - - ret = tegra_bpmp_transfer(bpmp, &msg); - if (ret < 0) { - /* something went wrong; assume not supported */ - dev_warn(bpmp->dev, "tegra_bpmp_transfer failed (%d)\n", ret); - return 0; - } - - return resp.status ? 0 : 1; -} - int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp) { dma_addr_t phys; @@ -415,7 +388,7 @@ int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp) int ret; struct dentry *root; - if (!mrq_is_supported(bpmp, MRQ_DEBUGFS)) + if (!tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS)) return 0; root = debugfs_create_dir("bpmp", NULL); diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index a3d5b518c10e..689478b92bce 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -28,6 +28,7 @@ #define MSG_ACK BIT(0) #define MSG_RING BIT(1) +#define TAG_SZ 32 static inline struct tegra_bpmp * mbox_client_to_bpmp(struct mbox_client *client) @@ -470,6 +471,31 @@ unlock: } EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq); +bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq) +{ + struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) }; + struct mrq_query_abi_response resp; + struct tegra_bpmp_message msg = { + .mrq = MRQ_QUERY_ABI, + .tx = { + .data = &req, + .size = sizeof(req), + }, + .rx = { + .data = &resp, + .size = sizeof(resp), + }, + }; + int ret; + + ret = tegra_bpmp_transfer(bpmp, &msg); + if (ret || msg.rx.ret) + return false; + + return resp.status == 0; +} +EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported); + static void tegra_bpmp_mrq_handle_ping(unsigned int mrq, struct tegra_bpmp_channel *channel, void *data) @@ -521,8 +547,9 @@ static int tegra_bpmp_ping(struct tegra_bpmp *bpmp) return err; } -static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag, - size_t size) +/* deprecated version of tag query */ +static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag, + size_t size) { struct mrq_query_tag_request request; struct tegra_bpmp_message msg; @@ -531,7 +558,10 @@ static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag, void *virt; int err; - virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys, + if (size != TAG_SZ) 
+ return -EINVAL; + + virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys, GFP_KERNEL | GFP_DMA32); if (!virt) return -ENOMEM; @@ -549,13 +579,44 @@ static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag, local_irq_restore(flags); if (err == 0) - strlcpy(tag, virt, size); + memcpy(tag, virt, TAG_SZ); - dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys); + dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys); return err; } +static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag, + size_t size) +{ + if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) { + struct mrq_query_fw_tag_response resp; + struct tegra_bpmp_message msg = { + .mrq = MRQ_QUERY_FW_TAG, + .rx = { + .data = &resp, + .size = sizeof(resp), + }, + }; + int err; + + if (size != sizeof(resp.tag)) + return -EINVAL; + + err = tegra_bpmp_transfer(bpmp, &msg); + + if (err) + return err; + if (msg.rx.ret < 0) + return -EINVAL; + + memcpy(tag, resp.tag, sizeof(resp.tag)); + return 0; + } + + return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size); +} + static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel) { unsigned long flags = channel->ob->flags; @@ -664,7 +725,7 @@ static int tegra_bpmp_probe(struct platform_device *pdev) { struct tegra_bpmp *bpmp; unsigned int i; - char tag[32]; + char tag[TAG_SZ]; size_t size; int err; @@ -792,13 +853,13 @@ static int tegra_bpmp_probe(struct platform_device *pdev) goto free_mrq; } - err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1); + err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag)); if (err < 0) { dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err); goto free_mrq; } - dev_info(&pdev->dev, "firmware: %s\n", tag); + dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag); platform_set_drvdata(pdev, bpmp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d0102cfc8efb..104b2e0d893b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -151,6 +151,7 @@ extern int amdgpu_compute_multipipe; extern int amdgpu_gpu_recovery; extern int amdgpu_emu_mode; extern uint amdgpu_smu_memory_pool_size; +extern uint amdgpu_dc_feature_mask; extern struct amdgpu_mgpu_info mgpu_info; #ifdef CONFIG_DRM_AMDGPU_SI diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 943dbf3c5da1..8de55f7f1a3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1; int amdgpu_gpu_recovery = -1; /* auto */ int amdgpu_emu_mode = 0; uint amdgpu_smu_memory_pool_size = 0; +/* FBC (bit 0) disabled by default*/ +uint amdgpu_dc_feature_mask = 0; + struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), }; @@ -631,6 +634,14 @@ module_param(halt_if_hws_hang, int, 0644); MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)"); #endif +/** + * DOC: dcfeaturemask (uint) + * Override display features enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h. + * The default is the current set of stable display features. 
+ */ +MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))"); +module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444); + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c index 2d4473557b0d..d13fc4fcb517 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c @@ -49,6 +49,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); + adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); } return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b0df6dc9a775..c1262f62cd9f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -429,6 +429,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->asic_type < CHIP_RAVEN) init_data.flags.gpu_vm_support = true; + if (amdgpu_dc_feature_mask & DC_FBC_MASK) + init_data.flags.fbc_support = true; + /* Display Core create. */ adev->dm.dc = dc_create(&init_data); @@ -1524,13 +1527,6 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) { struct amdgpu_display_manager *dm = bl_get_data(bd); - /* - * PWM interperts 0 as 100% rather than 0% because of HW - * limitation for level 0.So limiting minimum brightness level - * to 1. - */ - if (bd->props.brightness < 1) - return 1; if (dc_link_set_backlight_level(dm->backlight_link, bd->props.brightness, 0, 0)) return 0; @@ -2707,18 +2703,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, drm_connector = &aconnector->base; if (!aconnector->dc_sink) { - /* - * Create dc_sink when necessary to MST - * Don't apply fake_sink to MST - */ - if (aconnector->mst_port) { - dm_dp_mst_dc_sink_create(drm_connector); - return stream; + if (!aconnector->mst_port) { + sink = create_fake_sink(aconnector); + if (!sink) + return stream; } - - sink = create_fake_sink(aconnector); - if (!sink) - return stream; } else { sink = aconnector->dc_sink; } @@ -3308,7 +3297,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane, static const struct drm_plane_funcs dm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = drm_plane_cleanup, + .destroy = drm_primary_helper_destroy, .reset = dm_drm_plane_reset, .atomic_duplicate_state = dm_drm_plane_duplicate_state, .atomic_destroy_state = dm_drm_plane_destroy_state, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 978b34a5011c..924a38a1fc44 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -160,8 +160,6 @@ struct amdgpu_dm_connector { struct mutex hpd_lock; bool fake_enable; - - bool mst_connected; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 03601d717fed..d02c32a1039c 100644 --- 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -205,40 +205,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { .atomic_get_property = amdgpu_dm_connector_atomic_get_property }; -void dm_dp_mst_dc_sink_create(struct drm_connector *connector) -{ - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct dc_sink *dc_sink; - struct dc_sink_init_data init_params = { - .link = aconnector->dc_link, - .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; - - /* FIXME none of this is safe. we shouldn't touch aconnector here in - * atomic_check - */ - - /* - * TODO: Need to further figure out why ddc.algo is NULL while MST port exists - */ - if (!aconnector->port || !aconnector->port->aux.ddc.algo) - return; - - ASSERT(aconnector->edid); - - dc_sink = dc_link_add_remote_sink( - aconnector->dc_link, - (uint8_t *)aconnector->edid, - (aconnector->edid->extensions + 1) * EDID_LENGTH, - &init_params); - - dc_sink->priv = aconnector; - aconnector->dc_sink = dc_sink; - - if (aconnector->dc_sink) - amdgpu_dm_update_freesync_caps( - connector, aconnector->edid); -} - static int dm_dp_mst_get_modes(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); @@ -319,12 +285,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector) struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder; struct drm_encoder *encoder; - const struct drm_connector_helper_funcs *connector_funcs = - connector->base.helper_private; - struct drm_encoder *enc_master = - connector_funcs->best_encoder(&connector->base); - DRM_DEBUG_KMS("enc master is %p\n", enc_master); amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); if (!amdgpu_encoder) return NULL; @@ -354,25 +315,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct amdgpu_device *adev = dev->dev_private; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - aconnector = to_amdgpu_dm_connector(connector); - if (aconnector->mst_port == master - && !aconnector->port) { - DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n", - aconnector, connector->base.id, aconnector->mst_port); - - aconnector->port = port; - drm_connector_set_path_property(connector, pathprop); - - drm_connector_list_iter_end(&conn_iter); - aconnector->mst_connected = true; - return &aconnector->base; - } - } - drm_connector_list_iter_end(&conn_iter); aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); if (!aconnector) @@ -421,8 +363,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, */ amdgpu_dm_connector_funcs_reset(connector); - aconnector->mst_connected = true; - DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", aconnector, connector->base.id, aconnector->mst_port); @@ -434,6 +374,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector) { + struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); + struct drm_device *dev = master->base.dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); DRM_INFO("DM_MST: Disabling 
connector: %p [id: %d] [master: %p]\n", @@ -447,7 +390,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, aconnector->dc_sink = NULL; } - aconnector->mst_connected = false; + drm_connector_unregister(connector); + if (adev->mode_info.rfbdev) + drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector); + drm_connector_put(connector); } static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) @@ -458,18 +404,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) drm_kms_helper_hotplug_event(dev); } -static void dm_dp_mst_link_status_reset(struct drm_connector *connector) -{ - mutex_lock(&connector->dev->mode_config.mutex); - drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); - mutex_unlock(&connector->dev->mode_config.mutex); -} - static void dm_dp_mst_register_connector(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); if (adev->mode_info.rfbdev) drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); @@ -477,9 +415,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector) DRM_ERROR("adev->mode_info.rfbdev is NULL\n"); drm_connector_register(connector); - - if (aconnector->mst_connected) - dm_dp_mst_link_status_reset(connector); } static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 8cf51da26657..2da851b40042 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -31,6 +31,5 @@ struct amdgpu_dm_connector; void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector); -void dm_dp_mst_dc_sink_create(struct drm_connector *connector); #endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index fb04a4ad141f..5da2186b3615 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1722,7 +1722,7 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ - offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n", + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ @@ -1734,7 +1734,7 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ - offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n", + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 199527171100..b57fa61b3034 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -169,6 +169,7 @@ struct link_training_settings; struct dc_config { bool gpu_vm_support; bool disable_disp_pll_sharing; + bool fbc_support; }; enum visual_confirm { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index b75ede5f84f7..b459867a05b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1736,7 +1736,12 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx, if (events->force_trigger) value |= 0x1; - value |= 0x84; + if (num_pipes) { + struct dc *dc = pipe_ctx[0]->stream->ctx->dc; + + if (dc->fbc_compressor) + value |= 0x84; + } for (i = 0; i < num_pipes; i++) pipe_ctx[i]->stream_res.tg->funcs-> diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index e3624ca24574..7c9fd9052ee2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -1362,7 +1362,8 @@ static bool construct( pool->base.sw_i2cs[i] = NULL; } - dc->fbc_compressor = dce110_compressor_create(ctx); + if (dc->config.fbc_support) + dc->fbc_compressor = dce110_compressor_create(ctx); if (!underlay_create(ctx, &pool->base)) goto res_create_fail; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 2083c308007c..470d7b89071a 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -133,6 +133,10 @@ enum PP_FEATURE_MASK { PP_AVFS_MASK = 0x40000, }; +enum DC_FEATURE_MASK { + DC_FBC_MASK = 0x1, +}; + /** * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks */ diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index d2e7c0fa96c2..8eb0bb241210 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1325,7 +1325,7 @@ struct atom_smu_info_v3_3 { struct atom_common_table_header table_header; uint8_t smuip_min_ver; uint8_t smuip_max_ver; - uint8_t smu_rsd1; + uint8_t waflclk_ss_mode; uint8_t gpuclk_ss_mode; uint16_t sclk_ss_percentage; uint16_t sclk_ss_rate_10hz; @@ -1355,7 +1355,10 @@ struct atom_smu_info_v3_3 { uint32_t syspll3_1_vco_freq_10khz; uint32_t bootup_fclk_10khz; uint32_t bootup_waflclk_10khz; - uint32_t reserved[3]; + uint32_t smu_info_caps; + uint16_t waflclk_ss_percentage; // in unit of 0.001% + uint16_t smuinitoffset; + uint32_t reserved; }; /* diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 57143d51e3ee..99861f32b1f9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -120,6 +120,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.disable_auto_wattman = 1; data->registry_data.auto_wattman_debug = 0; data->registry_data.auto_wattman_sample_period = 100; + data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD; data->registry_data.auto_wattman_threshold = 50; data->registry_data.gfxoff_controlled_by_driver = 1; data->gfxoff_allowed = false; @@ -829,6 +830,28 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr) return 0; } +static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_DPM_UCLK].enabled) + return smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetUclkFastSwitch, + 1); + + return 0; +} + +static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data 
= + (struct vega20_hwmgr *)(hwmgr->backend); + + return smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetFclkGfxClkRatio, + data->registry_data.fclk_gfxclk_ratio); +} + static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = @@ -1532,6 +1555,16 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "[EnableDPMTasks] Failed to enable all smu features!", return result); + result = vega20_notify_smc_display_change(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to notify smc display change!", + return result); + + result = vega20_send_clock_ratio(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to send clock ratio!", + return result); + /* Initialize UVD/VCE powergating state */ vega20_init_powergate_state(hwmgr); @@ -1972,19 +2005,6 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, return ret; } -static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr, - bool has_disp) -{ - struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); - - if (data->smu_features[GNLD_DPM_UCLK].enabled) - return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetUclkFastSwitch, - has_disp ? 1 : 0); - - return 0; -} - int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, struct pp_display_clock_request *clock_req) { @@ -2044,13 +2064,6 @@ static int vega20_notify_smc_display_config_after_ps_adjustment( struct pp_display_clock_request clock_req; int ret = 0; - if ((hwmgr->display_config->num_display > 1) && - !hwmgr->display_config->multi_monitor_in_sync && - !hwmgr->display_config->nb_pstate_switch_disable) - vega20_notify_smc_display_change(hwmgr, false); - else - vega20_notify_smc_display_change(hwmgr, true); - min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index 56fe6a0d42e8..25faaa5c5b10 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -328,6 +328,7 @@ struct vega20_registry_data { uint8_t disable_auto_wattman; uint32_t auto_wattman_debug; uint32_t auto_wattman_sample_period; + uint32_t fclk_gfxclk_ratio; uint8_t auto_wattman_threshold; uint8_t log_avfs_param; uint8_t enable_enginess; diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h index 45d64a81e945..4f63a736ea0e 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h @@ -105,7 +105,8 @@ #define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B #define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C #define PPSMC_MSG_WaflTest 0x4D -// Unused ID 0x4E to 0x50 +#define PPSMC_MSG_SetFclkGfxClkRatio 0x4E +// Unused ID 0x4F to 0x50 #define PPSMC_MSG_AllowGfxOff 0x51 #define PPSMC_MSG_DisallowGfxOff 0x52 #define PPSMC_MSG_GetPptLimit 0x53 diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index e7c3ed6c9a2e..9b476368aa31 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) * If the GPU managed to complete this jobs fence, the timout is * spurious. Bail out. 
*/ - if (fence_completed(gpu, submit->out_fence->seqno)) + if (dma_fence_is_signaled(submit->out_fence)) return; /* diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 94529aa82339..aef487dd8731 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -164,13 +164,6 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end) return frm; } -static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc) -{ - struct decon_context *ctx = crtc->ctx; - - return decon_get_frame_count(ctx, false); -} - static void decon_setup_trigger(struct decon_context *ctx) { if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG)) @@ -536,7 +529,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = { .disable = decon_disable, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, - .get_vblank_counter = decon_get_vblank_counter, .atomic_begin = decon_atomic_begin, .update_plane = decon_update_plane, .disable_plane = decon_disable_plane, @@ -554,7 +546,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data) int ret; ctx->drm_dev = drm_dev; - drm_dev->max_vblank_count = 0xffffffff; for (win = ctx->first_win; win < WINDOWS_NR; win++) { ctx->configs[win].pixel_formats = decon_formats; diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index eea90251808f..2696289ecc78 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -162,16 +162,6 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc) exynos_crtc->ops->disable_vblank(exynos_crtc); } -static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc) -{ - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - - if (exynos_crtc->ops->get_vblank_counter) - return exynos_crtc->ops->get_vblank_counter(exynos_crtc); - - return 0; -} - static const struct drm_crtc_funcs exynos_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, @@ -181,7 +171,6 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = { .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, .enable_vblank = exynos_drm_crtc_enable_vblank, .disable_vblank = exynos_drm_crtc_disable_vblank, - .get_vblank_counter = exynos_drm_crtc_get_vblank_counter, }; struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index ec9604f1272b..5e61e707f955 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -135,7 +135,6 @@ struct exynos_drm_crtc_ops { void (*disable)(struct exynos_drm_crtc *crtc); int (*enable_vblank)(struct exynos_drm_crtc *crtc); void (*disable_vblank)(struct exynos_drm_crtc *crtc); - u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc); enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc, const struct drm_display_mode *mode); bool (*mode_fixup)(struct exynos_drm_crtc *crtc, diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 07af7758066d..d81e62ae286a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -14,6 +14,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_panel.h> #include 
<drm/drm_atomic_helper.h> @@ -1474,12 +1475,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder) { struct exynos_dsi *dsi = encoder_to_dsi(encoder); struct drm_connector *connector = &dsi->connector; + struct drm_device *drm = encoder->dev; int ret; connector->polled = DRM_CONNECTOR_POLL_HPD; - ret = drm_connector_init(encoder->dev, connector, - &exynos_dsi_connector_funcs, + ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); if (ret) { DRM_ERROR("Failed to initialize connector with drm\n"); @@ -1489,7 +1490,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder) connector->status = connector_status_disconnected; drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs); drm_connector_attach_encoder(connector, encoder); + if (!drm->registered) + return 0; + connector->funcs->reset(connector); + drm_fb_helper_add_one_connector(drm->fb_helper, connector); + drm_connector_register(connector); return 0; } @@ -1527,7 +1533,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host, } dsi->panel = of_drm_find_panel(device->dev.of_node); - if (dsi->panel) { + if (IS_ERR(dsi->panel)) { + dsi->panel = NULL; + } else { drm_panel_attach(dsi->panel, &dsi->connector); dsi->connector.status = connector_status_connected; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 918dd2c82209..01d182289efa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -192,7 +192,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev) struct drm_fb_helper *helper; int ret; - if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) + if (!dev->mode_config.num_crtc) return 0; fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 2402395a068d..58e166effa45 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1905,7 +1905,6 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) vgpu_free_mm(mm); return ERR_PTR(-ENOMEM); } - mm->ggtt_mm.last_partial_off = -1UL; return mm; } @@ -1930,7 +1929,6 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) invalidate_ppgtt_mm(mm); } else { vfree(mm->ggtt_mm.virtual_ggtt); - mm->ggtt_mm.last_partial_off = -1UL; } vgpu_free_mm(mm); @@ -2168,6 +2166,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, struct intel_gvt_gtt_entry e, m; dma_addr_t dma_addr; int ret; + struct intel_gvt_partial_pte *partial_pte, *pos, *n; + bool partial_update = false; if (bytes != 4 && bytes != 8) return -EINVAL; @@ -2178,68 +2178,57 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, if (!vgpu_gmadr_is_valid(vgpu, gma)) return 0; - ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); - + e.type = GTT_TYPE_GGTT_PTE; memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, bytes); /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes - * write, we assume the two 4 bytes writes are consecutive. - * Otherwise, we abort and report error + * write, save the first 4 bytes in a list and update virtual + * PTE. Only update shadow PTE when the second 4 bytes comes. 
*/ if (bytes < info->gtt_entry_size) { - if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) { - /* the first partial part*/ - ggtt_mm->ggtt_mm.last_partial_off = off; - ggtt_mm->ggtt_mm.last_partial_data = e.val64; - return 0; - } else if ((g_gtt_index == - (ggtt_mm->ggtt_mm.last_partial_off >> - info->gtt_entry_size_shift)) && - (off != ggtt_mm->ggtt_mm.last_partial_off)) { - /* the second partial part */ - - int last_off = ggtt_mm->ggtt_mm.last_partial_off & - (info->gtt_entry_size - 1); - - memcpy((void *)&e.val64 + last_off, - (void *)&ggtt_mm->ggtt_mm.last_partial_data + - last_off, bytes); - - ggtt_mm->ggtt_mm.last_partial_off = -1UL; - } else { - int last_offset; - - gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n", - ggtt_mm->ggtt_mm.last_partial_off, off, - bytes, info->gtt_entry_size); - - /* set host ggtt entry to scratch page and clear - * virtual ggtt entry as not present for last - * partially write offset - */ - last_offset = ggtt_mm->ggtt_mm.last_partial_off & - (~(info->gtt_entry_size - 1)); - - ggtt_get_host_entry(ggtt_mm, &m, last_offset); - ggtt_invalidate_pte(vgpu, &m); - ops->set_pfn(&m, gvt->gtt.scratch_mfn); - ops->clear_present(&m); - ggtt_set_host_entry(ggtt_mm, &m, last_offset); - ggtt_invalidate(gvt->dev_priv); - - ggtt_get_guest_entry(ggtt_mm, &e, last_offset); - ops->clear_present(&e); - ggtt_set_guest_entry(ggtt_mm, &e, last_offset); - - ggtt_mm->ggtt_mm.last_partial_off = off; - ggtt_mm->ggtt_mm.last_partial_data = e.val64; + bool found = false; + + list_for_each_entry_safe(pos, n, + &ggtt_mm->ggtt_mm.partial_pte_list, list) { + if (g_gtt_index == pos->offset >> + info->gtt_entry_size_shift) { + if (off != pos->offset) { + /* the second partial part*/ + int last_off = pos->offset & + (info->gtt_entry_size - 1); + + memcpy((void *)&e.val64 + last_off, + (void *)&pos->data + last_off, + bytes); + + list_del(&pos->list); + kfree(pos); + found = true; + break; + } + + /* update of the first partial part */ + pos->data = e.val64; + ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); + return 0; + } + } - return 0; + if (!found) { + /* the first partial part */ + partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL); + if (!partial_pte) + return -ENOMEM; + partial_pte->offset = off; + partial_pte->data = e.val64; + list_add_tail(&partial_pte->list, + &ggtt_mm->ggtt_mm.partial_pte_list); + partial_update = true; } } - if (ops->test_present(&e)) { + if (!partial_update && (ops->test_present(&e))) { gfn = ops->get_pfn(&e); m = e; @@ -2263,16 +2252,18 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, } else ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); } else { - ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); - ggtt_invalidate_pte(vgpu, &m); ops->set_pfn(&m, gvt->gtt.scratch_mfn); ops->clear_present(&m); } out: + ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); + + ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index); + ggtt_invalidate_pte(vgpu, &e); + ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); ggtt_invalidate(gvt->dev_priv); - ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); return 0; } @@ -2430,6 +2421,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) intel_vgpu_reset_ggtt(vgpu, false); + INIT_LIST_HEAD(>t->ggtt_mm->ggtt_mm.partial_pte_list); + return create_scratch_page_tree(vgpu); } @@ -2454,6 +2447,14 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu) static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu) { + 
struct intel_gvt_partial_pte *pos; + + list_for_each_entry(pos, + &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) { + gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n", + pos->offset, pos->data); + kfree(pos); + } intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm); vgpu->gtt.ggtt_mm = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 7a9b36176efb..d8cb04cc946d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -35,7 +35,6 @@ #define _GVT_GTT_H_ #define I915_GTT_PAGE_SHIFT 12 -#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1)) struct intel_vgpu_mm; @@ -133,6 +132,12 @@ enum intel_gvt_mm_type { #define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES +struct intel_gvt_partial_pte { + unsigned long offset; + u64 data; + struct list_head list; +}; + struct intel_vgpu_mm { enum intel_gvt_mm_type type; struct intel_vgpu *vgpu; @@ -157,8 +162,7 @@ struct intel_vgpu_mm { } ppgtt_mm; struct { void *virtual_ggtt; - unsigned long last_partial_off; - u64 last_partial_data; + struct list_head partial_pte_list; } ggtt_mm; }; }; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 90f50f67909a..aa280bb07125 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1609,7 +1609,7 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu, return 0; } -static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu, +static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { vgpu_vreg(vgpu, offset) = 0; @@ -2607,6 +2607,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + + MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); + MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); return 0; } @@ -3205,9 +3208,6 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); - MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write); - MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write); - MMIO_D(RC6_CTX_BASE, D_BXT); MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT); diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 10e63eea5492..36a5147cd01e 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -131,7 +131,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ - {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */ + {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 44e2c0f5ec50..ffdbbac4400e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1175,8 +1175,6 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv) return -EINVAL; } - dram_info->valid_dimm = true; - /* * If any of the channel is single rank channel, worst case output * will be same as if single rank memory, so consider single rank @@ -1193,8 +1191,7 @@ 
skl_dram_get_channels_info(struct drm_i915_private *dev_priv) return -EINVAL; } - if (ch0.is_16gb_dimm || ch1.is_16gb_dimm) - dram_info->is_16gb_dimm = true; + dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm; dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0, val_ch1, @@ -1314,7 +1311,6 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv) return -EINVAL; } - dram_info->valid_dimm = true; dram_info->valid = true; return 0; } @@ -1327,12 +1323,17 @@ intel_get_dram_info(struct drm_i915_private *dev_priv) int ret; dram_info->valid = false; - dram_info->valid_dimm = false; - dram_info->is_16gb_dimm = false; dram_info->rank = I915_DRAM_RANK_INVALID; dram_info->bandwidth_kbps = 0; dram_info->num_channels = 0; + /* + * Assume 16Gb DIMMs are present until proven otherwise. + * This is only used for the level 0 watermark latency + * w/a which does not apply to bxt/glk. + */ + dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv); + if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv)) return; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8624b4bdc242..9102571e9692 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1948,7 +1948,6 @@ struct drm_i915_private { struct dram_info { bool valid; - bool valid_dimm; bool is_16gb_dimm; u8 num_channels; enum dram_rank { diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 09187286d346..1aaccbe7e1de 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -460,7 +460,7 @@ eb_validate_vma(struct i915_execbuffer *eb, * any non-page-aligned or non-canonical addresses. */ if (unlikely(entry->flags & EXEC_OBJECT_PINNED && - entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK))) + entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK))) return -EINVAL; /* pad_to_size was once a reserved field, so sanitize it */ diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 56c7f8637311..47c302543799 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1757,7 +1757,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) if (i == 4) continue; - seq_printf(m, "\t\t(%03d, %04d) %08lx: ", + seq_printf(m, "\t\t(%03d, %04d) %08llx: ", pde, pte, (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE); for (i = 0; i < 4; i++) { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 7e2af5f4f39b..28039290655c 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -42,13 +42,15 @@ #include "i915_selftest.h" #include "i915_timeline.h" -#define I915_GTT_PAGE_SIZE_4K BIT(12) -#define I915_GTT_PAGE_SIZE_64K BIT(16) -#define I915_GTT_PAGE_SIZE_2M BIT(21) +#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12) +#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16) +#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21) #define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K #define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M +#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE + #define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE #define I915_FENCE_REG_NONE -1 @@ -659,20 +661,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, u64 start, u64 end, unsigned int flags); /* Flags used by pin/bind&friends. 
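The I915_GTT_PAGE_MASK change above replaces the removed ~(I915_GTT_PAGE_SIZE - 1) definition with -I915_GTT_PAGE_SIZE; combined with the BIT() to BIT_ULL() switch this keeps the mask 64-bit wide, which matters for the execbuffer offset check that now masks with it instead of the CPU PAGE_MASK. A quick standalone check of the identity being relied on, using a hypothetical PAGE_SZ name rather than the driver macro:

#include <assert.h>
#include <stdint.h>

#define PAGE_SZ (UINT64_C(1) << 12)

/* For a power-of-two size, unsigned negation equals ~(size - 1). */
static_assert(-PAGE_SZ == ~(PAGE_SZ - 1), "negation and ~(size - 1) agree");
static_assert(-PAGE_SZ == 0xfffffffffffff000ULL, "mask keeps the upper 32 bits set");

With the old 32-bit BIT() definitions the same expression would have lost the upper bits on a 32-bit unsigned long, which is what the seq_printf/pr_err format-specifier fixes elsewhere in this series also reflect.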
*/ -#define PIN_NONBLOCK BIT(0) -#define PIN_MAPPABLE BIT(1) -#define PIN_ZONE_4G BIT(2) -#define PIN_NONFAULT BIT(3) -#define PIN_NOEVICT BIT(4) - -#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */ -#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */ -#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */ -#define PIN_UPDATE BIT(8) - -#define PIN_HIGH BIT(9) -#define PIN_OFFSET_BIAS BIT(10) -#define PIN_OFFSET_FIXED BIT(11) +#define PIN_NONBLOCK BIT_ULL(0) +#define PIN_MAPPABLE BIT_ULL(1) +#define PIN_ZONE_4G BIT_ULL(2) +#define PIN_NONFAULT BIT_ULL(3) +#define PIN_NOEVICT BIT_ULL(4) + +#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */ +#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */ +#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */ +#define PIN_UPDATE BIT_ULL(8) + +#define PIN_HIGH BIT_ULL(9) +#define PIN_OFFSET_BIAS BIT_ULL(10) +#define PIN_OFFSET_FIXED BIT_ULL(11) #define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) #endif diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7c491ea3d052..e31c27e45734 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2095,8 +2095,12 @@ enum i915_power_well_id { /* ICL PHY DFLEX registers */ #define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0) -#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n))) -#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n))) +#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port))) /* BXT PHY Ref registers */ #define _PORT_REF_DW3_A 0x16218C @@ -4593,12 +4597,12 @@ enum { #define DRM_DIP_ENABLE (1 << 28) #define PSR_VSC_BIT_7_SET (1 << 27) -#define VSC_SELECT_MASK (0x3 << 26) -#define VSC_SELECT_SHIFT 26 -#define VSC_DIP_HW_HEA_DATA (0 << 26) -#define VSC_DIP_HW_HEA_SW_DATA (1 << 26) -#define VSC_DIP_HW_DATA_SW_HEA (2 << 26) -#define VSC_DIP_SW_HEA_DATA (3 << 26) +#define VSC_SELECT_MASK (0x3 << 25) +#define VSC_SELECT_SHIFT 25 +#define VSC_DIP_HW_HEA_DATA (0 << 25) +#define VSC_DIP_HW_HEA_SW_DATA (1 << 25) +#define VSC_DIP_HW_DATA_SW_HEA (2 << 25) +#define VSC_DIP_SW_HEA_DATA (3 << 25) #define VDIP_ENABLE_PPS (1 << 24) /* Panel power sequencing */ diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 769f3f586661..ee3ca2de983b 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -144,6 +144,9 @@ static const struct { /* HDMI N/CTS table */ #define TMDS_297M 297000 #define TMDS_296M 296703 +#define TMDS_594M 594000 +#define TMDS_593M 593407 + static const struct { int sample_rate; int clock; @@ -164,6 +167,20 @@ static const struct { { 176400, TMDS_297M, 18816, 247500 }, { 192000, TMDS_296M, 23296, 281250 }, { 192000, TMDS_297M, 20480, 247500 }, + { 44100, TMDS_593M, 8918, 937500 }, + { 44100, TMDS_594M, 9408, 990000 }, + { 48000, TMDS_593M, 5824, 562500 }, + { 48000, TMDS_594M, 6144, 594000 }, + { 32000, TMDS_593M, 5824, 843750 }, + { 32000, TMDS_594M, 3072, 445500 }, + { 88200, TMDS_593M, 17836, 937500 }, + { 88200, TMDS_594M, 18816, 990000 }, + { 96000, TMDS_593M, 11648, 562500 }, + { 96000, TMDS_594M, 12288, 594000 }, + { 176400, TMDS_593M, 35672, 937500 }, + { 176400, TMDS_594M, 37632, 990000 }, + { 192000, TMDS_593M, 23296, 562500 }, + { 
192000, TMDS_594M, 24576, 594000 }, }; /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 29075c763428..8d74276029e6 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -2138,16 +2138,8 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv, static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv, int pixel_rate) { - if (INTEL_GEN(dev_priv) >= 10) + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) return DIV_ROUND_UP(pixel_rate, 2); - else if (IS_GEMINILAKE(dev_priv)) - /* - * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk - * as a temporary workaround. Use a higher cdclk instead. (Note that - * intel_compute_max_dotclk() limits the max pixel clock to 99% of max - * cdclk.) - */ - return DIV_ROUND_UP(pixel_rate * 100, 2 * 99); else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return pixel_rate; @@ -2543,14 +2535,8 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) { int max_cdclk_freq = dev_priv->max_cdclk_freq; - if (INTEL_GEN(dev_priv) >= 10) + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) return 2 * max_cdclk_freq; - else if (IS_GEMINILAKE(dev_priv)) - /* - * FIXME: Limiting to 99% as a temporary workaround. See - * intel_min_cdclk() for details. - */ - return 2 * max_cdclk_freq * 99 / 100; else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return max_cdclk_freq; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9741cc419e1b..23d8008a93bb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12768,17 +12768,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_check_cpu_fifo_underruns(dev_priv); intel_check_pch_fifo_underruns(dev_priv); - if (!new_crtc_state->active) { - /* - * Make sure we don't call initial_watermarks - * for ILK-style watermark updates. - * - * No clue what this is supposed to achieve. 
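Stepping back to the intel_audio.c hunk above: the new N/CTS rows for the 593.407 MHz and 594 MHz TMDS clocks follow the HDMI audio clock regeneration relation 128 * fs = f_tmds * N / CTS. A quick standalone check (plain userspace C, not driver code; note the driver table stores the clock in kHz while Hz is used here) that the added values reproduce the advertised sample rates:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* fs = f_tmds * N / (128 * CTS), rows for the 594 MHz TMDS clock */
	uint64_t tmds = 594000000ULL;

	printf("%llu\n", (unsigned long long)(tmds * 6144 / (128 * 594000ULL))); /* 48000 */
	printf("%llu\n", (unsigned long long)(tmds * 9408 / (128 * 990000ULL))); /* 44100 */
	return 0;
}

The same arithmetic checks out for the 593.407 MHz (594 MHz / 1.001) entries, which is why both clock variants need their own N/CTS pairs.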
- */ - if (INTEL_GEN(dev_priv) >= 9) - dev_priv->display.initial_watermarks(intel_state, - to_intel_crtc_state(new_crtc_state)); - } + /* FIXME unify this for all platforms */ + if (!new_crtc_state->active && + !HAS_GMCH_DISPLAY(dev_priv) && + dev_priv->display.initial_watermarks) + dev_priv->display.initial_watermarks(intel_state, + to_intel_crtc_state(new_crtc_state)); } } @@ -14646,7 +14641,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, fb->height < SKL_MIN_YUV_420_SRC_H || (fb->width % 4) != 0 || (fb->height % 4) != 0)) { DRM_DEBUG_KMS("src dimensions not correct for NV12\n"); - return -EINVAL; + goto err; } for (i = 0; i < fb->format->num_planes; i++) { diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index cdf19553ffac..5d5336fbe7b0 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c @@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) lpe_audio_platdev_destroy(dev_priv); irq_free_desc(dev_priv->lpe_audio.irq); -} + dev_priv->lpe_audio.irq = -1; + dev_priv->lpe_audio.platdev = NULL; +} /** * intel_lpe_audio_notify() - notify lpe audio event diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 1db9b8328275..245f0022bcfd 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2881,8 +2881,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, * any underrun. If not able to get Dimm info assume 16GB dimm * to avoid any underrun. */ - if (!dev_priv->dram_info.valid_dimm || - dev_priv->dram_info.is_16gb_dimm) + if (dev_priv->dram_info.is_16gb_dimm) wm[0] += 1; } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 8d03f64eabd7..5c22f2c8d4cf 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -551,7 +551,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) err = igt_check_page_sizes(vma); if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) { - pr_err("page_sizes.gtt=%u, expected %lu\n", + pr_err("page_sizes.gtt=%u, expected %llu\n", vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K); err = -EINVAL; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 8e2e269db97e..127d81513671 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); if (vma->node.start != total || vma->node.size != 2*I915_GTT_PAGE_SIZE) { - pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", + pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", vma->node.start, vma->node.size, total, 2*I915_GTT_PAGE_SIZE); err = -EINVAL; @@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); if (vma->node.start != total || vma->node.size != 2*I915_GTT_PAGE_SIZE) { - pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", + pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", vma->node.start, vma->node.size, total, 2*I915_GTT_PAGE_SIZE); err = -EINVAL; @@ -1430,7 +1430,7 @@ 
static int igt_gtt_reserve(void *arg) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); if (vma->node.start != offset || vma->node.size != 2*I915_GTT_PAGE_SIZE) { - pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", + pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", vma->node.start, vma->node.size, offset, 2*I915_GTT_PAGE_SIZE); err = -EINVAL; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index d4e98e5876bc..0fb4718ef0df 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -902,26 +902,6 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) return ret; } -/* Get the list of RPMh voltage levels from cmd-db */ -static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size) -{ - u32 len = cmd_db_read_aux_data_len(id); - - if (!len) - return 0; - - if (WARN_ON(len > size)) - return -EINVAL; - - cmd_db_read_aux_data(id, vals, len); - - /* - * The data comes back as an array of unsigned shorts so adjust the - * count accordingly - */ - return len >> 1; -} - /* Return the 'arc-level' for the given frequency */ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq) { @@ -949,11 +929,30 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq) } static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, - unsigned long *freqs, int freqs_count, - u16 *pri, int pri_count, - u16 *sec, int sec_count) + unsigned long *freqs, int freqs_count, const char *id) { int i, j; + const u16 *pri, *sec; + size_t pri_count, sec_count; + + pri = cmd_db_read_aux_data(id, &pri_count); + if (IS_ERR(pri)) + return PTR_ERR(pri); + /* + * The data comes back as an array of unsigned shorts so adjust the + * count accordingly + */ + pri_count >>= 1; + if (!pri_count) + return -EINVAL; + + sec = cmd_db_read_aux_data("mx.lvl", &sec_count); + if (IS_ERR(sec)) + return PTR_ERR(sec); + + sec_count >>= 1; + if (!sec_count) + return -EINVAL; /* Construct a vote for each frequency */ for (i = 0; i < freqs_count; i++) { @@ -1012,25 +1011,15 @@ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct msm_gpu *gpu = &adreno_gpu->base; - - u16 gx[16], cx[16], mx[16]; - u32 gxcount, cxcount, mxcount; int ret; - /* Get the list of available voltage levels for each component */ - gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx)); - cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx)); - mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx)); - /* Build the GX votes */ ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, - gmu->gpu_freqs, gmu->nr_gpu_freqs, - gx, gxcount, mx, mxcount); + gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); /* Build the CX votes */ ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, - gmu->gmu_freqs, gmu->nr_gmu_freqs, - cx, cxcount, mx, mxcount); + gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); return ret; } diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c index af7dcb6da351..e7eb0d1e17be 100644 --- a/drivers/gpu/drm/sun4i/sun4i_lvds.c +++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c @@ -75,7 +75,7 @@ static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder) DRM_DEBUG_DRIVER("Enabling LVDS output\n"); - if (!IS_ERR(tcon->panel)) { + if (tcon->panel) 
{ drm_panel_prepare(tcon->panel); drm_panel_enable(tcon->panel); } @@ -88,7 +88,7 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder) DRM_DEBUG_DRIVER("Disabling LVDS output\n"); - if (!IS_ERR(tcon->panel)) { + if (tcon->panel) { drm_panel_disable(tcon->panel); drm_panel_unprepare(tcon->panel); } diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index bf068da6b12e..f4a22689eb54 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -135,7 +135,7 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder) DRM_DEBUG_DRIVER("Enabling RGB output\n"); - if (!IS_ERR(tcon->panel)) { + if (tcon->panel) { drm_panel_prepare(tcon->panel); drm_panel_enable(tcon->panel); } @@ -148,7 +148,7 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) DRM_DEBUG_DRIVER("Disabling RGB output\n"); - if (!IS_ERR(tcon->panel)) { + if (tcon->panel) { drm_panel_disable(tcon->panel); drm_panel_unprepare(tcon->panel); } diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index c78cd35a1294..f949287d926c 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -491,7 +491,8 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, sun4i_tcon0_mode_set_common(tcon, mode); /* Set dithering if needed */ - sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector); + if (tcon->panel) + sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector); /* Adjust clock delay */ clk_delay = sun4i_tcon_get_clk_delay(mode, 0); @@ -555,7 +556,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, * Following code is a way to avoid quirks all around TCON * and DOTCLOCK drivers. */ - if (!IS_ERR(tcon->panel)) { + if (tcon->panel) { struct drm_panel *panel = tcon->panel; struct drm_connector *connector = panel->connector; struct drm_display_info display_info = connector->display_info; diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index cf2a18571d48..a132c37d7334 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -380,6 +380,9 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev, mutex_unlock(&vgasr_mutex); return -EINVAL; } + /* notify if GPU has been already bound */ + if (ops->gpu_bound) + ops->gpu_bound(pdev, id); } mutex_unlock(&vgasr_mutex); diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index aec253b44156..3cd7229b6e54 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -660,6 +660,20 @@ exit: return ret; } +static int alps_sp_open(struct input_dev *dev) +{ + struct hid_device *hid = input_get_drvdata(dev); + + return hid_hw_open(hid); +} + +static void alps_sp_close(struct input_dev *dev) +{ + struct hid_device *hid = input_get_drvdata(dev); + + hid_hw_close(hid); +} + static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) { struct alps_dev *data = hid_get_drvdata(hdev); @@ -733,6 +747,10 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) input2->id.version = input->id.version; input2->dev.parent = input->dev.parent; + input_set_drvdata(input2, hdev); + input2->open = alps_sp_open; + input2->close = alps_sp_close; + __set_bit(EV_KEY, input2->evbit); data->sp_btn_cnt = (data->sp_btn_info & 0x0F); for (i = 0; i < data->sp_btn_cnt; i++) diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index dc6d6477e961..a1fa2fc8c9b5 100644 --- a/drivers/hid/hid-asus.c +++ 
b/drivers/hid/hid-asus.c @@ -359,6 +359,9 @@ static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev) u32 value; int ret; + if (!IS_ENABLED(CONFIG_ASUS_WMI)) + return false; + ret = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, &value); hid_dbg(hdev, "WMI backlight check: rc %d value %x", ret, value); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index f63489c882bb..c0d668944dbe 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -927,6 +927,9 @@ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 +#define I2C_VENDOR_ID_RAYDIUM 0x2386 +#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33 + #define USB_VENDOR_ID_RAZER 0x1532 #define USB_DEVICE_ID_RAZER_BLADE_14 0x011D diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 52c3b01917e7..8237dd86fb17 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -107,7 +107,6 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT }, - { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 4aab96cf0818..3cde7c1b9c33 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -49,6 +49,7 @@ #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) #define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2) +#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3) /* flags */ #define I2C_HID_STARTED 0 @@ -158,6 +159,8 @@ struct i2c_hid { bool irq_wake_enabled; struct mutex reset_lock; + + unsigned long sleep_delay; }; static const struct i2c_hid_quirks { @@ -172,6 +175,8 @@ static const struct i2c_hid_quirks { { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, I2C_HID_QUIRK_NO_IRQ_AFTER_RESET | I2C_HID_QUIRK_NO_RUNTIME_PM }, + { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33, + I2C_HID_QUIRK_DELAY_AFTER_SLEEP }, { 0, 0 } }; @@ -387,6 +392,7 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) { struct i2c_hid *ihid = i2c_get_clientdata(client); int ret; + unsigned long now, delay; i2c_hid_dbg(ihid, "%s\n", __func__); @@ -404,9 +410,22 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) goto set_pwr_exit; } + if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP && + power_state == I2C_HID_PWR_ON) { + now = jiffies; + if (time_after(ihid->sleep_delay, now)) { + delay = jiffies_to_usecs(ihid->sleep_delay - now); + usleep_range(delay, delay + 1); + } + } + ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state, 0, NULL, 0, NULL, 0); + if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP && + power_state == I2C_HID_PWR_SLEEP) + ihid->sleep_delay = jiffies + msecs_to_jiffies(20); + if (ret) dev_err(&client->dev, "failed to change power setting.\n"); diff --git 
a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c index cac262a912c1..89f2976f9c53 100644 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c @@ -331,6 +331,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { .driver_data = (void *)&sipodev_desc }, { + .ident = "Direkt-Tek DTLAPY133-1", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY133-1"), + }, + .driver_data = (void *)&sipodev_desc + }, + { .ident = "Mediacom Flexbook Edge 11", .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"), diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 23872d08308c..a746017fac17 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -512,14 +512,24 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, if (cmd == HIDIOCGCOLLECTIONINDEX) { if (uref->usage_index >= field->maxusage) goto inval; + uref->usage_index = + array_index_nospec(uref->usage_index, + field->maxusage); } else if (uref->usage_index >= field->report_count) goto inval; } - if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && - (uref_multi->num_values > HID_MAX_MULTI_USAGES || - uref->usage_index + uref_multi->num_values > field->report_count)) - goto inval; + if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) { + if (uref_multi->num_values > HID_MAX_MULTI_USAGES || + uref->usage_index + uref_multi->num_values > + field->report_count) + goto inval; + + uref->usage_index = + array_index_nospec(uref->usage_index, + field->report_count - + uref_multi->num_values); + } switch (cmd) { case HIDIOCGUSAGE: diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 975c95169884..84f61cec6319 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -649,8 +649,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, if (info[i]->config[j] & HWMON_T_INPUT) { err = hwmon_thermal_add_sensor(dev, hwdev, j); - if (err) - goto free_device; + if (err) { + device_unregister(hdev); + goto ida_remove; + } } } } @@ -658,8 +660,6 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, return hdev; -free_device: - device_unregister(hdev); free_hwmon: kfree(hwdev); ida_remove: diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c index 0ccca87f5271..293dd1c6c7b3 100644 --- a/drivers/hwmon/ibmpowernv.c +++ b/drivers/hwmon/ibmpowernv.c @@ -181,7 +181,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *devattr, return sprintf(buf, "%s\n", sdata->label); } -static int __init get_logical_cpu(int hwcpu) +static int get_logical_cpu(int hwcpu) { int cpu; @@ -192,9 +192,8 @@ static int __init get_logical_cpu(int hwcpu) return -ENOENT; } -static void __init make_sensor_label(struct device_node *np, - struct sensor_data *sdata, - const char *label) +static void make_sensor_label(struct device_node *np, + struct sensor_data *sdata, const char *label) { u32 id; size_t n; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 56ccb1ea7da5..f2c681971201 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -224,6 +224,15 @@ config I2C_NFORCE2_S4985 This driver can also be built as a module. If so, the module will be called i2c-nforce2-s4985. 
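The hiddev change above is the usual Spectre-v1 hardening pattern: a user-controlled index is first bounds-checked and then clamped with array_index_nospec() so that speculatively executed loads cannot use an out-of-range value. A minimal sketch of the pattern with hypothetical names (lookup_usage, MAX_USAGES), not the hiddev code itself:

#include <linux/nospec.h>
#include <linux/errno.h>

#define MAX_USAGES 1024

static int lookup_usage(const int *usages, unsigned int index, int *out)
{
	if (index >= MAX_USAGES)
		return -EINVAL;

	/* clamp the index under speculation before it addresses memory */
	index = array_index_nospec(index, MAX_USAGES);
	*out = usages[index];
	return 0;
}

For HIDIOCGUSAGES/HIDIOCSUSAGES the patch clamps against field->report_count - uref_multi->num_values, since the index is later used as the base of a multi-element access.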
+config I2C_NVIDIA_GPU + tristate "NVIDIA GPU I2C controller" + depends on PCI + help + If you say yes to this option, support will be included for the + NVIDIA GPU I2C controller which is used to communicate with the GPU's + Type-C controller. This driver can also be built as a module called + i2c-nvidia-gpu. + config I2C_SIS5595 tristate "SiS 5595" depends on PCI @@ -752,7 +761,7 @@ config I2C_OCORES config I2C_OMAP tristate "OMAP I2C adapter" - depends on ARCH_OMAP + depends on ARCH_OMAP || ARCH_K3 default y if MACH_OMAP_H3 || MACH_OMAP_OSK help If you say yes to this option, support will be included for the diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 18b26af82b1c..5f0cb6915969 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_I2C_ISCH) += i2c-isch.o obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o obj-$(CONFIG_I2C_NFORCE2_S4985) += i2c-nforce2-s4985.o +obj-$(CONFIG_I2C_NVIDIA_GPU) += i2c-nvidia-gpu.o obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c new file mode 100644 index 000000000000..8822357bca0c --- /dev/null +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Nvidia GPU I2C controller Driver + * + * Copyright (C) 2018 NVIDIA Corporation. All rights reserved. + * Author: Ajay Gupta <ajayg@nvidia.com> + */ +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> + +#include <asm/unaligned.h> + +/* I2C definitions */ +#define I2C_MST_CNTL 0x00 +#define I2C_MST_CNTL_GEN_START BIT(0) +#define I2C_MST_CNTL_GEN_STOP BIT(1) +#define I2C_MST_CNTL_CMD_READ (1 << 2) +#define I2C_MST_CNTL_CMD_WRITE (2 << 2) +#define I2C_MST_CNTL_BURST_SIZE_SHIFT 6 +#define I2C_MST_CNTL_GEN_NACK BIT(28) +#define I2C_MST_CNTL_STATUS GENMASK(30, 29) +#define I2C_MST_CNTL_STATUS_OKAY (0 << 29) +#define I2C_MST_CNTL_STATUS_NO_ACK (1 << 29) +#define I2C_MST_CNTL_STATUS_TIMEOUT (2 << 29) +#define I2C_MST_CNTL_STATUS_BUS_BUSY (3 << 29) +#define I2C_MST_CNTL_CYCLE_TRIGGER BIT(31) + +#define I2C_MST_ADDR 0x04 + +#define I2C_MST_I2C0_TIMING 0x08 +#define I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ 0x10e +#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT 16 +#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX 255 +#define I2C_MST_I2C0_TIMING_TIMEOUT_CHECK BIT(24) + +#define I2C_MST_DATA 0x0c + +#define I2C_MST_HYBRID_PADCTL 0x20 +#define I2C_MST_HYBRID_PADCTL_MODE_I2C BIT(0) +#define I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV BIT(14) +#define I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV BIT(15) + +struct gpu_i2c_dev { + struct device *dev; + void __iomem *regs; + struct i2c_adapter adapter; + struct i2c_board_info *gpu_ccgx_ucsi; +}; + +static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd) +{ + u32 val; + + /* enable I2C */ + val = readl(i2cd->regs + I2C_MST_HYBRID_PADCTL); + val |= I2C_MST_HYBRID_PADCTL_MODE_I2C | + I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV | + I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV; + writel(val, i2cd->regs + I2C_MST_HYBRID_PADCTL); + + /* enable 100KHZ mode */ + val = I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ; + val |= (I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX + << I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT); + val |= 
I2C_MST_I2C0_TIMING_TIMEOUT_CHECK; + writel(val, i2cd->regs + I2C_MST_I2C0_TIMING); +} + +static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd) +{ + unsigned long target = jiffies + msecs_to_jiffies(1000); + u32 val; + + do { + val = readl(i2cd->regs + I2C_MST_CNTL); + if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER)) + break; + if ((val & I2C_MST_CNTL_STATUS) != + I2C_MST_CNTL_STATUS_BUS_BUSY) + break; + usleep_range(500, 600); + } while (time_is_after_jiffies(target)); + + if (time_is_before_jiffies(target)) { + dev_err(i2cd->dev, "i2c timeout error %x\n", val); + return -ETIME; + } + + val = readl(i2cd->regs + I2C_MST_CNTL); + switch (val & I2C_MST_CNTL_STATUS) { + case I2C_MST_CNTL_STATUS_OKAY: + return 0; + case I2C_MST_CNTL_STATUS_NO_ACK: + return -EIO; + case I2C_MST_CNTL_STATUS_TIMEOUT: + return -ETIME; + default: + return 0; + } +} + +static int gpu_i2c_read(struct gpu_i2c_dev *i2cd, u8 *data, u16 len) +{ + int status; + u32 val; + + val = I2C_MST_CNTL_GEN_START | I2C_MST_CNTL_CMD_READ | + (len << I2C_MST_CNTL_BURST_SIZE_SHIFT) | + I2C_MST_CNTL_CYCLE_TRIGGER | I2C_MST_CNTL_GEN_NACK; + writel(val, i2cd->regs + I2C_MST_CNTL); + + status = gpu_i2c_check_status(i2cd); + if (status < 0) + return status; + + val = readl(i2cd->regs + I2C_MST_DATA); + switch (len) { + case 1: + data[0] = val; + break; + case 2: + put_unaligned_be16(val, data); + break; + case 3: + put_unaligned_be16(val >> 8, data); + data[2] = val; + break; + case 4: + put_unaligned_be32(val, data); + break; + default: + break; + } + return status; +} + +static int gpu_i2c_start(struct gpu_i2c_dev *i2cd) +{ + writel(I2C_MST_CNTL_GEN_START, i2cd->regs + I2C_MST_CNTL); + return gpu_i2c_check_status(i2cd); +} + +static int gpu_i2c_stop(struct gpu_i2c_dev *i2cd) +{ + writel(I2C_MST_CNTL_GEN_STOP, i2cd->regs + I2C_MST_CNTL); + return gpu_i2c_check_status(i2cd); +} + +static int gpu_i2c_write(struct gpu_i2c_dev *i2cd, u8 data) +{ + u32 val; + + writel(data, i2cd->regs + I2C_MST_DATA); + + val = I2C_MST_CNTL_CMD_WRITE | (1 << I2C_MST_CNTL_BURST_SIZE_SHIFT); + writel(val, i2cd->regs + I2C_MST_CNTL); + + return gpu_i2c_check_status(i2cd); +} + +static int gpu_i2c_master_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct gpu_i2c_dev *i2cd = i2c_get_adapdata(adap); + int status, status2; + int i, j; + + /* + * The controller supports maximum 4 byte read due to known + * limitation of sending STOP after every read. 
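gpu_i2c_read() above receives at most four bytes packed big-endian into the single I2C_MST_DATA word and unpacks them with the put_unaligned_be16/be32 helpers. An equivalent generic loop, shown here only to make the byte ordering explicit (a sketch with a hypothetical helper name, not the driver code):

#include <linux/types.h>

/* Copy the top 'len' bytes of a big-endian packed register into data[]. */
static void unpack_be_word(u32 val, u8 *data, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		data[i] = val >> (8 * (len - 1 - i));
}

For len of 1 through 4 this produces the same layout as the switch in gpu_i2c_read(), which is why the adapter advertises max_read_len = 4 in its quirks further below.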
+ */ + for (i = 0; i < num; i++) { + if (msgs[i].flags & I2C_M_RD) { + /* program client address before starting read */ + writel(msgs[i].addr, i2cd->regs + I2C_MST_ADDR); + /* gpu_i2c_read has implicit start */ + status = gpu_i2c_read(i2cd, msgs[i].buf, msgs[i].len); + if (status < 0) + goto stop; + } else { + u8 addr = i2c_8bit_addr_from_msg(msgs + i); + + status = gpu_i2c_start(i2cd); + if (status < 0) { + if (i == 0) + return status; + goto stop; + } + + status = gpu_i2c_write(i2cd, addr); + if (status < 0) + goto stop; + + for (j = 0; j < msgs[i].len; j++) { + status = gpu_i2c_write(i2cd, msgs[i].buf[j]); + if (status < 0) + goto stop; + } + } + } + status = gpu_i2c_stop(i2cd); + if (status < 0) + return status; + + return i; +stop: + status2 = gpu_i2c_stop(i2cd); + if (status2 < 0) + dev_err(i2cd->dev, "i2c stop failed %d\n", status2); + return status; +} + +static const struct i2c_adapter_quirks gpu_i2c_quirks = { + .max_read_len = 4, + .flags = I2C_AQ_COMB_WRITE_THEN_READ, +}; + +static u32 gpu_i2c_functionality(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm gpu_i2c_algorithm = { + .master_xfer = gpu_i2c_master_xfer, + .functionality = gpu_i2c_functionality, +}; + +/* + * This driver is for Nvidia GPU cards with USB Type-C interface. + * We want to identify the cards using vendor ID and class code only + * to avoid dependency of adding product id for any new card which + * requires this driver. + * Currently there is no class code defined for UCSI device over PCI + * so using UNKNOWN class for now and it will be updated when UCSI + * over PCI gets a class code. + * There is no other NVIDIA cards with UNKNOWN class code. Even if the + * driver gets loaded for an undesired card then eventually i2c_read() + * (initiated from UCSI i2c_client) will timeout or UCSI commands will + * timeout. 
+ */ +#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80 +static const struct pci_device_id gpu_i2c_ids[] = { + { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SERIAL_UNKNOWN << 8, 0xffffff00}, + { } +}; +MODULE_DEVICE_TABLE(pci, gpu_i2c_ids); + +static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq) +{ + struct i2c_client *ccgx_client; + + i2cd->gpu_ccgx_ucsi = devm_kzalloc(i2cd->dev, + sizeof(*i2cd->gpu_ccgx_ucsi), + GFP_KERNEL); + if (!i2cd->gpu_ccgx_ucsi) + return -ENOMEM; + + strlcpy(i2cd->gpu_ccgx_ucsi->type, "ccgx-ucsi", + sizeof(i2cd->gpu_ccgx_ucsi->type)); + i2cd->gpu_ccgx_ucsi->addr = 0x8; + i2cd->gpu_ccgx_ucsi->irq = irq; + ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi); + if (!ccgx_client) + return -ENODEV; + + return 0; +} + +static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct gpu_i2c_dev *i2cd; + int status; + + i2cd = devm_kzalloc(&pdev->dev, sizeof(*i2cd), GFP_KERNEL); + if (!i2cd) + return -ENOMEM; + + i2cd->dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, i2cd); + + status = pcim_enable_device(pdev); + if (status < 0) { + dev_err(&pdev->dev, "pcim_enable_device failed %d\n", status); + return status; + } + + pci_set_master(pdev); + + i2cd->regs = pcim_iomap(pdev, 0, 0); + if (!i2cd->regs) { + dev_err(&pdev->dev, "pcim_iomap failed\n"); + return -ENOMEM; + } + + status = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (status < 0) { + dev_err(&pdev->dev, "pci_alloc_irq_vectors err %d\n", status); + return status; + } + + gpu_enable_i2c_bus(i2cd); + + i2c_set_adapdata(&i2cd->adapter, i2cd); + i2cd->adapter.owner = THIS_MODULE; + strlcpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter", + sizeof(i2cd->adapter.name)); + i2cd->adapter.algo = &gpu_i2c_algorithm; + i2cd->adapter.quirks = &gpu_i2c_quirks; + i2cd->adapter.dev.parent = &pdev->dev; + status = i2c_add_adapter(&i2cd->adapter); + if (status < 0) + goto free_irq_vectors; + + status = gpu_populate_client(i2cd, pdev->irq); + if (status < 0) { + dev_err(&pdev->dev, "gpu_populate_client failed %d\n", status); + goto del_adapter; + } + + return 0; + +del_adapter: + i2c_del_adapter(&i2cd->adapter); +free_irq_vectors: + pci_free_irq_vectors(pdev); + return status; +} + +static void gpu_i2c_remove(struct pci_dev *pdev) +{ + struct gpu_i2c_dev *i2cd = dev_get_drvdata(&pdev->dev); + + i2c_del_adapter(&i2cd->adapter); + pci_free_irq_vectors(pdev); +} + +static int gpu_i2c_resume(struct device *dev) +{ + struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev); + + gpu_enable_i2c_bus(i2cd); + return 0; +} + +static UNIVERSAL_DEV_PM_OPS(gpu_i2c_driver_pm, NULL, gpu_i2c_resume, NULL); + +static struct pci_driver gpu_i2c_driver = { + .name = "nvidia-gpu", + .id_table = gpu_i2c_ids, + .probe = gpu_i2c_probe, + .remove = gpu_i2c_remove, + .driver = { + .pm = &gpu_i2c_driver_pm, + }, +}; + +module_pci_driver(gpu_i2c_driver); + +MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>"); +MODULE_DESCRIPTION("Nvidia GPU I2C controller Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 527f55c8c4c7..db075bc0d952 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -571,18 +571,19 @@ static int geni_i2c_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. 
fifo depth:%d\n", tx_depth); - ret = i2c_add_adapter(&gi2c->adap); - if (ret) { - dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret); - return ret; - } - gi2c->suspended = 1; pm_runtime_set_suspended(gi2c->se.dev); pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY); pm_runtime_use_autosuspend(gi2c->se.dev); pm_runtime_enable(gi2c->se.dev); + ret = i2c_add_adapter(&gi2c->adap); + if (ret) { + dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret); + pm_runtime_disable(gi2c->se.dev); + return ret; + } + return 0; } @@ -590,8 +591,8 @@ static int geni_i2c_remove(struct platform_device *pdev) { struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev); - pm_runtime_disable(gi2c->se.dev); i2c_del_adapter(&gi2c->adap); + pm_runtime_disable(gi2c->se.dev); return 0; } diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c index ce7acd115dd8..1870cf87afe1 100644 --- a/drivers/leds/trigger/ledtrig-pattern.c +++ b/drivers/leds/trigger/ledtrig-pattern.c @@ -75,8 +75,6 @@ static void pattern_trig_timer_function(struct timer_list *t) { struct pattern_trig_data *data = from_timer(data, t, timer); - mutex_lock(&data->lock); - for (;;) { if (!data->is_indefinite && !data->repeat) break; @@ -87,9 +85,10 @@ static void pattern_trig_timer_function(struct timer_list *t) data->curr->brightness); mod_timer(&data->timer, jiffies + msecs_to_jiffies(data->curr->delta_t)); - - /* Skip the tuple with zero duration */ - pattern_trig_update_patterns(data); + if (!data->next->delta_t) { + /* Skip the tuple with zero duration */ + pattern_trig_update_patterns(data); + } /* Select next tuple */ pattern_trig_update_patterns(data); } else { @@ -116,8 +115,6 @@ static void pattern_trig_timer_function(struct timer_list *t) break; } - - mutex_unlock(&data->lock); } static int pattern_trig_start_pattern(struct led_classdev *led_cdev) @@ -176,14 +173,10 @@ static ssize_t repeat_store(struct device *dev, struct device_attribute *attr, if (res < -1 || res == 0) return -EINVAL; - /* - * Clear previous patterns' performence firstly, and remove the timer - * without mutex lock to avoid dead lock. - */ - del_timer_sync(&data->timer); - mutex_lock(&data->lock); + del_timer_sync(&data->timer); + if (data->is_hw_pattern) led_cdev->pattern_clear(led_cdev); @@ -234,14 +227,10 @@ static ssize_t pattern_trig_store_patterns(struct led_classdev *led_cdev, struct pattern_trig_data *data = led_cdev->trigger_data; int ccount, cr, offset = 0, err = 0; - /* - * Clear previous patterns' performence firstly, and remove the timer - * without mutex lock to avoid dead lock. - */ - del_timer_sync(&data->timer); - mutex_lock(&data->lock); + del_timer_sync(&data->timer); + if (data->is_hw_pattern) led_cdev->pattern_clear(led_cdev); diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index c215287e80cf..a66dea4f1ed2 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -2060,7 +2060,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, * timings. 
*/ name = gpmc_cs_get_name(cs); - if (name && of_node_cmp(child->name, name) == 0) + if (name && of_node_name_eq(child, name)) goto no_timings; ret = gpmc_cs_request(cs, resource_size(&res), &base); @@ -2068,7 +2068,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs); return ret; } - gpmc_cs_set_name(cs, child->name); + gpmc_cs_set_name(cs, child->full_name); gpmc_read_settings_dt(child, &gpmc_s); gpmc_read_timings_dt(child, &gpmc_t); @@ -2113,7 +2113,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, goto err; } - if (of_node_cmp(child->name, "nand") == 0) { + if (of_node_name_eq(child, "nand")) { /* Warn about older DT blobs with no compatible property */ if (!of_property_read_bool(child, "compatible")) { dev_warn(&pdev->dev, @@ -2123,7 +2123,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, } } - if (of_node_cmp(child->name, "onenand") == 0) { + if (of_node_name_eq(child, "onenand")) { /* Warn about older DT blobs with no compatible property */ if (!of_property_read_bool(child, "compatible")) { dev_warn(&pdev->dev, diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig index 6d74e499e18d..34e0b70f5c5f 100644 --- a/drivers/memory/tegra/Kconfig +++ b/drivers/memory/tegra/Kconfig @@ -6,6 +6,16 @@ config TEGRA_MC This driver supports the Memory Controller (MC) hardware found on NVIDIA Tegra SoCs. +config TEGRA20_EMC + bool "NVIDIA Tegra20 External Memory Controller driver" + default y + depends on ARCH_TEGRA_2x_SOC + help + This driver is for the External Memory Controller (EMC) found on + Tegra20 chips. The EMC controls the external DRAM on the board. + This driver is required to change memory timings / clock rate for + external memory. 
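The new tegra20-emc driver added below reprograms the EMC shadow registers from a clock rate-change notifier: PRE_RATE_CHANGE writes the timing set for the target rate, POST_RATE_CHANGE waits for the EMC-CAR handshake completion interrupt, and ABORT_RATE_CHANGE restores the old timings with a manual flush. A stripped-down sketch of that notifier shape, using hypothetical my_emc names and eliding the register programming:

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct my_emc {
	struct notifier_block clk_nb;
	/* registers, timing tables, completion, ... */
};

static int my_emc_clk_notify(struct notifier_block *nb,
			     unsigned long msg, void *data)
{
	struct my_emc *emc = container_of(nb, struct my_emc, clk_nb);
	struct clk_notifier_data *cnd = data;
	int err = 0;

	switch (msg) {
	case PRE_RATE_CHANGE:
		/* program shadow timing registers for cnd->new_rate */
		break;
	case POST_RATE_CHANGE:
		/* wait for the EMC-CAR handshake completion interrupt */
		break;
	case ABORT_RATE_CHANGE:
		/* reprogram timings for cnd->old_rate and flush manually */
		break;
	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(err);
}

/*
 * Registration, typically from probe():
 *	emc->clk_nb.notifier_call = my_emc_clk_notify;
 *	clk_notifier_register(emc->clk, &emc->clk_nb);
 */

The actual driver below additionally validates at probe time that the bootloader left the DRAM auto-suspend mode configured and enables the EMC-CAR handshake before registering the notifier.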
+ config TEGRA124_EMC bool "NVIDIA Tegra124 External Memory Controller driver" default y diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile index 94ab16ba075b..3971a6b7c487 100644 --- a/drivers/memory/tegra/Makefile +++ b/drivers/memory/tegra/Makefile @@ -10,5 +10,6 @@ tegra-mc-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o obj-$(CONFIG_TEGRA_MC) += tegra-mc.o +obj-$(CONFIG_TEGRA20_EMC) += tegra20-emc.o obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c new file mode 100644 index 000000000000..9ee5bef49e47 --- /dev/null +++ b/drivers/memory/tegra/tegra20-emc.c @@ -0,0 +1,591 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Tegra20 External Memory Controller driver + * + * Author: Dmitry Osipenko <digetx@gmail.com> + */ + +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/sort.h> +#include <linux/types.h> + +#include <soc/tegra/fuse.h> + +#define EMC_INTSTATUS 0x000 +#define EMC_INTMASK 0x004 +#define EMC_TIMING_CONTROL 0x028 +#define EMC_RC 0x02c +#define EMC_RFC 0x030 +#define EMC_RAS 0x034 +#define EMC_RP 0x038 +#define EMC_R2W 0x03c +#define EMC_W2R 0x040 +#define EMC_R2P 0x044 +#define EMC_W2P 0x048 +#define EMC_RD_RCD 0x04c +#define EMC_WR_RCD 0x050 +#define EMC_RRD 0x054 +#define EMC_REXT 0x058 +#define EMC_WDV 0x05c +#define EMC_QUSE 0x060 +#define EMC_QRST 0x064 +#define EMC_QSAFE 0x068 +#define EMC_RDV 0x06c +#define EMC_REFRESH 0x070 +#define EMC_BURST_REFRESH_NUM 0x074 +#define EMC_PDEX2WR 0x078 +#define EMC_PDEX2RD 0x07c +#define EMC_PCHG2PDEN 0x080 +#define EMC_ACT2PDEN 0x084 +#define EMC_AR2PDEN 0x088 +#define EMC_RW2PDEN 0x08c +#define EMC_TXSR 0x090 +#define EMC_TCKE 0x094 +#define EMC_TFAW 0x098 +#define EMC_TRPAB 0x09c +#define EMC_TCLKSTABLE 0x0a0 +#define EMC_TCLKSTOP 0x0a4 +#define EMC_TREFBW 0x0a8 +#define EMC_QUSE_EXTRA 0x0ac +#define EMC_ODT_WRITE 0x0b0 +#define EMC_ODT_READ 0x0b4 +#define EMC_FBIO_CFG5 0x104 +#define EMC_FBIO_CFG6 0x114 +#define EMC_AUTO_CAL_INTERVAL 0x2a8 +#define EMC_CFG_2 0x2b8 +#define EMC_CFG_DIG_DLL 0x2bc +#define EMC_DLL_XFORM_DQS 0x2c0 +#define EMC_DLL_XFORM_QUSE 0x2c4 +#define EMC_ZCAL_REF_CNT 0x2e0 +#define EMC_ZCAL_WAIT_CNT 0x2e4 +#define EMC_CFG_CLKTRIM_0 0x2d0 +#define EMC_CFG_CLKTRIM_1 0x2d4 +#define EMC_CFG_CLKTRIM_2 0x2d8 + +#define EMC_CLKCHANGE_REQ_ENABLE BIT(0) +#define EMC_CLKCHANGE_PD_ENABLE BIT(1) +#define EMC_CLKCHANGE_SR_ENABLE BIT(2) + +#define EMC_TIMING_UPDATE BIT(0) + +#define EMC_REFRESH_OVERFLOW_INT BIT(3) +#define EMC_CLKCHANGE_COMPLETE_INT BIT(4) + +static const u16 emc_timing_registers[] = { + EMC_RC, + EMC_RFC, + EMC_RAS, + EMC_RP, + EMC_R2W, + EMC_W2R, + EMC_R2P, + EMC_W2P, + EMC_RD_RCD, + EMC_WR_RCD, + EMC_RRD, + EMC_REXT, + EMC_WDV, + EMC_QUSE, + EMC_QRST, + EMC_QSAFE, + EMC_RDV, + EMC_REFRESH, + EMC_BURST_REFRESH_NUM, + EMC_PDEX2WR, + EMC_PDEX2RD, + EMC_PCHG2PDEN, + EMC_ACT2PDEN, + EMC_AR2PDEN, + EMC_RW2PDEN, + EMC_TXSR, + EMC_TCKE, + EMC_TFAW, + EMC_TRPAB, + EMC_TCLKSTABLE, + EMC_TCLKSTOP, + EMC_TREFBW, + EMC_QUSE_EXTRA, + EMC_FBIO_CFG6, + EMC_ODT_WRITE, + EMC_ODT_READ, + EMC_FBIO_CFG5, + EMC_CFG_DIG_DLL, + EMC_DLL_XFORM_DQS, + EMC_DLL_XFORM_QUSE, + EMC_ZCAL_REF_CNT, + EMC_ZCAL_WAIT_CNT, + EMC_AUTO_CAL_INTERVAL, + EMC_CFG_CLKTRIM_0, + EMC_CFG_CLKTRIM_1, + 
EMC_CFG_CLKTRIM_2, +}; + +struct emc_timing { + unsigned long rate; + u32 data[ARRAY_SIZE(emc_timing_registers)]; +}; + +struct tegra_emc { + struct device *dev; + struct completion clk_handshake_complete; + struct notifier_block clk_nb; + struct clk *backup_clk; + struct clk *emc_mux; + struct clk *pll_m; + struct clk *clk; + void __iomem *regs; + + struct emc_timing *timings; + unsigned int num_timings; +}; + +static irqreturn_t tegra_emc_isr(int irq, void *data) +{ + struct tegra_emc *emc = data; + u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT; + u32 status; + + status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask; + if (!status) + return IRQ_NONE; + + /* notify about EMC-CAR handshake completion */ + if (status & EMC_CLKCHANGE_COMPLETE_INT) + complete(&emc->clk_handshake_complete); + + /* notify about HW problem */ + if (status & EMC_REFRESH_OVERFLOW_INT) + dev_err_ratelimited(emc->dev, + "refresh request overflow timeout\n"); + + /* clear interrupts */ + writel_relaxed(status, emc->regs + EMC_INTSTATUS); + + return IRQ_HANDLED; +} + +static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc, + unsigned long rate) +{ + struct emc_timing *timing = NULL; + unsigned int i; + + for (i = 0; i < emc->num_timings; i++) { + if (emc->timings[i].rate >= rate) { + timing = &emc->timings[i]; + break; + } + } + + if (!timing) { + dev_err(emc->dev, "no timing for rate %lu\n", rate); + return NULL; + } + + return timing; +} + +static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate) +{ + struct emc_timing *timing = tegra_emc_find_timing(emc, rate); + unsigned int i; + + if (!timing) + return -EINVAL; + + dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n", + __func__, timing->rate, rate); + + /* program shadow registers */ + for (i = 0; i < ARRAY_SIZE(timing->data); i++) + writel_relaxed(timing->data[i], + emc->regs + emc_timing_registers[i]); + + /* wait until programming has settled */ + readl_relaxed(emc->regs + emc_timing_registers[i - 1]); + + reinit_completion(&emc->clk_handshake_complete); + + return 0; +} + +static int emc_complete_timing_change(struct tegra_emc *emc, bool flush) +{ + long timeout; + + dev_dbg(emc->dev, "%s: flush %d\n", __func__, flush); + + if (flush) { + /* manually initiate memory timing update */ + writel_relaxed(EMC_TIMING_UPDATE, + emc->regs + EMC_TIMING_CONTROL); + return 0; + } + + timeout = wait_for_completion_timeout(&emc->clk_handshake_complete, + usecs_to_jiffies(100)); + if (timeout == 0) { + dev_err(emc->dev, "EMC-CAR handshake failed\n"); + return -EIO; + } else if (timeout < 0) { + dev_err(emc->dev, "failed to wait for EMC-CAR handshake: %ld\n", + timeout); + return timeout; + } + + return 0; +} + +static int tegra_emc_clk_change_notify(struct notifier_block *nb, + unsigned long msg, void *data) +{ + struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb); + struct clk_notifier_data *cnd = data; + int err; + + switch (msg) { + case PRE_RATE_CHANGE: + err = emc_prepare_timing_change(emc, cnd->new_rate); + break; + + case ABORT_RATE_CHANGE: + err = emc_prepare_timing_change(emc, cnd->old_rate); + if (err) + break; + + err = emc_complete_timing_change(emc, true); + break; + + case POST_RATE_CHANGE: + err = emc_complete_timing_change(emc, false); + break; + + default: + return NOTIFY_DONE; + } + + return notifier_from_errno(err); +} + +static int load_one_timing_from_dt(struct tegra_emc *emc, + struct emc_timing *timing, + struct device_node *node) +{ + u32 rate; + int 
err; + + if (!of_device_is_compatible(node, "nvidia,tegra20-emc-table")) { + dev_err(emc->dev, "incompatible DT node: %pOF\n", node); + return -EINVAL; + } + + err = of_property_read_u32(node, "clock-frequency", &rate); + if (err) { + dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n", + node, err); + return err; + } + + err = of_property_read_u32_array(node, "nvidia,emc-registers", + timing->data, + ARRAY_SIZE(emc_timing_registers)); + if (err) { + dev_err(emc->dev, + "timing %pOF: failed to read emc timing data: %d\n", + node, err); + return err; + } + + /* + * The EMC clock rate is twice the bus rate, and the bus rate is + * measured in kHz. + */ + timing->rate = rate * 2 * 1000; + + dev_dbg(emc->dev, "%s: %pOF: EMC rate %lu\n", + __func__, node, timing->rate); + + return 0; +} + +static int cmp_timings(const void *_a, const void *_b) +{ + const struct emc_timing *a = _a; + const struct emc_timing *b = _b; + + if (a->rate < b->rate) + return -1; + + if (a->rate > b->rate) + return 1; + + return 0; +} + +static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc, + struct device_node *node) +{ + struct device_node *child; + struct emc_timing *timing; + int child_count; + int err; + + child_count = of_get_child_count(node); + if (!child_count) { + dev_err(emc->dev, "no memory timings in DT node: %pOF\n", node); + return -EINVAL; + } + + emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing), + GFP_KERNEL); + if (!emc->timings) + return -ENOMEM; + + emc->num_timings = child_count; + timing = emc->timings; + + for_each_child_of_node(node, child) { + err = load_one_timing_from_dt(emc, timing++, child); + if (err) { + of_node_put(child); + return err; + } + } + + sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings, + NULL); + + return 0; +} + +static struct device_node * +tegra_emc_find_node_by_ram_code(struct device *dev) +{ + struct device_node *np; + u32 value, ram_code; + int err; + + if (!of_property_read_bool(dev->of_node, "nvidia,use-ram-code")) + return of_node_get(dev->of_node); + + ram_code = tegra_read_ram_code(); + + for (np = of_find_node_by_name(dev->of_node, "emc-tables"); np; + np = of_find_node_by_name(np, "emc-tables")) { + err = of_property_read_u32(np, "nvidia,ram-code", &value); + if (err || value != ram_code) { + of_node_put(np); + continue; + } + + return np; + } + + dev_err(dev, "no memory timings for RAM code %u found in device tree\n", + ram_code); + + return NULL; +} + +static int emc_setup_hw(struct tegra_emc *emc) +{ + u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT; + u32 emc_cfg; + + emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2); + + /* + * Depending on a memory type, DRAM should enter either self-refresh + * or power-down state on EMC clock change. 
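For reference, the timing-table handling above is easy to model outside the kernel: load_one_timing_from_dt() turns the DT "clock-frequency" value (the memory bus rate in kHz) into an EMC clock rate in Hz by doubling it, and tegra_emc_find_timing() then picks the first entry of the sorted table whose rate is at least the requested one. A minimal standalone sketch of that selection logic, using made-up rates rather than values from a real device tree:

  #include <stdio.h>

  struct timing { unsigned long rate; };

  /* DT "clock-frequency" is the bus rate in kHz; the EMC clock runs at twice the bus rate */
  static unsigned long dt_khz_to_emc_rate(unsigned long khz)
  {
          return khz * 2 * 1000;
  }

  /* first entry with rate >= request, mirroring tegra_emc_find_timing() on a sorted table */
  static const struct timing *find_timing(const struct timing *t, int n, unsigned long rate)
  {
          for (int i = 0; i < n; i++)
                  if (t[i].rate >= rate)
                          return &t[i];
          return NULL;    /* no suitable timing */
  }

  int main(void)
  {
          struct timing table[] = {
                  { dt_khz_to_emc_rate(166500) },   /* 333000000 Hz */
                  { dt_khz_to_emc_rate(333000) },   /* 666000000 Hz */
          };
          const struct timing *t = find_timing(table, 2, 400000000);

          printf("selected EMC rate: %lu Hz\n", t ? t->rate : 0UL);   /* 666000000 */
          return 0;
  }

A request above the largest table entry returns NULL here, matching the "no timing for rate" error path in the driver.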
+ */ + if (!(emc_cfg & EMC_CLKCHANGE_PD_ENABLE) && + !(emc_cfg & EMC_CLKCHANGE_SR_ENABLE)) { + dev_err(emc->dev, + "bootloader didn't specify DRAM auto-suspend mode\n"); + return -EINVAL; + } + + /* enable EMC and CAR to handshake on PLL divider/source changes */ + emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE; + writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2); + + /* initialize interrupt */ + writel_relaxed(intmask, emc->regs + EMC_INTMASK); + writel_relaxed(intmask, emc->regs + EMC_INTSTATUS); + + return 0; +} + +static int emc_init(struct tegra_emc *emc, unsigned long rate) +{ + int err; + + err = clk_set_parent(emc->emc_mux, emc->backup_clk); + if (err) { + dev_err(emc->dev, + "failed to reparent to backup source: %d\n", err); + return err; + } + + err = clk_set_rate(emc->pll_m, rate); + if (err) { + dev_err(emc->dev, + "failed to change pll_m rate: %d\n", err); + return err; + } + + err = clk_set_parent(emc->emc_mux, emc->pll_m); + if (err) { + dev_err(emc->dev, + "failed to reparent to pll_m: %d\n", err); + return err; + } + + err = clk_set_rate(emc->clk, rate); + if (err) { + dev_err(emc->dev, + "failed to change emc rate: %d\n", err); + return err; + } + + return 0; +} + +static int tegra_emc_probe(struct platform_device *pdev) +{ + struct device_node *np; + struct tegra_emc *emc; + struct resource *res; + int irq, err; + + /* driver has nothing to do in a case of memory timing absence */ + if (of_get_child_count(pdev->dev.of_node) == 0) { + dev_info(&pdev->dev, + "EMC device tree node doesn't have memory timings\n"); + return 0; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "interrupt not specified\n"); + dev_err(&pdev->dev, "please update your device tree\n"); + return irq; + } + + np = tegra_emc_find_node_by_ram_code(&pdev->dev); + if (!np) + return -EINVAL; + + emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL); + if (!emc) { + of_node_put(np); + return -ENOMEM; + } + + init_completion(&emc->clk_handshake_complete); + emc->clk_nb.notifier_call = tegra_emc_clk_change_notify; + emc->dev = &pdev->dev; + + err = tegra_emc_load_timings_from_dt(emc, np); + of_node_put(np); + if (err) + return err; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + emc->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(emc->regs)) + return PTR_ERR(emc->regs); + + err = emc_setup_hw(emc); + if (err) + return err; + + err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0, + dev_name(&pdev->dev), emc); + if (err) { + dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", irq, err); + return err; + } + + emc->clk = devm_clk_get(&pdev->dev, "emc"); + if (IS_ERR(emc->clk)) { + err = PTR_ERR(emc->clk); + dev_err(&pdev->dev, "failed to get emc clock: %d\n", err); + return err; + } + + emc->pll_m = clk_get_sys(NULL, "pll_m"); + if (IS_ERR(emc->pll_m)) { + err = PTR_ERR(emc->pll_m); + dev_err(&pdev->dev, "failed to get pll_m clock: %d\n", err); + return err; + } + + emc->backup_clk = clk_get_sys(NULL, "pll_p"); + if (IS_ERR(emc->backup_clk)) { + err = PTR_ERR(emc->backup_clk); + dev_err(&pdev->dev, "failed to get pll_p clock: %d\n", err); + goto put_pll_m; + } + + emc->emc_mux = clk_get_parent(emc->clk); + if (IS_ERR(emc->emc_mux)) { + err = PTR_ERR(emc->emc_mux); + dev_err(&pdev->dev, "failed to get emc_mux clock: %d\n", err); + goto put_backup; + } + + err = clk_notifier_register(emc->clk, &emc->clk_nb); + if (err) { + dev_err(&pdev->dev, "failed to register clk notifier: %d\n", + err); + goto put_backup; + } + + /* set DRAM clock rate to maximum */ + err = 
emc_init(emc, emc->timings[emc->num_timings - 1].rate); + if (err) { + dev_err(&pdev->dev, "failed to initialize EMC clock rate: %d\n", + err); + goto unreg_notifier; + } + + return 0; + +unreg_notifier: + clk_notifier_unregister(emc->clk, &emc->clk_nb); +put_backup: + clk_put(emc->backup_clk); +put_pll_m: + clk_put(emc->pll_m); + + return err; +} + +static const struct of_device_id tegra_emc_of_match[] = { + { .compatible = "nvidia,tegra20-emc", }, + {}, +}; + +static struct platform_driver tegra_emc_driver = { + .probe = tegra_emc_probe, + .driver = { + .name = "tegra20-emc", + .of_match_table = tegra_emc_of_match, + .suppress_bind_attrs = true, + }, +}; + +static int __init tegra_emc_init(void) +{ + return platform_driver_register(&tegra_emc_driver); +} +subsys_initcall(tegra_emc_init); diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index e514d57a0419..aa983422aa97 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers" config MTD_DOCG3 tristate "M-Systems Disk-On-Chip G3" select BCH - select BCH_CONST_PARAMS + select BCH_CONST_PARAMS if !MTD_NAND_BCH select BITREVERSE help This provides an MTD device driver for the M-Systems DiskOnChip diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index 784c6e1a0391..fd5fe12d7461 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c @@ -221,7 +221,14 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev, info->mtd = info->subdev[0].mtd; ret = 0; } else if (info->num_subdev > 1) { - struct mtd_info *cdev[nr]; + struct mtd_info **cdev; + + cdev = kmalloc_array(nr, sizeof(*cdev), GFP_KERNEL); + if (!cdev) { + ret = -ENOMEM; + goto err; + } + /* * We detected multiple devices. Concatenate them together. 
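The sa1100-flash hunk above replaces a variable-length array on the stack with a heap allocation, presumably as part of the kernel-wide VLA removal. kmalloc_array(nr, sizeof(*cdev), GFP_KERNEL) is used rather than an open-coded kmalloc(nr * sizeof(*cdev), ...) because it fails cleanly when the multiplication would overflow. A rough userspace model of that guard (not the kernel implementation itself):

  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* overflow-checked array allocation, in the spirit of kmalloc_array() */
  static void *alloc_array(size_t n, size_t size)
  {
          if (size != 0 && n > SIZE_MAX / size)
                  return NULL;             /* n * size would wrap around */
          return malloc(n * size);
  }

  int main(void)
  {
          void *ok  = alloc_array(4, sizeof(void *));   /* small array: succeeds */
          void *bad = alloc_array(SIZE_MAX, 8);         /* would overflow: NULL */

          printf("ok=%p bad=%p\n", ok, bad);
          free(ok);
          return 0;
  }

The temporary cdev[] array is only needed while mtd_concat_create() builds the concatenated device, which is why the new code frees it immediately after the call.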
*/ @@ -230,6 +237,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev, info->mtd = mtd_concat_create(cdev, info->num_subdev, plat->name); + kfree(cdev); if (info->mtd == NULL) { ret = -ENXIO; goto err; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 05bd0779fe9b..71050a0b31df 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -590,7 +590,6 @@ retry: /** * panic_nand_wait - [GENERIC] wait until the command is done - * @mtd: MTD device structure * @chip: NAND chip structure * @timeo: timeout * diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index e24db817154e..d846428ef038 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -996,7 +996,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf, err_unmap: dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE); - return 0; + return ret; } static ssize_t cqspi_read(struct spi_nor *nor, loff_t from, diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 9407ca5f9443..3e54e31889c7 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -3250,12 +3250,14 @@ static int spi_nor_init_params(struct spi_nor *nor, memcpy(&sfdp_params, params, sizeof(sfdp_params)); memcpy(&prev_map, &nor->erase_map, sizeof(prev_map)); - if (spi_nor_parse_sfdp(nor, &sfdp_params)) + if (spi_nor_parse_sfdp(nor, &sfdp_params)) { + nor->addr_width = 0; /* restore previous erase map */ memcpy(&nor->erase_map, &prev_map, sizeof(nor->erase_map)); - else + } else { memcpy(params, &sfdp_params, sizeof(*params)); + } } return 0; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index ffa37adb7681..333387f1f1fe 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3112,13 +3112,13 @@ static int bond_slave_netdev_event(unsigned long event, case NETDEV_CHANGE: /* For 802.3ad mode only: * Getting invalid Speed/Duplex values here will put slave - * in weird state. So mark it as link-down for the time + * in weird state. So mark it as link-fail for the time * being and let link-monitoring (miimon) set it right when * correct speeds/duplex are available. 
*/ if (bond_update_speed_duplex(slave) && BOND_MODE(bond) == BOND_MODE_8023AD) - slave->link = BOND_LINK_DOWN; + slave->link = BOND_LINK_FAIL; if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_adapter_speed_duplex_changed(slave); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 54e0ca6ed730..86b6464b4525 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_device *dev) { int i; - mutex_init(&dev->reg_mutex); - mutex_init(&dev->stats_mutex); - mutex_init(&dev->alu_mutex); - mutex_init(&dev->vlan_mutex); - dev->ds->ops = &ksz_switch_ops; for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) { @@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev) if (dev->pdata) dev->chip_id = dev->pdata->chip_id; + mutex_init(&dev->reg_mutex); + mutex_init(&dev->stats_mutex); + mutex_init(&dev->alu_mutex); + mutex_init(&dev->vlan_mutex); + if (ksz_switch_detect(dev)) return -EINVAL; diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index d721ccf7d8be..38e399e0f30e 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip) if (err) return err; + /* Keep the histogram mode bits */ + val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX; val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL; err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 6a633c70f603..99ef1daaa4d8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -407,13 +407,13 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 fc = aq_nic->aq_nic_cfg.flow_control; pause->autoneg = 0; - if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX) - pause->rx_pause = 1; - if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX) - pause->tx_pause = 1; + pause->rx_pause = !!(fc & AQ_NIC_FC_RX); + pause->tx_pause = !!(fc & AQ_NIC_FC_TX); + } static int aq_ethtool_set_pauseparam(struct net_device *ndev, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index e8689241204e..a1e70da358ca 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -204,6 +204,10 @@ struct aq_hw_ops { int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); + int (*hw_set_offload)(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg); + + int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc); }; struct aq_fw_ops { @@ -226,6 +230,8 @@ struct aq_fw_ops { int (*update_stats)(struct aq_hw_s *self); + u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode); + int (*set_flow_control)(struct aq_hw_s *self); int (*set_power)(struct aq_hw_s *self, unsigned int power_state, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index e3ae29e523f0..7c07eef275eb 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -99,8 +99,11 @@ static int aq_ndev_set_features(struct net_device *ndev, struct aq_nic_s *aq_nic = 
netdev_priv(ndev); struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic); bool is_lro = false; + int err = 0; + + aq_cfg->features = features; - if (aq_cfg->hw_features & NETIF_F_LRO) { + if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) { is_lro = features & NETIF_F_LRO; if (aq_cfg->is_lro != is_lro) { @@ -112,8 +115,11 @@ static int aq_ndev_set_features(struct net_device *ndev, } } } + if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM) + err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw, + aq_cfg); - return 0; + return err; } static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 5fed24446687..7abdc0952425 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -118,12 +118,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self) } cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk; - cfg->hw_features = cfg->aq_hw_caps->hw_features; + cfg->features = cfg->aq_hw_caps->hw_features; } static int aq_nic_update_link_status(struct aq_nic_s *self) { int err = self->aq_fw_ops->update_link_status(self->aq_hw); + u32 fc = 0; if (err) return err; @@ -133,6 +134,15 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) AQ_CFG_DRV_NAME, self->link_status.mbps, self->aq_hw->aq_link_status.mbps); aq_nic_update_interrupt_moderation_settings(self); + + /* Driver has to update flow control settings on RX block + * on any link event. + * We should query FW whether it negotiated FC. + */ + if (self->aq_fw_ops->get_flow_control) + self->aq_fw_ops->get_flow_control(self->aq_hw, &fc); + if (self->aq_hw_ops->hw_set_fc) + self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0); } self->link_status = self->aq_hw->aq_link_status; @@ -590,7 +600,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) } } - if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) { + if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) { packet_filter |= IFF_MULTICAST; self->mc_list.count = i; self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, @@ -772,7 +782,9 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); - if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) + /* Asym is when either RX or TX, but not both */ + if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^ + !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)) ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index c1582f4e8e1b..44ec47a3d60a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -23,7 +23,7 @@ struct aq_vec_s; struct aq_nic_cfg_s { const struct aq_hw_caps_s *aq_hw_caps; - u64 hw_features; + u64 features; u32 rxds; /* rx ring size, descriptors # */ u32 txds; /* tx ring size, descriptors # */ u32 vecs; /* vecs==allocated irqs */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 3db91446cc67..74550ccc7a20 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) return !!budget; } +static void aq_rx_checksum(struct aq_ring_s *self, + struct aq_ring_buff_s *buff, + struct sk_buff *skb) +{ + if 
(!(self->aq_nic->ndev->features & NETIF_F_RXCSUM)) + return; + + if (unlikely(buff->is_cso_err)) { + ++self->stats.rx.errors; + skb->ip_summed = CHECKSUM_NONE; + return; + } + if (buff->is_ip_cso) { + __skb_incr_checksum_unnecessary(skb); + if (buff->is_udp_cso || buff->is_tcp_cso) + __skb_incr_checksum_unnecessary(skb); + } else { + skb->ip_summed = CHECKSUM_NONE; + } +} + #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) int aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, @@ -267,18 +288,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self, } skb->protocol = eth_type_trans(skb, ndev); - if (unlikely(buff->is_cso_err)) { - ++self->stats.rx.errors; - skb->ip_summed = CHECKSUM_NONE; - } else { - if (buff->is_ip_cso) { - __skb_incr_checksum_unnecessary(skb); - if (buff->is_udp_cso || buff->is_tcp_cso) - __skb_incr_checksum_unnecessary(skb); - } else { - skb->ip_summed = CHECKSUM_NONE; - } - } + + aq_rx_checksum(self, buff, skb); skb_set_hash(skb, buff->rss_hash, buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 76d25d594a0f..f02592f43fe3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -100,12 +100,17 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self) return err; } +static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc) +{ + hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc); + return 0; +} + static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) { u32 tc = 0U; u32 buff_size = 0U; unsigned int i_priority = 0U; - bool is_rx_flow_control = false; /* TPS Descriptor rate init */ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); @@ -138,7 +143,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) /* QoS Rx buf size per TC */ tc = 0; - is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); buff_size = HW_ATL_B0_RXBUF_MAX; hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); @@ -150,7 +154,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) (buff_size * (1024U / 32U) * 50U) / 100U, tc); - hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 
1U : 0U, tc); + + hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc); /* QoS 802.1p priority -> TC mapping */ for (i_priority = 8U; i_priority--;) @@ -229,8 +234,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1); /* RX checksums offloads*/ - hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1); - hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1); + hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features & + NETIF_F_RXCSUM)); + hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features & + NETIF_F_RXCSUM)); /* LSO offloads*/ hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); @@ -655,9 +662,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE]; - unsigned int is_err = 1U; unsigned int is_rx_check_sum_enabled = 0U; unsigned int pkt_type = 0U; + u8 rx_stat = 0U; if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */ break; @@ -665,35 +672,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, buff = &ring->buff_ring[ring->hw_head]; - is_err = (0x0000003CU & rxd_wb->status); + rx_stat = (0x0000003CU & rxd_wb->status) >> 2; is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); - is_err &= ~0x20U; /* exclude validity bit */ pkt_type = 0xFFU & (rxd_wb->type >> 4); - if (is_rx_check_sum_enabled) { - if (0x0U == (pkt_type & 0x3U)) - buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U; + if (is_rx_check_sum_enabled & BIT(0) && + (0x0U == (pkt_type & 0x3U))) + buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U; + if (is_rx_check_sum_enabled & BIT(1)) { if (0x4U == (pkt_type & 0x1CU)) - buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; + buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U : + !!(rx_stat & BIT(3)); else if (0x0U == (pkt_type & 0x1CU)) - buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U; - - /* Checksum offload workaround for small packets */ - if (rxd_wb->pkt_len <= 60) { - buff->is_ip_cso = 0U; - buff->is_cso_err = 0U; - } + buff->is_tcp_cso = (rx_stat & BIT(2)) ? 
0U : + !!(rx_stat & BIT(3)); + } + buff->is_cso_err = !!(rx_stat & 0x6); + /* Checksum offload workaround for small packets */ + if (unlikely(rxd_wb->pkt_len <= 60)) { + buff->is_ip_cso = 0U; + buff->is_cso_err = 0U; } - - is_err &= ~0x18U; dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); - if (is_err || rxd_wb->type & 0x1000U) { - /* status error or DMA error */ + if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) { + /* MAC error or DMA error */ buff->is_error = 1U; } else { if (self->aq_nic_cfg->is_rss) { @@ -915,6 +922,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self) static int hw_atl_b0_hw_stop(struct aq_hw_s *self) { hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); + + /* Invalidate Descriptor Cache to prevent writing to the cached + * descriptors and to the data pointer of those descriptors + */ + hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1); + return aq_hw_err_from_flags(self); } @@ -963,4 +976,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = { .hw_get_regs = hw_atl_utils_hw_get_regs, .hw_get_hw_stats = hw_atl_utils_get_hw_stats, .hw_get_fw_version = hw_atl_utils_get_fw_version, + .hw_set_offload = hw_atl_b0_hw_offload_set, + .hw_set_fc = hw_atl_b0_set_fc, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index be0a3a90dfad..5502ec5f0f69 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode); } +void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR, + HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK, + HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT, + init); +} + void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_pkt_buff_size_per_tc, u32 buffer) { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index 7056c7342afc..41f239928c15 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_pkt_buff_size_per_tc, u32 buffer); +/* set rdm rx dma descriptor cache init */ +void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init); + /* set rx xoff enable (per tc) */ void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, u32 buffer); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 716674a9b729..a715fa317b1c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -293,6 +293,24 @@ /* default value of bitfield desc{d}_reset */ #define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0 +/* rdm_desc_init_i bitfield definitions + * preprocessor definitions for the bitfield rdm_desc_init_i. 
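The hw_atl_rdm_rx_dma_desc_cache_init_set() helper added above follows the usual atlantic pattern: each register field is described by an address, mask and shift (the HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_* definitions that follow), and aq_hw_write_reg_bit() performs the read-modify-write; for this particular field the mask is 0xffffffff, so the "bitfield" spans the whole 32-bit register. The masking arithmetic itself is easy to check in isolation; the register value and field below are illustrative, not part of the real register map:

  #include <inttypes.h>
  #include <stdio.h>

  /* read-modify-write of a field described by mask + shift, mirroring the idiom
   * aq_hw_write_reg_bit() applies to the ADR/MSK/SHIFT macro triplets above */
  static uint32_t write_reg_bit(uint32_t reg, uint32_t msk, uint32_t shift, uint32_t val)
  {
          return (reg & ~msk) | ((val << shift) & msk);
  }

  int main(void)
  {
          uint32_t reg = 0xdeadbeef;      /* made-up current register value */

          /* hypothetical 1-bit field at bit position 4 (mask 0x10) */
          printf("0x%08" PRIx32 "\n", write_reg_bit(reg, 0x10, 4, 1));   /* 0xdeadbeff */
          return 0;
  }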
+ * port="pif_rdm_desc_init_i" + */ + +/* register address for bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00 +/* bitmask for bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff +/* inverted bitmask for bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000 +/* lower bit position of bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0 +/* width of bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32 +/* default value of bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0 + /* rx int_desc_wrb_en bitfield definitions * preprocessor definitions for the bitfield "int_desc_wrb_en". * port="pif_rdm_int_desc_wrb_en_i" diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index 096ca5730887..7de3220d9cab 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -30,6 +30,8 @@ #define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 #define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 +#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE) +#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE) #define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY) #define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL) @@ -451,6 +453,24 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self) return 0; } +static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode) +{ + u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR); + + if (mpi_state & HW_ATL_FW2X_CAP_PAUSE) + if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE) + *fcmode = AQ_NIC_FC_RX; + else + *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX; + else + if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE) + *fcmode = AQ_NIC_FC_TX; + else + *fcmode = 0; + + return 0; +} + const struct aq_fw_ops aq_fw_2x_ops = { .init = aq_fw2x_init, .deinit = aq_fw2x_deinit, @@ -465,4 +485,5 @@ const struct aq_fw_ops aq_fw_2x_ops = { .set_eee_rate = aq_fw2x_set_eee_rate, .get_eee_rate = aq_fw2x_get_eee_rate, .set_flow_control = aq_fw2x_set_flow_control, + .get_flow_control = aq_fw2x_get_flow_control }; diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h index 78c5de467426..9d0e74f6b089 100644 --- a/drivers/net/ethernet/atheros/alx/alx.h +++ b/drivers/net/ethernet/atheros/alx/alx.h @@ -140,6 +140,5 @@ struct alx_priv { }; extern const struct ethtool_ops alx_ethtool_ops; -extern const char alx_drv_name[]; #endif diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 7968c644ad86..c131cfc1b79d 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -49,7 +49,7 @@ #include "hw.h" #include "reg.h" -const char alx_drv_name[] = "alx"; +static const char alx_drv_name[] = "alx"; static void alx_free_txbuf(struct alx_tx_queue *txq, int entry) { diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 4122553e224b..0e2d99c737e3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1902,9 +1902,6 @@ static void bcm_sysport_netif_start(struct net_device *dev) intrl2_1_mask_clear(priv, 0xffffffff); else intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); - - /* Last call before we start the real business */ - 
netif_tx_start_all_queues(dev); } static void rbuf_init(struct bcm_sysport_priv *priv) @@ -2048,6 +2045,8 @@ static int bcm_sysport_open(struct net_device *dev) bcm_sysport_netif_start(dev); + netif_tx_start_all_queues(dev); + return 0; out_clear_rx_int: @@ -2071,7 +2070,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev) struct bcm_sysport_priv *priv = netdev_priv(dev); /* stop all software from updating hardware */ - netif_tx_stop_all_queues(dev); + netif_tx_disable(dev); napi_disable(&priv->napi); cancel_work_sync(&priv->dim.dim.work); phy_stop(dev->phydev); @@ -2658,12 +2657,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d) if (!netif_running(dev)) return 0; + netif_device_detach(dev); + bcm_sysport_netif_stop(dev); phy_suspend(dev->phydev); - netif_device_detach(dev); - /* Disable UniMAC RX */ umac_enable_set(priv, CMD_RX_EN, 0); @@ -2746,8 +2745,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) goto out_free_rx_ring; } - netif_device_attach(dev); - /* RX pipe enable */ topctrl_writel(priv, 0, RX_FLUSH_CNTL); @@ -2788,6 +2785,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) bcm_sysport_netif_start(dev); + netif_device_attach(dev); + return 0; out_free_rx_ring: diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 20c1681bb1af..2d6f090bf644 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2855,7 +2855,6 @@ static void bcmgenet_netif_start(struct net_device *dev) umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); - netif_tx_start_all_queues(dev); bcmgenet_enable_tx_napi(priv); /* Monitor link interrupts now */ @@ -2937,6 +2936,8 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_netif_start(dev); + netif_tx_start_all_queues(dev); + return 0; err_irq1: @@ -2958,7 +2959,7 @@ static void bcmgenet_netif_stop(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); bcmgenet_disable_tx_napi(priv); - netif_tx_stop_all_queues(dev); + netif_tx_disable(dev); /* Disable MAC receive */ umac_enable_set(priv, CMD_RX_EN, false); @@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct device *d) if (!netif_running(dev)) return 0; + netif_device_detach(dev); + bcmgenet_netif_stop(dev); if (!device_may_wakeup(d)) phy_suspend(dev->phydev); - netif_device_detach(dev); - /* Prepare the device for Wake-on-LAN and switch to the slow clock */ if (device_may_wakeup(d) && priv->wolopts) { ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); @@ -3700,8 +3701,6 @@ static int bcmgenet_resume(struct device *d) /* Always enable ring 16 - descriptor ring */ bcmgenet_enable_dma(priv, dma_ctrl); - netif_device_attach(dev); - if (!device_may_wakeup(d)) phy_resume(dev->phydev); @@ -3710,6 +3709,8 @@ static int bcmgenet_resume(struct device *d) bcmgenet_netif_start(dev); + netif_device_attach(dev); + return 0; out_clk_disable: diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 3f96aa30068e..20fcf0d1c2ce 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3760,7 +3760,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) /* Hardware table is only clear when pf resets */ if (!(handle->flags & HNAE3_SUPPORT_VF)) { ret = hns3_restore_vlan(netdev); - return ret; + if (ret) + return ret; } ret = hns3_restore_fd_rules(netdev); diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index aa5cb9834d73..494e562fe8c7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1168,14 +1168,14 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev) */ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) { - struct hclge_vport *vport = hdev->vport; - u32 i, k, qs_bitmap; - int ret; + int i; for (i = 0; i < HCLGE_BP_GRP_NUM; i++) { - qs_bitmap = 0; + u32 qs_bitmap = 0; + int k, ret; for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hclge_vport *vport = &hdev->vport[k]; u16 qs_id = vport->qs_offset + tc; u8 grp, sub_grp; @@ -1185,8 +1185,6 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) HCLGE_BP_SUB_GRP_ID_S); if (i == grp) qs_bitmap |= (1 << sub_grp); - - vport++; } ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 7893beffcc71..c9d5d0a7fbf1 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1545,7 +1545,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.sge_len = cpu_to_be32(skb->len); tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); - if (adapter->vlan_header_insertion) { + if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index bc71a21c1dc2..21c2688d6308 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -12249,6 +12249,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | @@ -12266,13 +12268,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) /* record features VLANs can make use of */ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) - netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; - hw_features = hw_enc_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; + netdev->hw_features |= hw_features; netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 4c4b5717a627..b8548370f1c7 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -76,6 +76,8 @@ extern const char ice_drv_ver[]; #define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) #define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) +#define ICE_MAX_RESET_WAIT 20 + #define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4) #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) @@ -189,7 +191,6 @@ struct ice_vsi { u64 tx_linearize; DECLARE_BITMAP(state, __ICE_STATE_NBITS); DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS); - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; unsigned int current_netdev_flags; u32 tx_restart; u32 tx_busy; @@ -369,5 +370,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); int ice_get_rss(struct ice_vsi 
*vsi, u8 *seed, u8 *lut, u16 lut_size); void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); +void ice_napi_del(struct ice_vsi *vsi); #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 8cd6a2401fd9..554fd707a6d6 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -811,6 +811,9 @@ void ice_deinit_hw(struct ice_hw *hw) /* Attempt to disable FW logging before shutting down control queues */ ice_cfg_fw_log(hw, false); ice_shutdown_all_ctrlq(hw); + + /* Clear VSI contexts if not already cleared */ + ice_clear_all_vsi_ctx(hw); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 96923580f2a6..648acdb4c644 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1517,10 +1517,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) } if (!test_bit(__ICE_DOWN, pf->state)) { - /* Give it a little more time to try to come back */ + /* Give it a little more time to try to come back. If still + * down, restart autoneg link or reinitialize the interface. + */ msleep(75); if (!test_bit(__ICE_DOWN, pf->state)) return ice_nway_reset(netdev); + + ice_down(vsi); + ice_up(vsi); } return err; diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 5fdea6ec7675..596b9fb1c510 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -242,6 +242,8 @@ #define GLNVM_ULD 0x000B6008 #define GLNVM_ULD_CORER_DONE_M BIT(3) #define GLNVM_ULD_GLOBR_DONE_M BIT(4) +#define GLPCI_CNF2 0x000BE004 +#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1) #define PF_FUNC_RID 0x0009E880 #define PF_FUNC_RID_FUNC_NUM_S 0 #define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 5bacad01f0c9..1041fa2a7767 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1997,7 +1997,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL); if (status) { netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n", - ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status, + ena ? 
"En" : "Dis", vsi->idx, vsi->vsi_num, status, vsi->back->hw.adminq.sq_last_status); goto err_out; } @@ -2458,6 +2458,7 @@ int ice_vsi_release(struct ice_vsi *vsi) * on this wq */ if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) { + ice_napi_del(vsi); unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 05993451147a..333312a1d595 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1465,7 +1465,7 @@ skip_req_irq: * ice_napi_del - Remove NAPI handler for the VSI * @vsi: VSI for which NAPI handler is to be removed */ -static void ice_napi_del(struct ice_vsi *vsi) +void ice_napi_del(struct ice_vsi *vsi) { int v_idx; @@ -1622,7 +1622,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - int ret; if (vid >= VLAN_N_VID) { netdev_err(netdev, "VLAN id requested %d is out of range %d\n", @@ -1635,7 +1634,8 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, /* Enable VLAN pruning when VLAN 0 is added */ if (unlikely(!vid)) { - ret = ice_cfg_vlan_pruning(vsi, true); + int ret = ice_cfg_vlan_pruning(vsi, true); + if (ret) return ret; } @@ -1644,12 +1644,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, * needed to continue allowing all untagged packets since VLAN prune * list is applied to all packets by the switch */ - ret = ice_vsi_add_vlan(vsi, vid); - - if (!ret) - set_bit(vid, vsi->active_vlans); - - return ret; + return ice_vsi_add_vlan(vsi, vid); } /** @@ -1677,8 +1672,6 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev, if (status) return status; - clear_bit(vid, vsi->active_vlans); - /* Disable VLAN pruning when VLAN 0 is removed */ if (unlikely(!vid)) status = ice_cfg_vlan_pruning(vsi, false); @@ -2002,6 +1995,22 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) } /** + * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines + * @pf: pointer to the PF structure + * + * There is no error returned here because the driver should be able to handle + * 128 Byte cache lines, so we only print a warning in case issues are seen, + * specifically with Tx. 
+ */ +static void ice_verify_cacheline_size(struct ice_pf *pf) +{ + if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) + dev_warn(&pf->pdev->dev, + "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", + ICE_CACHE_LINE_BYTES); +} + +/** * ice_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in ice_pci_tbl @@ -2151,6 +2160,8 @@ static int ice_probe(struct pci_dev *pdev, /* since everything is good, start the service timer */ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); + ice_verify_cacheline_size(pf); + return 0; err_alloc_sw_unroll: @@ -2182,6 +2193,12 @@ static void ice_remove(struct pci_dev *pdev) if (!pf) return; + for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { + if (!ice_is_reset_in_progress(pf->state)) + break; + msleep(100); + } + set_bit(__ICE_DOWN, pf->state); ice_service_task_stop(pf); @@ -2510,31 +2527,6 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi) } /** - * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up - * @vsi: the VSI being brought back up - */ -static int ice_restore_vlan(struct ice_vsi *vsi) -{ - int err; - u16 vid; - - if (!vsi->netdev) - return -EINVAL; - - err = ice_vsi_vlan_setup(vsi); - if (err) - return err; - - for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) { - err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid); - if (err) - break; - } - - return err; -} - -/** * ice_vsi_cfg - Setup the VSI * @vsi: the VSI being configured * @@ -2546,7 +2538,9 @@ static int ice_vsi_cfg(struct ice_vsi *vsi) if (vsi->netdev) { ice_set_rx_mode(vsi->netdev); - err = ice_restore_vlan(vsi); + + err = ice_vsi_vlan_setup(vsi); + if (err) return err; } @@ -3296,7 +3290,7 @@ static void ice_rebuild(struct ice_pf *pf) struct device *dev = &pf->pdev->dev; struct ice_hw *hw = &pf->hw; enum ice_status ret; - int err; + int err, i; if (test_bit(__ICE_DOWN, pf->state)) goto clear_recovery; @@ -3370,6 +3364,22 @@ static void ice_rebuild(struct ice_pf *pf) } ice_reset_all_vfs(pf, true); + + for (i = 0; i < pf->num_alloc_vsi; i++) { + bool link_up; + + if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF) + continue; + ice_get_link_status(pf->vsi[i]->port_info, &link_up); + if (link_up) { + netif_carrier_on(pf->vsi[i]->netdev); + netif_tx_wake_all_queues(pf->vsi[i]->netdev); + } else { + netif_carrier_off(pf->vsi[i]->netdev); + netif_tx_stop_all_queues(pf->vsi[i]->netdev); + } + } + /* if we get here, reset flow is successful */ clear_bit(__ICE_RESET_FAILED, pf->state); return; diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 33403f39f1b3..40c9c6558956 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -348,6 +348,18 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) } /** + * ice_clear_all_vsi_ctx - clear all the VSI context entries + * @hw: pointer to the hw struct + */ +void ice_clear_all_vsi_ctx(struct ice_hw *hw) +{ + u16 i; + + for (i = 0; i < ICE_MAX_VSI; i++) + ice_clear_vsi_ctx(hw, i); +} + +/** * ice_add_vsi - add VSI context to the hardware and VSI handle list * @hw: pointer to the hw struct * @vsi_handle: unique VSI handle provided by drivers diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index b88d96a1ef69..d5ef0bd58bf9 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -190,6 +190,8 @@ 
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle); +void ice_clear_all_vsi_ctx(struct ice_hw *hw); +/* Switch config */ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); /* Switch/bridge related commands */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 5dae968d853e..fe5bbabbb41e 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1520,7 +1520,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) /* update gso_segs and bytecount */ first->gso_segs = skb_shinfo(skb)->gso_segs; - first->bytecount = (first->gso_segs - 1) * off->header_len; + first->bytecount += (first->gso_segs - 1) * off->header_len; cd_tso_len = skb->len - off->header_len; cd_mss = skb_shinfo(skb)->gso_size; @@ -1556,15 +1556,15 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) * magnitude greater than our largest possible GSO size. * * This would then be implemented as: - * return (((size >> 12) * 85) >> 8) + 1; + * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; * * Since multiplication and division are commutative, we can reorder * operations into: - * return ((size * 85) >> 20) + 1; + * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; */ static unsigned int ice_txd_use_count(unsigned int size) { - return ((size * 85) >> 20) + 1; + return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; } /** @@ -1706,7 +1706,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) * + 1 desc for context descriptor, * otherwise try next time */ - if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) { + if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + + ICE_DESCS_FOR_CTX_DESC)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 1d0f58bd389b..75d0eaf6c9dd 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -22,8 +22,21 @@ #define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */ #define ICE_MAX_TXQ_PER_TXQG 128 -/* Tx Descriptors needed, worst case */ -#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +/* We are assuming that the cache line is always 64 Bytes here for ice. + * In order to make sure that is a correct assumption there is a check in probe + * to print a warning if the read from GLPCI_CNF2 tells us that the cache line + * size is 128 bytes. We do it this way because we do not want to read the + * GLPCI_CNF2 register or a variable containing the value on every pass through + * the Tx path. + */ +#define ICE_CACHE_LINE_BYTES 64 +#define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \ + sizeof(struct ice_tx_desc)) +#define ICE_DESCS_FOR_CTX_DESC 1 +#define ICE_DESCS_FOR_SKB_DATA_PTR 1 +/* Tx descriptors needed, worst case */ +#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \ + ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR) #define ICE_DESC_UNUSED(R) \ ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1) diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 12f9432abf11..f4dbc81c1988 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -92,12 +92,12 @@ struct ice_link_status { u64 phy_type_low; u16 max_frame_size; u16 link_speed; + u16 req_speeds; u8 lse_ena; /* Link Status Event notification */ u8 link_info; u8 an_info; u8 ext_info; u8 pacing; - u8 req_speeds; /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of * ice_aqc_get_phy_caps structure */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 45f10f8f01dc..e71065f9d391 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -348,7 +348,7 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid) struct ice_vsi_ctx ctxt = { 0 }; enum ice_status status; - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED | + ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED | ICE_AQ_VSI_PVLAN_INSERT_PVID | ICE_AQ_VSI_VLAN_EMOD_STR; ctxt.info.pvid = cpu_to_le16(vid); @@ -2171,7 +2171,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) if (!ice_vsi_add_vlan(vsi, vid)) { vf->num_vlan++; - set_bit(vid, vsi->active_vlans); /* Enable VLAN pruning when VLAN 0 is added */ if (unlikely(!vid)) @@ -2190,7 +2189,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) */ if (!ice_vsi_kill_vlan(vsi, vid)) { vf->num_vlan--; - clear_bit(vid, vsi->active_vlans); /* Disable VLAN pruning when removing VLAN 0 */ if (unlikely(!vid)) diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 29ced6b74d36..2b95dc9c7a6a 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -53,13 +53,15 @@ * 2^40 * 10^-9 / 60 = 18.3 minutes. * * SYSTIM is converted to real time using a timecounter. As - * timecounter_cyc2time() allows old timestamps, the timecounter - * needs to be updated at least once per half of the SYSTIM interval. - * Scheduling of delayed work is not very accurate, so we aim for 8 - * minutes to be sure the actual interval is shorter than 9.16 minutes. + * timecounter_cyc2time() allows old timestamps, the timecounter needs + * to be updated at least once per half of the SYSTIM interval. + * Scheduling of delayed work is not very accurate, and also the NIC + * clock can be adjusted to run up to 6% faster and the system clock + * up to 10% slower, so we aim for 6 minutes to be sure the actual + * interval in the NIC time is shorter than 9.16 minutes. 
*/ -#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8) +#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 6) #define IGB_PTP_TX_TIMEOUT (HZ * 15) #define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) #define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 5bfd349bf41a..3ba672e9e353 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -494,7 +494,7 @@ struct mvneta_port { #if defined(__LITTLE_ENDIAN) struct mvneta_tx_desc { u32 command; /* Options used by HW for packet transmitting.*/ - u16 reserverd1; /* csum_l4 (for future use) */ + u16 reserved1; /* csum_l4 (for future use) */ u16 data_size; /* Data size of transmitted packet in bytes */ u32 buf_phys_addr; /* Physical addr of transmitted buffer */ u32 reserved2; /* hw_cmd - (for future use, PMT) */ @@ -519,7 +519,7 @@ struct mvneta_rx_desc { #else struct mvneta_tx_desc { u16 data_size; /* Data size of transmitted packet in bytes */ - u16 reserverd1; /* csum_l4 (for future use) */ + u16 reserved1; /* csum_l4 (for future use) */ u32 command; /* Options used by HW for packet transmitting.*/ u32 reserved2; /* hw_cmd - (for future use, PMT) */ u32 buf_phys_addr; /* Physical addr of transmitted buffer */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 1857ee0f0871..6f5153afcab4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -1006,7 +1006,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->packets++; } ring->bytes += tx_info->nr_bytes; - netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes); AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); if (tx_info->inl) @@ -1044,7 +1043,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(ring->tx_queue); ring->queue_stopped++; } - send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue); + + send_doorbell = __netdev_tx_sent_queue(ring->tx_queue, + tx_info->nr_bytes, + skb->xmit_more); real_size = (real_size / 16) & 0x3f; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index a2df12b79f8e..9bec940330a4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3568,7 +3568,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) burst_size = 7; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: - is_bytes = true; rate = 4 * 1024; burst_size = 4; break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index cc1b373c0ace..46dc93d3b9b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. 
Aborting function start\n", fcoe_pf_params->num_cqs, p_hwfn->hw_info.feat_num[QED_FCOE_CQ]); - return -EINVAL; + rc = -EINVAL; + goto err; } p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); @@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid); if (rc) - return rc; + goto err; cxt_info.iid = dummy_cid; rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); if (rc) { DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n", dummy_cid); - return rc; + goto err; } p_cxt = cxt_info.p_cxt; SET_FIELD(p_cxt->tstorm_ag_context.flags3, @@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, rc = qed_spq_post(p_hwfn, p_ent, NULL); return rc; + +err: + qed_sp_destroy_request(p_hwfn, p_ent); + return rc; } static int diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 1135387bd99d..4f8a685d1a55 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n", p_params->num_queues, p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]); + qed_sp_destroy_request(p_hwfn, p_ent); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 82a1bd1f8a8c..67c02ea93906 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); if (rc) { - /* Return spq entry which is taken in qed_sp_init_request()*/ - qed_spq_return_entry(p_hwfn, p_ent); + qed_sp_destroy_request(p_hwfn, p_ent); return rc; } @@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "%d is not supported yet\n", p_filter_cmd->opcode); + qed_sp_destroy_request(p_hwfn, *pp_ent); return -EINVAL; } @@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, } else { rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc) - return rc; + goto err; if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id); if (rc) - return rc; + goto err; p_ramrod->rx_qid_valid = 1; p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); @@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, (u64)p_params->addr, p_params->length); return qed_spq_post(p_hwfn, p_ent, NULL); + +err: + qed_sp_destroy_request(p_hwfn, p_ent); + return rc; } int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index f40f654398a0..a96364df4320 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1944,9 +1944,12 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_speed_mask) { u32 transceiver_type, transceiver_state; + int ret; - qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state, - &transceiver_type); + ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state, + &transceiver_type); + if (ret) + return ret; if (qed_is_transceiver_ready(transceiver_state, transceiver_type) == false) diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 
c71391b9c757..62113438c880 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1514,6 +1514,7 @@ qed_rdma_register_tid(void *rdma_cxt, default: rc = -EINVAL; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); + qed_sp_destroy_request(p_hwfn, p_ent); return rc; } SET_FIELD(p_ramrod->flags1, diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index f9167d1354bb..e49fada85410 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n", rc); + qed_sp_destroy_request(p_hwfn, p_ent); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index e95431f6acd4..3157c0d99441 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -167,6 +167,9 @@ struct qed_spq_entry { enum spq_mode comp_mode; struct qed_spq_comp_cb comp_cb; struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */ + + /* Posted entry for unlimited list entry in EBLOCK mode */ + struct qed_spq_entry *post_ent; }; struct qed_eq { @@ -396,6 +399,17 @@ struct qed_sp_init_data { struct qed_spq_comp_cb *p_comp_data; }; +/** + * @brief Returns a SPQ entry to the pool / frees the entry if allocated. + * Should be called on in error flows after initializing the SPQ entry + * and before posting it. + * + * @param p_hwfn + * @param p_ent + */ +void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent); + int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, u8 cmd, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 77b6248ad3b9..888274fa208b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -47,6 +47,19 @@ #include "qed_sp.h" #include "qed_sriov.h" +void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent) +{ + /* qed_spq_get_entry() can either get an entry from the free_pool, + * or, if no entries are left, allocate a new entry and add it to + * the unlimited_pending list. 
+ */ + if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending) + kfree(p_ent); + else + qed_spq_return_entry(p_hwfn, p_ent); +} + int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, u8 cmd, u8 protocol, struct qed_sp_init_data *p_data) @@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, case QED_SPQ_MODE_BLOCK: if (!p_data->p_comp_data) - return -EINVAL; + goto err; p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; break; @@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, default: DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", p_ent->comp_mode); - return -EINVAL; + goto err; } DP_VERBOSE(p_hwfn, QED_MSG_SPQ, @@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); return 0; + +err: + qed_sp_destroy_request(p_hwfn, p_ent); + + return -EINVAL; } static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type) diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index c4a6274dd625..0a9c5bb0fa48 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); rc = qed_mcp_drain(p_hwfn, p_ptt); + qed_ptt_release(p_hwfn, p_ptt); if (rc) { DP_NOTICE(p_hwfn, "MCP drain failed\n"); goto err; @@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, /* Retry after drain */ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); if (!rc) - goto out; + return 0; comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; - if (comp_done->done == 1) + if (comp_done->done == 1) { if (p_fw_ret) *p_fw_ret = comp_done->fw_return_code; -out: - qed_ptt_release(p_hwfn, p_ptt); - return 0; - + return 0; + } err: - qed_ptt_release(p_hwfn, p_ptt); DP_NOTICE(p_hwfn, "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", le32_to_cpu(p_ent->elem.hdr.cid), @@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, /* EBLOCK responsible to free the allocated p_ent */ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK) kfree(p_ent); + else + p_ent->post_ent = p_en2; p_ent = p_en2; } @@ -767,6 +767,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) SPQ_HIGH_PRI_RESERVE_DEFAULT); } +/* Avoid overriding of SPQ entries when getting out-of-order completions, by + * marking the completions in a bitmap and increasing the chain consumer only + * for the first successive completed entries. + */ +static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo) +{ + u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; + struct qed_spq *p_spq = p_hwfn->p_spq; + + __set_bit(pos, p_spq->p_comp_bitmap); + while (test_bit(p_spq->comp_bitmap_idx, + p_spq->p_comp_bitmap)) { + __clear_bit(p_spq->comp_bitmap_idx, + p_spq->p_comp_bitmap); + p_spq->comp_bitmap_idx++; + qed_chain_return_produced(&p_spq->chain); + } +} + int qed_spq_post(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, u8 *fw_return_code) { @@ -824,11 +843,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, p_ent->queue == &p_spq->unlimited_pending); if (p_ent->queue == &p_spq->unlimited_pending) { - /* This is an allocated p_ent which does not need to - * return to pool. 
- */ + struct qed_spq_entry *p_post_ent = p_ent->post_ent; + kfree(p_ent); - return rc; + + /* Return the entry which was actually posted */ + p_ent = p_post_ent; } if (rc) @@ -842,7 +862,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, spq_post_fail2: spin_lock_bh(&p_spq->lock); list_del(&p_ent->list); - qed_chain_return_produced(&p_spq->chain); + qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo); spq_post_fail: /* return to the free pool */ @@ -874,25 +894,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, spin_lock_bh(&p_spq->lock); list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { if (p_ent->elem.hdr.echo == echo) { - u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; - list_del(&p_ent->list); - - /* Avoid overriding of SPQ entries when getting - * out-of-order completions, by marking the completions - * in a bitmap and increasing the chain consumer only - * for the first successive completed entries. - */ - __set_bit(pos, p_spq->p_comp_bitmap); - - while (test_bit(p_spq->comp_bitmap_idx, - p_spq->p_comp_bitmap)) { - __clear_bit(p_spq->comp_bitmap_idx, - p_spq->p_comp_bitmap); - p_spq->comp_bitmap_idx++; - qed_chain_return_produced(&p_spq->chain); - } - + qed_spq_comp_bmap_update(p_hwfn, echo); p_spq->comp_count++; found = p_ent; break; @@ -931,11 +934,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, QED_MSG_SPQ, "Got a completion without a callback function\n"); - if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) || - (found->queue == &p_spq->unlimited_pending)) + if (found->comp_mode != QED_SPQ_MODE_EBLOCK) /* EBLOCK is responsible for returning its own entry into the - * free list, unless it originally added the entry into the - * unlimited pending list. + * free list. */ qed_spq_return_entry(p_hwfn, found); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 9b08a9d9e151..ca6290fa0f30 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) default: DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", p_hwfn->hw_info.personality); + qed_sp_destroy_request(p_hwfn, p_ent); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 9647578cbe6a..14f26bf3b388 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -459,7 +459,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, struct cmd_desc_type0 *first_desc, struct sk_buff *skb, struct qlcnic_host_tx_ring *tx_ring) { - u8 l4proto, opcode = 0, hdr_len = 0; + u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0; u16 flags = 0, vlan_tci = 0; int copied, offset, copy_len, size; struct cmd_desc_type0 *hwdesc; @@ -472,14 +472,16 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, flags = QLCNIC_FLAGS_VLAN_TAGGED; vlan_tci = ntohs(vh->h_vlan_TCI); protocol = ntohs(vh->h_vlan_encapsulated_proto); + tag_vlan = 1; } else if (skb_vlan_tag_present(skb)) { flags = QLCNIC_FLAGS_VLAN_OOB; vlan_tci = skb_vlan_tag_get(skb); + tag_vlan = 1; } if (unlikely(adapter->tx_pvid)) { - if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) + if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) return -EIO; - if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED)) + if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED)) goto set_flags; flags = QLCNIC_FLAGS_VLAN_OOB; diff 
--git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 0afc3d335d56..d11c16aeb19a 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, struct net_device *real_dev, struct rmnet_endpoint *ep) { - struct rmnet_priv *priv; + struct rmnet_priv *priv = netdev_priv(rmnet_dev); int rc; if (ep->egress_dev) @@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; rmnet_dev->hw_features |= NETIF_F_SG; + priv->real_dev = real_dev; + rc = register_netdevice(rmnet_dev); if (!rc) { ep->egress_dev = rmnet_dev; @@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, rmnet_dev->rtnl_link_ops = &rmnet_link_ops; - priv = netdev_priv(rmnet_dev); priv->mux_id = id; - priv->real_dev = real_dev; netdev_dbg(rmnet_dev, "rmnet dev created\n"); } diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index b1b305f8f414..272b9ca66314 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -365,7 +365,8 @@ struct dma_features { /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ #define BUF_SIZE_16KiB 16384 -#define BUF_SIZE_8KiB 8192 +/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */ +#define BUF_SIZE_8KiB 8188 #define BUF_SIZE_4KiB 4096 #define BUF_SIZE_2KiB 2048 diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index ca9d7e48034c..40d6356a7e73 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -31,7 +31,7 @@ /* Enhanced descriptors */ static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) { - p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1) + p->des1 |= cpu_to_le32((BUF_SIZE_8KiB << ERDES1_BUFFER2_SIZE_SHIFT) & ERDES1_BUFFER2_SIZE_MASK); diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 77914c89d749..5ef91a790f9d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, int end) { p->des0 |= cpu_to_le32(RDES0_OWN); - p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK); + p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); if (mode == STMMAC_CHAIN_MODE) ehn_desc_rx_set_on_chain(p); diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index abc3f85270cd..d8c5bc412219 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -140,7 +140,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p) static int set_16kib_bfsize(int mtu) { int ret = 0; - if (unlikely(mtu >= BUF_SIZE_8KiB)) + if (unlikely(mtu > BUF_SIZE_8KiB)) ret = BUF_SIZE_16KiB; return ret; } diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c index 3b7f10a5f06a..c5cae8e74dc4 100644 --- a/drivers/net/fddi/defza.c +++ b/drivers/net/fddi/defza.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0+ /* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices. 
* * Copyright (c) 2018 Maciej W. Rozycki @@ -56,7 +56,7 @@ #define DRV_VERSION "v.1.1.4" #define DRV_RELDATE "Oct 6 2018" -static char version[] = +static const char version[] = DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n"; MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>"); @@ -784,7 +784,7 @@ err_rx: static void fza_tx_smt(struct net_device *dev) { struct fza_private *fp = netdev_priv(dev); - struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr; + struct fza_buffer_tx __iomem *smt_tx_ptr; int i, len; u32 own; @@ -799,6 +799,7 @@ static void fza_tx_smt(struct net_device *dev) if (!netif_queue_stopped(dev)) { if (dev_nit_active(dev)) { + struct fza_buffer_tx *skb_data_ptr; struct sk_buff *skb; /* Length must be a multiple of 4 as only word diff --git a/drivers/net/fddi/defza.h b/drivers/net/fddi/defza.h index b06acf32738e..93bda61be8e3 100644 --- a/drivers/net/fddi/defza.h +++ b/drivers/net/fddi/defza.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0+ */ /* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices. * * Copyright (c) 2018 Maciej W. Rozycki @@ -235,6 +235,7 @@ struct fza_ring_cmd { #define FZA_RING_CMD 0x200400 /* command ring address */ #define FZA_RING_CMD_SIZE 0x40 /* command descriptor ring * size + */ /* Command constants. */ #define FZA_RING_CMD_MASK 0x7fffffff #define FZA_RING_CMD_NOP 0x00000000 /* nop */ diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index e86ea105c802..704537010453 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -92,7 +92,7 @@ static int bcm54612e_config_init(struct phy_device *phydev) return 0; } -static int bcm5481x_config(struct phy_device *phydev) +static int bcm54xx_config_clock_delay(struct phy_device *phydev) { int rc, val; @@ -429,7 +429,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev) ret = genphy_config_aneg(phydev); /* Then we can set up the delay. */ - bcm5481x_config(phydev); + bcm54xx_config_clock_delay(phydev); if (of_property_read_bool(np, "enet-phy-lane-swap")) { /* Lane Swap - Undocumented register...magic! */ @@ -442,6 +442,19 @@ static int bcm5481_config_aneg(struct phy_device *phydev) return ret; } +static int bcm54616s_config_aneg(struct phy_device *phydev) +{ + int ret; + + /* Aneg firsly. */ + ret = genphy_config_aneg(phydev); + + /* Then we can set up the delay. 
*/ + bcm54xx_config_clock_delay(phydev); + + return ret; +} + static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set) { int val; @@ -636,6 +649,7 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, + .config_aneg = bcm54616s_config_aneg, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 7fc8508b5231..271e8adc39f1 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[] = { .flags = PHY_HAS_INTERRUPT, }, { .phy_id = 0x001cc816, - .name = "RTL8201F 10/100Mbps Ethernet", + .name = "RTL8201F Fast Ethernet", .phy_id_mask = 0x001fffff, .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 262e7a3c23cb..f2d01cb6f958 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->ethtool_ops = &smsc95xx_ethtool_ops; dev->net->flags |= IFF_MULTICAST; dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; + dev->net->min_mtu = ETH_MIN_MTU; + dev->net->max_mtu = ETH_DATA_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; pdata->dev = dev; @@ -1598,6 +1600,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) return ret; } + cancel_delayed_work_sync(&pdata->carrier_check); + if (pdata->suspend_flags) { netdev_warn(dev->net, "error during last resume\n"); pdata->suspend_flags = 0; @@ -1840,6 +1844,11 @@ done: */ if (ret && PMSG_IS_AUTO(message)) usbnet_resume(intf); + + if (ret) + schedule_delayed_work(&pdata->carrier_check, + CARRIER_CHECK_DELAY); + return ret; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 2e65be8b1387..559d567693b8 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1519,8 +1519,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->ndev) nvme_nvm_update_nvm_info(ns); #ifdef CONFIG_NVME_MULTIPATH - if (ns->head->disk) + if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); + blk_queue_stack_limits(ns->head->disk->queue, ns->queue); + } #endif } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 5e3cc8c59a39..9901afd804ce 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -285,6 +285,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* set to a default value for 512 until disk is validated */ blk_queue_logical_block_size(q, 512); + blk_set_stacking_limits(&q->limits); /* we need to propagate up the VMC settings */ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index f4efe289dc7b..a5f9bbce863f 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -420,7 +420,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl, struct pci_dev *p2p_dev; int ret; - if (!ctrl->p2p_client) + if (!ctrl->p2p_client || !ns->use_p2pmem) return; if (ns->p2p_dev) { diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index ddce100be57a..3f7971d3706d 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -122,7 +122,6 @@ struct 
nvmet_rdma_device { int inline_page_count; }; -static struct workqueue_struct *nvmet_rdma_delete_wq; static bool nvmet_rdma_use_srq; module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444); MODULE_PARM_DESC(use_srq, "Use shared receive queue."); @@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, if (queue->host_qid == 0) { /* Let inflight controller teardown complete */ - flush_workqueue(nvmet_rdma_delete_wq); + flush_scheduled_work(); } ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); if (ret) { - queue_work(nvmet_rdma_delete_wq, &queue->release_work); + schedule_work(&queue->release_work); /* Destroying rdma_cm id is not needed here */ return 0; } @@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) if (disconnect) { rdma_disconnect(queue->cm_id); - queue_work(nvmet_rdma_delete_wq, &queue->release_work); + schedule_work(&queue->release_work); } } @@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, mutex_unlock(&nvmet_rdma_queue_mutex); pr_err("failed to connect queue %d\n", queue->idx); - queue_work(nvmet_rdma_delete_wq, &queue->release_work); + schedule_work(&queue->release_work); } /** @@ -1656,17 +1655,8 @@ static int __init nvmet_rdma_init(void) if (ret) goto err_ib_client; - nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq", - WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); - if (!nvmet_rdma_delete_wq) { - ret = -ENOMEM; - goto err_unreg_transport; - } - return 0; -err_unreg_transport: - nvmet_unregister_transport(&nvmet_rdma_ops); err_ib_client: ib_unregister_client(&nvmet_rdma_ib_client); return ret; @@ -1674,7 +1664,6 @@ err_ib_client: static void __exit nvmet_rdma_exit(void) { - destroy_workqueue(nvmet_rdma_delete_wq); nvmet_unregister_transport(&nvmet_rdma_ops); ib_unregister_client(&nvmet_rdma_ib_client); WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list)); diff --git a/drivers/of/device.c b/drivers/of/device.c index 0f27fad9fe94..5592437bb3d1 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -149,9 +149,11 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma) * set by the driver. 
*/ mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1); - dev->bus_dma_mask = mask; dev->coherent_dma_mask &= mask; *dev->dma_mask &= mask; + /* ...but only set bus mask if we found valid dma-ranges earlier */ + if (!ret) + dev->bus_dma_mask = mask; coherent = of_dma_is_coherent(np); dev_dbg(dev, "device is%sdma coherent\n", diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c index 35c64a4295e0..fe6b13608e51 100644 --- a/drivers/of/of_numa.c +++ b/drivers/of/of_numa.c @@ -104,9 +104,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map) distance = of_read_number(matrix, 1); matrix++; + if ((nodea == nodeb && distance != LOCAL_DISTANCE) || + (nodea != nodeb && distance <= LOCAL_DISTANCE)) { + pr_err("Invalid distance[node%d -> node%d] = %d\n", + nodea, nodeb, distance); + return -EINVAL; + } + numa_set_distance(nodea, nodeb, distance); - pr_debug("distance[node%d -> node%d] = %d\n", - nodea, nodeb, distance); /* Set default distance of node B->A same as A->B */ if (nodeb > nodea) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 6843bc7ee9f2..04e294d1d16d 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -87,6 +87,18 @@ struct qeth_dbf_info { #define SENSE_RESETTING_EVENT_BYTE 1 #define SENSE_RESETTING_EVENT_FLAG 0x80 +static inline u32 qeth_get_device_id(struct ccw_device *cdev) +{ + struct ccw_dev_id dev_id; + u32 id; + + ccw_device_get_id(cdev, &dev_id); + id = dev_id.devno; + id |= (u32) (dev_id.ssid << 16); + + return id; +} + /* * Common IO related definitions */ @@ -97,7 +109,8 @@ struct qeth_dbf_info { #define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev) #define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev) #define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev) -#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev) +#define CCW_DEVID(cdev) (qeth_get_device_id(cdev)) +#define CARD_DEVID(card) (CCW_DEVID(CARD_RDEV(card))) /** * card stuff @@ -830,6 +843,11 @@ struct qeth_trap_id { /*some helper functions*/ #define QETH_CARD_IFNAME(card) (((card)->dev)? 
(card)->dev->name : "") +static inline bool qeth_netdev_is_registered(struct net_device *dev) +{ + return dev->netdev_ops != NULL; +} + static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, unsigned int elements) { @@ -973,7 +991,7 @@ int qeth_wait_for_threads(struct qeth_card *, unsigned long); int qeth_do_run_thread(struct qeth_card *, unsigned long); void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long); void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long); -int qeth_core_hardsetup_card(struct qeth_card *); +int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok); void qeth_print_status_message(struct qeth_card *); int qeth_init_qdio_queues(struct qeth_card *); int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, @@ -1028,11 +1046,6 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq); int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); void qeth_trace_features(struct qeth_card *); void qeth_close_dev(struct qeth_card *); -int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16, - long, - int (*reply_cb)(struct qeth_card *, - struct qeth_reply *, unsigned long), - void *); int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long); struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 3274f13aad57..4bce5ae65a55 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -167,6 +167,8 @@ const char *qeth_get_cardname_short(struct qeth_card *card) return "OSD_1000"; case QETH_LINK_TYPE_10GBIT_ETH: return "OSD_10GIG"; + case QETH_LINK_TYPE_25GBIT_ETH: + return "OSD_25GIG"; case QETH_LINK_TYPE_LANE_ETH100: return "OSD_FE_LANE"; case QETH_LINK_TYPE_LANE_TR: @@ -554,8 +556,8 @@ static int __qeth_issue_next_read(struct qeth_card *card) if (!iob) { dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); - QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob " - "available\n", dev_name(&card->gdev->dev)); + QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n", + CARD_DEVID(card)); return -ENOMEM; } qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); @@ -563,8 +565,8 @@ static int __qeth_issue_next_read(struct qeth_card *card) rc = ccw_device_start(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0); if (rc) { - QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! 
" - "rc=%i\n", dev_name(&card->gdev->dev), rc); + QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", + rc, CARD_DEVID(card)); atomic_set(&channel->irq_pending, 0); card->read_or_write_problem = 1; qeth_schedule_recovery(card); @@ -613,16 +615,14 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, const char *ipa_name; int com = cmd->hdr.command; ipa_name = qeth_get_ipa_cmd_name(com); + if (rc) - QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned " - "x%X \"%s\"\n", - ipa_name, com, dev_name(&card->gdev->dev), - QETH_CARD_IFNAME(card), rc, - qeth_get_ipa_msg(rc)); + QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n", + ipa_name, com, CARD_DEVID(card), rc, + qeth_get_ipa_msg(rc)); else - QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n", - ipa_name, com, dev_name(&card->gdev->dev), - QETH_CARD_IFNAME(card)); + QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n", + ipa_name, com, CARD_DEVID(card)); } static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, @@ -711,7 +711,7 @@ static int qeth_check_idx_response(struct qeth_card *card, QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); if ((buffer[2] & 0xc0) == 0xc0) { - QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#02x\n", + QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n", buffer[4]); QETH_CARD_TEXT(card, 2, "ckidxres"); QETH_CARD_TEXT(card, 2, " idxterm"); @@ -972,8 +972,8 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, QETH_CARD_TEXT(card, 2, "CGENCHK"); dev_warn(&cdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); - QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n", - dev_name(&cdev->dev), dstat, cstat); + QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n", + CCW_DEVID(cdev), dstat, cstat); print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 16, 1, irb, 64, 1); return 1; @@ -1013,8 +1013,8 @@ static long qeth_check_irb_error(struct qeth_card *card, switch (PTR_ERR(irb)) { case -EIO: - QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", - dev_name(&cdev->dev)); + QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n", + CCW_DEVID(cdev)); QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); break; @@ -1031,8 +1031,8 @@ static long qeth_check_irb_error(struct qeth_card *card, } break; default: - QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", - dev_name(&cdev->dev), PTR_ERR(irb)); + QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n", + PTR_ERR(irb), CCW_DEVID(cdev)); QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT(card, 2, " rc???"); } @@ -1114,9 +1114,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, dev_warn(&channel->ccwdev->dev, "The qeth device driver failed to recover " "an error on the device\n"); - QETH_DBF_MESSAGE(2, "%s sense data available. 
cstat " - "0x%X dstat 0x%X\n", - dev_name(&channel->ccwdev->dev), cstat, dstat); + QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n", + CCW_DEVID(channel->ccwdev), cstat, + dstat); print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); print_hex_dump(KERN_WARNING, "qeth: sense data ", @@ -1890,8 +1890,8 @@ static int qeth_idx_activate_channel(struct qeth_card *card, if (channel->state != CH_STATE_ACTIVATING) { dev_warn(&channel->ccwdev->dev, "The qeth device driver" " failed to recover an error on the device\n"); - QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", - dev_name(&channel->ccwdev->dev)); + QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n", + CCW_DEVID(channel->ccwdev)); QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); return -ETIME; } @@ -1926,17 +1926,15 @@ static void qeth_idx_write_cb(struct qeth_card *card, "The adapter is used exclusively by another " "host\n"); else - QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:" - " negative reply\n", - dev_name(&channel->ccwdev->dev)); + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", + CCW_DEVID(channel->ccwdev)); goto out; } memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { - QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: " - "function level mismatch (sent: 0x%x, received: " - "0x%x)\n", dev_name(&channel->ccwdev->dev), - card->info.func_level, temp); + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", + CCW_DEVID(channel->ccwdev), + card->info.func_level, temp); goto out; } channel->state = CH_STATE_UP; @@ -1973,9 +1971,8 @@ static void qeth_idx_read_cb(struct qeth_card *card, "insufficient authorization\n"); break; default: - QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" - " negative reply\n", - dev_name(&channel->ccwdev->dev)); + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", + CCW_DEVID(channel->ccwdev)); } QETH_CARD_TEXT_(card, 2, "idxread%c", QETH_IDX_ACT_CAUSE_CODE(iob->data)); @@ -1984,10 +1981,9 @@ static void qeth_idx_read_cb(struct qeth_card *card, memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); if (temp != qeth_peer_func_level(card->info.func_level)) { - QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function " - "level mismatch (sent: 0x%x, received: 0x%x)\n", - dev_name(&channel->ccwdev->dev), - card->info.func_level, temp); + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", + CCW_DEVID(channel->ccwdev), + card->info.func_level, temp); goto out; } memcpy(&card->token.issuer_rm_r, @@ -2096,9 +2092,8 @@ int qeth_send_control_data(struct qeth_card *card, int len, (addr_t) iob, 0, 0, event_timeout); spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { - QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " - "ccw_device_start rc = %i\n", - dev_name(&channel->ccwdev->dev), rc); + QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", + CARD_DEVID(card), rc); QETH_CARD_TEXT_(card, 2, " err%d", rc); spin_lock_irq(&card->lock); list_del_init(&reply->list); @@ -2853,8 +2848,8 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, } else { dev_warn(&card->gdev->dev, "The qeth driver ran out of channel command buffers\n"); - QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers", - dev_name(&card->gdev->dev)); + QETH_DBF_MESSAGE(1, 
"device %x ran out of channel command buffers", + CARD_DEVID(card)); } return iob; @@ -2989,10 +2984,9 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, return 0; default: if (cmd->hdr.return_code) { - QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled " - "rc=%d\n", - dev_name(&card->gdev->dev), - cmd->hdr.return_code); + QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", + CARD_DEVID(card), + cmd->hdr.return_code); return 0; } } @@ -3004,8 +2998,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; } else - QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected" - "\n", dev_name(&card->gdev->dev)); + QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", + CARD_DEVID(card)); return 0; } @@ -4297,10 +4291,9 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, cmd->data.setadapterparms.hdr.return_code); if (cmd->data.setadapterparms.hdr.return_code != SET_ACCESS_CTRL_RC_SUCCESS) - QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n", - card->gdev->dev.kobj.name, - access_ctrl_req->subcmd_code, - cmd->data.setadapterparms.hdr.return_code); + QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", + access_ctrl_req->subcmd_code, CARD_DEVID(card), + cmd->data.setadapterparms.hdr.return_code); switch (cmd->data.setadapterparms.hdr.return_code) { case SET_ACCESS_CTRL_RC_SUCCESS: if (card->options.isolation == ISOLATION_MODE_NONE) { @@ -4312,14 +4305,14 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, } break; case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: - QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already " - "deactivated\n", dev_name(&card->gdev->dev)); + QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", + CARD_DEVID(card)); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: - QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already" - " activated\n", dev_name(&card->gdev->dev)); + QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", + CARD_DEVID(card)); if (fallback) card->options.isolation = card->options.prev_isolation; break; @@ -4405,10 +4398,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) rc = qeth_setadpparms_set_access_ctrl(card, card->options.isolation, fallback); if (rc) { - QETH_DBF_MESSAGE(3, - "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n", - card->gdev->dev.kobj.name, - rc); + QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", + rc, CARD_DEVID(card)); rc = -EOPNOTSUPP; } } else if (card->options.isolation != ISOLATION_MODE_NONE) { @@ -4443,7 +4434,8 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) rc = BMCR_FULLDPLX; if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && (card->info.link_type != QETH_LINK_TYPE_OSN) && - (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH)) + (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && + (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) rc |= BMCR_SPEED100; break; case MII_BMSR: /* Basic mode status register */ @@ -4634,8 +4626,8 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, qeth_snmp_command_cb, (void *)&qinfo); if (rc) - QETH_DBF_MESSAGE(2, "SNMP 
command failed on %s: (0x%x)\n", - QETH_CARD_IFNAME(card), rc); + QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", + CARD_DEVID(card), rc); else { if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) rc = -EFAULT; @@ -4869,8 +4861,8 @@ static void qeth_determine_capabilities(struct qeth_card *card) rc = qeth_read_conf_data(card, (void **) &prcd, &length); if (rc) { - QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", - dev_name(&card->gdev->dev), rc); + QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", + CARD_DEVID(card), rc); QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_offline; } @@ -5086,7 +5078,7 @@ static struct ccw_driver qeth_ccw_driver = { .remove = ccwgroup_remove_ccwdev, }; -int qeth_core_hardsetup_card(struct qeth_card *card) +int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok) { int retries = 3; int rc; @@ -5096,8 +5088,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card) qeth_update_from_chp_desc(card); retry: if (retries < 3) - QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", - dev_name(&card->gdev->dev)); + QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", + CARD_DEVID(card)); rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); @@ -5161,13 +5153,20 @@ retriable: if (rc == IPA_RC_LAN_OFFLINE) { dev_warn(&card->gdev->dev, "The LAN is offline\n"); - netif_carrier_off(card->dev); + *carrier_ok = false; } else { rc = -ENODEV; goto out; } } else { - netif_carrier_on(card->dev); + *carrier_ok = true; + } + + if (qeth_netdev_is_registered(card->dev)) { + if (*carrier_ok) + netif_carrier_on(card->dev); + else + netif_carrier_off(card->dev); } card->options.ipa4.supported_funcs = 0; @@ -5201,8 +5200,8 @@ retriable: out: dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " "an error on the device\n"); - QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n", - dev_name(&card->gdev->dev), rc); + QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! 
rc=%d\n", + CARD_DEVID(card), rc); return rc; } EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); @@ -5481,11 +5480,12 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, } EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); -int qeth_send_setassparms(struct qeth_card *card, - struct qeth_cmd_buffer *iob, __u16 len, long data, - int (*reply_cb)(struct qeth_card *, - struct qeth_reply *, unsigned long), - void *reply_param) +static int qeth_send_setassparms(struct qeth_card *card, + struct qeth_cmd_buffer *iob, u16 len, + long data, int (*reply_cb)(struct qeth_card *, + struct qeth_reply *, + unsigned long), + void *reply_param) { int rc; struct qeth_ipa_cmd *cmd; @@ -5501,7 +5501,6 @@ int qeth_send_setassparms(struct qeth_card *card, rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param); return rc; } -EXPORT_SYMBOL_GPL(qeth_send_setassparms); int qeth_send_simple_setassparms_prot(struct qeth_card *card, enum qeth_ipa_funcs ipa_func, @@ -6170,8 +6169,14 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, WARN_ON_ONCE(1); } - /* fallthrough from high to low, to select all legal speeds: */ + /* partially does fall through, to also select lower speeds */ switch (maxspeed) { + case SPEED_25000: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 25000baseSR_Full); + break; case SPEED_10000: ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); @@ -6254,6 +6259,10 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, cmd->base.speed = SPEED_10000; cmd->base.port = PORT_FIBRE; break; + case QETH_LINK_TYPE_25GBIT_ETH: + cmd->base.speed = SPEED_25000; + cmd->base.port = PORT_FIBRE; + break; default: cmd->base.speed = SPEED_10; cmd->base.port = PORT_TP; @@ -6320,6 +6329,9 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, case CARD_INFO_PORTS_10G: cmd->base.speed = SPEED_10000; break; + case CARD_INFO_PORTS_25G: + cmd->base.speed = SPEED_25000; + break; } return 0; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index e85090467afe..3e54be201b27 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -90,6 +90,7 @@ enum qeth_link_types { QETH_LINK_TYPE_GBIT_ETH = 0x03, QETH_LINK_TYPE_OSN = 0x04, QETH_LINK_TYPE_10GBIT_ETH = 0x10, + QETH_LINK_TYPE_25GBIT_ETH = 0x12, QETH_LINK_TYPE_LANE_ETH100 = 0x81, QETH_LINK_TYPE_LANE_TR = 0x82, QETH_LINK_TYPE_LANE_ETH1000 = 0x83, @@ -347,6 +348,7 @@ enum qeth_card_info_port_speed { CARD_INFO_PORTS_100M = 0x00000006, CARD_INFO_PORTS_1G = 0x00000007, CARD_INFO_PORTS_10G = 0x00000008, + CARD_INFO_PORTS_25G = 0x0000000A, }; /* (SET)DELIP(M) IPA stuff ***************************************************/ @@ -436,7 +438,7 @@ struct qeth_ipacmd_setassparms { __u32 flags_32bit; struct qeth_ipa_caps caps; struct qeth_checksum_cmd chksum; - struct qeth_arp_cache_entry add_arp_entry; + struct qeth_arp_cache_entry arp_entry; struct qeth_arp_query_data query_arp; struct qeth_tso_start_data tso; __u8 ip[16]; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 23aaf373f631..2914a1a69f83 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -146,11 +146,11 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) QETH_CARD_TEXT(card, 2, "L2Wmac"); rc = qeth_l2_send_setdelmac(card, mac, cmd); if (rc == -EEXIST) - QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n", - mac, 
QETH_CARD_IFNAME(card)); + QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n", + CARD_DEVID(card)); else if (rc) - QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n", - mac, QETH_CARD_IFNAME(card), rc); + QETH_DBF_MESSAGE(2, "Failed to register MAC on device %x: %d\n", + CARD_DEVID(card), rc); return rc; } @@ -163,8 +163,8 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) QETH_CARD_TEXT(card, 2, "L2Rmac"); rc = qeth_l2_send_setdelmac(card, mac, cmd); if (rc) - QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n", - mac, QETH_CARD_IFNAME(card), rc); + QETH_DBF_MESSAGE(2, "Failed to delete MAC on device %u: %d\n", + CARD_DEVID(card), rc); return rc; } @@ -260,9 +260,9 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "L2sdvcb"); if (cmd->hdr.return_code) { - QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x.\n", + QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n", cmd->data.setdelvlan.vlan_id, - QETH_CARD_IFNAME(card), cmd->hdr.return_code); + CARD_DEVID(card), cmd->hdr.return_code); QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command); QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); } @@ -455,8 +455,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) rc = qeth_vm_request_mac(card); if (!rc) goto out; - QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %s: x%x\n", - CARD_BUS_ID(card), rc); + QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n", + CARD_DEVID(card), rc); QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc); /* fall back to alternative mechanism: */ } @@ -468,8 +468,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) rc = qeth_setadpparms_change_macaddr(card); if (!rc) goto out; - QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n", - CARD_BUS_ID(card), rc); + QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n", + CARD_DEVID(card), rc); QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc); /* fall back once more: */ } @@ -826,7 +826,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l2_set_offline(cgdev); - unregister_netdev(card->dev); + if (qeth_netdev_is_registered(card->dev)) + unregister_netdev(card->dev); } static const struct ethtool_ops qeth_l2_ethtool_ops = { @@ -862,11 +863,11 @@ static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_set_features = qeth_set_features }; -static int qeth_l2_setup_netdev(struct qeth_card *card) +static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok) { int rc; - if (card->dev->netdev_ops) + if (qeth_netdev_is_registered(card->dev)) return 0; card->dev->priv_flags |= IFF_UNICAST_FLT; @@ -919,6 +920,9 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) qeth_l2_request_initial_mac(card); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); rc = register_netdev(card->dev); + if (!rc && carrier_ok) + netif_carrier_on(card->dev); + if (rc) card->dev->netdev_ops = NULL; return rc; @@ -949,6 +953,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc = 0; enum qeth_card_states recover_flag; + bool carrier_ok; mutex_lock(&card->discipline_mutex); mutex_lock(&card->conf_mutex); @@ -956,7 +961,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); recover_flag = card->state; - rc = 
qeth_core_hardsetup_card(card); + rc = qeth_core_hardsetup_card(card, &carrier_ok); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); rc = -ENODEV; @@ -967,7 +972,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) dev_info(&card->gdev->dev, "The device represents a Bridge Capable Port\n"); - rc = qeth_l2_setup_netdev(card); + rc = qeth_l2_setup_netdev(card, carrier_ok); if (rc) goto out_remove; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 0b161cc1fd2e..f08b745c2007 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -278,9 +278,6 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) QETH_CARD_TEXT(card, 4, "clearip"); - if (recover && card->options.sniffer) - return; - spin_lock_bh(&card->ip_lock); hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { @@ -494,9 +491,8 @@ int qeth_l3_setrouting_v4(struct qeth_card *card) QETH_PROT_IPV4); if (rc) { card->options.route4.type = NO_ROUTER; - QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" - " on %s. Type set to 'no router'.\n", rc, - QETH_CARD_IFNAME(card)); + QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n", + rc, CARD_DEVID(card)); } return rc; } @@ -518,9 +514,8 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) QETH_PROT_IPV6); if (rc) { card->options.route6.type = NO_ROUTER; - QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" - " on %s. Type set to 'no router'.\n", rc, - QETH_CARD_IFNAME(card)); + QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n", + rc, CARD_DEVID(card)); } return rc; } @@ -663,6 +658,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, int rc = 0; int cnt = 3; + if (card->options.sniffer) + return 0; if (addr->proto == QETH_PROT_IPV4) { QETH_CARD_TEXT(card, 2, "setaddr4"); @@ -697,6 +694,9 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, { int rc = 0; + if (card->options.sniffer) + return 0; + if (addr->proto == QETH_PROT_IPV4) { QETH_CARD_TEXT(card, 2, "deladdr4"); QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); @@ -1070,8 +1070,8 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply, } break; default: - QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n", - cmd->data.diagass.action, QETH_CARD_IFNAME(card)); + QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n", + cmd->data.diagass.action, CARD_DEVID(card)); } return 0; @@ -1517,32 +1517,25 @@ static void qeth_l3_set_rx_mode(struct net_device *dev) qeth_l3_handle_promisc_mode(card); } -static const char *qeth_l3_arp_get_error_cause(int *rc) +static int qeth_l3_arp_makerc(int rc) { - switch (*rc) { - case QETH_IPA_ARP_RC_FAILED: - *rc = -EIO; - return "operation failed"; + switch (rc) { + case IPA_RC_SUCCESS: + return 0; case QETH_IPA_ARP_RC_NOTSUPP: - *rc = -EOPNOTSUPP; - return "operation not supported"; - case QETH_IPA_ARP_RC_OUT_OF_RANGE: - *rc = -EINVAL; - return "argument out of range"; case QETH_IPA_ARP_RC_Q_NOTSUPP: - *rc = -EOPNOTSUPP; - return "query operation not supported"; + return -EOPNOTSUPP; + case QETH_IPA_ARP_RC_OUT_OF_RANGE: + return -EINVAL; case QETH_IPA_ARP_RC_Q_NO_DATA: - *rc = -ENOENT; - return "no query data available"; + return -ENOENT; default: - return "unknown error"; + return -EIO; } } static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) { - int tmp; int rc; 
QETH_CARD_TEXT(card, 3, "arpstnoe"); @@ -1560,13 +1553,10 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, IPA_CMD_ASS_ARP_SET_NO_ENTRIES, no_entries); - if (rc) { - tmp = rc; - QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on " - "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card), - qeth_l3_arp_get_error_cause(&rc), tmp, tmp); - } - return rc; + if (rc) + QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n", + CARD_DEVID(card), rc); + return qeth_l3_arp_makerc(rc); } static __u32 get_arp_entry_size(struct qeth_card *card, @@ -1716,7 +1706,6 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card, { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - int tmp; int rc; QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot); @@ -1735,15 +1724,10 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card, rc = qeth_l3_send_ipa_arp_cmd(card, iob, QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN, qeth_l3_arp_query_cb, (void *)qinfo); - if (rc) { - tmp = rc; - QETH_DBF_MESSAGE(2, - "Error while querying ARP cache on %s: %s " - "(0x%x/%d)\n", QETH_CARD_IFNAME(card), - qeth_l3_arp_get_error_cause(&rc), tmp, tmp); - } - - return rc; + if (rc) + QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n", + CARD_DEVID(card), rc); + return qeth_l3_arp_makerc(rc); } static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) @@ -1793,15 +1777,18 @@ out: return rc; } -static int qeth_l3_arp_add_entry(struct qeth_card *card, - struct qeth_arp_cache_entry *entry) +static int qeth_l3_arp_modify_entry(struct qeth_card *card, + struct qeth_arp_cache_entry *entry, + enum qeth_arp_process_subcmds arp_cmd) { + struct qeth_arp_cache_entry *cmd_entry; struct qeth_cmd_buffer *iob; - char buf[16]; - int tmp; int rc; - QETH_CARD_TEXT(card, 3, "arpadent"); + if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY) + QETH_CARD_TEXT(card, 3, "arpadd"); + else + QETH_CARD_TEXT(card, 3, "arpdel"); /* * currently GuestLAN only supports the ARP assist function @@ -1814,71 +1801,25 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, return -EOPNOTSUPP; } - iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, - IPA_CMD_ASS_ARP_ADD_ENTRY, - sizeof(struct qeth_arp_cache_entry), - QETH_PROT_IPV4); + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd, + sizeof(*cmd_entry), QETH_PROT_IPV4); if (!iob) return -ENOMEM; - rc = qeth_send_setassparms(card, iob, - sizeof(struct qeth_arp_cache_entry), - (unsigned long) entry, - qeth_setassparms_cb, NULL); - if (rc) { - tmp = rc; - qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); - QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s " - "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), - qeth_l3_arp_get_error_cause(&rc), tmp, tmp); - } - return rc; -} - -static int qeth_l3_arp_remove_entry(struct qeth_card *card, - struct qeth_arp_cache_entry *entry) -{ - struct qeth_cmd_buffer *iob; - char buf[16] = {0, }; - int tmp; - int rc; - QETH_CARD_TEXT(card, 3, "arprment"); + cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry; + ether_addr_copy(cmd_entry->macaddr, entry->macaddr); + memcpy(cmd_entry->ipaddr, entry->ipaddr, 4); + rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL); + if (rc) + QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n", + arp_cmd, CARD_DEVID(card), rc); - /* - * currently GuestLAN only supports the ARP assist function - * IPA_CMD_ASS_ARP_QUERY_INFO, but 
not IPA_CMD_ASS_ARP_REMOVE_ENTRY; - * thus we say EOPNOTSUPP for this ARP function - */ - if (card->info.guestlan) - return -EOPNOTSUPP; - if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { - return -EOPNOTSUPP; - } - memcpy(buf, entry, 12); - iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, - IPA_CMD_ASS_ARP_REMOVE_ENTRY, - 12, - QETH_PROT_IPV4); - if (!iob) - return -ENOMEM; - rc = qeth_send_setassparms(card, iob, - 12, (unsigned long)buf, - qeth_setassparms_cb, NULL); - if (rc) { - tmp = rc; - memset(buf, 0, 16); - qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); - QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s" - " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), - qeth_l3_arp_get_error_cause(&rc), tmp, tmp); - } - return rc; + return qeth_l3_arp_makerc(rc); } static int qeth_l3_arp_flush_cache(struct qeth_card *card) { int rc; - int tmp; QETH_CARD_TEXT(card, 3, "arpflush"); @@ -1894,19 +1835,17 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card) } rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); - if (rc) { - tmp = rc; - QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s " - "(0x%x/%d)\n", QETH_CARD_IFNAME(card), - qeth_l3_arp_get_error_cause(&rc), tmp, tmp); - } - return rc; + if (rc) + QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n", + CARD_DEVID(card), rc); + return qeth_l3_arp_makerc(rc); } static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct qeth_card *card = dev->ml_priv; struct qeth_arp_cache_entry arp_entry; + enum qeth_arp_process_subcmds arp_cmd; int rc = 0; switch (cmd) { @@ -1925,27 +1864,16 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data); break; case SIOC_QETH_ARP_ADD_ENTRY: - if (!capable(CAP_NET_ADMIN)) { - rc = -EPERM; - break; - } - if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, - sizeof(struct qeth_arp_cache_entry))) - rc = -EFAULT; - else - rc = qeth_l3_arp_add_entry(card, &arp_entry); - break; case SIOC_QETH_ARP_REMOVE_ENTRY: - if (!capable(CAP_NET_ADMIN)) { - rc = -EPERM; - break; - } - if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, - sizeof(struct qeth_arp_cache_entry))) - rc = -EFAULT; - else - rc = qeth_l3_arp_remove_entry(card, &arp_entry); - break; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry))) + return -EFAULT; + + arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ? 
+ IPA_CMD_ASS_ARP_ADD_ENTRY : + IPA_CMD_ASS_ARP_REMOVE_ENTRY; + return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd); case SIOC_QETH_ARP_FLUSH_CACHE: if (!capable(CAP_NET_ADMIN)) { rc = -EPERM; @@ -2383,12 +2311,12 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_neigh_setup = qeth_l3_neigh_setup, }; -static int qeth_l3_setup_netdev(struct qeth_card *card) +static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok) { unsigned int headroom; int rc; - if (card->dev->netdev_ops) + if (qeth_netdev_is_registered(card->dev)) return 0; if (card->info.type == QETH_CARD_TYPE_OSD || @@ -2457,6 +2385,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); rc = register_netdev(card->dev); + if (!rc && carrier_ok) + netif_carrier_on(card->dev); + out: if (rc) card->dev->netdev_ops = NULL; @@ -2497,7 +2428,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l3_set_offline(cgdev); - unregister_netdev(card->dev); + if (qeth_netdev_is_registered(card->dev)) + unregister_netdev(card->dev); qeth_l3_clear_ip_htable(card, 0); qeth_l3_clear_ipato_list(card); } @@ -2507,6 +2439,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc = 0; enum qeth_card_states recover_flag; + bool carrier_ok; mutex_lock(&card->discipline_mutex); mutex_lock(&card->conf_mutex); @@ -2514,14 +2447,14 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); recover_flag = card->state; - rc = qeth_core_hardsetup_card(card); + rc = qeth_core_hardsetup_card(card, &carrier_ok); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); rc = -ENODEV; goto out_remove; } - rc = qeth_l3_setup_netdev(card); + rc = qeth_l3_setup_netdev(card, carrier_ok); if (rc) goto out_remove; diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 446166ba0bec..90b686e586c6 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_ARCH_GEMINI) += gemini/ obj-$(CONFIG_ARCH_MXC) += imx/ obj-$(CONFIG_SOC_XWAY) += lantiq/ obj-y += mediatek/ -obj-$(CONFIG_ARCH_MESON) += amlogic/ +obj-y += amlogic/ obj-y += qcom/ obj-y += renesas/ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig index 2f282b472912..5501ad5650b2 100644 --- a/drivers/soc/amlogic/Kconfig +++ b/drivers/soc/amlogic/Kconfig @@ -7,6 +7,15 @@ config MESON_CANVAS help Say yes to support the canvas IP for Amlogic SoCs. +config MESON_CLK_MEASURE + bool "Amlogic Meson SoC Clock Measure driver" + depends on ARCH_MESON || COMPILE_TEST + default ARCH_MESON + select REGMAP_MMIO + help + Say yes to support of Measuring a set of internal SoC clocks + from the debugfs interface. 
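(Editorial aside, not part of the patch.) The help text above says the new MESON_CLK_MEASURE driver reports a set of internal SoC clocks through debugfs. A minimal user-space sketch of reading one such measurement is shown below; it assumes the driver exposes one read-only file per measured clock, and the directory name, file name, and output format used here are illustrative assumptions only, so check the driver's debugfs registration for the real layout.

/* Hypothetical user-space sketch: read one measured clock rate from the
 * debugfs interface described by MESON_CLK_MEASURE. The path below is an
 * assumption for illustration, not taken from this patch.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/meson-clk-measure/clk81"; /* assumed path */
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("clk81: %s", line); /* driver prints the measured rate */
	fclose(f);
	return 0;
}

This only works with CONFIG_MESON_CLK_MEASURE=y (it is a bool option, as shown above) and with debugfs mounted at /sys/kernel/debug.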
+ config MESON_GX_SOCINFO bool "Amlogic Meson GX SoC Information driver" depends on ARCH_MESON || COMPILE_TEST diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile index 0ab16d35ac36..bf2d109f61e9 100644 --- a/drivers/soc/amlogic/Makefile +++ b/drivers/soc/amlogic/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_MESON_CANVAS) += meson-canvas.o +obj-$(CONFIG_MESON_CLK_MEASURE) += meson-clk-measure.o obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c new file mode 100644 index 000000000000..daea191a66fa --- /dev/null +++ b/drivers/soc/amlogic/meson-clk-measure.c @@ -0,0 +1,350 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2018 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + */ + +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/bitfield.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/regmap.h> + +#define MSR_CLK_DUTY 0x0 +#define MSR_CLK_REG0 0x4 +#define MSR_CLK_REG1 0x8 +#define MSR_CLK_REG2 0xc + +#define MSR_DURATION GENMASK(15, 0) +#define MSR_ENABLE BIT(16) +#define MSR_CONT BIT(17) /* continuous measurement */ +#define MSR_INTR BIT(18) /* interrupts */ +#define MSR_RUN BIT(19) +#define MSR_CLK_SRC GENMASK(26, 20) +#define MSR_BUSY BIT(31) + +#define MSR_VAL_MASK GENMASK(15, 0) + +#define DIV_MIN 32 +#define DIV_STEP 32 +#define DIV_MAX 640 + +#define CLK_MSR_MAX 128 + +struct meson_msr_id { + struct meson_msr *priv; + unsigned int id; + const char *name; +}; + +struct meson_msr { + struct regmap *regmap; + struct meson_msr_id msr_table[CLK_MSR_MAX]; +}; + +#define CLK_MSR_ID(__id, __name) \ + [__id] = {.id = __id, .name = __name,} + +static struct meson_msr_id clk_msr_m8[CLK_MSR_MAX] = { + CLK_MSR_ID(0, "ring_osc_out_ee0"), + CLK_MSR_ID(1, "ring_osc_out_ee1"), + CLK_MSR_ID(2, "ring_osc_out_ee2"), + CLK_MSR_ID(3, "a9_ring_osck"), + CLK_MSR_ID(6, "vid_pll"), + CLK_MSR_ID(7, "clk81"), + CLK_MSR_ID(8, "encp"), + CLK_MSR_ID(9, "encl"), + CLK_MSR_ID(11, "eth_rmii"), + CLK_MSR_ID(13, "amclk"), + CLK_MSR_ID(14, "fec_clk_0"), + CLK_MSR_ID(15, "fec_clk_1"), + CLK_MSR_ID(16, "fec_clk_2"), + CLK_MSR_ID(18, "a9_clk_div16"), + CLK_MSR_ID(19, "hdmi_sys"), + CLK_MSR_ID(20, "rtc_osc_clk_out"), + CLK_MSR_ID(21, "i2s_clk_in_src0"), + CLK_MSR_ID(22, "clk_rmii_from_pad"), + CLK_MSR_ID(23, "hdmi_ch0_tmds"), + CLK_MSR_ID(24, "lvds_fifo"), + CLK_MSR_ID(26, "sc_clk_int"), + CLK_MSR_ID(28, "sar_adc"), + CLK_MSR_ID(30, "mpll_clk_test_out"), + CLK_MSR_ID(31, "audac_clkpi"), + CLK_MSR_ID(32, "vdac"), + CLK_MSR_ID(33, "sdhc_rx"), + CLK_MSR_ID(34, "sdhc_sd"), + CLK_MSR_ID(35, "mali"), + CLK_MSR_ID(36, "hdmi_tx_pixel"), + CLK_MSR_ID(38, "vdin_meas"), + CLK_MSR_ID(39, "pcm_sclk"), + CLK_MSR_ID(40, "pcm_mclk"), + CLK_MSR_ID(41, "eth_rx_tx"), + CLK_MSR_ID(42, "pwm_d"), + CLK_MSR_ID(43, "pwm_c"), + CLK_MSR_ID(44, "pwm_b"), + CLK_MSR_ID(45, "pwm_a"), + CLK_MSR_ID(46, "pcm2_sclk"), + CLK_MSR_ID(47, "ddr_dpll_pt"), + CLK_MSR_ID(48, "pwm_f"), + CLK_MSR_ID(49, "pwm_e"), + CLK_MSR_ID(59, "hcodec"), + CLK_MSR_ID(60, "usb_32k_alt"), + CLK_MSR_ID(61, "gpio"), + CLK_MSR_ID(62, "vid2_pll"), + CLK_MSR_ID(63, "mipi_csi_cfg"), +}; + +static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = { + CLK_MSR_ID(0, "ring_osc_out_ee_0"), + CLK_MSR_ID(1, "ring_osc_out_ee_1"), + CLK_MSR_ID(2, "ring_osc_out_ee_2"), + CLK_MSR_ID(3, 
"a53_ring_osc"), + CLK_MSR_ID(4, "gp0_pll"), + CLK_MSR_ID(6, "enci"), + CLK_MSR_ID(7, "clk81"), + CLK_MSR_ID(8, "encp"), + CLK_MSR_ID(9, "encl"), + CLK_MSR_ID(10, "vdac"), + CLK_MSR_ID(11, "rgmii_tx"), + CLK_MSR_ID(12, "pdm"), + CLK_MSR_ID(13, "amclk"), + CLK_MSR_ID(14, "fec_0"), + CLK_MSR_ID(15, "fec_1"), + CLK_MSR_ID(16, "fec_2"), + CLK_MSR_ID(17, "sys_pll_div16"), + CLK_MSR_ID(18, "sys_cpu_div16"), + CLK_MSR_ID(19, "hdmitx_sys"), + CLK_MSR_ID(20, "rtc_osc_out"), + CLK_MSR_ID(21, "i2s_in_src0"), + CLK_MSR_ID(22, "eth_phy_ref"), + CLK_MSR_ID(23, "hdmi_todig"), + CLK_MSR_ID(26, "sc_int"), + CLK_MSR_ID(28, "sar_adc"), + CLK_MSR_ID(31, "mpll_test_out"), + CLK_MSR_ID(32, "vdec"), + CLK_MSR_ID(35, "mali"), + CLK_MSR_ID(36, "hdmi_tx_pixel"), + CLK_MSR_ID(37, "i958"), + CLK_MSR_ID(38, "vdin_meas"), + CLK_MSR_ID(39, "pcm_sclk"), + CLK_MSR_ID(40, "pcm_mclk"), + CLK_MSR_ID(41, "eth_rx_or_rmii"), + CLK_MSR_ID(42, "mp0_out"), + CLK_MSR_ID(43, "fclk_div5"), + CLK_MSR_ID(44, "pwm_b"), + CLK_MSR_ID(45, "pwm_a"), + CLK_MSR_ID(46, "vpu"), + CLK_MSR_ID(47, "ddr_dpll_pt"), + CLK_MSR_ID(48, "mp1_out"), + CLK_MSR_ID(49, "mp2_out"), + CLK_MSR_ID(50, "mp3_out"), + CLK_MSR_ID(51, "nand_core"), + CLK_MSR_ID(52, "sd_emmc_b"), + CLK_MSR_ID(53, "sd_emmc_a"), + CLK_MSR_ID(55, "vid_pll_div_out"), + CLK_MSR_ID(56, "cci"), + CLK_MSR_ID(57, "wave420l_c"), + CLK_MSR_ID(58, "wave420l_b"), + CLK_MSR_ID(59, "hcodec"), + CLK_MSR_ID(60, "alt_32k"), + CLK_MSR_ID(61, "gpio_msr"), + CLK_MSR_ID(62, "hevc"), + CLK_MSR_ID(66, "vid_lock"), + CLK_MSR_ID(70, "pwm_f"), + CLK_MSR_ID(71, "pwm_e"), + CLK_MSR_ID(72, "pwm_d"), + CLK_MSR_ID(73, "pwm_c"), + CLK_MSR_ID(75, "aoclkx2_int"), + CLK_MSR_ID(76, "aoclk_int"), + CLK_MSR_ID(77, "rng_ring_osc_0"), + CLK_MSR_ID(78, "rng_ring_osc_1"), + CLK_MSR_ID(79, "rng_ring_osc_2"), + CLK_MSR_ID(80, "rng_ring_osc_3"), + CLK_MSR_ID(81, "vapb"), + CLK_MSR_ID(82, "ge2d"), +}; + +static int meson_measure_id(struct meson_msr_id *clk_msr_id, + unsigned int duration) +{ + struct meson_msr *priv = clk_msr_id->priv; + unsigned int val; + int ret; + + regmap_write(priv->regmap, MSR_CLK_REG0, 0); + + /* Set measurement duration */ + regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_DURATION, + FIELD_PREP(MSR_DURATION, duration - 1)); + + /* Set ID */ + regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_CLK_SRC, + FIELD_PREP(MSR_CLK_SRC, clk_msr_id->id)); + + /* Enable & Start */ + regmap_update_bits(priv->regmap, MSR_CLK_REG0, + MSR_RUN | MSR_ENABLE, + MSR_RUN | MSR_ENABLE); + + ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0, + val, !(val & MSR_BUSY), 10, 10000); + if (ret) + return ret; + + /* Disable */ + regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0); + + /* Get the value in multiple of gate time counts */ + regmap_read(priv->regmap, MSR_CLK_REG2, &val); + + if (val >= MSR_VAL_MASK) + return -EINVAL; + + return DIV_ROUND_CLOSEST_ULL((val & MSR_VAL_MASK) * 1000000ULL, + duration); +} + +static int meson_measure_best_id(struct meson_msr_id *clk_msr_id, + unsigned int *precision) +{ + unsigned int duration = DIV_MAX; + int ret; + + /* Start from max duration and down to min duration */ + do { + ret = meson_measure_id(clk_msr_id, duration); + if (ret >= 0) + *precision = (2 * 1000000) / duration; + else + duration -= DIV_STEP; + } while (duration >= DIV_MIN && ret == -EINVAL); + + return ret; +} + +static int clk_msr_show(struct seq_file *s, void *data) +{ + struct meson_msr_id *clk_msr_id = s->private; + unsigned int precision = 0; + int val; + + val = meson_measure_best_id(clk_msr_id, 
&precision); + if (val < 0) + return val; + + seq_printf(s, "%d\t+/-%dHz\n", val, precision); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(clk_msr); + +static int clk_msr_summary_show(struct seq_file *s, void *data) +{ + struct meson_msr_id *msr_table = s->private; + unsigned int precision = 0; + int val, i; + + seq_puts(s, " clock rate precision\n"); + seq_puts(s, "---------------------------------------------\n"); + + for (i = 0 ; i < CLK_MSR_MAX ; ++i) { + if (!msr_table[i].name) + continue; + + val = meson_measure_best_id(&msr_table[i], &precision); + if (val < 0) + return val; + + seq_printf(s, " %-20s %10d +/-%dHz\n", + msr_table[i].name, val, precision); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(clk_msr_summary); + +static const struct regmap_config meson_clk_msr_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = MSR_CLK_REG2, +}; + +static int meson_msr_probe(struct platform_device *pdev) +{ + const struct meson_msr_id *match_data; + struct meson_msr *priv; + struct resource *res; + struct dentry *root, *clks; + void __iomem *base; + int i; + + priv = devm_kzalloc(&pdev->dev, sizeof(struct meson_msr), + GFP_KERNEL); + if (!priv) + return -ENOMEM; + + match_data = device_get_match_data(&pdev->dev); + if (!match_data) { + dev_err(&pdev->dev, "failed to get match data\n"); + return -ENODEV; + } + + memcpy(priv->msr_table, match_data, sizeof(priv->msr_table)); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) { + dev_err(&pdev->dev, "io resource mapping failed\n"); + return PTR_ERR(base); + } + + priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, + &meson_clk_msr_regmap_config); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + + root = debugfs_create_dir("meson-clk-msr", NULL); + clks = debugfs_create_dir("clks", root); + + debugfs_create_file("measure_summary", 0444, root, + priv->msr_table, &clk_msr_summary_fops); + + for (i = 0 ; i < CLK_MSR_MAX ; ++i) { + if (!priv->msr_table[i].name) + continue; + + priv->msr_table[i].priv = priv; + + debugfs_create_file(priv->msr_table[i].name, 0444, clks, + &priv->msr_table[i], &clk_msr_fops); + } + + return 0; +} + +static const struct of_device_id meson_msr_match_table[] = { + { + .compatible = "amlogic,meson-gx-clk-measure", + .data = (void *)clk_msr_gx, + }, + { + .compatible = "amlogic,meson8-clk-measure", + .data = (void *)clk_msr_m8, + }, + { + .compatible = "amlogic,meson8b-clk-measure", + .data = (void *)clk_msr_m8, + }, + { /* sentinel */ } +}; + +static struct platform_driver meson_msr_driver = { + .probe = meson_msr_probe, + .driver = { + .name = "meson_msr", + .of_match_table = meson_msr_match_table, + }, +}; +builtin_platform_driver(meson_msr_driver); diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c index 4dd03b099c89..096a83cf0caf 100644 --- a/drivers/soc/atmel/soc.c +++ b/drivers/soc/atmel/soc.c @@ -66,6 +66,8 @@ static const struct at91_soc __initconst socs[] = { AT91_SOC(AT91SAM9XE128_CIDR_MATCH, 0, "at91sam9xe128", "at91sam9xe128"), AT91_SOC(AT91SAM9XE256_CIDR_MATCH, 0, "at91sam9xe256", "at91sam9xe256"), AT91_SOC(AT91SAM9XE512_CIDR_MATCH, 0, "at91sam9xe512", "at91sam9xe512"), + AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH, + "sam9x60", "sam9x60"), #endif #ifdef CONFIG_SOC_SAMA5 AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH, @@ -90,12 +92,20 @@ static const struct at91_soc __initconst socs[] = { "sama5d27c 128MiB SiP", "sama5d2"), AT91_SOC(SAMA5D2_CIDR_MATCH, 
SAMA5D27C_D5M_EXID_MATCH, "sama5d27c 64MiB SiP", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_LD1G_EXID_MATCH, + "sama5d27c 128MiB LPDDR2 SiP", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_LD2G_EXID_MATCH, + "sama5d27c 256MiB LPDDR2 SiP", "sama5d2"), AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CU_EXID_MATCH, "sama5d28", "sama5d2"), AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CN_EXID_MATCH, "sama5d28", "sama5d2"), AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28C_D1G_EXID_MATCH, "sama5d28c 128MiB SiP", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28C_LD1G_EXID_MATCH, + "sama5d28c 128MiB LPDDR2 SiP", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28C_LD2G_EXID_MATCH, + "sama5d28c 256MiB LPDDR2 SiP", "sama5d2"), AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH, "sama5d31", "sama5d3"), AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH, diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h index 94cd5d1ab502..ee652e4841a5 100644 --- a/drivers/soc/atmel/soc.h +++ b/drivers/soc/atmel/soc.h @@ -42,6 +42,7 @@ at91_soc_init(const struct at91_soc *socs); #define AT91SAM9G45_CIDR_MATCH 0x019b05a0 #define AT91SAM9X5_CIDR_MATCH 0x019a05a0 #define AT91SAM9N12_CIDR_MATCH 0x019a07a0 +#define SAM9X60_CIDR_MATCH 0x019b35a0 #define AT91SAM9M11_EXID_MATCH 0x00000001 #define AT91SAM9M10_EXID_MATCH 0x00000002 @@ -58,6 +59,8 @@ at91_soc_init(const struct at91_soc *socs); #define AT91SAM9N12_EXID_MATCH 0x00000006 #define AT91SAM9CN11_EXID_MATCH 0x00000009 +#define SAM9X60_EXID_MATCH 0x00000000 + #define AT91SAM9XE128_CIDR_MATCH 0x329973a0 #define AT91SAM9XE256_CIDR_MATCH 0x329a93a0 #define AT91SAM9XE512_CIDR_MATCH 0x329aa3a0 @@ -73,9 +76,13 @@ at91_soc_init(const struct at91_soc *socs); #define SAMA5D26CU_EXID_MATCH 0x00000012 #define SAMA5D27C_D1G_EXID_MATCH 0x00000033 #define SAMA5D27C_D5M_EXID_MATCH 0x00000032 +#define SAMA5D27C_LD1G_EXID_MATCH 0x00000061 +#define SAMA5D27C_LD2G_EXID_MATCH 0x00000062 #define SAMA5D27CU_EXID_MATCH 0x00000011 #define SAMA5D27CN_EXID_MATCH 0x00000021 #define SAMA5D28C_D1G_EXID_MATCH 0x00000013 +#define SAMA5D28C_LD1G_EXID_MATCH 0x00000071 +#define SAMA5D28C_LD2G_EXID_MATCH 0x00000072 #define SAMA5D28CU_EXID_MATCH 0x00000010 #define SAMA5D28CN_EXID_MATCH 0x00000020 diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c index 14185451901d..bf9123f727e8 100644 --- a/drivers/soc/bcm/brcmstb/common.c +++ b/drivers/soc/bcm/brcmstb/common.c @@ -31,13 +31,17 @@ static const struct of_device_id brcmstb_machine_match[] = { bool soc_is_brcmstb(void) { + const struct of_device_id *match; struct device_node *root; root = of_find_node_by_path("/"); if (!root) return false; - return of_match_node(brcmstb_machine_match, root) != NULL; + match = of_match_node(brcmstb_machine_match, root); + of_node_put(root); + + return match != NULL; } u32 brcmstb_get_family_id(void) diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c index a5577dd5eb08..8ee06347447c 100644 --- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c +++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c @@ -404,7 +404,7 @@ noinline int brcmstb_pm_s3_finish(void) { struct brcmstb_s3_params *params = ctrl.s3_params; dma_addr_t params_pa = ctrl.s3_params_pa; - phys_addr_t reentry = virt_to_phys(&cpu_resume); + phys_addr_t reentry = virt_to_phys(&cpu_resume_arm); enum bsp_initiate_command cmd; u32 flags; diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c index a78dfe0a2b50..5d1aacdd84ef 100644 --- a/drivers/soc/bcm/raspberrypi-power.c +++ 
b/drivers/soc/bcm/raspberrypi-power.c @@ -1,9 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* (C) 2015 Pengutronix, Alexander Aring <aar@pengutronix.de> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Authors: * Alexander Aring <aar@pengutronix.de> * Eric Anholt <eric@anholt.net> diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig index a5b86a28f343..2112d18dbb7b 100644 --- a/drivers/soc/imx/Kconfig +++ b/drivers/soc/imx/Kconfig @@ -1,8 +1,8 @@ menu "i.MX SoC drivers" -config IMX7_PM_DOMAINS - bool "i.MX7 PM domains" - depends on SOC_IMX7D || (COMPILE_TEST && OF) +config IMX_GPCV2_PM_DOMAINS + bool "i.MX GPCv2 PM domains" + depends on SOC_IMX7D || SOC_IMX8MQ || (COMPILE_TEST && OF) depends on PM select PM_GENERIC_DOMAINS default y if SOC_IMX7D diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile index aab41a5cc317..506a6f3c2b9b 100644 --- a/drivers/soc/imx/Makefile +++ b/drivers/soc/imx/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o -obj-$(CONFIG_IMX7_PM_DOMAINS) += gpcv2.o +obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index aa3729ecaa9e..7d14a4b4e82a 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c @@ -35,7 +35,7 @@ #define GPU_VPU_PUP_REQ BIT(1) #define GPU_VPU_PDN_REQ BIT(0) -#define GPC_CLK_MAX 6 +#define GPC_CLK_MAX 7 #define PGC_DOMAIN_FLAG_NO_PD BIT(0) diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index e7b5994fee9d..8b4f48a2ca57 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -14,23 +14,54 @@ #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <dt-bindings/power/imx7-power.h> +#include <dt-bindings/power/imx8mq-power.h> #define GPC_LPCR_A_CORE_BSC 0x000 #define GPC_PGC_CPU_MAPPING 0x0ec -#define USB_HSIC_PHY_A_CORE_DOMAIN BIT(6) -#define USB_OTG2_PHY_A_CORE_DOMAIN BIT(5) -#define USB_OTG1_PHY_A_CORE_DOMAIN BIT(4) -#define PCIE_PHY_A_CORE_DOMAIN BIT(3) -#define MIPI_PHY_A_CORE_DOMAIN BIT(2) + +#define IMX7_USB_HSIC_PHY_A_CORE_DOMAIN BIT(6) +#define IMX7_USB_OTG2_PHY_A_CORE_DOMAIN BIT(5) +#define IMX7_USB_OTG1_PHY_A_CORE_DOMAIN BIT(4) +#define IMX7_PCIE_PHY_A_CORE_DOMAIN BIT(3) +#define IMX7_MIPI_PHY_A_CORE_DOMAIN BIT(2) + +#define IMX8M_PCIE2_A53_DOMAIN BIT(15) +#define IMX8M_MIPI_CSI2_A53_DOMAIN BIT(14) +#define IMX8M_MIPI_CSI1_A53_DOMAIN BIT(13) +#define IMX8M_DISP_A53_DOMAIN BIT(12) +#define IMX8M_HDMI_A53_DOMAIN BIT(11) +#define IMX8M_VPU_A53_DOMAIN BIT(10) +#define IMX8M_GPU_A53_DOMAIN BIT(9) +#define IMX8M_DDR2_A53_DOMAIN BIT(8) +#define IMX8M_DDR1_A53_DOMAIN BIT(7) +#define IMX8M_OTG2_A53_DOMAIN BIT(5) +#define IMX8M_OTG1_A53_DOMAIN BIT(4) +#define IMX8M_PCIE1_A53_DOMAIN BIT(3) +#define IMX8M_MIPI_A53_DOMAIN BIT(2) #define GPC_PU_PGC_SW_PUP_REQ 0x0f8 #define GPC_PU_PGC_SW_PDN_REQ 0x104 -#define USB_HSIC_PHY_SW_Pxx_REQ BIT(4) -#define USB_OTG2_PHY_SW_Pxx_REQ BIT(3) -#define USB_OTG1_PHY_SW_Pxx_REQ BIT(2) -#define PCIE_PHY_SW_Pxx_REQ BIT(1) -#define MIPI_PHY_SW_Pxx_REQ BIT(0) + +#define IMX7_USB_HSIC_PHY_SW_Pxx_REQ BIT(4) +#define IMX7_USB_OTG2_PHY_SW_Pxx_REQ BIT(3) +#define IMX7_USB_OTG1_PHY_SW_Pxx_REQ BIT(2) +#define IMX7_PCIE_PHY_SW_Pxx_REQ BIT(1) +#define IMX7_MIPI_PHY_SW_Pxx_REQ BIT(0) + +#define IMX8M_PCIE2_SW_Pxx_REQ BIT(13) +#define IMX8M_MIPI_CSI2_SW_Pxx_REQ BIT(12) +#define IMX8M_MIPI_CSI1_SW_Pxx_REQ BIT(11) +#define IMX8M_DISP_SW_Pxx_REQ BIT(10) 
+#define IMX8M_HDMI_SW_Pxx_REQ BIT(9) +#define IMX8M_VPU_SW_Pxx_REQ BIT(8) +#define IMX8M_GPU_SW_Pxx_REQ BIT(7) +#define IMX8M_DDR2_SW_Pxx_REQ BIT(6) +#define IMX8M_DDR1_SW_Pxx_REQ BIT(5) +#define IMX8M_OTG2_SW_Pxx_REQ BIT(3) +#define IMX8M_OTG1_SW_Pxx_REQ BIT(2) +#define IMX8M_PCIE1_SW_Pxx_REQ BIT(1) +#define IMX8M_MIPI_SW_Pxx_REQ BIT(0) #define GPC_M4_PU_PDN_FLG 0x1bc @@ -40,9 +71,22 @@ * GPC_PGC memory map are incorrect, below offset * values are from design RTL. */ -#define PGC_MIPI 16 -#define PGC_PCIE 17 -#define PGC_USB_HSIC 20 +#define IMX7_PGC_MIPI 16 +#define IMX7_PGC_PCIE 17 +#define IMX7_PGC_USB_HSIC 20 + +#define IMX8M_PGC_MIPI 16 +#define IMX8M_PGC_PCIE1 17 +#define IMX8M_PGC_OTG1 18 +#define IMX8M_PGC_OTG2 19 +#define IMX8M_PGC_DDR1 21 +#define IMX8M_PGC_GPU 23 +#define IMX8M_PGC_VPU 24 +#define IMX8M_PGC_DISP 26 +#define IMX8M_PGC_MIPI_CSI1 27 +#define IMX8M_PGC_MIPI_CSI2 28 +#define IMX8M_PGC_PCIE2 29 + #define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40) #define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc) @@ -67,6 +111,7 @@ struct imx_pgc_domain { struct imx_pgc_domain_data { const struct imx_pgc_domain *domains; size_t domains_num; + const struct regmap_access_table *reg_access_table; }; static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd, @@ -166,11 +211,11 @@ static const struct imx_pgc_domain imx7_pgc_domains[] = { .name = "mipi-phy", }, .bits = { - .pxx = MIPI_PHY_SW_Pxx_REQ, - .map = MIPI_PHY_A_CORE_DOMAIN, + .pxx = IMX7_MIPI_PHY_SW_Pxx_REQ, + .map = IMX7_MIPI_PHY_A_CORE_DOMAIN, }, .voltage = 1000000, - .pgc = PGC_MIPI, + .pgc = IMX7_PGC_MIPI, }, [IMX7_POWER_DOMAIN_PCIE_PHY] = { @@ -178,11 +223,11 @@ static const struct imx_pgc_domain imx7_pgc_domains[] = { .name = "pcie-phy", }, .bits = { - .pxx = PCIE_PHY_SW_Pxx_REQ, - .map = PCIE_PHY_A_CORE_DOMAIN, + .pxx = IMX7_PCIE_PHY_SW_Pxx_REQ, + .map = IMX7_PCIE_PHY_A_CORE_DOMAIN, }, .voltage = 1000000, - .pgc = PGC_PCIE, + .pgc = IMX7_PGC_PCIE, }, [IMX7_POWER_DOMAIN_USB_HSIC_PHY] = { @@ -190,17 +235,195 @@ static const struct imx_pgc_domain imx7_pgc_domains[] = { .name = "usb-hsic-phy", }, .bits = { - .pxx = USB_HSIC_PHY_SW_Pxx_REQ, - .map = USB_HSIC_PHY_A_CORE_DOMAIN, + .pxx = IMX7_USB_HSIC_PHY_SW_Pxx_REQ, + .map = IMX7_USB_HSIC_PHY_A_CORE_DOMAIN, }, .voltage = 1200000, - .pgc = PGC_USB_HSIC, + .pgc = IMX7_PGC_USB_HSIC, }, }; +static const struct regmap_range imx7_yes_ranges[] = { + regmap_reg_range(GPC_LPCR_A_CORE_BSC, + GPC_M4_PU_PDN_FLG), + regmap_reg_range(GPC_PGC_CTRL(IMX7_PGC_MIPI), + GPC_PGC_SR(IMX7_PGC_MIPI)), + regmap_reg_range(GPC_PGC_CTRL(IMX7_PGC_PCIE), + GPC_PGC_SR(IMX7_PGC_PCIE)), + regmap_reg_range(GPC_PGC_CTRL(IMX7_PGC_USB_HSIC), + GPC_PGC_SR(IMX7_PGC_USB_HSIC)), +}; + +static const struct regmap_access_table imx7_access_table = { + .yes_ranges = imx7_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(imx7_yes_ranges), +}; + static const struct imx_pgc_domain_data imx7_pgc_domain_data = { .domains = imx7_pgc_domains, .domains_num = ARRAY_SIZE(imx7_pgc_domains), + .reg_access_table = &imx7_access_table, +}; + +static const struct imx_pgc_domain imx8m_pgc_domains[] = { + [IMX8M_POWER_DOMAIN_MIPI] = { + .genpd = { + .name = "mipi", + }, + .bits = { + .pxx = IMX8M_MIPI_SW_Pxx_REQ, + .map = IMX8M_MIPI_A53_DOMAIN, + }, + .pgc = IMX8M_PGC_MIPI, + }, + + [IMX8M_POWER_DOMAIN_PCIE1] = { + .genpd = { + .name = "pcie1", + }, + .bits = { + .pxx = IMX8M_PCIE1_SW_Pxx_REQ, + .map = IMX8M_PCIE1_A53_DOMAIN, + }, + .pgc = IMX8M_PGC_PCIE1, + }, + + [IMX8M_POWER_DOMAIN_USB_OTG1] = { + .genpd = { + .name = "usb-otg1", + }, + .bits = { 
+ .pxx = IMX8M_OTG1_SW_Pxx_REQ, + .map = IMX8M_OTG1_A53_DOMAIN, + }, + .pgc = IMX8M_PGC_OTG1, + }, + + [IMX8M_POWER_DOMAIN_USB_OTG2] = { + .genpd = { + .name = "usb-otg2", + }, + .bits = { + .pxx = IMX8M_OTG2_SW_Pxx_REQ, + .map = IMX8M_OTG2_A53_DOMAIN, + }, + .pgc = IMX8M_PGC_OTG2, + }, + + [IMX8M_POWER_DOMAIN_DDR1] = { + .genpd = { + .name = "ddr1", + }, + .bits = { + .pxx = IMX8M_DDR1_SW_Pxx_REQ, + .map = IMX8M_DDR2_A53_DOMAIN, + }, + .pgc = IMX8M_PGC_DDR1, + }, + + [IMX8M_POWER_DOMAIN_GPU] = { + .genpd = { + .name = "gpu", + }, + .bits = { + .pxx = IMX8M_GPU_SW_Pxx_REQ, + .map = IMX8M_GPU_A53_DOMAIN, + }, + .pgc = IMX8M_PGC_GPU, + }, + + [IMX8M_POWER_DOMAIN_VPU] = { + .genpd = { + .name = "vpu", + }, + .bits = { + .pxx = IMX8M_VPU_SW_Pxx_REQ, + .map = IMX8M_VPU_A53_DOMAIN, + }, + .pgc = IMX8M_PGC_VPU, + }, + + [IMX8M_POWER_DOMAIN_DISP] = { + .genpd = { + .name = "disp", + }, + .bits = { + .pxx = IMX8M_DISP_SW_Pxx_REQ, + .map = IMX8M_DISP_A53_DOMAIN, + }, + .pgc = IMX8M_PGC_DISP, + }, + + [IMX8M_POWER_DOMAIN_MIPI_CSI1] = { + .genpd = { + .name = "mipi-csi1", + }, + .bits = { + .pxx = IMX8M_MIPI_CSI1_SW_Pxx_REQ, + .map = IMX8M_MIPI_CSI1_A53_DOMAIN, + }, + .pgc = IMX8M_PGC_MIPI_CSI1, + }, + + [IMX8M_POWER_DOMAIN_MIPI_CSI2] = { + .genpd = { + .name = "mipi-csi2", + }, + .bits = { + .pxx = IMX8M_MIPI_CSI2_SW_Pxx_REQ, + .map = IMX8M_MIPI_CSI2_A53_DOMAIN, + }, + .pgc = IMX8M_PGC_MIPI_CSI2, + }, + + [IMX8M_POWER_DOMAIN_PCIE2] = { + .genpd = { + .name = "pcie2", + }, + .bits = { + .pxx = IMX8M_PCIE2_SW_Pxx_REQ, + .map = IMX8M_PCIE2_A53_DOMAIN, + }, + .pgc = IMX8M_PGC_PCIE2, + }, +}; + +static const struct regmap_range imx8m_yes_ranges[] = { + regmap_reg_range(GPC_LPCR_A_CORE_BSC, + GPC_M4_PU_PDN_FLG), + regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_MIPI), + GPC_PGC_SR(IMX8M_PGC_MIPI)), + regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_PCIE1), + GPC_PGC_SR(IMX8M_PGC_PCIE1)), + regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_OTG1), + GPC_PGC_SR(IMX8M_PGC_OTG1)), + regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_OTG2), + GPC_PGC_SR(IMX8M_PGC_OTG2)), + regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_DDR1), + GPC_PGC_SR(IMX8M_PGC_DDR1)), + regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_GPU), + GPC_PGC_SR(IMX8M_PGC_GPU)), + regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_VPU), + GPC_PGC_SR(IMX8M_PGC_VPU)), + regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_DISP), + GPC_PGC_SR(IMX8M_PGC_DISP)), + regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_MIPI_CSI1), + GPC_PGC_SR(IMX8M_PGC_MIPI_CSI1)), + regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_MIPI_CSI2), + GPC_PGC_SR(IMX8M_PGC_MIPI_CSI2)), + regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_PCIE2), + GPC_PGC_SR(IMX8M_PGC_PCIE2)), +}; + +static const struct regmap_access_table imx8m_access_table = { + .yes_ranges = imx8m_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(imx8m_yes_ranges), +}; + +static const struct imx_pgc_domain_data imx8m_pgc_domain_data = { + .domains = imx8m_pgc_domains, + .domains_num = ARRAY_SIZE(imx8m_pgc_domains), + .reg_access_table = &imx8m_access_table, }; static int imx_pgc_domain_probe(struct platform_device *pdev) @@ -217,7 +440,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev) dev_err(domain->dev, "Failed to get domain's regulator\n"); return PTR_ERR(domain->regulator); } - } else { + } else if (domain->voltage) { regulator_set_voltage(domain->regulator, domain->voltage, domain->voltage); } @@ -265,27 +488,15 @@ builtin_platform_driver(imx_pgc_domain_driver) static int imx_gpcv2_probe(struct platform_device *pdev) { - static const struct imx_pgc_domain_data *domain_data; - static const struct 
regmap_range yes_ranges[] = { - regmap_reg_range(GPC_LPCR_A_CORE_BSC, - GPC_M4_PU_PDN_FLG), - regmap_reg_range(GPC_PGC_CTRL(PGC_MIPI), - GPC_PGC_SR(PGC_MIPI)), - regmap_reg_range(GPC_PGC_CTRL(PGC_PCIE), - GPC_PGC_SR(PGC_PCIE)), - regmap_reg_range(GPC_PGC_CTRL(PGC_USB_HSIC), - GPC_PGC_SR(PGC_USB_HSIC)), - }; - static const struct regmap_access_table access_table = { - .yes_ranges = yes_ranges, - .n_yes_ranges = ARRAY_SIZE(yes_ranges), - }; - static const struct regmap_config regmap_config = { + const struct imx_pgc_domain_data *domain_data = + of_device_get_match_data(&pdev->dev); + + struct regmap_config regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .rd_table = &access_table, - .wr_table = &access_table, + .rd_table = domain_data->reg_access_table, + .wr_table = domain_data->reg_access_table, .max_register = SZ_4K, }; struct device *dev = &pdev->dev; @@ -313,8 +524,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev) return ret; } - domain_data = of_device_get_match_data(&pdev->dev); - for_each_child_of_node(pgc_np, np) { struct platform_device *pd_pdev; struct imx_pgc_domain *domain; @@ -372,6 +581,7 @@ static int imx_gpcv2_probe(struct platform_device *pdev) static const struct of_device_id imx_gpcv2_dt_ids[] = { { .compatible = "fsl,imx7d-gpc", .data = &imx7_pgc_domain_data, }, + { .compatible = "fsl,imx8mq-gpc", .data = &imx8m_pgc_domain_data, }, { } }; diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index a7d0667338f2..17bd7590464f 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -4,6 +4,18 @@ menu "MediaTek SoC drivers" depends on ARCH_MEDIATEK || COMPILE_TEST +config MTK_CMDQ + tristate "MediaTek CMDQ Support" + depends on ARCH_MEDIATEK || COMPILE_TEST + select MAILBOX + select MTK_CMDQ_MBOX + select MTK_INFRACFG + help + Say yes here to add support for the MediaTek Command Queue (CMDQ) + driver. The CMDQ is used to help read/write registers with critical + time limitation, such as updating display configuration during the + vblank. + config MTK_INFRACFG bool "MediaTek INFRACFG Support" select REGMAP diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile index 12998b08819e..64ce5eeaba32 100644 --- a/drivers/soc/mediatek/Makefile +++ b/drivers/soc/mediatek/Makefile @@ -1,3 +1,4 @@ +obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c new file mode 100644 index 000000000000..ff9fef5a032b --- /dev/null +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -0,0 +1,300 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. 
+ +#include <linux/completion.h> +#include <linux/errno.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/mailbox_controller.h> +#include <linux/soc/mediatek/mtk-cmdq.h> + +#define CMDQ_ARG_A_WRITE_MASK 0xffff +#define CMDQ_WRITE_ENABLE_MASK BIT(0) +#define CMDQ_EOC_IRQ_EN BIT(0) +#define CMDQ_EOC_CMD ((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \ + << 32 | CMDQ_EOC_IRQ_EN) + +static void cmdq_client_timeout(struct timer_list *t) +{ + struct cmdq_client *client = from_timer(client, t, timer); + + dev_err(client->client.dev, "cmdq timeout!\n"); +} + +struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout) +{ + struct cmdq_client *client; + + client = kzalloc(sizeof(*client), GFP_KERNEL); + if (!client) + return (struct cmdq_client *)-ENOMEM; + + client->timeout_ms = timeout; + if (timeout != CMDQ_NO_TIMEOUT) { + spin_lock_init(&client->lock); + timer_setup(&client->timer, cmdq_client_timeout, 0); + } + client->pkt_cnt = 0; + client->client.dev = dev; + client->client.tx_block = false; + client->chan = mbox_request_channel(&client->client, index); + + if (IS_ERR(client->chan)) { + long err; + + dev_err(dev, "failed to request channel\n"); + err = PTR_ERR(client->chan); + kfree(client); + + return ERR_PTR(err); + } + + return client; +} +EXPORT_SYMBOL(cmdq_mbox_create); + +void cmdq_mbox_destroy(struct cmdq_client *client) +{ + if (client->timeout_ms != CMDQ_NO_TIMEOUT) { + spin_lock(&client->lock); + del_timer_sync(&client->timer); + spin_unlock(&client->lock); + } + mbox_free_channel(client->chan); + kfree(client); +} +EXPORT_SYMBOL(cmdq_mbox_destroy); + +struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size) +{ + struct cmdq_pkt *pkt; + struct device *dev; + dma_addr_t dma_addr; + + pkt = kzalloc(sizeof(*pkt), GFP_KERNEL); + if (!pkt) + return ERR_PTR(-ENOMEM); + pkt->va_base = kzalloc(size, GFP_KERNEL); + if (!pkt->va_base) { + kfree(pkt); + return ERR_PTR(-ENOMEM); + } + pkt->buf_size = size; + pkt->cl = (void *)client; + + dev = client->chan->mbox->dev; + dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size); + kfree(pkt->va_base); + kfree(pkt); + return ERR_PTR(-ENOMEM); + } + + pkt->pa_base = dma_addr; + + return pkt; +} +EXPORT_SYMBOL(cmdq_pkt_create); + +void cmdq_pkt_destroy(struct cmdq_pkt *pkt) +{ + struct cmdq_client *client = (struct cmdq_client *)pkt->cl; + + dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size, + DMA_TO_DEVICE); + kfree(pkt->va_base); + kfree(pkt); +} +EXPORT_SYMBOL(cmdq_pkt_destroy); + +static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code, + u32 arg_a, u32 arg_b) +{ + u64 *cmd_ptr; + + if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) { + /* + * In the case of allocated buffer size (pkt->buf_size) is used + * up, the real required size (pkt->cmdq_buf_size) is still + * increased, so that the user knows how much memory should be + * ultimately allocated after appending all commands and + * flushing the command packet. Therefor, the user can call + * cmdq_pkt_create() again with the real required buffer size. 
+ */ + pkt->cmd_buf_size += CMDQ_INST_SIZE; + WARN_ONCE(1, "%s: buffer size %u is too small !\n", + __func__, (u32)pkt->buf_size); + return -ENOMEM; + } + cmd_ptr = pkt->va_base + pkt->cmd_buf_size; + (*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b; + pkt->cmd_buf_size += CMDQ_INST_SIZE; + + return 0; +} + +int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset) +{ + u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) | + (subsys << CMDQ_SUBSYS_SHIFT); + + return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value); +} +EXPORT_SYMBOL(cmdq_pkt_write); + +int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value, + u32 subsys, u32 offset, u32 mask) +{ + u32 offset_mask = offset; + int err = 0; + + if (mask != 0xffffffff) { + err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask); + offset_mask |= CMDQ_WRITE_ENABLE_MASK; + } + err |= cmdq_pkt_write(pkt, value, subsys, offset_mask); + + return err; +} +EXPORT_SYMBOL(cmdq_pkt_write_mask); + +int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event) +{ + u32 arg_b; + + if (event >= CMDQ_MAX_EVENT) + return -EINVAL; + + /* + * WFE arg_b + * bit 0-11: wait value + * bit 15: 1 - wait, 0 - no wait + * bit 16-27: update value + * bit 31: 1 - update, 0 - no update + */ + arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE; + + return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b); +} +EXPORT_SYMBOL(cmdq_pkt_wfe); + +int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u32 event) +{ + if (event >= CMDQ_MAX_EVENT) + return -EINVAL; + + return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, + CMDQ_WFE_UPDATE); +} +EXPORT_SYMBOL(cmdq_pkt_clear_event); + +static int cmdq_pkt_finalize(struct cmdq_pkt *pkt) +{ + int err; + + /* insert EOC and generate IRQ for each command iteration */ + err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN); + + /* JUMP to end */ + err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS); + + return err; +} + +static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data) +{ + struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data; + struct cmdq_task_cb *cb = &pkt->cb; + struct cmdq_client *client = (struct cmdq_client *)pkt->cl; + + if (client->timeout_ms != CMDQ_NO_TIMEOUT) { + unsigned long flags = 0; + + spin_lock_irqsave(&client->lock, flags); + if (--client->pkt_cnt == 0) + del_timer(&client->timer); + else + mod_timer(&client->timer, jiffies + + msecs_to_jiffies(client->timeout_ms)); + spin_unlock_irqrestore(&client->lock, flags); + } + + dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base, + pkt->cmd_buf_size, DMA_TO_DEVICE); + if (cb->cb) { + data.data = cb->data; + cb->cb(data); + } +} + +int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb, + void *data) +{ + int err; + unsigned long flags = 0; + struct cmdq_client *client = (struct cmdq_client *)pkt->cl; + + err = cmdq_pkt_finalize(pkt); + if (err < 0) + return err; + + pkt->cb.cb = cb; + pkt->cb.data = data; + pkt->async_cb.cb = cmdq_pkt_flush_async_cb; + pkt->async_cb.data = pkt; + + dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base, + pkt->cmd_buf_size, DMA_TO_DEVICE); + + if (client->timeout_ms != CMDQ_NO_TIMEOUT) { + spin_lock_irqsave(&client->lock, flags); + if (client->pkt_cnt++ == 0) + mod_timer(&client->timer, jiffies + + msecs_to_jiffies(client->timeout_ms)); + spin_unlock_irqrestore(&client->lock, flags); + } + + mbox_send_message(client->chan, pkt); + /* We can send next packet immediately, so just call txdone. 
*/ + mbox_client_txdone(client->chan, 0); + + return 0; +} +EXPORT_SYMBOL(cmdq_pkt_flush_async); + +struct cmdq_flush_completion { + struct completion cmplt; + bool err; +}; + +static void cmdq_pkt_flush_cb(struct cmdq_cb_data data) +{ + struct cmdq_flush_completion *cmplt; + + cmplt = (struct cmdq_flush_completion *)data.data; + if (data.sta != CMDQ_CB_NORMAL) + cmplt->err = true; + else + cmplt->err = false; + complete(&cmplt->cmplt); +} + +int cmdq_pkt_flush(struct cmdq_pkt *pkt) +{ + struct cmdq_flush_completion cmplt; + int err; + + init_completion(&cmplt.cmplt); + err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt); + if (err < 0) + return err; + wait_for_completion(&cmplt.cmplt); + + return cmplt.err ? -EFAULT : 0; +} +EXPORT_SYMBOL(cmdq_pkt_flush); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 684cb51694d1..fcbf8a2e4080 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -75,11 +75,6 @@ config QCOM_QMI_HELPERS tristate depends on ARCH_QCOM || COMPILE_TEST depends on NET - help - Helper library for handling QMI encoded messages. QMI encoded - messages are used in communication between the majority of QRTR - clients and this helpers provide the common functionality needed for - doing this from a kernel driver. config QCOM_RMTFS_MEM tristate "Qualcomm Remote Filesystem memory driver" diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c index a6f646295f06..c701b3b010f1 100644 --- a/drivers/soc/qcom/cmd-db.c +++ b/drivers/soc/qcom/cmd-db.c @@ -101,8 +101,7 @@ static bool cmd_db_magic_matches(const struct cmd_db_header *header) static struct cmd_db_header *cmd_db_header; - -static inline void *rsc_to_entry_header(struct rsc_hdr *hdr) +static inline const void *rsc_to_entry_header(const struct rsc_hdr *hdr) { u16 offset = le16_to_cpu(hdr->header_offset); @@ -110,7 +109,7 @@ static inline void *rsc_to_entry_header(struct rsc_hdr *hdr) } static inline void * -rsc_offset(struct rsc_hdr *hdr, struct entry_header *ent) +rsc_offset(const struct rsc_hdr *hdr, const struct entry_header *ent) { u16 offset = le16_to_cpu(hdr->data_offset); u16 loffset = le16_to_cpu(ent->offset); @@ -134,11 +133,11 @@ int cmd_db_ready(void) } EXPORT_SYMBOL(cmd_db_ready); -static int cmd_db_get_header(const char *id, struct entry_header *eh, - struct rsc_hdr *rh) +static int cmd_db_get_header(const char *id, const struct entry_header **eh, + const struct rsc_hdr **rh) { - struct rsc_hdr *rsc_hdr; - struct entry_header *ent; + const struct rsc_hdr *rsc_hdr; + const struct entry_header *ent; int ret, i, j; u8 query[8]; @@ -146,9 +145,6 @@ static int cmd_db_get_header(const char *id, struct entry_header *eh, if (ret) return ret; - if (!eh || !rh) - return -EINVAL; - /* Pad out query string to same length as in DB */ strncpy(query, id, sizeof(query)); @@ -159,14 +155,13 @@ static int cmd_db_get_header(const char *id, struct entry_header *eh, ent = rsc_to_entry_header(rsc_hdr); for (j = 0; j < le16_to_cpu(rsc_hdr->cnt); j++, ent++) { - if (memcmp(ent->id, query, sizeof(ent->id)) == 0) - break; - } - - if (j < le16_to_cpu(rsc_hdr->cnt)) { - memcpy(eh, ent, sizeof(*ent)); - memcpy(rh, rsc_hdr, sizeof(*rh)); - return 0; + if (memcmp(ent->id, query, sizeof(ent->id)) == 0) { + if (eh) + *eh = ent; + if (rh) + *rh = rsc_hdr; + return 0; + } } } @@ -186,69 +181,40 @@ static int cmd_db_get_header(const char *id, struct entry_header *eh, u32 cmd_db_read_addr(const char *id) { int ret; - struct entry_header ent; - struct rsc_hdr rsc_hdr; + 
const struct entry_header *ent; - ret = cmd_db_get_header(id, &ent, &rsc_hdr); + ret = cmd_db_get_header(id, &ent, NULL); - return ret < 0 ? 0 : le32_to_cpu(ent.addr); + return ret < 0 ? 0 : le32_to_cpu(ent->addr); } EXPORT_SYMBOL(cmd_db_read_addr); /** * cmd_db_read_aux_data() - Query command db for aux data. * - * @id: Resource to retrieve AUX Data on. - * @data: Data buffer to copy returned aux data to. Returns size on NULL - * @len: Caller provides size of data buffer passed in. + * @id: Resource to retrieve AUX Data on + * @len: size of data buffer returned * - * Return: size of data on success, errno otherwise + * Return: pointer to data on success, error pointer otherwise */ -int cmd_db_read_aux_data(const char *id, u8 *data, size_t len) +const void *cmd_db_read_aux_data(const char *id, size_t *len) { int ret; - struct entry_header ent; - struct rsc_hdr rsc_hdr; - u16 ent_len; - - if (!data) - return -EINVAL; + const struct entry_header *ent; + const struct rsc_hdr *rsc_hdr; ret = cmd_db_get_header(id, &ent, &rsc_hdr); if (ret) - return ret; + return ERR_PTR(ret); - ent_len = le16_to_cpu(ent.len); - if (len < ent_len) - return -EINVAL; - - len = min_t(u16, ent_len, len); - memcpy(data, rsc_offset(&rsc_hdr, &ent), len); + if (len) + *len = le16_to_cpu(ent->len); - return len; + return rsc_offset(rsc_hdr, ent); } EXPORT_SYMBOL(cmd_db_read_aux_data); /** - * cmd_db_read_aux_data_len - Get the length of the auxiliary data stored in DB. - * - * @id: Resource to retrieve AUX Data. - * - * Return: size on success, 0 on error - */ -size_t cmd_db_read_aux_data_len(const char *id) -{ - int ret; - struct entry_header ent; - struct rsc_hdr rsc_hdr; - - ret = cmd_db_get_header(id, &ent, &rsc_hdr); - - return ret < 0 ? 0 : le16_to_cpu(ent.len); -} -EXPORT_SYMBOL(cmd_db_read_aux_data_len); - -/** * cmd_db_read_slave_id - Get the slave ID for a given resource address * * @id: Resource id to query the DB for version @@ -258,15 +224,14 @@ EXPORT_SYMBOL(cmd_db_read_aux_data_len); enum cmd_db_hw_type cmd_db_read_slave_id(const char *id) { int ret; - struct entry_header ent; - struct rsc_hdr rsc_hdr; + const struct entry_header *ent; u32 addr; - ret = cmd_db_get_header(id, &ent, &rsc_hdr); + ret = cmd_db_get_header(id, &ent, NULL); if (ret < 0) return CMD_DB_HW_INVALID; - addr = le32_to_cpu(ent.addr); + addr = le32_to_cpu(ent->addr); return (addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK; } EXPORT_SYMBOL(cmd_db_read_slave_id); diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c index 192ca761b2cb..80667f7be52c 100644 --- a/drivers/soc/qcom/llcc-slice.c +++ b/drivers/soc/qcom/llcc-slice.c @@ -95,7 +95,8 @@ EXPORT_SYMBOL_GPL(llcc_slice_getd); */ void llcc_slice_putd(struct llcc_slice_desc *desc) { - kfree(desc); + if (!IS_ERR_OR_NULL(desc)) + kfree(desc); } EXPORT_SYMBOL_GPL(llcc_slice_putd); @@ -142,6 +143,9 @@ int llcc_slice_activate(struct llcc_slice_desc *desc) int ret; u32 act_ctrl_val; + if (IS_ERR_OR_NULL(desc)) + return -EINVAL; + mutex_lock(&drv_data->lock); if (test_bit(desc->slice_id, drv_data->bitmap)) { mutex_unlock(&drv_data->lock); @@ -176,6 +180,9 @@ int llcc_slice_deactivate(struct llcc_slice_desc *desc) u32 act_ctrl_val; int ret; + if (IS_ERR_OR_NULL(desc)) + return -EINVAL; + mutex_lock(&drv_data->lock); if (!test_bit(desc->slice_id, drv_data->bitmap)) { mutex_unlock(&drv_data->lock); @@ -203,6 +210,9 @@ EXPORT_SYMBOL_GPL(llcc_slice_deactivate); */ int llcc_get_slice_id(struct llcc_slice_desc *desc) { + if (IS_ERR_OR_NULL(desc)) + return -EINVAL; + return desc->slice_id; 
} EXPORT_SYMBOL_GPL(llcc_get_slice_id); @@ -213,6 +223,9 @@ EXPORT_SYMBOL_GPL(llcc_get_slice_id); */ size_t llcc_get_slice_size(struct llcc_slice_desc *desc) { + if (IS_ERR_OR_NULL(desc)) + return 0; + return desc->slice_size; } EXPORT_SYMBOL_GPL(llcc_get_slice_size); @@ -360,5 +373,5 @@ int qcom_llcc_probe(struct platform_device *pdev, return ret; } EXPORT_SYMBOL_GPL(qcom_llcc_probe); - MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm Last Level Cache Controller"); diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index ee89ffb6dde8..6b8ef01472e9 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -215,6 +215,16 @@ static void geni_se_io_init(void __iomem *base) writel_relaxed(FORCE_DEFAULT, base + GENI_FORCE_DEFAULT_REG); } +static void geni_se_irq_clear(struct geni_se *se) +{ + writel_relaxed(0, se->base + SE_GSI_EVENT_EN); + writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR); + writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR); + writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR); + writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR); + writel_relaxed(0xffffffff, se->base + SE_IRQ_EN); +} + /** * geni_se_init() - Initialize the GENI serial engine * @se: Pointer to the concerned serial engine. @@ -228,6 +238,7 @@ void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr) { u32 val; + geni_se_irq_clear(se); geni_se_io_init(se->base); geni_se_io_set_mode(se->base); @@ -249,12 +260,7 @@ static void geni_se_select_fifo_mode(struct geni_se *se) u32 proto = geni_se_read_proto(se); u32 val; - writel_relaxed(0, se->base + SE_GSI_EVENT_EN); - writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR); - writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR); - writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR); - writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR); - writel_relaxed(0xffffffff, se->base + SE_IRQ_EN); + geni_se_irq_clear(se); val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN); if (proto != GENI_SE_UART) { @@ -277,12 +283,7 @@ static void geni_se_select_dma_mode(struct geni_se *se) { u32 val; - writel_relaxed(0, se->base + SE_GSI_EVENT_EN); - writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR); - writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR); - writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR); - writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR); - writel_relaxed(0xffffffff, se->base + SE_IRQ_EN); + geni_se_irq_clear(se); val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN); val |= GENI_DMA_MODE_EN; diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c index 938ca41c56cd..c239a28e503f 100644 --- a/drivers/soc/qcom/qmi_interface.c +++ b/drivers/soc/qcom/qmi_interface.c @@ -318,7 +318,7 @@ int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn, txn->dest = c_struct; mutex_lock(&qmi->txn_lock); - ret = idr_alloc_cyclic(&qmi->txns, txn, 0, INT_MAX, GFP_KERNEL); + ret = idr_alloc_cyclic(&qmi->txns, txn, 0, U16_MAX, GFP_KERNEL); if (ret < 0) pr_err("failed to allocate transaction id\n"); diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index 93517ed83355..b8e63724a49d 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -227,6 +227,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = { { .compatible = "qcom,rpm-msm8974" }, { .compatible = "qcom,rpm-msm8996" }, { .compatible = "qcom,rpm-msm8998" }, + { .compatible = "qcom,rpm-qcs404" }, {} }; 
MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match); diff --git a/drivers/soc/renesas/r8a77965-sysc.c b/drivers/soc/renesas/r8a77965-sysc.c index d7f7928e3c07..e0533beb50fd 100644 --- a/drivers/soc/renesas/r8a77965-sysc.c +++ b/drivers/soc/renesas/r8a77965-sysc.c @@ -28,7 +28,6 @@ static const struct rcar_sysc_area r8a77965_areas[] __initconst = { { "a2vc1", 0x3c0, 1, R8A77965_PD_A2VC1, R8A77965_PD_A3VC }, { "3dg-a", 0x100, 0, R8A77965_PD_3DG_A, R8A77965_PD_ALWAYS_ON }, { "3dg-b", 0x100, 1, R8A77965_PD_3DG_B, R8A77965_PD_3DG_A }, - { "a3ir", 0x180, 0, R8A77965_PD_A3IR, R8A77965_PD_ALWAYS_ON }, }; const struct rcar_sysc_info r8a77965_sysc_info __initconst = { diff --git a/drivers/soc/renesas/r8a77970-sysc.c b/drivers/soc/renesas/r8a77970-sysc.c index 35b30d6a8958..280c48b80f24 100644 --- a/drivers/soc/renesas/r8a77970-sysc.c +++ b/drivers/soc/renesas/r8a77970-sysc.c @@ -20,12 +20,11 @@ static const struct rcar_sysc_area r8a77970_areas[] __initconst = { PD_CPU_NOCR }, { "ca53-cpu1", 0x200, 1, R8A77970_PD_CA53_CPU1, R8A77970_PD_CA53_SCU, PD_CPU_NOCR }, - { "cr7", 0x240, 0, R8A77970_PD_CR7, R8A77970_PD_ALWAYS_ON }, { "a3ir", 0x180, 0, R8A77970_PD_A3IR, R8A77970_PD_ALWAYS_ON }, { "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_A3IR }, { "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A3IR }, - { "a2ir2", 0x400, 2, R8A77970_PD_A2IR2, R8A77970_PD_A3IR }, - { "a2ir3", 0x400, 3, R8A77970_PD_A2IR3, R8A77970_PD_A3IR }, + { "a2dp", 0x400, 2, R8A77970_PD_A2DP, R8A77970_PD_A3IR }, + { "a2cn", 0x400, 3, R8A77970_PD_A2CN, R8A77970_PD_A3IR }, { "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_A3IR }, { "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A3IR }, }; diff --git a/drivers/soc/renesas/r8a77980-sysc.c b/drivers/soc/renesas/r8a77980-sysc.c index 9265fb525ef3..a8dbe55e8ba8 100644 --- a/drivers/soc/renesas/r8a77980-sysc.c +++ b/drivers/soc/renesas/r8a77980-sysc.c @@ -38,12 +38,12 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = { { "a2sc2", 0x400, 8, R8A77980_PD_A2SC2, R8A77980_PD_A3IR }, { "a2sc3", 0x400, 9, R8A77980_PD_A2SC3, R8A77980_PD_A3IR }, { "a2sc4", 0x400, 10, R8A77980_PD_A2SC4, R8A77980_PD_A3IR }, - { "a2pd0", 0x400, 11, R8A77980_PD_A2PD0, R8A77980_PD_A3IR }, - { "a2pd1", 0x400, 12, R8A77980_PD_A2PD1, R8A77980_PD_A3IR }, + { "a2dp0", 0x400, 11, R8A77980_PD_A2DP0, R8A77980_PD_A3IR }, + { "a2dp1", 0x400, 12, R8A77980_PD_A2DP1, R8A77980_PD_A3IR }, { "a2cn", 0x400, 13, R8A77980_PD_A2CN, R8A77980_PD_A3IR }, - { "a3vip", 0x2c0, 0, R8A77980_PD_A3VIP, R8A77980_PD_ALWAYS_ON }, - { "a3vip1", 0x300, 0, R8A77980_PD_A3VIP1, R8A77980_PD_A3VIP }, - { "a3vip2", 0x280, 0, R8A77980_PD_A3VIP2, R8A77980_PD_A3VIP }, + { "a3vip0", 0x2c0, 0, R8A77980_PD_A3VIP0, R8A77980_PD_ALWAYS_ON }, + { "a3vip1", 0x300, 0, R8A77980_PD_A3VIP1, R8A77980_PD_ALWAYS_ON }, + { "a3vip2", 0x280, 0, R8A77980_PD_A3VIP2, R8A77980_PD_ALWAYS_ON }, }; const struct rcar_sysc_info r8a77980_sysc_info __initconst = { diff --git a/drivers/soc/renesas/r8a77990-sysc.c b/drivers/soc/renesas/r8a77990-sysc.c index 15579ebc5ed2..664b244eb1dd 100644 --- a/drivers/soc/renesas/r8a77990-sysc.c +++ b/drivers/soc/renesas/r8a77990-sysc.c @@ -28,19 +28,6 @@ static struct rcar_sysc_area r8a77990_areas[] __initdata = { { "3dg-b", 0x100, 1, R8A77990_PD_3DG_B, R8A77990_PD_3DG_A }, }; -static void __init rcar_sysc_fix_parent(struct rcar_sysc_area *areas, - unsigned int num_areas, u8 id, - int new_parent) -{ - unsigned int i; - - for (i = 0; i < num_areas; i++) - if (areas[i].isr_bit == id) { - areas[i].parent = new_parent; - return; - } -} - /* 
Fixups for R-Car E3 ES1.0 revision */ static const struct soc_device_attribute r8a77990[] __initconst = { { .soc_id = "r8a77990", .revision = "ES1.0" }, @@ -50,12 +37,10 @@ static const struct soc_device_attribute r8a77990[] __initconst = { static int __init r8a77990_sysc_init(void) { if (soc_device_match(r8a77990)) { - rcar_sysc_fix_parent(r8a77990_areas, - ARRAY_SIZE(r8a77990_areas), - R8A77990_PD_3DG_A, R8A77990_PD_3DG_B); - rcar_sysc_fix_parent(r8a77990_areas, - ARRAY_SIZE(r8a77990_areas), - R8A77990_PD_3DG_B, R8A77990_PD_ALWAYS_ON); + /* Fix incorrect 3DG hierarchy */ + swap(r8a77990_areas[7], r8a77990_areas[8]); + r8a77990_areas[7].parent = R8A77990_PD_ALWAYS_ON; + r8a77990_areas[8].parent = R8A77990_PD_3DG_B; } return 0; diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c index af53363eda03..0c80fab4f8de 100644 --- a/drivers/soc/renesas/rcar-sysc.c +++ b/drivers/soc/renesas/rcar-sysc.c @@ -105,6 +105,15 @@ static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on) spin_lock_irqsave(&rcar_sysc_lock, flags); + /* + * The interrupt source needs to be enabled, but masked, to prevent the + * CPU from receiving it. + */ + iowrite32(ioread32(rcar_sysc_base + SYSCIMR) | isr_mask, + rcar_sysc_base + SYSCIMR); + iowrite32(ioread32(rcar_sysc_base + SYSCIER) | isr_mask, + rcar_sysc_base + SYSCIER); + iowrite32(isr_mask, rcar_sysc_base + SYSCISCR); /* Submit power shutoff or resume request until it was accepted */ @@ -146,16 +155,6 @@ static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on) return ret; } -static int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch) -{ - return rcar_sysc_power(sysc_ch, false); -} - -static int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch) -{ - return rcar_sysc_power(sysc_ch, true); -} - static bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch) { unsigned int st; @@ -184,7 +183,7 @@ static int rcar_sysc_pd_power_off(struct generic_pm_domain *genpd) struct rcar_sysc_pd *pd = to_rcar_pd(genpd); pr_debug("%s: %s\n", __func__, genpd->name); - return rcar_sysc_power_down(&pd->ch); + return rcar_sysc_power(&pd->ch, false); } static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd) @@ -192,7 +191,7 @@ static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd) struct rcar_sysc_pd *pd = to_rcar_pd(genpd); pr_debug("%s: %s\n", __func__, genpd->name); - return rcar_sysc_power_up(&pd->ch); + return rcar_sysc_power(&pd->ch, true); } static bool has_cpg_mstp; @@ -252,7 +251,7 @@ static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd) goto finalize; } - rcar_sysc_power_up(&pd->ch); + rcar_sysc_power(&pd->ch, true); finalize: error = pm_genpd_init(genpd, gov, false); @@ -334,7 +333,6 @@ static int __init rcar_sysc_pd_init(void) const struct of_device_id *match; struct rcar_pm_domains *domains; struct device_node *np; - u32 syscier, syscimr; void __iomem *base; unsigned int i; int error; @@ -373,27 +371,6 @@ static int __init rcar_sysc_pd_init(void) domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains); rcar_sysc_onecell_data = &domains->onecell_data; - for (i = 0, syscier = 0; i < info->num_areas; i++) - syscier |= BIT(info->areas[i].isr_bit); - - /* - * Mask all interrupt sources to prevent the CPU from receiving them. - * Make sure not to clear reserved bits that were set before. 
- */ - syscimr = ioread32(base + SYSCIMR); - syscimr |= syscier; - pr_debug("%pOF: syscimr = 0x%08x\n", np, syscimr); - iowrite32(syscimr, base + SYSCIMR); - - /* - * SYSC needs all interrupt sources enabled to control power. - */ - pr_debug("%pOF: syscier = 0x%08x\n", np, syscier); - iowrite32(syscier, base + SYSCIER); - - /* - * First, create all PM domains - */ for (i = 0; i < info->num_areas; i++) { const struct rcar_sysc_area *area = &info->areas[i]; struct rcar_sysc_pd *pd; @@ -421,22 +398,17 @@ static int __init rcar_sysc_pd_init(void) goto out_put; domains->domains[area->isr_bit] = &pd->genpd; - } - /* - * Second, link all PM domains to their parents - */ - for (i = 0; i < info->num_areas; i++) { - const struct rcar_sysc_area *area = &info->areas[i]; - - if (!area->name || area->parent < 0) + if (area->parent < 0) continue; error = pm_genpd_add_subdomain(domains->domains[area->parent], - domains->domains[area->isr_bit]); - if (error) + &pd->genpd); + if (error) { pr_warn("Failed to add PM subdomain %s to parent %u\n", area->name, area->parent); + goto out_put; + } } error = of_genpd_add_provider_onecell(np, &domains->onecell_data); @@ -478,8 +450,7 @@ static int rcar_sysc_power_cpu(unsigned int idx, bool on) if (!(pd->flags & PD_CPU) || pd->ch.chan_bit != idx) continue; - return on ? rcar_sysc_power_up(&pd->ch) - : rcar_sysc_power_down(&pd->ch); + return rcar_sysc_power(&pd->ch, on); } return -ENOENT; diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c index 6f86a726bb45..847c7c482b26 100644 --- a/drivers/soc/rockchip/pm_domains.c +++ b/drivers/soc/rockchip/pm_domains.c @@ -21,7 +21,9 @@ #include <linux/mfd/syscon.h> #include <dt-bindings/power/px30-power.h> #include <dt-bindings/power/rk3036-power.h> +#include <dt-bindings/power/rk3066-power.h> #include <dt-bindings/power/rk3128-power.h> +#include <dt-bindings/power/rk3188-power.h> #include <dt-bindings/power/rk3228-power.h> #include <dt-bindings/power/rk3288-power.h> #include <dt-bindings/power/rk3328-power.h> @@ -737,6 +739,14 @@ static const struct rockchip_domain_info rk3036_pm_domains[] = { [RK3036_PD_SYS] = DOMAIN_RK3036(8, 22, 29, false), }; +static const struct rockchip_domain_info rk3066_pm_domains[] = { + [RK3066_PD_GPU] = DOMAIN(9, 9, 3, 24, 29, false), + [RK3066_PD_VIDEO] = DOMAIN(8, 8, 4, 23, 28, false), + [RK3066_PD_VIO] = DOMAIN(7, 7, 5, 22, 27, false), + [RK3066_PD_PERI] = DOMAIN(6, 6, 2, 25, 30, false), + [RK3066_PD_CPU] = DOMAIN(-1, 5, 1, 26, 31, false), +}; + static const struct rockchip_domain_info rk3128_pm_domains[] = { [RK3128_PD_CORE] = DOMAIN_RK3288(0, 0, 4, false), [RK3128_PD_MSCH] = DOMAIN_RK3288(-1, -1, 6, true), @@ -745,6 +755,14 @@ static const struct rockchip_domain_info rk3128_pm_domains[] = { [RK3128_PD_GPU] = DOMAIN_RK3288(1, 1, 3, false), }; +static const struct rockchip_domain_info rk3188_pm_domains[] = { + [RK3188_PD_GPU] = DOMAIN(9, 9, 3, 24, 29, false), + [RK3188_PD_VIDEO] = DOMAIN(8, 8, 4, 23, 28, false), + [RK3188_PD_VIO] = DOMAIN(7, 7, 5, 22, 27, false), + [RK3188_PD_PERI] = DOMAIN(6, 6, 2, 25, 30, false), + [RK3188_PD_CPU] = DOMAIN(5, 5, 1, 26, 31, false), +}; + static const struct rockchip_domain_info rk3228_pm_domains[] = { [RK3228_PD_CORE] = DOMAIN_RK3036(0, 0, 16, true), [RK3228_PD_MSCH] = DOMAIN_RK3036(1, 1, 17, true), @@ -846,6 +864,17 @@ static const struct rockchip_pmu_info rk3036_pmu = { .domain_info = rk3036_pm_domains, }; +static const struct rockchip_pmu_info rk3066_pmu = { + .pwr_offset = 0x08, + .status_offset = 0x0c, + .req_offset = 0x38, /* 
PMU_MISC_CON1 */ + .idle_offset = 0x0c, + .ack_offset = 0x0c, + + .num_domains = ARRAY_SIZE(rk3066_pm_domains), + .domain_info = rk3066_pm_domains, +}; + static const struct rockchip_pmu_info rk3128_pmu = { .pwr_offset = 0x04, .status_offset = 0x08, @@ -857,6 +886,17 @@ static const struct rockchip_pmu_info rk3128_pmu = { .domain_info = rk3128_pm_domains, }; +static const struct rockchip_pmu_info rk3188_pmu = { + .pwr_offset = 0x08, + .status_offset = 0x0c, + .req_offset = 0x38, /* PMU_MISC_CON1 */ + .idle_offset = 0x0c, + .ack_offset = 0x0c, + + .num_domains = ARRAY_SIZE(rk3188_pm_domains), + .domain_info = rk3188_pm_domains, +}; + static const struct rockchip_pmu_info rk3228_pmu = { .req_offset = 0x40c, .idle_offset = 0x488, @@ -949,10 +989,18 @@ static const struct of_device_id rockchip_pm_domain_dt_match[] = { .data = (void *)&rk3036_pmu, }, { + .compatible = "rockchip,rk3066-power-controller", + .data = (void *)&rk3066_pmu, + }, + { .compatible = "rockchip,rk3128-power-controller", .data = (void *)&rk3128_pmu, }, { + .compatible = "rockchip,rk3188-power-controller", + .data = (void *)&rk3188_pmu, + }, + { .compatible = "rockchip,rk3228-power-controller", .data = (void *)&rk3228_pmu, }, diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c index b4b0f3480bd3..1b0d50f36349 100644 --- a/drivers/soc/sunxi/sunxi_sram.c +++ b/drivers/soc/sunxi/sunxi_sram.c @@ -155,17 +155,7 @@ static int sunxi_sram_show(struct seq_file *s, void *data) return 0; } -static int sunxi_sram_open(struct inode *inode, struct file *file) -{ - return single_open(file, sunxi_sram_show, inode->i_private); -} - -static const struct file_operations sunxi_sram_fops = { - .open = sunxi_sram_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(sunxi_sram); static inline struct sunxi_sram_desc *to_sram_desc(const struct sunxi_sram_data *data) { @@ -300,6 +290,10 @@ static const struct sunxi_sramc_variant sun4i_a10_sramc_variant = { /* Nothing special */ }; +static const struct sunxi_sramc_variant sun8i_h3_sramc_variant = { + .has_emac_clock = true, +}; + static const struct sunxi_sramc_variant sun50i_a64_sramc_variant = { .has_emac_clock = true, }; @@ -379,7 +373,7 @@ static const struct of_device_id sunxi_sram_dt_match[] = { }, { .compatible = "allwinner,sun8i-h3-system-control", - .data = &sun4i_a10_sramc_variant, + .data = &sun8i_h3_sramc_variant, }, { .compatible = "allwinner,sun50i-a64-sram-controller", @@ -389,6 +383,10 @@ static const struct of_device_id sunxi_sram_dt_match[] = { .compatible = "allwinner,sun50i-a64-system-control", .data = &sun50i_a64_sramc_variant, }, + { + .compatible = "allwinner,sun50i-h5-system-control", + .data = &sun50i_a64_sramc_variant, + }, { }, }; MODULE_DEVICE_TABLE(of, sunxi_sram_dt_match); diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c index cd8f41351add..7bfb154d6fa5 100644 --- a/drivers/soc/tegra/common.c +++ b/drivers/soc/tegra/common.c @@ -22,11 +22,15 @@ static const struct of_device_id tegra_machine_match[] = { bool soc_is_tegra(void) { + const struct of_device_id *match; struct device_node *root; root = of_find_node_by_path("/"); if (!root) return false; - return of_match_node(tegra_machine_match, root) != NULL; + match = of_match_node(tegra_machine_match, root); + of_node_put(root); + + return match != NULL; } diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 1fa840e3d930..8c46b0aace0b 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -2,6 +2,7 
@@ * drivers/soc/tegra/pmc.c * * Copyright (c) 2010 Google, Inc + * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Author: * Colin Cross <ccross@google.com> @@ -29,9 +30,12 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_clk.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinconf.h> @@ -48,7 +52,10 @@ #include <soc/tegra/fuse.h> #include <soc/tegra/pmc.h> +#include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/pinctrl/pinctrl-tegra-io-pad.h> +#include <dt-bindings/gpio/tegra186-gpio.h> +#include <dt-bindings/gpio/tegra194-gpio.h> #define PMC_CNTRL 0x0 #define PMC_CNTRL_INTR_POLARITY BIT(17) /* inverts INTR polarity */ @@ -92,7 +99,6 @@ #define PMC_SENSOR_CTRL_SCRATCH_WRITE BIT(2) #define PMC_SENSOR_CTRL_ENABLE_RST BIT(1) -#define PMC_RST_STATUS 0x1b4 #define PMC_RST_STATUS_POR 0 #define PMC_RST_STATUS_WATCHDOG 1 #define PMC_RST_STATUS_SENSOR 2 @@ -126,6 +132,16 @@ #define GPU_RG_CNTRL 0x2d4 /* Tegra186 and later */ +#define WAKE_AOWAKE_CNTRL(x) (0x000 + ((x) << 2)) +#define WAKE_AOWAKE_CNTRL_LEVEL (1 << 3) +#define WAKE_AOWAKE_MASK_W(x) (0x180 + ((x) << 2)) +#define WAKE_AOWAKE_MASK_R(x) (0x300 + ((x) << 2)) +#define WAKE_AOWAKE_STATUS_W(x) (0x30c + ((x) << 2)) +#define WAKE_AOWAKE_STATUS_R(x) (0x48c + ((x) << 2)) +#define WAKE_AOWAKE_TIER0_ROUTING(x) (0x4b4 + ((x) << 2)) +#define WAKE_AOWAKE_TIER1_ROUTING(x) (0x4c0 + ((x) << 2)) +#define WAKE_AOWAKE_TIER2_ROUTING(x) (0x4cc + ((x) << 2)) + #define WAKE_AOWAKE_CTRL 0x4f4 #define WAKE_AOWAKE_CTRL_INTR_POLARITY BIT(0) @@ -151,8 +167,45 @@ struct tegra_pmc_regs { unsigned int dpd_status; unsigned int dpd2_req; unsigned int dpd2_status; + unsigned int rst_status; + unsigned int rst_source_shift; + unsigned int rst_source_mask; + unsigned int rst_level_shift; + unsigned int rst_level_mask; +}; + +struct tegra_wake_event { + const char *name; + unsigned int id; + unsigned int irq; + struct { + unsigned int instance; + unsigned int pin; + } gpio; }; +#define TEGRA_WAKE_IRQ(_name, _id, _irq) \ + { \ + .name = _name, \ + .id = _id, \ + .irq = _irq, \ + .gpio = { \ + .instance = UINT_MAX, \ + .pin = UINT_MAX, \ + }, \ + } + +#define TEGRA_WAKE_GPIO(_name, _id, _instance, _pin) \ + { \ + .name = _name, \ + .id = _id, \ + .irq = 0, \ + .gpio = { \ + .instance = _instance, \ + .pin = _pin, \ + }, \ + } + struct tegra_pmc_soc { unsigned int num_powergates; const char *const *powergates; @@ -175,6 +228,45 @@ struct tegra_pmc_soc { void (*setup_irq_polarity)(struct tegra_pmc *pmc, struct device_node *np, bool invert); + + const char * const *reset_sources; + unsigned int num_reset_sources; + const char * const *reset_levels; + unsigned int num_reset_levels; + + const struct tegra_wake_event *wake_events; + unsigned int num_wake_events; +}; + +static const char * const tegra186_reset_sources[] = { + "SYS_RESET", + "AOWDT", + "MCCPLEXWDT", + "BPMPWDT", + "SCEWDT", + "SPEWDT", + "APEWDT", + "BCCPLEXWDT", + "SENSOR", + "AOTAG", + "VFSENSOR", + "SWREST", + "SC7", + "HSM", + "CORESIGHT" +}; + +static const char * const tegra186_reset_levels[] = { + "L0", "L1", "L2", "WARM" +}; + +static const char * const tegra30_reset_sources[] = { + "POWER_ON_RESET", + "WATCHDOG", + "SENSOR", + "SW_MAIN", + "LP0", + "AOTAG" }; /** @@ -230,6 +322,9 @@ struct tegra_pmc { struct mutex powergates_lock; struct pinctrl_dev 
*pctl_dev; + + struct irq_domain *domain; + struct irq_chip irq; }; static struct tegra_pmc *pmc = &(struct tegra_pmc) { @@ -538,16 +633,10 @@ EXPORT_SYMBOL(tegra_powergate_power_off); */ int tegra_powergate_is_powered(unsigned int id) { - int status; - if (!tegra_powergate_is_valid(id)) return -EINVAL; - mutex_lock(&pmc->powergates_lock); - status = tegra_powergate_state(id); - mutex_unlock(&pmc->powergates_lock); - - return status; + return tegra_powergate_state(id); } /** @@ -717,17 +806,7 @@ static int powergate_show(struct seq_file *s, void *data) return 0; } -static int powergate_open(struct inode *inode, struct file *file) -{ - return single_open(file, powergate_show, inode->i_private); -} - -static const struct file_operations powergate_fops = { - .open = powergate_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(powergate); static int tegra_powergate_debugfs_init(void) { @@ -847,22 +926,6 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) goto remove_resets; } - /* - * FIXME: If XHCI is enabled for Tegra, then power-up the XUSB - * host and super-speed partitions. Once the XHCI driver - * manages the partitions itself this code can be removed. Note - * that we don't register these partitions with the genpd core - * to avoid it from powering down the partitions as they appear - * to be unused. - */ - if (IS_ENABLED(CONFIG_USB_XHCI_TEGRA) && - (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC)) { - if (off) - WARN_ON(tegra_powergate_power_up(pg, true)); - - goto remove_resets; - } - err = pm_genpd_init(&pg->genpd, NULL, off); if (err < 0) { pr_err("failed to initialise PM domain %pOFn: %d\n", np, @@ -1543,6 +1606,225 @@ static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc) return err; } +static ssize_t reset_reason_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 value, rst_src; + + value = tegra_pmc_readl(pmc->soc->regs->rst_status); + rst_src = (value & pmc->soc->regs->rst_source_mask) >> + pmc->soc->regs->rst_source_shift; + + return sprintf(buf, "%s\n", pmc->soc->reset_sources[rst_src]); +} + +static DEVICE_ATTR_RO(reset_reason); + +static ssize_t reset_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 value, rst_lvl; + + value = tegra_pmc_readl(pmc->soc->regs->rst_status); + rst_lvl = (value & pmc->soc->regs->rst_level_mask) >> + pmc->soc->regs->rst_level_shift; + + return sprintf(buf, "%s\n", pmc->soc->reset_levels[rst_lvl]); +} + +static DEVICE_ATTR_RO(reset_level); + +static void tegra_pmc_reset_sysfs_init(struct tegra_pmc *pmc) +{ + struct device *dev = pmc->dev; + int err = 0; + + if (pmc->soc->reset_sources) { + err = device_create_file(dev, &dev_attr_reset_reason); + if (err < 0) + dev_warn(dev, + "failed to create attr \"reset_reason\": %d\n", + err); + } + + if (pmc->soc->reset_levels) { + err = device_create_file(dev, &dev_attr_reset_level); + if (err < 0) + dev_warn(dev, + "failed to create attr \"reset_level\": %d\n", + err); + } +} + +static int tegra_pmc_irq_translate(struct irq_domain *domain, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (WARN_ON(fwspec->param_count < 2)) + return -EINVAL; + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + + return 0; +} + +static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int num_irqs, void *data) +{ + struct tegra_pmc *pmc = domain->host_data; + const struct tegra_pmc_soc *soc = 
pmc->soc; + struct irq_fwspec *fwspec = data; + unsigned int i; + int err = 0; + + for (i = 0; i < soc->num_wake_events; i++) { + const struct tegra_wake_event *event = &soc->wake_events[i]; + + if (fwspec->param_count == 2) { + struct irq_fwspec spec; + + if (event->id != fwspec->param[0]) + continue; + + err = irq_domain_set_hwirq_and_chip(domain, virq, + event->id, + &pmc->irq, pmc); + if (err < 0) + break; + + spec.fwnode = &pmc->dev->of_node->fwnode; + spec.param_count = 3; + spec.param[0] = GIC_SPI; + spec.param[1] = event->irq; + spec.param[2] = fwspec->param[1]; + + err = irq_domain_alloc_irqs_parent(domain, virq, + num_irqs, &spec); + + break; + } + + if (fwspec->param_count == 3) { + if (event->gpio.instance != fwspec->param[0] || + event->gpio.pin != fwspec->param[1]) + continue; + + err = irq_domain_set_hwirq_and_chip(domain, virq, + event->id, + &pmc->irq, pmc); + + break; + } + } + + if (i == soc->num_wake_events) + err = irq_domain_set_hwirq_and_chip(domain, virq, ULONG_MAX, + &pmc->irq, pmc); + + return err; +} + +static const struct irq_domain_ops tegra_pmc_irq_domain_ops = { + .translate = tegra_pmc_irq_translate, + .alloc = tegra_pmc_irq_alloc, +}; + +static int tegra_pmc_irq_set_wake(struct irq_data *data, unsigned int on) +{ + struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data); + unsigned int offset, bit; + u32 value; + + offset = data->hwirq / 32; + bit = data->hwirq % 32; + + /* clear wake status */ + writel(0x1, pmc->wake + WAKE_AOWAKE_STATUS_W(data->hwirq)); + + /* route wake to tier 2 */ + value = readl(pmc->wake + WAKE_AOWAKE_TIER2_ROUTING(offset)); + + if (!on) + value &= ~(1 << bit); + else + value |= 1 << bit; + + writel(value, pmc->wake + WAKE_AOWAKE_TIER2_ROUTING(offset)); + + /* enable wakeup event */ + writel(!!on, pmc->wake + WAKE_AOWAKE_MASK_W(data->hwirq)); + + return 0; +} + +static int tegra_pmc_irq_set_type(struct irq_data *data, unsigned int type) +{ + struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data); + u32 value; + + if (data->hwirq == ULONG_MAX) + return 0; + + value = readl(pmc->wake + WAKE_AOWAKE_CNTRL(data->hwirq)); + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + case IRQ_TYPE_LEVEL_HIGH: + value |= WAKE_AOWAKE_CNTRL_LEVEL; + break; + + case IRQ_TYPE_EDGE_FALLING: + case IRQ_TYPE_LEVEL_LOW: + value &= ~WAKE_AOWAKE_CNTRL_LEVEL; + break; + + case IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING: + value ^= WAKE_AOWAKE_CNTRL_LEVEL; + break; + + default: + return -EINVAL; + } + + writel(value, pmc->wake + WAKE_AOWAKE_CNTRL(data->hwirq)); + + return 0; +} + +static int tegra_pmc_irq_init(struct tegra_pmc *pmc) +{ + struct irq_domain *parent = NULL; + struct device_node *np; + + np = of_irq_find_parent(pmc->dev->of_node); + if (np) { + parent = irq_find_host(np); + of_node_put(np); + } + + if (!parent) + return 0; + + pmc->irq.name = dev_name(pmc->dev); + pmc->irq.irq_mask = irq_chip_mask_parent; + pmc->irq.irq_unmask = irq_chip_unmask_parent; + pmc->irq.irq_eoi = irq_chip_eoi_parent; + pmc->irq.irq_set_affinity = irq_chip_set_affinity_parent; + pmc->irq.irq_set_type = tegra_pmc_irq_set_type; + pmc->irq.irq_set_wake = tegra_pmc_irq_set_wake; + + pmc->domain = irq_domain_add_hierarchy(parent, 0, 96, pmc->dev->of_node, + &tegra_pmc_irq_domain_ops, pmc); + if (!pmc->domain) { + dev_err(pmc->dev, "failed to allocate domain\n"); + return -ENOMEM; + } + + return 0; +} + static int tegra_pmc_probe(struct platform_device *pdev) { void __iomem *base; @@ -1612,6 +1894,8 @@ static int tegra_pmc_probe(struct platform_device *pdev) 
tegra_pmc_init_tsense_reset(pmc); + tegra_pmc_reset_sysfs_init(pmc); + if (IS_ENABLED(CONFIG_DEBUG_FS)) { err = tegra_powergate_debugfs_init(); if (err < 0) @@ -1629,6 +1913,10 @@ static int tegra_pmc_probe(struct platform_device *pdev) if (err) goto cleanup_restart_handler; + err = tegra_pmc_irq_init(pmc); + if (err < 0) + goto cleanup_restart_handler; + mutex_lock(&pmc->powergates_lock); iounmap(pmc->base); pmc->base = base; @@ -1678,6 +1966,11 @@ static const struct tegra_pmc_regs tegra20_pmc_regs = { .dpd_status = 0x1bc, .dpd2_req = 0x1c0, .dpd2_status = 0x1c4, + .rst_status = 0x1b4, + .rst_source_shift = 0x0, + .rst_source_mask = 0x7, + .rst_level_shift = 0x0, + .rst_level_mask = 0x0, }; static void tegra20_pmc_init(struct tegra_pmc *pmc) @@ -1735,6 +2028,10 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = { .regs = &tegra20_pmc_regs, .init = tegra20_pmc_init, .setup_irq_polarity = tegra20_pmc_setup_irq_polarity, + .reset_sources = NULL, + .num_reset_sources = 0, + .reset_levels = NULL, + .num_reset_levels = 0, }; static const char * const tegra30_powergates[] = { @@ -1776,6 +2073,10 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = { .regs = &tegra20_pmc_regs, .init = tegra20_pmc_init, .setup_irq_polarity = tegra20_pmc_setup_irq_polarity, + .reset_sources = tegra30_reset_sources, + .num_reset_sources = 5, + .reset_levels = NULL, + .num_reset_levels = 0, }; static const char * const tegra114_powergates[] = { @@ -1821,6 +2122,10 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = { .regs = &tegra20_pmc_regs, .init = tegra20_pmc_init, .setup_irq_polarity = tegra20_pmc_setup_irq_polarity, + .reset_sources = tegra30_reset_sources, + .num_reset_sources = 5, + .reset_levels = NULL, + .num_reset_levels = 0, }; static const char * const tegra124_powergates[] = { @@ -1926,6 +2231,10 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = { .regs = &tegra20_pmc_regs, .init = tegra20_pmc_init, .setup_irq_polarity = tegra20_pmc_setup_irq_polarity, + .reset_sources = tegra30_reset_sources, + .num_reset_sources = 5, + .reset_levels = NULL, + .num_reset_levels = 0, }; static const char * const tegra210_powergates[] = { @@ -2027,6 +2336,10 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = { .regs = &tegra20_pmc_regs, .init = tegra20_pmc_init, .setup_irq_polarity = tegra20_pmc_setup_irq_polarity, + .reset_sources = tegra30_reset_sources, + .num_reset_sources = 5, + .reset_levels = NULL, + .num_reset_levels = 0, }; #define TEGRA186_IO_PAD_TABLE(_pad) \ @@ -2084,6 +2397,11 @@ static const struct tegra_pmc_regs tegra186_pmc_regs = { .dpd_status = 0x78, .dpd2_req = 0x7c, .dpd2_status = 0x80, + .rst_status = 0x70, + .rst_source_shift = 0x2, + .rst_source_mask = 0x3C, + .rst_level_shift = 0x0, + .rst_level_mask = 0x3, }; static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc, @@ -2121,6 +2439,11 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc, iounmap(wake); } +static const struct tegra_wake_event tegra186_wake_events[] = { + TEGRA_WAKE_GPIO("power", 29, 1, TEGRA_AON_GPIO(FF, 0)), + TEGRA_WAKE_IRQ("rtc", 73, 10), +}; + static const struct tegra_pmc_soc tegra186_pmc_soc = { .num_powergates = 0, .powergates = NULL, @@ -2136,10 +2459,87 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = { .regs = &tegra186_pmc_regs, .init = NULL, .setup_irq_polarity = tegra186_pmc_setup_irq_polarity, + .reset_sources = tegra186_reset_sources, + .num_reset_sources = 14, + .reset_levels = tegra186_reset_levels, + .num_reset_levels = 3, + .num_wake_events = 
ARRAY_SIZE(tegra186_wake_events), + .wake_events = tegra186_wake_events, +}; + +static const struct tegra_io_pad_soc tegra194_io_pads[] = { + { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CLK_BIAS, .dpd = 4, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CLK3, .dpd = 5, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 7, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_EQOS, .dpd = 8, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CLK2_BIAS, .dpd = 9, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 10, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DAP3, .dpd = 11, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DAP5, .dpd = 12, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PWR_CTL, .dpd = 15, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SOC_GPIO53, .dpd = 16, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_GP_PWM2, .dpd = 18, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_GP_PWM3, .dpd = 19, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SOC_GPIO12, .dpd = 20, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SOC_GPIO13, .dpd = 21, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SOC_GPIO10, .dpd = 22, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_UART4, .dpd = 23, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_UART5, .dpd = 24, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DBG, .dpd = 25, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_HDMI_DP3, .dpd = 26, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_HDMI_DP2, .dpd = 27, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_HDMI_DP0, .dpd = 28, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_HDMI_DP1, .dpd = 29, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = 32, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_CTL2, .dpd = 33, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_L0_RST_N, .dpd = 34, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_L1_RST_N, .dpd = 35, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SDMMC4, .dpd = 36, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_PEX_L5_RST_N, .dpd = 37, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIC, .dpd = 43, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSID, .dpd = 44, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIE, .dpd = 45, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIF, .dpd = 46, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SPI, .dpd = 47, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_UFS, .dpd = 49, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIG, .dpd = 50, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CSIH, .dpd = 51, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_EDP, .dpd = 53, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SDMMC1_HV, .dpd = 55, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SDMMC3_HV, .dpd = 56, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_CONN, .dpd = 60, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = UINT_MAX }, +}; + +static const struct tegra_wake_event tegra194_wake_events[] = { + TEGRA_WAKE_GPIO("power", 29, 1, TEGRA194_AON_GPIO(EE, 4)), + TEGRA_WAKE_IRQ("rtc", 73, 10), +}; + +static const struct tegra_pmc_soc tegra194_pmc_soc = { + .num_powergates = 0, + .powergates = NULL, + .num_cpu_powergates = 0, + .cpu_powergates = NULL, + 
.has_tsense_reset = false, + .has_gpu_clamps = false, + .num_io_pads = ARRAY_SIZE(tegra194_io_pads), + .io_pads = tegra194_io_pads, + .regs = &tegra186_pmc_regs, + .init = NULL, + .setup_irq_polarity = tegra186_pmc_setup_irq_polarity, + .num_wake_events = ARRAY_SIZE(tegra194_wake_events), + .wake_events = tegra194_wake_events, }; static const struct of_device_id tegra_pmc_match[] = { - { .compatible = "nvidia,tegra194-pmc", .data = &tegra186_pmc_soc }, + { .compatible = "nvidia,tegra194-pmc", .data = &tegra194_pmc_soc }, { .compatible = "nvidia,tegra186-pmc", .data = &tegra186_pmc_soc }, { .compatible = "nvidia,tegra210-pmc", .data = &tegra210_pmc_soc }, { .compatible = "nvidia,tegra132-pmc", .data = &tegra124_pmc_soc }, diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index f5cb8c0af09f..d65e361c5de1 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -57,6 +57,7 @@ static struct wkup_m3_ipc *m3_ipc_state; static const struct wkup_m3_wakeup_src wakeups[] = { + {.irq_nr = 16, .src = "PRCM"}, {.irq_nr = 35, .src = "USB0_PHY"}, {.irq_nr = 36, .src = "USB1_PHY"}, {.irq_nr = 40, .src = "I2C0"}, diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index ff6ba6d86cd8..cc56cb3b3eca 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1614,10 +1614,10 @@ static void sci_request_dma(struct uart_port *port) hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); s->rx_timer.function = rx_timer_fn; + s->chan_rx_saved = s->chan_rx = chan; + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) sci_submit_rx(s); - - s->chan_rx_saved = s->chan_rx = chan; } } @@ -3102,6 +3102,7 @@ static struct uart_driver sci_uart_driver = { static int sci_remove(struct platform_device *dev) { struct sci_port *port = platform_get_drvdata(dev); + unsigned int type = port->port.type; /* uart_remove_... clears it */ sci_ports_in_use &= ~BIT(port->port.line); uart_remove_one_port(&sci_uart_driver, &port->port); @@ -3112,8 +3113,7 @@ static int sci_remove(struct platform_device *dev) sysfs_remove_file(&dev->dev.kobj, &dev_attr_rx_fifo_trigger.attr); } - if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB || - port->port.type == PORT_HSCIF) { + if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) { sysfs_remove_file(&dev->dev.kobj, &dev_attr_rx_fifo_timeout.attr); } diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c index 7576ceace571..f438eaa68246 100644 --- a/drivers/tty/tty_baudrate.c +++ b/drivers/tty/tty_baudrate.c @@ -77,7 +77,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios) else cbaud += 15; } - return baud_table[cbaud]; + return cbaud >= n_baud_table ? 0 : baud_table[cbaud]; } EXPORT_SYMBOL(tty_termios_baud_rate); @@ -113,7 +113,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios) else cbaud += 15; } - return baud_table[cbaud]; + return cbaud >= n_baud_table ? 
0 : baud_table[cbaud]; #else /* IBSHIFT */ return tty_termios_baud_rate(termios); #endif /* IBSHIFT */ diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 55370e651db3..41ec8e5010f3 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1548,7 +1548,7 @@ static void csi_K(struct vc_data *vc, int vpar) scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count); vc->vc_need_wrap = 0; if (con_should_update(vc)) - do_update_region(vc, (unsigned long) start, count); + do_update_region(vc, (unsigned long)(start + offset), count); } static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */ diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig index e36d6c73c4a4..78118883f96c 100644 --- a/drivers/usb/typec/ucsi/Kconfig +++ b/drivers/usb/typec/ucsi/Kconfig @@ -23,6 +23,16 @@ config TYPEC_UCSI if TYPEC_UCSI +config UCSI_CCG + tristate "UCSI Interface Driver for Cypress CCGx" + depends on I2C + help + This driver enables UCSI support on platforms that expose a + Cypress CCGx Type-C controller over I2C interface. + + To compile the driver as a module, choose M here: the module will be + called ucsi_ccg. + config UCSI_ACPI tristate "UCSI ACPI Interface Driver" depends on ACPI diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile index 7afbea512207..2f4900b26210 100644 --- a/drivers/usb/typec/ucsi/Makefile +++ b/drivers/usb/typec/ucsi/Makefile @@ -8,3 +8,5 @@ typec_ucsi-y := ucsi.o typec_ucsi-$(CONFIG_TRACING) += trace.o obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o + +obj-$(CONFIG_UCSI_CCG) += ucsi_ccg.o diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c new file mode 100644 index 000000000000..de8a43bdff68 --- /dev/null +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * UCSI driver for Cypress CCGx Type-C controller + * + * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved. 
+ * Author: Ajay Gupta <ajayg@nvidia.com> + * + * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c + */ +#include <linux/acpi.h> +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/platform_device.h> + +#include <asm/unaligned.h> +#include "ucsi.h" + +struct ucsi_ccg { + struct device *dev; + struct ucsi *ucsi; + struct ucsi_ppm ppm; + struct i2c_client *client; +}; + +#define CCGX_RAB_INTR_REG 0x06 +#define CCGX_RAB_UCSI_CONTROL 0x39 +#define CCGX_RAB_UCSI_CONTROL_START BIT(0) +#define CCGX_RAB_UCSI_CONTROL_STOP BIT(1) +#define CCGX_RAB_UCSI_DATA_BLOCK(offset) (0xf000 | ((offset) & 0xff)) + +static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len) +{ + struct i2c_client *client = uc->client; + const struct i2c_adapter_quirks *quirks = client->adapter->quirks; + unsigned char buf[2]; + struct i2c_msg msgs[] = { + { + .addr = client->addr, + .flags = 0x0, + .len = sizeof(buf), + .buf = buf, + }, + { + .addr = client->addr, + .flags = I2C_M_RD, + .buf = data, + }, + }; + u32 rlen, rem_len = len, max_read_len = len; + int status; + + /* check any max_read_len limitation on i2c adapter */ + if (quirks && quirks->max_read_len) + max_read_len = quirks->max_read_len; + + while (rem_len > 0) { + msgs[1].buf = &data[len - rem_len]; + rlen = min_t(u16, rem_len, max_read_len); + msgs[1].len = rlen; + put_unaligned_le16(rab, buf); + status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (status < 0) { + dev_err(uc->dev, "i2c_transfer failed %d\n", status); + return status; + } + rab += rlen; + rem_len -= rlen; + } + + return 0; +} + +static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len) +{ + struct i2c_client *client = uc->client; + unsigned char *buf; + struct i2c_msg msgs[] = { + { + .addr = client->addr, + .flags = 0x0, + } + }; + int status; + + buf = kzalloc(len + sizeof(rab), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + put_unaligned_le16(rab, buf); + memcpy(buf + sizeof(rab), data, len); + + msgs[0].len = len + sizeof(rab); + msgs[0].buf = buf; + + status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (status < 0) { + dev_err(uc->dev, "i2c_transfer failed %d\n", status); + kfree(buf); + return status; + } + + kfree(buf); + return 0; +} + +static int ucsi_ccg_init(struct ucsi_ccg *uc) +{ + unsigned int count = 10; + u8 data; + int status; + + data = CCGX_RAB_UCSI_CONTROL_STOP; + status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data)); + if (status < 0) + return status; + + data = CCGX_RAB_UCSI_CONTROL_START; + status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data)); + if (status < 0) + return status; + + /* + * Flush CCGx RESPONSE queue by acking interrupts. Above ucsi control + * register write will push response which must be cleared. 
+ */ + do { + status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data)); + if (status < 0) + return status; + + if (!data) + return 0; + + status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data)); + if (status < 0) + return status; + + usleep_range(10000, 11000); + } while (--count); + + return -ETIMEDOUT; +} + +static int ucsi_ccg_send_data(struct ucsi_ccg *uc) +{ + u8 *ppm = (u8 *)uc->ppm.data; + int status; + u16 rab; + + rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out)); + status = ccg_write(uc, rab, ppm + + offsetof(struct ucsi_data, message_out), + sizeof(uc->ppm.data->message_out)); + if (status < 0) + return status; + + rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl)); + return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl), + sizeof(uc->ppm.data->ctrl)); +} + +static int ucsi_ccg_recv_data(struct ucsi_ccg *uc) +{ + u8 *ppm = (u8 *)uc->ppm.data; + int status; + u16 rab; + + rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci)); + status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci), + sizeof(uc->ppm.data->cci)); + if (status < 0) + return status; + + rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in)); + return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in), + sizeof(uc->ppm.data->message_in)); +} + +static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc) +{ + int status; + unsigned char data; + + status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data)); + if (status < 0) + return status; + + return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data)); +} + +static int ucsi_ccg_sync(struct ucsi_ppm *ppm) +{ + struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm); + int status; + + status = ucsi_ccg_recv_data(uc); + if (status < 0) + return status; + + /* ack interrupt to allow next command to run */ + return ucsi_ccg_ack_interrupt(uc); +} + +static int ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl) +{ + struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm); + + ppm->data->ctrl.raw_cmd = ctrl->raw_cmd; + return ucsi_ccg_send_data(uc); +} + +static irqreturn_t ccg_irq_handler(int irq, void *data) +{ + struct ucsi_ccg *uc = data; + + ucsi_notify(uc->ucsi); + + return IRQ_HANDLED; +} + +static int ucsi_ccg_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct ucsi_ccg *uc; + int status; + u16 rab; + + uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL); + if (!uc) + return -ENOMEM; + + uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL); + if (!uc->ppm.data) + return -ENOMEM; + + uc->ppm.cmd = ucsi_ccg_cmd; + uc->ppm.sync = ucsi_ccg_sync; + uc->dev = dev; + uc->client = client; + + /* reset ccg device and initialize ucsi */ + status = ucsi_ccg_init(uc); + if (status < 0) { + dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status); + return status; + } + + status = devm_request_threaded_irq(dev, client->irq, NULL, + ccg_irq_handler, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, + dev_name(dev), uc); + if (status < 0) { + dev_err(uc->dev, "request_threaded_irq failed - %d\n", status); + return status; + } + + uc->ucsi = ucsi_register_ppm(dev, &uc->ppm); + if (IS_ERR(uc->ucsi)) { + dev_err(uc->dev, "ucsi_register_ppm failed\n"); + return PTR_ERR(uc->ucsi); + } + + rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version)); + status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) + + offsetof(struct ucsi_data, version), + sizeof(uc->ppm.data->version)); + if (status < 0) { 
+ ucsi_unregister_ppm(uc->ucsi); + return status; + } + + i2c_set_clientdata(client, uc); + return 0; +} + +static int ucsi_ccg_remove(struct i2c_client *client) +{ + struct ucsi_ccg *uc = i2c_get_clientdata(client); + + ucsi_unregister_ppm(uc->ucsi); + + return 0; +} + +static const struct i2c_device_id ucsi_ccg_device_id[] = { + {"ccgx-ucsi", 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id); + +static struct i2c_driver ucsi_ccg_driver = { + .driver = { + .name = "ucsi_ccg", + }, + .probe = ucsi_ccg_probe, + .remove = ucsi_ccg_remove, + .id_table = ucsi_ccg_device_id, +}; + +module_i2c_driver(ucsi_ccg_driver); + +MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>"); +MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index f15f89df1f36..7ea6fb6a2e5d 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -914,7 +914,7 @@ int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args) ret = xenmem_reservation_increase(args->nr_pages, args->frames); if (ret != args->nr_pages) { - pr_debug("Failed to decrease reservation for DMA buffer\n"); + pr_debug("Failed to increase reservation for DMA buffer\n"); ret = -EFAULT; } else { ret = 0; diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c index df1ed37c3269..de01a6d0059d 100644 --- a/drivers/xen/privcmd-buf.c +++ b/drivers/xen/privcmd-buf.c @@ -21,15 +21,9 @@ MODULE_LICENSE("GPL"); -static unsigned int limit = 64; -module_param(limit, uint, 0644); -MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by " - "the privcmd-buf device per open file"); - struct privcmd_buf_private { struct mutex lock; struct list_head list; - unsigned int allocated; }; struct privcmd_buf_vma_private { @@ -60,13 +54,10 @@ static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv) { unsigned int i; - vma_priv->file_priv->allocated -= vma_priv->n_pages; - list_del(&vma_priv->list); for (i = 0; i < vma_priv->n_pages; i++) - if (vma_priv->pages[i]) - __free_page(vma_priv->pages[i]); + __free_page(vma_priv->pages[i]); kfree(vma_priv); } @@ -146,8 +137,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma) unsigned int i; int ret = 0; - if (!(vma->vm_flags & VM_SHARED) || count > limit || - file_priv->allocated + count > limit) + if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *), @@ -155,19 +145,15 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma) if (!vma_priv) return -ENOMEM; - vma_priv->n_pages = count; - count = 0; - for (i = 0; i < vma_priv->n_pages; i++) { + for (i = 0; i < count; i++) { vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!vma_priv->pages[i]) break; - count++; + vma_priv->n_pages++; } mutex_lock(&file_priv->lock); - file_priv->allocated += count; - vma_priv->file_priv = file_priv; vma_priv->users = 1; |
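The ccg_read() helper in the ucsi_ccg.c hunk above shows a pattern worth calling out: a large register-block read is split into several I2C transfers so that no single transfer exceeds the adapter's max_read_len quirk, with the register address advanced by the amount already read. The sketch below shows that pattern in isolation; it is an illustrative sketch only, the function name and register layout are hypothetical and not taken from the patch, and it assumes the target device auto-addresses reads from the 16-bit little-endian offset written in the first message.

	/*
	 * Minimal sketch of a quirk-aware chunked I2C register read, modelled
	 * on ccg_read() above. Needs <linux/i2c.h>, <linux/minmax.h> and
	 * <asm/unaligned.h>. All names here are hypothetical.
	 */
	static int example_chunked_read(struct i2c_client *client, u16 reg,
					u8 *data, u32 len)
	{
		const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
		u32 max_read = len;
		unsigned char addr[2];
		struct i2c_msg msgs[] = {
			{
				.addr = client->addr,
				.flags = 0,
				.len = sizeof(addr),
				.buf = addr,
			},
			{
				.addr = client->addr,
				.flags = I2C_M_RD,
			},
		};
		u32 done = 0;
		int ret;

		/* honour any per-adapter limit on the size of a single read */
		if (quirks && quirks->max_read_len)
			max_read = quirks->max_read_len;

		while (done < len) {
			u32 chunk = min(len - done, max_read);

			/* write the advancing register offset, then read one chunk */
			put_unaligned_le16(reg + done, addr);
			msgs[1].buf = data + done;
			msgs[1].len = chunk; /* i2c_msg.len is 16-bit; real code should cap this too */

			ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
			if (ret < 0)
				return ret;

			done += chunk;
		}

		return 0;
	}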