Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 11
-rw-r--r--  drivers/acpi/Makefile | 2
-rw-r--r--  drivers/acpi/battery.c | 31
-rw-r--r--  drivers/acpi/cppc_acpi.c | 13
-rw-r--r--  drivers/acpi/internal.h | 25
-rw-r--r--  drivers/acpi/osl.c | 12
-rw-r--r--  drivers/acpi/pfr_telemetry.c | 3
-rw-r--r--  drivers/acpi/processor_driver.c | 9
-rw-r--r--  drivers/acpi/processor_perflib.c | 13
-rw-r--r--  drivers/acpi/sbshc.c | 9
-rw-r--r--  drivers/acpi/video_detect.c | 16
-rw-r--r--  drivers/acpi/x86/utils.c | 49
-rw-r--r--  drivers/base/arch_topology.c | 6
-rw-r--r--  drivers/char/Kconfig | 1
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 4
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 32
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-x1e80100.c | 12
-rw-r--r--  drivers/clk/qcom/videocc-sm8350.c | 4
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 26
-rw-r--r--  drivers/edac/qcom_edac.c | 8
-rw-r--r--  drivers/firmware/arm_scmi/bus.c | 7
-rw-r--r--  drivers/firmware/arm_scmi/common.h | 2
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 10
-rw-r--r--  drivers/firmware/microchip/mpfs-auto-update.c | 42
-rw-r--r--  drivers/firmware/qcom/qcom_scm.c | 17
-rw-r--r--  drivers/firmware/smccc/smccc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 49
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 20
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 74
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 2
-rw-r--r--  drivers/gpu/drm/drm_panel_orientation_quirks.c | 1
-rw-r--r--  drivers/gpu/drm/imagination/pvr_context.c | 33
-rw-r--r--  drivers/gpu/drm/imagination/pvr_context.h | 21
-rw-r--r--  drivers/gpu/drm/imagination/pvr_device.h | 10
-rw-r--r--  drivers/gpu/drm/imagination/pvr_drv.c | 3
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm.c | 22
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm.h | 1
-rw-r--r--  drivers/gpu/drm/panthor/panthor_device.c | 4
-rw-r--r--  drivers/gpu/drm/panthor/panthor_mmu.c | 2
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_gt_regs.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c | 10
-rw-r--r--  drivers/gpu/drm/xe/xe_device.h | 14
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_exec.c | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_ccs_mode.c | 15
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct.c | 11
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_wait_user_fence.c | 7
-rw-r--r--  drivers/hid/hid-core.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 3
-rw-r--r--  drivers/i2c/busses/i2c-designware-common.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.h | 1
-rw-r--r--  drivers/i2c/muxes/i2c-mux-mule.c | 4
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 7
-rw-r--r--  drivers/md/dm-cache-target.c | 59
-rw-r--r--  drivers/md/dm-unstripe.c | 4
-rw-r--r--  drivers/md/dm-verity-target.c | 9
-rw-r--r--  drivers/md/dm-verity.h | 1
-rw-r--r--  drivers/md/dm.c | 4
-rw-r--r--  drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c | 6
-rw-r--r--  drivers/media/cec/usb/pulse8/pulse8-cec.c | 2
-rw-r--r--  drivers/media/common/v4l2-tpg/v4l2-tpg-core.c | 3
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-core.c | 28
-rw-r--r--  drivers/media/dvb-core/dvb_frontend.c | 4
-rw-r--r--  drivers/media/dvb-core/dvb_vb2.c | 8
-rw-r--r--  drivers/media/dvb-core/dvbdev.c | 16
-rw-r--r--  drivers/media/dvb-frontends/cx24116.c | 7
-rw-r--r--  drivers/media/dvb-frontends/stb0899_algo.c | 2
-rw-r--r--  drivers/media/i2c/adv7604.c | 26
-rw-r--r--  drivers/media/i2c/ar0521.c | 4
-rw-r--r--  drivers/media/pci/mgb4/mgb4_cmt.c | 2
-rw-r--r--  drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c | 17
-rw-r--r--  drivers/media/test-drivers/vivid/vivid-core.c | 2
-rw-r--r--  drivers/media/test-drivers/vivid/vivid-core.h | 4
-rw-r--r--  drivers/media/test-drivers/vivid/vivid-ctrls.c | 2
-rw-r--r--  drivers/media/test-drivers/vivid/vivid-vid-cap.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-ctrls-api.c | 17
-rw-r--r--  drivers/mmc/host/sdhci-pci-gli.c | 38
-rw-r--r--  drivers/net/can/c_can/c_can_main.c | 7
-rw-r--r--  drivers/net/can/cc770/Kconfig | 2
-rw-r--r--  drivers/net/can/m_can/m_can.c | 3
-rw-r--r--  drivers/net/can/rockchip/Kconfig | 3
-rw-r--r--  drivers/net/can/sja1000/Kconfig | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c | 8
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c | 10
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 27
-rw-r--r--  drivers/net/ethernet/arc/emac_mdio.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 18
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 59
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 33
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 45
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 40
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 17
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.h | 4
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h | 4
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c | 5
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 3
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 75
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.h | 6
-rw-r--r--  drivers/net/ethernet/vertexcom/mse102x.c | 5
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 4
-rw-r--r--  drivers/net/phy/dp83848.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 119
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 2
-rw-r--r--  drivers/nvme/host/core.c | 21
-rw-r--r--  drivers/platform/x86/Kconfig | 22
-rw-r--r--  drivers/platform/x86/amd/pmc/pmc.c | 5
-rw-r--r--  drivers/platform/x86/amd/pmf/core.c | 1
-rw-r--r--  drivers/platform/x86/amd/pmf/spc.c | 1
-rw-r--r--  drivers/platform/x86/dell/Kconfig | 1
-rw-r--r--  drivers/platform/x86/dell/dell-smbios-base.c | 1
-rw-r--r--  drivers/platform/x86/dell/dell-wmi-base.c | 6
-rw-r--r--  drivers/platform/x86/hp/Kconfig | 1
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 3
-rw-r--r--  drivers/platform/x86/intel/Kconfig | 2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 28
-rw-r--r--  drivers/pwm/pwm-imx-tpm.c | 4
-rw-r--r--  drivers/regulator/rk808-regulator.c | 2
-rw-r--r--  drivers/regulator/rtq2208-regulator.c | 2
-rw-r--r--  drivers/rpmsg/qcom_glink_native.c | 10
-rw-r--r--  drivers/scsi/sd_zbc.c | 3
-rw-r--r--  drivers/soc/qcom/llcc-qcom.c | 3
-rw-r--r--  drivers/soc/qcom/pmic_glink.c | 25
-rw-r--r--  drivers/soc/qcom/socinfo.c | 8
-rw-r--r--  drivers/staging/media/av7110/av7110.h | 4
-rw-r--r--  drivers/staging/media/av7110/av7110_ca.c | 25
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 6
-rw-r--r--  drivers/thermal/qcom/lmh.c | 7
-rw-r--r--  drivers/thermal/thermal_of.c | 21
-rw-r--r--  drivers/thunderbolt/retimer.c | 2
-rw-r--r--  drivers/thunderbolt/usb4.c | 2
-rw-r--r--  drivers/ufs/core/ufshcd.c | 10
-rw-r--r--  drivers/usb/dwc3/core.c | 25
-rw-r--r--  drivers/usb/musb/sunxi.c | 2
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 8
-rw-r--r--  drivers/usb/serial/option.c | 6
-rw-r--r--  drivers/usb/serial/qcserial.c | 2
-rw-r--r--  drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c | 8
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_ccg.c | 2
172 files changed, 1147 insertions, 792 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index d67f63d93b2a..d65cd08ba8e1 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -132,8 +132,17 @@ config ACPI_REV_OVERRIDE_POSSIBLE
makes it possible to force the kernel to return "5" as the supported
ACPI revision via the "acpi_rev_override" command line switch.
+config ACPI_EC
+ bool "Embedded Controller"
+ depends on HAS_IOPORT
+ default X86
+ help
+ This driver handles communication with the microcontroller
+ on many x86 laptops and other machines.
+
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
+ depends on ACPI_EC
help
Say N to disable Embedded Controller /sys/kernel/debug interface
@@ -433,7 +442,7 @@ config ACPI_HOTPLUG_IOAPIC
config ACPI_SBS
tristate "Smart Battery System"
- depends on X86
+ depends on X86 && ACPI_EC
select POWER_SUPPLY
help
This driver supports the Smart Battery System, another
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 61ca4afe83dc..40208a0f5dfb 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -41,7 +41,7 @@ acpi-y += resource.o
acpi-y += acpi_processor.o
acpi-y += processor_core.o
acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
-acpi-y += ec.o
+acpi-$(CONFIG_ACPI_EC) += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 65fa3444367a..aed4a37da03e 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1218,15 +1218,21 @@ static int acpi_battery_add(struct acpi_device *device)
if (device->dep_unmet)
return -EPROBE_DEFER;
- battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
+ battery = devm_kzalloc(&device->dev, sizeof(*battery), GFP_KERNEL);
if (!battery)
return -ENOMEM;
battery->device = device;
strscpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
strscpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
device->driver_data = battery;
- mutex_init(&battery->lock);
- mutex_init(&battery->sysfs_lock);
+ result = devm_mutex_init(&device->dev, &battery->lock);
+ if (result)
+ return result;
+
+ result = devm_mutex_init(&device->dev, &battery->sysfs_lock);
+ if (result)
+ return result;
+
if (acpi_has_method(battery->device->handle, "_BIX"))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
@@ -1238,7 +1244,9 @@ static int acpi_battery_add(struct acpi_device *device)
device->status.battery_present ? "present" : "absent");
battery->pm_nb.notifier_call = battery_notify;
- register_pm_notifier(&battery->pm_nb);
+ result = register_pm_notifier(&battery->pm_nb);
+ if (result)
+ goto fail;
device_init_wakeup(&device->dev, 1);
@@ -1254,9 +1262,6 @@ fail_pm:
unregister_pm_notifier(&battery->pm_nb);
fail:
sysfs_remove_battery(battery);
- mutex_destroy(&battery->lock);
- mutex_destroy(&battery->sysfs_lock);
- kfree(battery);
return result;
}
@@ -1276,13 +1281,8 @@ static void acpi_battery_remove(struct acpi_device *device)
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
sysfs_remove_battery(battery);
-
- mutex_destroy(&battery->lock);
- mutex_destroy(&battery->sysfs_lock);
- kfree(battery);
}
-#ifdef CONFIG_PM_SLEEP
/* this is needed to learn about changes made in suspended state */
static int acpi_battery_resume(struct device *dev)
{
@@ -1299,11 +1299,8 @@ static int acpi_battery_resume(struct device *dev)
acpi_battery_update(battery, true);
return 0;
}
-#else
-#define acpi_battery_resume NULL
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
static struct acpi_driver acpi_battery_driver = {
.name = "battery",
@@ -1313,7 +1310,7 @@ static struct acpi_driver acpi_battery_driver = {
.add = acpi_battery_add,
.remove = acpi_battery_remove,
},
- .drv.pm = &acpi_battery_pm,
+ .drv.pm = pm_sleep_ptr(&acpi_battery_pm),
.drv.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 1a40f0514eaa..f193e713825a 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -671,10 +671,6 @@ static int pcc_data_alloc(int pcc_ss_id)
* )
*/
-#ifndef arch_init_invariance_cppc
-static inline void arch_init_invariance_cppc(void) { }
-#endif
-
/**
* acpi_cppc_processor_probe - Search for per CPU _CPC objects.
* @pr: Ptr to acpi_processor containing this CPU's logical ID.
@@ -905,8 +901,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- arch_init_invariance_cppc();
-
kfree(output.pointer);
return 0;
@@ -1017,7 +1011,8 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = 0;
size = GET_BIT_WIDTH(reg);
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
+ reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
u32 val_u32;
acpi_status status;
@@ -1091,7 +1086,8 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
size = GET_BIT_WIDTH(reg);
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
+ reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
@@ -1146,7 +1142,6 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return -EFAULT;
}
val = MASK_VAL_WRITE(reg, prev_val, val);
- val |= prev_val;
}
switch (size) {
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index ced7dff9a5db..00910ccd7eda 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -215,6 +215,8 @@ extern struct acpi_ec *first_ec;
/* External interfaces use first EC only, so remember */
typedef int (*acpi_ec_query_func) (void *data);
+#ifdef CONFIG_ACPI_EC
+
void acpi_ec_init(void);
void acpi_ec_ecdt_probe(void);
void acpi_ec_dsdt_probe(void);
@@ -231,6 +233,29 @@ void acpi_ec_flush_work(void);
bool acpi_ec_dispatch_gpe(void);
#endif
+#else
+
+static inline void acpi_ec_init(void) {}
+static inline void acpi_ec_ecdt_probe(void) {}
+static inline void acpi_ec_dsdt_probe(void) {}
+static inline void acpi_ec_block_transactions(void) {}
+static inline void acpi_ec_unblock_transactions(void) {}
+static inline int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+ acpi_handle handle, acpi_ec_query_func func,
+ void *data)
+{
+ return -ENXIO;
+}
+static inline void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) {}
+static inline void acpi_ec_register_opregions(struct acpi_device *adev) {}
+
+static inline void acpi_ec_flush_work(void) {}
+static inline bool acpi_ec_dispatch_gpe(void)
+{
+ return false;
+}
+
+#endif
/*--------------------------------------------------------------------------
Suspend/Resume
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 70af3fbbebe5..fed446aace42 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -642,6 +642,15 @@ acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
u32 dummy;
+ if (!IS_ENABLED(CONFIG_HAS_IOPORT)) {
+ /*
+ * set all-1 result as if reading from non-existing
+ * I/O port
+ */
+ *value = GENMASK(width, 0);
+ return AE_NOT_IMPLEMENTED;
+ }
+
if (value)
*value = 0;
else
@@ -665,6 +674,9 @@ EXPORT_SYMBOL(acpi_os_read_port);
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
+ if (!IS_ENABLED(CONFIG_HAS_IOPORT))
+ return AE_NOT_IMPLEMENTED;
+
if (width <= 8) {
outb(value, port);
} else if (width <= 16) {
diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c
index cd6ca7121a14..32bdf8cbe8f2 100644
--- a/drivers/acpi/pfr_telemetry.c
+++ b/drivers/acpi/pfr_telemetry.c
@@ -272,9 +272,6 @@ static long pfrt_log_ioctl(struct file *file, unsigned int cmd, unsigned long ar
case PFRT_LOG_IOC_GET_INFO:
info.log_level = get_pfrt_log_level(pfrt_log_dev);
- if (ret < 0)
- return ret;
-
info.log_type = pfrt_log_dev->info.log_type;
info.log_revid = pfrt_log_dev->info.log_revid;
if (copy_to_user(p, &info, sizeof(info)))
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index cb52dd000b95..3b281bc1e73c 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -237,6 +237,9 @@ static struct notifier_block acpi_processor_notifier_block = {
.notifier_call = acpi_processor_notifier,
};
+void __weak acpi_processor_init_invariance_cppc(void)
+{ }
+
/*
* We keep the driver loaded even when ACPI is not running.
* This is needed for the powernow-k8 driver, that works even without
@@ -270,6 +273,12 @@ static int __init acpi_processor_driver_init(void)
NULL, acpi_soft_cpu_dead);
acpi_processor_throttling_init();
+
+ /*
+ * Frequency invariance calculations on AMD platforms can't be run until
+ * after acpi_cppc_processor_probe() has been called for all online CPUs
+ */
+ acpi_processor_init_invariance_cppc();
return 0;
err:
driver_unregister(&acpi_processor_driver);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 4265814c74f8..53996f1a2d80 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -24,8 +24,6 @@
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
-static DEFINE_MUTEX(performance_mutex);
-
/*
* _PPC support is implemented as a CPUfreq policy notifier:
* This means each time a CPUfreq driver registered also with
@@ -209,6 +207,10 @@ void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
}
}
+#ifdef CONFIG_X86
+
+static DEFINE_MUTEX(performance_mutex);
+
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
int result = 0;
@@ -267,7 +269,6 @@ end:
return result;
}
-#ifdef CONFIG_X86
/*
* Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
* in their ACPI data. Calculate the real values and fix up the _PSS data.
@@ -298,9 +299,6 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
px->core_frequency = (100 * (fid + 8)) >> did;
}
}
-#else
-static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
-#endif
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
@@ -440,13 +438,11 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr)
* the BIOS is older than the CPU and does not know its frequencies
*/
update_bios:
-#ifdef CONFIG_X86
if (acpi_has_method(pr->handle, "_PPC")) {
if(boot_cpu_has(X86_FEATURE_EST))
pr_warn(FW_BUG "BIOS needs update for CPU "
"frequency support\n");
}
-#endif
return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
@@ -788,3 +784,4 @@ unlock:
mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);
+#endif
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 7e2ddc74a1f0..1a2bf520be23 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include "sbshc.h"
+#include "internal.h"
#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
@@ -236,12 +237,6 @@ static int smbus_alarm(void *context)
return 0;
}
-typedef int (*acpi_ec_query_func) (void *data);
-
-extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
- acpi_handle handle, acpi_ec_query_func func,
- void *data);
-
static int acpi_smbus_hc_add(struct acpi_device *device)
{
int status;
@@ -278,8 +273,6 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
return 0;
}
-extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
-
static void acpi_smbus_hc_remove(struct acpi_device *device)
{
struct acpi_smb_hc *hc;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 015bd8e66c1c..d507d5e08435 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -551,6 +551,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Apple MacBook Air 7,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir7,2"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Apple MacBook Air 9,1 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -566,6 +574,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ .callback = video_detect_force_native,
+ /* Apple MacBook Pro 11,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro11,2"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
/* Apple MacBook Pro 12,1 */
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 6af546b21574..423565c31d5e 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -12,6 +12,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
@@ -392,6 +393,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
+ /* Vexia Edu Atla 10 tablet 9V version */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_UART1_SKIP |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
/* Whitelabel (sold as various brands) TM800A550L */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -439,18 +453,35 @@ static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bo
struct acpi_device *adev = ACPI_COMPANION(controller_parent);
const struct dmi_system_id *dmi_id;
long quirks = 0;
- u64 uid;
- int ret;
+ u64 uid = 0;
- ret = acpi_dev_uid_to_integer(adev, &uid);
- if (ret)
+ dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ if (!dmi_id)
return 0;
- dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
- if (dmi_id)
- quirks = (unsigned long)dmi_id->driver_data;
+ quirks = (unsigned long)dmi_id->driver_data;
+
+ /* uid is left at 0 on errors and 0 is not a valid UART UID */
+ acpi_dev_uid_to_integer(adev, &uid);
+
+ /* For PCI UARTs without an UID */
+ if (!uid && dev_is_pci(controller_parent)) {
+ struct pci_dev *pdev = to_pci_dev(controller_parent);
+
+ /*
+ * Devfn values for PCI UARTs on Bay Trail SoCs, which are
+ * the only devices where this fallback is necessary.
+ */
+ if (pdev->devfn == PCI_DEVFN(0x1e, 3))
+ uid = 1;
+ else if (pdev->devfn == PCI_DEVFN(0x1e, 4))
+ uid = 2;
+ }
+
+ if (!uid)
+ return 0;
- if (!dev_is_platform(controller_parent)) {
+ if (!dev_is_platform(controller_parent) && !dev_is_pci(controller_parent)) {
/* PNP enumerated UARTs */
if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1)
*skip = true;
@@ -505,7 +536,7 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
* Set skip to true so that the tty core creates a serdev ctrl device.
* The backlight driver will manually create the serdev client device.
*/
- if (acpi_dev_hid_match(adev, "DELL0501")) {
+ if (adev && acpi_dev_hid_match(adev, "DELL0501")) {
*skip = true;
/*
* Create a platform dev for dell-uart-backlight to bind to.
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 75fcb75d5515..3ebe77566788 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -366,7 +366,7 @@ void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
-void topology_init_cpu_capacity_cppc(void)
+static inline void topology_init_cpu_capacity_cppc(void)
{
u64 capacity, capacity_scale = 0;
struct cppc_perf_caps perf_caps;
@@ -417,6 +417,10 @@ void topology_init_cpu_capacity_cppc(void)
exit:
free_raw_capacity();
}
+void acpi_processor_init_invariance_cppc(void)
+{
+ topology_init_cpu_capacity_cppc();
+}
#endif
#ifdef CONFIG_CPU_FREQ
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 7c8dd0abcfdf..8fb33c90482f 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -238,6 +238,7 @@ config APPLICOM
config SONYPI
tristate "Sony Vaio Programmable I/O Control Device support"
depends on X86_32 && PCI && INPUT
+ depends on ACPI_EC || !ACPI
help
This driver enables access to the Sony Programmable I/O Control
Device which can be found in many (all ?) Sony Vaio laptops.
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 1ff99a7091bb..7df7abaf3e52 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -525,10 +525,6 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
- /* Give back zero bytes, as TPM chip has not yet fully resumed: */
- if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
- return 0;
-
return tpm_get_random(chip, data, max);
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 8134f002b121..b1daa0d7b341 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -370,6 +370,13 @@ int tpm_pm_suspend(struct device *dev)
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc) {
+ /* Can be safely set out of locks, as no action cannot race: */
+ chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+ goto out;
+ }
+
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
goto suspended;
@@ -377,21 +384,19 @@ int tpm_pm_suspend(struct device *dev)
!pm_suspend_via_firmware())
goto suspended;
- rc = tpm_try_get_ops(chip);
- if (!rc) {
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- tpm2_end_auth_session(chip);
- tpm2_shutdown(chip, TPM2_SU_STATE);
- } else {
- rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
- }
-
- tpm_put_ops(chip);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ tpm2_end_auth_session(chip);
+ tpm2_shutdown(chip, TPM2_SU_STATE);
+ goto suspended;
}
+ rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
suspended:
chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+ tpm_put_ops(chip);
+out:
if (rc)
dev_err(dev, "Ignoring error %d while suspending\n", rc);
return 0;
@@ -440,11 +445,18 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
if (!chip)
return -ENODEV;
+ /* Give back zero bytes, as TPM chip has not yet fully resumed: */
+ if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) {
+ rc = 0;
+ goto out;
+ }
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_get_random(chip, out, max);
else
rc = tpm1_get_random(chip, out, max);
+out:
tpm_put_ops(chip);
return rc;
}
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index f9105443d7db..be9bee6ab65f 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -40,7 +40,7 @@
#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
# define PLL_POST_DIV_SHIFT 8
-# define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0)
+# define PLL_POST_DIV_MASK(p) GENMASK((p)->width ? (p)->width - 1 : 3, 0)
# define PLL_ALPHA_MSB BIT(15)
# define PLL_ALPHA_EN BIT(24)
# define PLL_ALPHA_MODE BIT(25)
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 0f578771071f..8ea25aa25dff 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -3123,7 +3123,7 @@ static struct clk_branch gcc_pcie_3_pipe_clk = {
static struct clk_branch gcc_pcie_3_pipediv2_clk = {
.halt_reg = 0x58060,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52020,
.enable_mask = BIT(5),
@@ -3248,7 +3248,7 @@ static struct clk_branch gcc_pcie_4_pipe_clk = {
static struct clk_branch gcc_pcie_4_pipediv2_clk = {
.halt_reg = 0x6b054,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52010,
.enable_mask = BIT(27),
@@ -3373,7 +3373,7 @@ static struct clk_branch gcc_pcie_5_pipe_clk = {
static struct clk_branch gcc_pcie_5_pipediv2_clk = {
.halt_reg = 0x2f054,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52018,
.enable_mask = BIT(19),
@@ -3511,7 +3511,7 @@ static struct clk_branch gcc_pcie_6a_pipe_clk = {
static struct clk_branch gcc_pcie_6a_pipediv2_clk = {
.halt_reg = 0x31060,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52018,
.enable_mask = BIT(28),
@@ -3649,7 +3649,7 @@ static struct clk_branch gcc_pcie_6b_pipe_clk = {
static struct clk_branch gcc_pcie_6b_pipediv2_clk = {
.halt_reg = 0x8d060,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52010,
.enable_mask = BIT(28),
@@ -6155,7 +6155,7 @@ static struct gdsc gcc_usb3_mp_ss1_phy_gdsc = {
.pd = {
.name = "gcc_usb3_mp_ss1_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/qcom/videocc-sm8350.c b/drivers/clk/qcom/videocc-sm8350.c
index 5bd6fe3e1298..874d4da95ff8 100644
--- a/drivers/clk/qcom/videocc-sm8350.c
+++ b/drivers/clk/qcom/videocc-sm8350.c
@@ -452,7 +452,7 @@ static struct gdsc mvs0_gdsc = {
.pd = {
.name = "mvs0_gdsc",
},
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -461,7 +461,7 @@ static struct gdsc mvs1_gdsc = {
.pd = {
.name = "mvs1_gdsc",
},
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b0018f371ea3..cd2ac1ba53d2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1034,7 +1034,7 @@ static void __hybrid_init_cpu_capacity_scaling(void)
hybrid_update_cpu_capacity_scaling();
}
-static void hybrid_init_cpu_capacity_scaling(void)
+static void hybrid_init_cpu_capacity_scaling(bool refresh)
{
bool disable_itmt = false;
@@ -1045,7 +1045,7 @@ static void hybrid_init_cpu_capacity_scaling(void)
* scaling has been enabled already and the driver is just changing the
* operation mode.
*/
- if (hybrid_max_perf_cpu) {
+ if (refresh) {
__hybrid_init_cpu_capacity_scaling();
goto unlock;
}
@@ -1071,6 +1071,18 @@ unlock:
sched_clear_itmt_support();
}
+static bool hybrid_clear_max_perf_cpu(void)
+{
+ bool ret;
+
+ guard(mutex)(&hybrid_capacity_lock);
+
+ ret = !!hybrid_max_perf_cpu;
+ hybrid_max_perf_cpu = NULL;
+
+ return ret;
+}
+
static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
u64 cap;
@@ -2263,6 +2275,11 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
} else {
cpu->pstate.scaling = perf_ctl_scaling;
}
+ /*
+ * If the CPU is going online for the first time and it was
+ * offline initially, asym capacity scaling needs to be updated.
+ */
+ hybrid_update_capacity(cpu);
} else {
cpu->pstate.scaling = perf_ctl_scaling;
cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
@@ -3352,6 +3369,7 @@ static void intel_pstate_driver_cleanup(void)
static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
+ bool refresh_cpu_cap_scaling;
int ret;
if (driver == &intel_pstate)
@@ -3364,6 +3382,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
arch_set_max_freq_ratio(global.turbo_disabled);
+ refresh_cpu_cap_scaling = hybrid_clear_max_perf_cpu();
+
intel_pstate_driver = driver;
ret = cpufreq_register_driver(intel_pstate_driver);
if (ret) {
@@ -3373,7 +3393,7 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
global.min_perf_pct = min_perf_pct_min();
- hybrid_init_cpu_capacity_scaling();
+ hybrid_init_cpu_capacity_scaling(refresh_cpu_cap_scaling);
return 0;
}
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index d3cd4cc54ace..a9a8ba067007 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -342,9 +342,11 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
int ecc_irq;
int rc;
- rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
- if (rc)
- return rc;
+ if (!llcc_driv_data->ecc_irq_configured) {
+ rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
+ if (rc)
+ return rc;
+ }
/* Allocate edac control info */
edev_ctl = edac_device_alloc_ctl_info(0, "qcom-llcc", 1, "bank",
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 96b2e5f9a8ef..157172a5f2b5 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -325,7 +325,10 @@ EXPORT_SYMBOL_GPL(scmi_driver_unregister);
static void scmi_device_release(struct device *dev)
{
- kfree(to_scmi_dev(dev));
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ kfree_const(scmi_dev->name);
+ kfree(scmi_dev);
}
static void __scmi_device_destroy(struct scmi_device *scmi_dev)
@@ -338,7 +341,6 @@ static void __scmi_device_destroy(struct scmi_device *scmi_dev)
if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM)
atomic_set(&scmi_syspower_registered, 0);
- kfree_const(scmi_dev->name);
ida_free(&scmi_bus_id, scmi_dev->id);
device_unregister(&scmi_dev->dev);
}
@@ -410,7 +412,6 @@ __scmi_device_create(struct device_node *np, struct device *parent,
return scmi_dev;
put_dev:
- kfree_const(scmi_dev->name);
put_device(&scmi_dev->dev);
ida_free(&scmi_bus_id, id);
return NULL;
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index c4b8e7ff88aa..cdec50a698a1 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -163,6 +163,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
* used to initialize this channel
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
+ * @is_p2a: A flag to identify a channel as P2A (RX)
* @rx_timeout_ms: The configured RX timeout in milliseconds.
* @handle: Pointer to SCMI entity handle
* @no_completion_irq: Flag to indicate that this channel has no completion
@@ -174,6 +175,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
struct scmi_chan_info {
int id;
struct device *dev;
+ bool is_p2a;
unsigned int rx_timeout_ms;
struct scmi_handle *handle;
bool no_completion_irq;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index a477b5ade38d..f8934d049d68 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -1048,6 +1048,11 @@ static inline void scmi_xfer_command_release(struct scmi_info *info,
static inline void scmi_clear_channel(struct scmi_info *info,
struct scmi_chan_info *cinfo)
{
+ if (!cinfo->is_p2a) {
+ dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n");
+ return;
+ }
+
if (info->desc->ops->clear_channel)
info->desc->ops->clear_channel(cinfo);
}
@@ -2638,6 +2643,7 @@ static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
if (!cinfo)
return -ENOMEM;
+ cinfo->is_p2a = !tx;
cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
/* Create a unique name for this transport device */
@@ -3042,10 +3048,10 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
- ret = of_property_read_u32(dev->of_node, "max-rx-timeout-ms",
+ ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
&trans->desc->max_rx_timeout_ms);
if (ret && ret != -EINVAL)
- dev_err(dev, "Malformed max-rx-timeout-ms DT property.\n");
+ dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
dev_info(dev, "SCMI max-rx-timeout: %dms\n",
trans->desc->max_rx_timeout_ms);
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index 9ca5ee58edbd..0f7ec8848202 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -76,14 +76,11 @@
#define AUTO_UPDATE_INFO_SIZE SZ_1M
#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_INFO_SIZE)
-#define AUTO_UPDATE_TIMEOUT_MS 60000
-
struct mpfs_auto_update_priv {
struct mpfs_sys_controller *sys_controller;
struct device *dev;
struct mtd_info *flash;
struct fw_upload *fw_uploader;
- struct completion programming_complete;
size_t size_per_bitstream;
bool cancel_request;
};
@@ -156,19 +153,6 @@ static void mpfs_auto_update_cancel(struct fw_upload *fw_uploader)
static enum fw_upload_err mpfs_auto_update_poll_complete(struct fw_upload *fw_uploader)
{
- struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
- int ret;
-
- /*
- * There is no meaningful way to get the status of the programming while
- * it is in progress, so attempting anything other than waiting for it
- * to complete would be misplaced.
- */
- ret = wait_for_completion_timeout(&priv->programming_complete,
- msecs_to_jiffies(AUTO_UPDATE_TIMEOUT_MS));
- if (!ret)
- return FW_UPLOAD_ERR_TIMEOUT;
-
return FW_UPLOAD_ERR_NONE;
}
@@ -349,33 +333,23 @@ static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader,
u32 offset, u32 size, u32 *written)
{
struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
- enum fw_upload_err err = FW_UPLOAD_ERR_NONE;
int ret;
- reinit_completion(&priv->programming_complete);
-
ret = mpfs_auto_update_write_bitstream(fw_uploader, data, offset, size, written);
- if (ret) {
- err = FW_UPLOAD_ERR_RW_ERROR;
- goto out;
- }
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
- if (priv->cancel_request) {
- err = FW_UPLOAD_ERR_CANCELED;
- goto out;
- }
+ if (priv->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
if (mpfs_auto_update_is_bitstream_info(data, size))
- goto out;
+ return FW_UPLOAD_ERR_NONE;
ret = mpfs_auto_update_verify_image(fw_uploader);
if (ret)
- err = FW_UPLOAD_ERR_FW_INVALID;
+ return FW_UPLOAD_ERR_FW_INVALID;
-out:
- complete(&priv->programming_complete);
-
- return err;
+ return FW_UPLOAD_ERR_NONE;
}
static const struct fw_upload_ops mpfs_auto_update_ops = {
@@ -461,8 +435,6 @@ static int mpfs_auto_update_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"The current bitstream does not support auto-update\n");
- init_completion(&priv->programming_complete);
-
fw_uploader = firmware_upload_register(THIS_MODULE, dev, "mpfs-auto-update",
&mpfs_auto_update_ops, priv);
if (IS_ERR(fw_uploader))
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 10986cb11ec0..2e4260ba5f79 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -112,6 +112,7 @@ enum qcom_scm_qseecom_tz_cmd_info {
};
#define QSEECOM_MAX_APP_NAME_SIZE 64
+#define SHMBRIDGE_RESULT_NOTSUPP 4
/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
@@ -216,7 +217,7 @@ static DEFINE_SPINLOCK(scm_query_lock);
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
- return __scm->mempool;
+ return __scm ? __scm->mempool : NULL;
}
static enum qcom_scm_convention __get_convention(void)
@@ -545,7 +546,7 @@ static void qcom_scm_set_download_mode(u32 dload_mode)
} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
- } else {
+ } else if (dload_mode) {
dev_err(__scm->dev,
"No available mechanism for setting download mode\n");
}
@@ -1361,6 +1362,8 @@ EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
int qcom_scm_shm_bridge_enable(void)
{
+ int ret;
+
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
@@ -1373,7 +1376,15 @@ int qcom_scm_shm_bridge_enable(void)
QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
return -EOPNOTSUPP;
- return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0];
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ if (ret)
+ return ret;
+
+ if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
+ return -EOPNOTSUPP;
+
+ return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index d670635914ec..a74600d9f2d7 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -16,7 +16,6 @@ static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
bool __ro_after_init smccc_trng_available = false;
-u64 __ro_after_init smccc_has_sve_hint = false;
s32 __ro_after_init smccc_soc_id_version = SMCCC_RET_NOT_SUPPORTED;
s32 __ro_after_init smccc_soc_id_revision = SMCCC_RET_NOT_SUPPORTED;
@@ -28,9 +27,6 @@ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
smccc_conduit = conduit;
smccc_trng_available = smccc_probe_trng();
- if (IS_ENABLED(CONFIG_ARM64_SVE) &&
- smccc_version >= ARM_SMCCC_VERSION_1_3)
- smccc_has_sve_hint = true;
if ((smccc_version >= ARM_SMCCC_VERSION_1_2) &&
(smccc_conduit != SMCCC_CONDUIT_NONE)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 1f5a296f5ed2..7dd55ed57c1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -172,8 +172,8 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
&buffer);
obj = (union acpi_object *)buffer.pointer;
- /* Fail if calling the method fails and ATIF is supported */
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ /* Fail if calling the method fails */
+ if (ACPI_FAILURE(status)) {
DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
acpi_format_exception(status));
kfree(obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index cbef720de779..9da4414de617 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -402,7 +402,7 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz
int r;
uint32_t *data, x;
- if (size & 0x3 || *pos & 0x3)
+ if (size > 4096 || size & 0x3 || *pos & 0x3)
return -EINVAL;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
@@ -1648,7 +1648,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
ent = debugfs_create_file(debugfs_regs_names[i],
- S_IFREG | 0444, root,
+ S_IFREG | 0400, root,
adev, debugfs_regs[i]);
if (!i && !IS_ERR_OR_NULL(ent))
i_size_write(ent->d_inode, adev->rmmio_size);
@@ -2100,11 +2100,11 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_securedisplay_debugfs_init(adev);
amdgpu_fw_attestation_debugfs_init(adev);
- debugfs_create_file("amdgpu_evict_vram", 0444, root, adev,
+ debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
&amdgpu_evict_vram_fops);
- debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev,
+ debugfs_create_file("amdgpu_evict_gtt", 0400, root, adev,
&amdgpu_evict_gtt_fops);
- debugfs_create_file("amdgpu_test_ib", 0444, root, adev,
+ debugfs_create_file("amdgpu_test_ib", 0400, root, adev,
&amdgpu_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
&amdgpu_debugfs_vm_info_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 5e8833e4fed2..ccfd2a4b4acc 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -482,7 +482,7 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
case AMDGPU_SPX_PARTITION_MODE:
return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
case AMDGPU_DPX_PARTITION_MODE:
- return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
+ return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
case AMDGPU_TPX_PARTITION_MODE:
return (adev->gmc.num_mem_partitions == 1 ||
adev->gmc.num_mem_partitions == 3) &&
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 13421a58210d..07e9ce99694f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9429,6 +9429,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
bool mode_set_reset_required = false;
u32 i;
struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
+ bool set_backlight_level = false;
/* Disable writeback */
for_each_old_connector_in_state(state, connector, old_con_state, i) {
@@ -9548,6 +9549,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
acrtc->hw_mode = new_crtc_state->mode;
crtc->hwmode = new_crtc_state->mode;
mode_set_reset_required = true;
+ set_backlight_level = true;
} else if (modereset_required(new_crtc_state)) {
drm_dbg_atomic(dev,
"Atomic commit: RESET. crtc id %d:[%p]\n",
@@ -9599,6 +9601,19 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
acrtc->otg_inst = status->primary_otg_inst;
}
}
+
+ /* During boot up and resume the DC layer will reset the panel brightness
+ * to fix a flicker issue.
+ * It will cause the dm->actual_brightness is not the current panel brightness
+ * level. (the dm->brightness is the correct panel level)
+ * So we set the backlight level with dm->brightness value after set mode
+ */
+ if (set_backlight_level) {
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i])
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+ }
}
static void dm_set_writeback(struct amdgpu_display_manager *dm,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 0d8498ab9b23..be8fbb04ad98 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -3127,7 +3127,9 @@ static enum bp_result bios_parser_get_vram_info(
struct atom_data_revision revision;
// vram info moved to umc_info for DCN4x
- if (info && DATA_TABLES(umc_info)) {
+ if (dcb->ctx->dce_version >= DCN_VERSION_4_01 &&
+ dcb->ctx->dce_version < DCN_VERSION_MAX &&
+ info && DATA_TABLES(umc_info)) {
header = GET_IMAGE(struct atom_common_table_header,
DATA_TABLES(umc_info));
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 80e60ea2d11e..ee1bcfaae3e3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1259,26 +1259,33 @@ static int smu_sw_init(void *handle)
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ smu->user_dpm_profile.user_workload_mask = 0;
atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
- smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
- smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
- smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
- smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
- smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
- smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
- smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+ smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
+ smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
+ smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
+ smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
+ smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4;
+ smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+ smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
if (smu->is_apu ||
- !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
- else
- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
+ !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) {
+ smu->driver_workload_mask =
+ 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ } else {
+ smu->driver_workload_mask =
+ 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
+ smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ }
+ smu->workload_mask = smu->driver_workload_mask |
+ smu->user_dpm_profile.user_workload_mask;
smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
@@ -2348,17 +2355,20 @@ static int smu_switch_power_profile(void *handle,
return -EINVAL;
if (!en) {
- smu->workload_mask &= ~(1 << smu->workload_prority[type]);
+ smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]);
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
} else {
- smu->workload_mask |= (1 << smu->workload_prority[type]);
+ smu->driver_workload_mask |= (1 << smu->workload_priority[type]);
index = fls(smu->workload_mask);
index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
}
+ smu->workload_mask = smu->driver_workload_mask |
+ smu->user_dpm_profile.user_workload_mask;
+
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, workload, 0);
@@ -3049,12 +3059,23 @@ static int smu_set_power_profile_mode(void *handle,
uint32_t param_size)
{
struct smu_context *smu = handle;
+ int ret;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->set_power_profile_mode)
return -EOPNOTSUPP;
- return smu_bump_power_profile_mode(smu, param, param_size);
+ if (smu->user_dpm_profile.user_workload_mask &
+ (1 << smu->workload_priority[param[param_size]]))
+ return 0;
+
+ smu->user_dpm_profile.user_workload_mask =
+ (1 << smu->workload_priority[param[param_size]]);
+ smu->workload_mask = smu->user_dpm_profile.user_workload_mask |
+ smu->driver_workload_mask;
+ ret = smu_bump_power_profile_mode(smu, param, param_size);
+
+ return ret;
}
static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index b44a185d07e8..d60d9a12a47e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -240,6 +240,7 @@ struct smu_user_dpm_profile {
/* user clock state information */
uint32_t clk_mask[SMU_CLK_COUNT];
uint32_t clk_dependency;
+ uint32_t user_workload_mask;
};
#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
@@ -557,7 +558,8 @@ struct smu_context {
bool disable_uclk_switch;
uint32_t workload_mask;
- uint32_t workload_prority[WORKLOAD_POLICY_MAX];
+ uint32_t driver_workload_mask;
+ uint32_t workload_priority[WORKLOAD_POLICY_MAX];
uint32_t workload_setting[WORKLOAD_POLICY_MAX];
uint32_t power_profile_mode;
uint32_t default_power_profile_mode;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index c0f6b59369b7..31fe512028f4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1455,7 +1455,6 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
return -EINVAL;
}
-
if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
(smu->smc_fw_version >= 0x360d00)) {
if (size != 10)
@@ -1523,14 +1522,14 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
- 1 << workload_type,
+ smu->workload_mask,
NULL);
if (ret) {
dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
return ret;
}
- smu->power_profile_mode = profile_mode;
+ smu_cmn_assign_power_profile(smu);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 16af1a329621..12223f507977 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2081,10 +2081,13 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
+
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type, NULL);
+ smu->workload_mask, NULL);
if (ret)
dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+ else
+ smu_cmn_assign_power_profile(smu);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 9c3c48297cba..3b7b2ec8319a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1786,10 +1786,13 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
+
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type, NULL);
+ smu->workload_mask, NULL);
if (ret)
dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+ else
+ smu_cmn_assign_power_profile(smu);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 1fe020f1f4db..952ee22cbc90 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1079,7 +1079,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input,
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
- 1 << workload_type,
+ smu->workload_mask,
NULL);
if (ret) {
dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
@@ -1087,7 +1087,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input,
return ret;
}
- smu->power_profile_mode = profile_mode;
+ smu_cmn_assign_power_profile(smu);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index cc0504b063fa..62316a6707ef 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -890,14 +890,14 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
- 1 << workload_type,
+ smu->workload_mask,
NULL);
if (ret) {
dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
return ret;
}
- smu->power_profile_mode = profile_mode;
+ smu_cmn_assign_power_profile(smu);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index d53e162dcd8d..5dd7ceca64fe 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2485,7 +2485,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
- u32 workload_mask, selected_workload_mask;
+ u32 workload_mask;
smu->power_profile_mode = input[size];
@@ -2552,7 +2552,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
return -EINVAL;
- selected_workload_mask = workload_mask = 1 << workload_type;
+ workload_mask = 1 << workload_type;
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
@@ -2567,12 +2567,22 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
workload_mask |= 1 << workload_type;
}
+ smu->workload_mask |= workload_mask;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
- workload_mask,
+ smu->workload_mask,
NULL);
- if (!ret)
- smu->workload_mask = selected_workload_mask;
+ if (!ret) {
+ smu_cmn_assign_power_profile(smu);
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) {
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ PP_SMC_POWER_PROFILE_FULLSCREEN3D);
+ smu->power_profile_mode = smu->workload_mask & (1 << workload_type)
+ ? PP_SMC_POWER_PROFILE_FULLSCREEN3D
+ : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ }
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index b891a5e0a396..9d0b19419de0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2499,13 +2499,14 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp
smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
+
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type, NULL);
+ smu->workload_mask, NULL);
if (ret)
dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
else
- smu->workload_mask = (1 << workload_type);
+ smu_cmn_assign_power_profile(smu);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index e83ea2bc7f9c..1aa13d32ceb2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -367,54 +367,6 @@ static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu)
return 0;
}
-#ifndef atom_smc_dpm_info_table_14_0_0
-struct atom_smc_dpm_info_table_14_0_0 {
- struct atom_common_table_header table_header;
- BoardTable_t BoardTable;
-};
-#endif
-
-static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *smc_pptable = table_context->driver_pptable;
- struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table;
- BoardTable_t *BoardTable = &smc_pptable->BoardTable;
- int index, ret;
-
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- smc_dpm_info);
-
- ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
- (uint8_t **)&smc_dpm_table);
- if (ret)
- return ret;
-
- memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));
-
- return 0;
-}
-
-#if 0
-static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
- void **table,
- uint32_t *size)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
- void *combo_pptable = smu_table->combo_pptable;
- int ret = 0;
-
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
-
- *table = combo_pptable;
- *size = sizeof(struct smu_14_0_powerplay_table);
-
- return 0;
-}
-#endif
-
static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
void **table,
uint32_t *size)
@@ -436,16 +388,12 @@ static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
static int smu_v14_0_2_setup_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
if (amdgpu_sriov_vf(smu->adev))
return 0;
- if (!adev->scpm_enabled)
- ret = smu_v14_0_setup_pptable(smu);
- else
- ret = smu_v14_0_2_get_pptable_from_pmfw(smu,
+ ret = smu_v14_0_2_get_pptable_from_pmfw(smu,
&smu_table->power_play_table,
&smu_table->power_play_table_size);
if (ret)
@@ -455,16 +403,6 @@ static int smu_v14_0_2_setup_pptable(struct smu_context *smu)
if (ret)
return ret;
- /*
- * With SCPM enabled, the operation below will be handled
- * by PSP. Driver involvment is unnecessary and useless.
- */
- if (!adev->scpm_enabled) {
- ret = smu_v14_0_2_append_powerplay_table(smu);
- if (ret)
- return ret;
- }
-
ret = smu_v14_0_2_check_powerplay_table(smu);
if (ret)
return ret;
@@ -1869,12 +1807,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
return -EINVAL;
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetWorkloadMask,
- 1 << workload_type,
- NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ smu->workload_mask, NULL);
+
if (!ret)
- smu->workload_mask = 1 << workload_type;
+ smu_cmn_assign_power_profile(smu);
return ret;
}
@@ -2799,7 +2736,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.setup_pptable = smu_v14_0_2_setup_pptable,
.check_fw_version = smu_v14_0_check_fw_version,
- .write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v14_0_set_driver_table_location,
.system_features_control = smu_v14_0_system_features_control,
.set_allowed_mask = smu_v14_0_set_allowed_mask,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 91ad434bcdae..bdfc5e617333 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1138,6 +1138,14 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
return ret;
}
+void smu_cmn_assign_power_profile(struct smu_context *smu)
+{
+ uint32_t index;
+ index = fls(smu->workload_mask);
+ index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ smu->power_profile_mode = smu->workload_setting[index];
+}
+
bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
struct pci_dev *p = NULL;
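
The smu_cmn.c hunk above adds smu_cmn_assign_power_profile(), which takes the highest set bit of smu->workload_mask via fls() and uses it to index smu->workload_setting. The stand-alone sketch below mirrors only that index arithmetic; fls() is emulated with a compiler builtin, and the table contents and the EXAMPLE_POLICY_MAX bound are invented stand-ins for the driver's workload_setting[] and WORKLOAD_POLICY_MAX.

/* Sketch of the fls()-based index selection used by
 * smu_cmn_assign_power_profile(); all values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_POLICY_MAX 7			/* assumed table size */

static int example_fls(uint32_t x)		/* 1-based index of highest set bit, 0 if none */
{
	return x ? 32 - __builtin_clz(x) : 0;	/* GCC/Clang builtin stands in for fls() */
}

int main(void)
{
	static const char * const setting[EXAMPLE_POLICY_MAX] = {
		"default", "fullscreen3d", "powersaving", "video",
		"vr", "compute", "custom"
	};
	const uint32_t masks[] = { 0x0, 0x1, 0x6, 0x20 };

	for (unsigned int i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
		int index = example_fls(masks[i]);

		index = index > 0 && index <= EXAMPLE_POLICY_MAX ? index - 1 : 0;
		printf("workload_mask 0x%02x -> setting[%d] = %s\n",
		       (unsigned int)masks[i], index, setting[index]);
	}
	return 0;
}

For a mask like 0x6, the higher of the two set bits (bit 2) wins, so setting[2] is reported; an empty mask falls back to index 0.
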
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 1de685defe85..8a801e389659 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -130,6 +130,8 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
+void smu_cmn_assign_power_profile(struct smu_context *smu);
+
/*
* Helper function to make sysfs_emit_at() happy. Align buf to
* the current page boundary and record the offset.
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 0830cae9a4d0..2d84d7ea1ab7 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -403,7 +403,6 @@ static const struct dmi_system_id orientation_data[] = {
}, { /* Lenovo Yoga Tab 3 X90F */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
.driver_data = (void *)&lcd1600x2560_rightside_up,
diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c
index eded5e955cc0..4cb3494c0bb2 100644
--- a/drivers/gpu/drm/imagination/pvr_context.c
+++ b/drivers/gpu/drm/imagination/pvr_context.c
@@ -17,10 +17,14 @@
#include <drm/drm_auth.h>
#include <drm/drm_managed.h>
+
+#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/xarray.h>
@@ -354,6 +358,10 @@ int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_co
return err;
}
+ spin_lock(&pvr_dev->ctx_list_lock);
+ list_add_tail(&ctx->file_link, &pvr_file->contexts);
+ spin_unlock(&pvr_dev->ctx_list_lock);
+
return 0;
err_destroy_fw_obj:
@@ -380,6 +388,11 @@ pvr_context_release(struct kref *ref_count)
container_of(ref_count, struct pvr_context, ref_count);
struct pvr_device *pvr_dev = ctx->pvr_dev;
+ WARN_ON(in_interrupt());
+ spin_lock(&pvr_dev->ctx_list_lock);
+ list_del(&ctx->file_link);
+ spin_unlock(&pvr_dev->ctx_list_lock);
+
xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
pvr_context_destroy_queues(ctx);
pvr_fw_object_destroy(ctx->fw_obj);
@@ -437,11 +450,30 @@ pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
*/
void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
{
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
struct pvr_context *ctx;
unsigned long handle;
xa_for_each(&pvr_file->ctx_handles, handle, ctx)
pvr_context_destroy(pvr_file, handle);
+
+ spin_lock(&pvr_dev->ctx_list_lock);
+ ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);
+
+ while (!list_entry_is_head(ctx, &pvr_file->contexts, file_link)) {
+ list_del_init(&ctx->file_link);
+
+ if (pvr_context_get_if_referenced(ctx)) {
+ spin_unlock(&pvr_dev->ctx_list_lock);
+
+ pvr_vm_unmap_all(ctx->vm_ctx);
+
+ pvr_context_put(ctx);
+ spin_lock(&pvr_dev->ctx_list_lock);
+ }
+ ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);
+ }
+ spin_unlock(&pvr_dev->ctx_list_lock);
}
/**
@@ -451,6 +483,7 @@ void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
void pvr_context_device_init(struct pvr_device *pvr_dev)
{
xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
+ spin_lock_init(&pvr_dev->ctx_list_lock);
}
/**
diff --git a/drivers/gpu/drm/imagination/pvr_context.h b/drivers/gpu/drm/imagination/pvr_context.h
index 0c7b97dfa6ba..07afa179cdf4 100644
--- a/drivers/gpu/drm/imagination/pvr_context.h
+++ b/drivers/gpu/drm/imagination/pvr_context.h
@@ -85,6 +85,9 @@ struct pvr_context {
/** @compute: Transfer queue. */
struct pvr_queue *transfer;
} queues;
+
+ /** @file_link: pvr_file PVR context list link. */
+ struct list_head file_link;
};
static __always_inline struct pvr_queue *
@@ -124,6 +127,24 @@ pvr_context_get(struct pvr_context *ctx)
}
/**
+ * pvr_context_get_if_referenced() - Take an additional reference on a still
+ * referenced context.
+ * @ctx: Context pointer.
+ *
+ * Call pvr_context_put() to release.
+ *
+ * Returns:
+ * * True on success, or
+ * * false if no context pointer passed, or the context wasn't still
+ * * referenced.
+ */
+static __always_inline bool
+pvr_context_get_if_referenced(struct pvr_context *ctx)
+{
+ return ctx != NULL && kref_get_unless_zero(&ctx->ref_count) != 0;
+}
+
+/**
* pvr_context_lookup() - Lookup context pointer from handle and file.
* @pvr_file: Pointer to pvr_file structure.
* @handle: Context handle.
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index b574e23d484b..6d0dfacb677b 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -293,6 +294,12 @@ struct pvr_device {
/** @sched_wq: Workqueue for schedulers. */
struct workqueue_struct *sched_wq;
+
+ /**
+ * @ctx_list_lock: Lock to be held when accessing the context list in
+ * struct pvr_file.
+ */
+ spinlock_t ctx_list_lock;
};
/**
@@ -344,6 +351,9 @@ struct pvr_file {
* This array is used to allocate handles returned to userspace.
*/
struct xarray vm_ctx_handles;
+
+ /** @contexts: PVR context list. */
+ struct list_head contexts;
};
/**
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index 1a0cb7aa9cea..fb17196e05f4 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -28,6 +28,7 @@
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -1326,6 +1327,8 @@ pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
*/
pvr_file->pvr_dev = pvr_dev;
+ INIT_LIST_HEAD(&pvr_file->contexts);
+
xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1);
xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1);
xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1);
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 97c0f772ed65..7bd6ba4c6e8a 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -14,6 +14,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>
+#include <linux/bug.h>
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -597,12 +598,26 @@ err_free:
}
/**
- * pvr_vm_context_release() - Teardown a VM context.
- * @ref_count: Pointer to reference counter of the VM context.
+ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+ * @vm_ctx: Target VM context.
*
* This function ensures that no mappings are left dangling by unmapping them
* all in order of ascending device-virtual address.
*/
+void
+pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+{
+ WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
+ vm_ctx->gpuvm_mgr.mm_range));
+}
+
+/**
+ * pvr_vm_context_release() - Teardown a VM context.
+ * @ref_count: Pointer to reference counter of the VM context.
+ *
+ * This function also ensures that no mappings are left dangling by calling
+ * pvr_vm_unmap_all.
+ */
static void
pvr_vm_context_release(struct kref *ref_count)
{
@@ -612,8 +627,7 @@ pvr_vm_context_release(struct kref *ref_count)
if (vm_ctx->fw_mem_ctx_obj)
pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);
- WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
- vm_ctx->gpuvm_mgr.mm_range));
+ pvr_vm_unmap_all(vm_ctx);
pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
drm_gem_private_object_fini(&vm_ctx->dummy_gem);
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
index f2a6463f2b05..79406243617c 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.h
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -39,6 +39,7 @@ int pvr_vm_map(struct pvr_vm_context *vm_ctx,
struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
u64 device_addr, u64 size);
int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
+void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx);
dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx);
struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx);
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index 4082c8f2951d..6fbff516c1c1 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -390,11 +390,15 @@ int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *
{
u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+
switch (offset) {
case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
(vma->vm_flags & (VM_WRITE | VM_EXEC)))
return -EINVAL;
+ vm_flags_clear(vma, VM_MAYWRITE);
break;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 5d5e25b1be95..7db2edb3374c 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1580,7 +1580,9 @@ panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
{
struct panthor_vm *vm;
+ xa_lock(&pool->xa);
vm = panthor_vm_get(xa_load(&pool->xa, handle));
+ xa_unlock(&pool->xa);
return vm;
}
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 00ad34ed73a5..bd604b9f08e4 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -517,7 +517,7 @@
* [4-6] RSVD
* [7] Disabled
*/
-#define CCS_MODE XE_REG(0x14804)
+#define CCS_MODE XE_REG(0x14804, XE_REG_OPTION_MASKED)
#define CCS_MODE_CSLICE_0_3_MASK REG_GENMASK(11, 0) /* 3 bits per cslice */
#define CCS_MODE_CSLICE_MASK 0x7 /* CCS0-3 + rsvd */
#define CCS_MODE_CSLICE_WIDTH ilog2(CCS_MODE_CSLICE_MASK + 1)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 10fd4601b9f2..a1987b554a8d 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -87,10 +87,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
mutex_init(&xef->exec_queue.lock);
xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
- spin_lock(&xe->clients.lock);
- xe->clients.count++;
- spin_unlock(&xe->clients.lock);
-
file->driver_priv = xef;
kref_init(&xef->refcount);
@@ -107,17 +103,12 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
static void xe_file_destroy(struct kref *ref)
{
struct xe_file *xef = container_of(ref, struct xe_file, refcount);
- struct xe_device *xe = xef->xe;
xa_destroy(&xef->exec_queue.xa);
mutex_destroy(&xef->exec_queue.lock);
xa_destroy(&xef->vm.xa);
mutex_destroy(&xef->vm.lock);
- spin_lock(&xe->clients.lock);
- xe->clients.count--;
- spin_unlock(&xe->clients.lock);
-
xe_drm_client_put(xef->client);
kfree(xef->process_name);
kfree(xef);
@@ -333,7 +324,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xe->info.force_execlist = xe_modparam.force_execlist;
spin_lock_init(&xe->irq.lock);
- spin_lock_init(&xe->clients.lock);
init_waitqueue_head(&xe->ufence_wq);
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 894f04770454..34620ef855c0 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -178,4 +178,18 @@ void xe_device_declare_wedged(struct xe_device *xe);
struct xe_file *xe_file_get(struct xe_file *xef);
void xe_file_put(struct xe_file *xef);
+/*
+ * Occasionally it is seen that the G2H worker starts running after a delay of more than
+ * a second even after being queued and activated by the Linux workqueue subsystem. This
+ * leads to G2H timeout error. The root cause of issue lies with scheduling latency of
+ * Lunarlake Hybrid CPU. Issue disappears if we disable Lunarlake atom cores from BIOS
+ * and this is beyond xe kmd.
+ *
+ * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
+ */
+#define LNL_FLUSH_WORKQUEUE(wq__) \
+ flush_workqueue(wq__)
+#define LNL_FLUSH_WORK(wrk__) \
+ flush_work(wrk__)
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 09d731a9125c..687f3a9039bb 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -353,15 +353,6 @@ struct xe_device {
struct workqueue_struct *wq;
} sriov;
- /** @clients: drm clients info */
- struct {
- /** @clients.lock: Protects drm clients info */
- spinlock_t lock;
-
- /** @clients.count: number of drm clients */
- u64 count;
- } clients;
-
/** @usm: unified memory state */
struct {
/** @usm.asid: convert a ASID to VM */
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index f23ac1e2ed88..756b492f13b0 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -132,12 +132,16 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
- if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
- return -EINVAL;
+ if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) {
+ err = -EINVAL;
+ goto err_exec_queue;
+ }
if (XE_IOCTL_DBG(xe, args->num_batch_buffer &&
- q->width != args->num_batch_buffer))
- return -EINVAL;
+ q->width != args->num_batch_buffer)) {
+ err = -EINVAL;
+ goto err_exec_queue;
+ }
if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) {
err = -ECANCELED;
@@ -220,6 +224,7 @@ retry:
fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
+ xe_vm_unlock(vm);
goto err_unlock_list;
}
for (i = 0; i < num_syncs; i++)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index d098d2dd1b2d..fd0f3b3c9101 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -260,8 +260,14 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
{
int i;
+ /*
+ * Before releasing our ref to lrc and xef, accumulate our run ticks
+ */
+ xe_exec_queue_update_run_ticks(q);
+
for (i = 0; i < q->width; ++i)
xe_lrc_put(q->lrc[i]);
+
__xe_exec_queue_free(q);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index d2e4dc3aaf61..ffcbd05671fc 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -68,6 +68,12 @@ static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
}
}
+ /*
+ * Mask bits need to be set for the register. Though only Xe2+
+ * platforms require setting of mask bits, it won't harm for older
+ * platforms as these bits are unused there.
+ */
+ mode |= CCS_MODE_CSLICE_0_3_MASK << 16;
xe_mmio_write32(gt, CCS_MODE, mode);
xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
@@ -133,9 +139,10 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
}
/* CCS mode can only be updated when there are no drm clients */
- spin_lock(&xe->clients.lock);
- if (xe->clients.count) {
- spin_unlock(&xe->clients.lock);
+ mutex_lock(&xe->drm.filelist_mutex);
+ if (!list_empty(&xe->drm.filelist)) {
+ mutex_unlock(&xe->drm.filelist_mutex);
+ xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n");
return -EBUSY;
}
@@ -146,7 +153,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
xe_gt_reset_async(gt);
}
- spin_unlock(&xe->clients.lock);
+ mutex_unlock(&xe->drm.filelist_mutex);
return count;
}
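
The CCS_MODE changes above (the xe_gt_regs.h hunk marking the register as masked, and the mode |= CCS_MODE_CSLICE_0_3_MASK << 16 line in xe_gt_ccs_mode.c) follow the usual convention for masked GT registers, where the upper 16 bits act as per-bit write enables for the lower 16 bits. A minimal sketch of that composition, with the field values invented for illustration:

/* Sketch of composing a masked-register write: only the low bits whose
 * enable bit (same position + 16) is set are updated by the hardware.
 * The 12-bit enable mask mirrors CCS_MODE_CSLICE_0_3_MASK; the mode
 * value is an arbitrary example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t masked_reg_write(uint16_t bits, uint16_t write_enable)
{
	return ((uint32_t)write_enable << 16) | bits;
}

int main(void)
{
	const uint16_t cslice_mask = 0x0fff;	/* 3 bits per cslice, 4 cslices */
	const uint16_t mode = 0x0249;		/* example engine-to-cslice assignment */

	printf("CCS_MODE write value: 0x%08x\n",
	       (unsigned int)masked_reg_write(mode, cslice_mask));
	return 0;
}
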
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 8250ef71e685..afdb477ecf83 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -387,6 +387,8 @@ static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
* the xe_ggtt_clear() called by below xe_ggtt_remove_node().
*/
xe_ggtt_node_remove(node, false);
+ } else {
+ xe_ggtt_node_fini(node);
}
}
@@ -442,7 +444,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
config->ggtt_region = node;
return 0;
err:
- xe_ggtt_node_fini(node);
+ pf_release_ggtt(tile, node);
return err;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index bbb9e411d21f..9d82ea30f4df 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -72,6 +72,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
struct xe_device *xe = gt_to_xe(gt);
struct xe_gt_tlb_invalidation_fence *fence, *next;
+ LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);
+
spin_lock_irq(&gt->tlb_invalidation.pending_lock);
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link) {
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 17986bfd8818..9c505d3517cd 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -897,17 +897,8 @@ retry_same_fence:
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
- /*
- * Occasionally it is seen that the G2H worker starts running after a delay of more than
- * a second even after being queued and activated by the Linux workqueue subsystem. This
- * leads to G2H timeout error. The root cause of issue lies with scheduling latency of
- * Lunarlake Hybrid CPU. Issue dissappears if we disable Lunarlake atom cores from BIOS
- * and this is beyond xe kmd.
- *
- * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
- */
if (!ret) {
- flush_work(&ct->g2h_worker);
+ LNL_FLUSH_WORK(&ct->g2h_worker);
if (g2h_fence.done) {
xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
g2h_fence.seqno, action[0]);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index f903b0772722..4f5d00aea716 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -745,8 +745,6 @@ static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
{
struct xe_sched_job *job = to_xe_sched_job(drm_job);
- xe_exec_queue_update_run_ticks(job->q);
-
trace_xe_sched_job_free(job);
xe_sched_job_put(job);
}
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
index f5deb81eba01..5b4264ea38bd 100644
--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
@@ -155,6 +155,13 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
}
if (!timeout) {
+ LNL_FLUSH_WORKQUEUE(xe->ordered_wq);
+ err = do_compare(addr, args->value, args->mask,
+ args->op);
+ if (err <= 0) {
+ drm_dbg(&xe->drm, "LNL_FLUSH_WORKQUEUE resolved ufence timeout\n");
+ break;
+ }
err = -ETIME;
break;
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 612ee6ddfc8d..582fd234eec7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1875,7 +1875,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
u32 len = hid_report_len(report) + 7;
- return kmalloc(len, flags);
+ return kzalloc(len, flags);
}
EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 08a3c863f80a..1e3a1f4d5c5e 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1752,7 +1752,7 @@ source "drivers/hwmon/occ/Kconfig"
config SENSORS_OXP
tristate "OneXPlayer EC fan control"
- depends on ACPI
+ depends on ACPI_EC
depends on X86
help
If you say yes here you get support for fan readings and control over
@@ -2592,6 +2592,7 @@ config SENSORS_ASUS_WMI
config SENSORS_ASUS_EC
tristate "ASUS EC Sensors"
depends on X86
+ depends on ACPI_EC
help
If you say yes here you get support for the ACPI embedded controller
hardware monitoring interface found in ASUS motherboards. The driver
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index f31d352d98b5..9d88b4fa03e4 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -524,7 +524,7 @@ err_release_lock:
void __i2c_dw_disable(struct dw_i2c_dev *dev)
{
struct i2c_timings *t = &dev->timings;
- unsigned int raw_intr_stats;
+ unsigned int raw_intr_stats, ic_stats;
unsigned int enable;
int timeout = 100;
bool abort_needed;
@@ -532,9 +532,11 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev)
int ret;
regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_intr_stats);
+ regmap_read(dev->map, DW_IC_STATUS, &ic_stats);
regmap_read(dev->map, DW_IC_ENABLE, &enable);
- abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD;
+ abort_needed = (raw_intr_stats & DW_IC_INTR_MST_ON_HOLD) ||
+ (ic_stats & DW_IC_STATUS_MASTER_HOLD_TX_FIFO_EMPTY);
if (abort_needed) {
if (!(enable & DW_IC_ENABLE_ENABLE)) {
regmap_write(dev->map, DW_IC_ENABLE, DW_IC_ENABLE_ENABLE);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 8e8854ec9882..2d32896d0673 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -116,6 +116,7 @@
#define DW_IC_STATUS_RFNE BIT(3)
#define DW_IC_STATUS_MASTER_ACTIVITY BIT(5)
#define DW_IC_STATUS_SLAVE_ACTIVITY BIT(6)
+#define DW_IC_STATUS_MASTER_HOLD_TX_FIFO_EMPTY BIT(7)
#define DW_IC_SDA_HOLD_RX_SHIFT 16
#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, 16)
diff --git a/drivers/i2c/muxes/i2c-mux-mule.c b/drivers/i2c/muxes/i2c-mux-mule.c
index 8e942470b35f..284ff4afeeac 100644
--- a/drivers/i2c/muxes/i2c-mux-mule.c
+++ b/drivers/i2c/muxes/i2c-mux-mule.c
@@ -66,8 +66,8 @@ static int mule_i2c_mux_probe(struct platform_device *pdev)
priv = i2c_mux_priv(muxc);
priv->regmap = dev_get_regmap(mux_dev->parent, NULL);
- if (IS_ERR(priv->regmap))
- return dev_err_probe(mux_dev, PTR_ERR(priv->regmap),
+ if (!priv->regmap)
+ return dev_err_probe(mux_dev, -ENODEV,
"No parent i2c register map\n");
platform_set_drvdata(pdev, muxc);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index ce87205e3e82..8b6159f4cdaf 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -524,6 +524,13 @@ static int gic_irq_set_irqchip_state(struct irq_data *d,
}
gic_poke_irq(d, reg);
+
+ /*
+ * Force read-back to guarantee that the active state has taken
+ * effect, and won't race with a guest-driven deactivation.
+ */
+ if (reg == GICD_ISACTIVER)
+ gic_peek_irq(d, reg);
return 0;
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index aaeeabfab09b..40709310e327 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1905,16 +1905,13 @@ static void check_migrations(struct work_struct *ws)
* This function gets called on the error paths of the constructor, so we
* have to cope with a partially initialised struct.
*/
-static void destroy(struct cache *cache)
+static void __destroy(struct cache *cache)
{
- unsigned int i;
-
mempool_exit(&cache->migration_pool);
if (cache->prison)
dm_bio_prison_destroy_v2(cache->prison);
- cancel_delayed_work_sync(&cache->waker);
if (cache->wq)
destroy_workqueue(cache->wq);
@@ -1942,13 +1939,22 @@ static void destroy(struct cache *cache)
if (cache->policy)
dm_cache_policy_destroy(cache->policy);
+ bioset_exit(&cache->bs);
+
+ kfree(cache);
+}
+
+static void destroy(struct cache *cache)
+{
+ unsigned int i;
+
+ cancel_delayed_work_sync(&cache->waker);
+
for (i = 0; i < cache->nr_ctr_args ; i++)
kfree(cache->ctr_args[i]);
kfree(cache->ctr_args);
- bioset_exit(&cache->bs);
-
- kfree(cache);
+ __destroy(cache);
}
static void cache_dtr(struct dm_target *ti)
@@ -2003,7 +2009,6 @@ struct cache_args {
sector_t cache_sectors;
struct dm_dev *origin_dev;
- sector_t origin_sectors;
uint32_t block_size;
@@ -2084,6 +2089,7 @@ static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{
+ sector_t origin_sectors;
int r;
if (!at_least_one_arg(as, error))
@@ -2096,8 +2102,8 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
return r;
}
- ca->origin_sectors = get_dev_size(ca->origin_dev);
- if (ca->ti->len > ca->origin_sectors) {
+ origin_sectors = get_dev_size(ca->origin_dev);
+ if (ca->ti->len > origin_sectors) {
*error = "Device size larger than cached device";
return -EINVAL;
}
@@ -2407,7 +2413,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
- origin_blocks = cache->origin_sectors = ca->origin_sectors;
+ origin_blocks = cache->origin_sectors = ti->len;
origin_blocks = block_div(origin_blocks, ca->block_size);
cache->origin_blocks = to_oblock(origin_blocks);
@@ -2561,7 +2567,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
*result = cache;
return 0;
bad:
- destroy(cache);
+ __destroy(cache);
return r;
}
@@ -2612,7 +2618,7 @@ static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
if (r) {
- destroy(cache);
+ __destroy(cache);
goto out;
}
@@ -2895,19 +2901,19 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
- if (cache->sized) {
- DMERR("%s: unable to extend cache due to missing cache table reload",
- cache_device_name(cache));
- return false;
- }
+ DMERR("%s: unable to extend cache due to missing cache table reload",
+ cache_device_name(cache));
+ return false;
}
/*
* We can't drop a dirty block when shrinking the cache.
*/
- while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
- new_size = to_cblock(from_cblock(new_size) + 1);
- if (is_dirty(cache, new_size)) {
+ if (cache->loaded_mappings) {
+ new_size = to_cblock(find_next_bit(cache->dirty_bitset,
+ from_cblock(cache->cache_size),
+ from_cblock(new_size)));
+ if (new_size != cache->cache_size) {
DMERR("%s: unable to shrink cache; cache block %llu is dirty",
cache_device_name(cache),
(unsigned long long) from_cblock(new_size));
@@ -2943,20 +2949,15 @@ static int cache_preresume(struct dm_target *ti)
/*
* Check to see if the cache has resized.
*/
- if (!cache->sized) {
- r = resize_cache_dev(cache, csize);
- if (r)
- return r;
-
- cache->sized = true;
-
- } else if (csize != cache->cache_size) {
+ if (!cache->sized || csize != cache->cache_size) {
if (!can_resize(cache, csize))
return -EINVAL;
r = resize_cache_dev(cache, csize);
if (r)
return r;
+
+ cache->sized = true;
}
if (!cache->loaded_mappings) {
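
The reworked can_resize() above replaces the old one-block-at-a-time loop with a single find_next_bit() scan over the dirty bitset, refusing to shrink if any dirty block lies at or beyond the new size. A simplified stand-alone version of that check, using a plain bool array in place of the kernel bitset helpers:

/* Sketch of the shrink check: find the first dirty block at or after the
 * proposed new size; if one exists, the resize is refused. Sizes and the
 * dirty block position are example values.
 */
#include <stdbool.h>
#include <stdio.h>

#define CACHE_BLOCKS 64

/* First set bit in [from, size), or size if none (like find_next_bit). */
static unsigned int find_next_set(const bool *bits, unsigned int size, unsigned int from)
{
	for (unsigned int i = from; i < size; i++)
		if (bits[i])
			return i;
	return size;
}

static bool can_shrink(const bool *dirty, unsigned int cache_size, unsigned int new_size)
{
	unsigned int first_dirty = find_next_set(dirty, cache_size, new_size);

	if (first_dirty != cache_size) {
		printf("cannot shrink: block %u is dirty\n", first_dirty);
		return false;
	}
	return true;
}

int main(void)
{
	bool dirty[CACHE_BLOCKS] = { false };

	dirty[40] = true;
	printf("shrink to 48: %s\n", can_shrink(dirty, CACHE_BLOCKS, 48) ? "ok" : "refused");
	printf("shrink to 32: %s\n", can_shrink(dirty, CACHE_BLOCKS, 32) ? "ok" : "refused");
	return 0;
}
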
diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
index 48587c16c445..e8a9432057dc 100644
--- a/drivers/md/dm-unstripe.c
+++ b/drivers/md/dm-unstripe.c
@@ -85,8 +85,8 @@ static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
uc->physical_start = start;
- uc->unstripe_offset = uc->unstripe * uc->chunk_size;
- uc->unstripe_width = (uc->stripes - 1) * uc->chunk_size;
+ uc->unstripe_offset = (sector_t)uc->unstripe * uc->chunk_size;
+ uc->unstripe_width = (sector_t)(uc->stripes - 1) * uc->chunk_size;
uc->chunk_shift = is_power_of_2(uc->chunk_size) ? fls(uc->chunk_size) - 1 : 0;
tmp_len = ti->len;
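
The (sector_t) casts added above matter because the operands are 32-bit values while the destination fields are sector_t: without widening one operand first, the multiplication is performed in 32 bits and silently wraps for large chunk sizes. A stand-alone demonstration with deliberately large example values:

/* Demonstration of the wraparound the (sector_t) cast prevents: both
 * operands are 32-bit, so the product is truncated unless one operand
 * is widened first. The numbers are arbitrary, chosen to overflow.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t chunk_size = 1U << 21;	/* sectors per chunk (example) */
	const uint32_t stripes = 4096;		/* stripe count (example) */

	uint64_t wrapped = (stripes - 1) * chunk_size;			/* 32-bit multiply */
	uint64_t widened = (uint64_t)(stripes - 1) * chunk_size;	/* 64-bit multiply */

	printf("32-bit product: %llu sectors\n", (unsigned long long)wrapped);
	printf("64-bit product: %llu sectors\n", (unsigned long long)widened);
	return 0;
}
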
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 7d4d90b4395a..c142ec5458b7 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -356,9 +356,9 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
else if (verity_handle_err(v,
DM_VERITY_BLOCK_TYPE_METADATA,
hash_block)) {
- struct bio *bio =
- dm_bio_from_per_bio_data(io,
- v->ti->per_io_data_size);
+ struct bio *bio;
+ io->had_mismatch = true;
+ bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
dm_audit_log_bio(DM_MSG_PREFIX, "verify-metadata", bio,
block, 0);
r = -EIO;
@@ -482,6 +482,7 @@ static int verity_handle_data_hash_mismatch(struct dm_verity *v,
return -EIO; /* Error correction failed; Just return error */
if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA, blkno)) {
+ io->had_mismatch = true;
dm_audit_log_bio(DM_MSG_PREFIX, "verify-data", bio, blkno, 0);
return -EIO;
}
@@ -606,6 +607,7 @@ static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
if (unlikely(status != BLK_STS_OK) &&
unlikely(!(bio->bi_opf & REQ_RAHEAD)) &&
+ !io->had_mismatch &&
!verity_is_system_shutting_down()) {
if (v->error_mode == DM_VERITY_MODE_PANIC) {
panic("dm-verity device has I/O error");
@@ -779,6 +781,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
io->orig_bi_end_io = bio->bi_end_io;
io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
+ io->had_mismatch = false;
bio->bi_end_io = verity_end_io;
bio->bi_private = io;
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 6b75159bf835..c996140bda94 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -92,6 +92,7 @@ struct dm_verity_io {
sector_t block;
unsigned int n_blocks;
bool in_bh;
+ bool had_mismatch;
struct work_struct work;
struct work_struct bh_work;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ff4a6b570b76..19230404d8c2 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2290,8 +2290,10 @@ static struct mapped_device *alloc_dev(int minor)
* override accordingly.
*/
md->disk = blk_alloc_disk(NULL, md->numa_node_id);
- if (IS_ERR(md->disk))
+ if (IS_ERR(md->disk)) {
+ md->disk = NULL;
goto bad;
+ }
md->queue = md->disk->queue;
init_waitqueue_head(&md->wait);
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
index 8526f613a40e..cfbfc4c1b2e6 100644
--- a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
@@ -348,12 +348,12 @@ static int get_edid_tag_location(const u8 *edid, unsigned int size,
/* Return if not a CTA-861 extension block */
if (size < 256 || edid[0] != 0x02 || edid[1] != 0x03)
- return -1;
+ return -ENOENT;
/* search tag */
d = edid[0x02] & 0x7f;
if (d <= 4)
- return -1;
+ return -ENOENT;
i = 0x04;
end = 0x00 + d;
@@ -371,7 +371,7 @@ static int get_edid_tag_location(const u8 *edid, unsigned int size,
return offset + i;
i += len + 1;
} while (i < end);
- return -1;
+ return -ENOENT;
}
static void extron_edid_crc(u8 *edid)
diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c
index ba67587bd43e..171366fe3544 100644
--- a/drivers/media/cec/usb/pulse8/pulse8-cec.c
+++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
@@ -685,7 +685,7 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 4);
if (err)
return err;
- date = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3];
+ date = ((unsigned)data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3];
dev_info(pulse8->dev, "Firmware build date %ptT\n", &date);
dev_dbg(pulse8->dev, "Persistent config:\n");
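
The (unsigned) cast added above keeps the firmware build date positive: without it, data[0] << 24 is evaluated as a signed int, so a top byte of 0x80 or more yields a negative value that sign-extends when stored in the 64-bit date. A small demonstration with made-up timestamp bytes:

/* Demonstration of the sign-extension the (unsigned) cast avoids when a
 * 32-bit big-endian value is assembled into a 64-bit variable. The bytes
 * are invented.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t data[4] = { 0xe0, 0x12, 0x34, 0x56 };
	uint32_t word = ((uint32_t)data[0] << 24) | (data[1] << 16) |
			(data[2] << 8) | data[3];

	int64_t via_signed_int = (int32_t)word;	/* what a signed 32-bit intermediate yields */
	int64_t via_unsigned = word;		/* what the patched code computes */

	printf("through signed int: %lld\n", (long long)via_signed_int);
	printf("through unsigned:   %lld\n", (long long)via_unsigned);
	return 0;
}
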
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 642c48e8c1f5..ded11cd8dbf7 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -1795,6 +1795,9 @@ static void tpg_precalculate_line(struct tpg_data *tpg)
unsigned p;
unsigned x;
+ if (WARN_ON_ONCE(!tpg->src_width || !tpg->scaled_width))
+ return;
+
switch (tpg->pattern) {
case TPG_PAT_GREEN:
contrast = TPG_COLOR_100_RED;
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 29a8d876e6c2..b0523fc23506 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -1482,18 +1482,23 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
}
vb->planes[plane].dbuf_mapped = 1;
}
+ } else {
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ dma_buf_put(planes[plane].dbuf);
+ }
- /*
- * Now that everything is in order, copy relevant information
- * provided by userspace.
- */
- for (plane = 0; plane < vb->num_planes; ++plane) {
- vb->planes[plane].bytesused = planes[plane].bytesused;
- vb->planes[plane].length = planes[plane].length;
- vb->planes[plane].m.fd = planes[plane].m.fd;
- vb->planes[plane].data_offset = planes[plane].data_offset;
- }
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ vb->planes[plane].bytesused = planes[plane].bytesused;
+ vb->planes[plane].length = planes[plane].length;
+ vb->planes[plane].m.fd = planes[plane].m.fd;
+ vb->planes[plane].data_offset = planes[plane].data_offset;
+ }
+ if (reacquired) {
/*
* Call driver-specific initialization on the newly acquired buffer,
* if provided.
@@ -1503,9 +1508,6 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
dprintk(q, 1, "buffer initialization failed\n");
goto err_put_vb2_buf;
}
- } else {
- for (plane = 0; plane < vb->num_planes; ++plane)
- dma_buf_put(planes[plane].dbuf);
}
ret = call_vb_qop(vb, buf_prepare, vb);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 4f78f30b3646..a05aa271a1ba 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -443,8 +443,8 @@ static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wra
default:
fepriv->auto_step++;
- fepriv->auto_sub_step = -1; /* it'll be incremented to 0 in a moment */
- break;
+ fepriv->auto_sub_step = 0;
+ continue;
}
if (!ready) fepriv->auto_sub_step++;
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index 192a8230c4aa..29edaaff7a5c 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -366,9 +366,15 @@ int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp)
{
struct vb2_queue *q = &ctx->vb_q;
+ struct vb2_buffer *vb2 = vb2_get_buffer(q, exp->index);
int ret;
- ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type, q->bufs[exp->index],
+ if (!vb2) {
+ dprintk(1, "[%s] invalid buffer index\n", ctx->name);
+ return -EINVAL;
+ }
+
+ ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type, vb2,
0, exp->flags);
if (ret) {
dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index b43695bc51e7..9df7c213716a 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -86,10 +86,15 @@ static DECLARE_RWSEM(minor_rwsem);
static int dvb_device_open(struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev;
+ unsigned int minor = iminor(inode);
+
+ if (minor >= MAX_DVB_MINORS)
+ return -ENODEV;
mutex_lock(&dvbdev_mutex);
down_read(&minor_rwsem);
- dvbdev = dvb_minors[iminor(inode)];
+
+ dvbdev = dvb_minors[minor];
if (dvbdev && dvbdev->fops) {
int err = 0;
@@ -525,7 +530,10 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
for (minor = 0; minor < MAX_DVB_MINORS; minor++)
if (!dvb_minors[minor])
break;
- if (minor == MAX_DVB_MINORS) {
+#else
+ minor = nums2minor(adap->num, type, id);
+#endif
+ if (minor >= MAX_DVB_MINORS) {
if (new_node) {
list_del(&new_node->list_head);
kfree(dvbdevfops);
@@ -538,9 +546,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
mutex_unlock(&dvbdev_register_lock);
return -EINVAL;
}
-#else
- minor = nums2minor(adap->num, type, id);
-#endif
+
dvbdev->minor = minor;
dvb_minors[minor] = dvb_device_get(dvbdev);
up_write(&minor_rwsem);
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index 8b978a9f74a4..f5dd3a81725a 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -741,6 +741,7 @@ static int cx24116_read_snr_pct(struct dvb_frontend *fe, u16 *snr)
{
struct cx24116_state *state = fe->demodulator_priv;
u8 snr_reading;
+ int ret;
static const u32 snr_tab[] = { /* 10 x Table (rounded up) */
0x00000, 0x0199A, 0x03333, 0x04ccD, 0x06667,
0x08000, 0x0999A, 0x0b333, 0x0cccD, 0x0e667,
@@ -749,7 +750,11 @@ static int cx24116_read_snr_pct(struct dvb_frontend *fe, u16 *snr)
dprintk("%s()\n", __func__);
- snr_reading = cx24116_readreg(state, CX24116_REG_QUALITY0);
+ ret = cx24116_readreg(state, CX24116_REG_QUALITY0);
+ if (ret < 0)
+ return ret;
+
+ snr_reading = ret;
if (snr_reading >= 0xa0 /* 100% */)
*snr = 0xffff;
diff --git a/drivers/media/dvb-frontends/stb0899_algo.c b/drivers/media/dvb-frontends/stb0899_algo.c
index df89c33dac23..40537c4ccb0d 100644
--- a/drivers/media/dvb-frontends/stb0899_algo.c
+++ b/drivers/media/dvb-frontends/stb0899_algo.c
@@ -269,7 +269,7 @@ static enum stb0899_status stb0899_search_carrier(struct stb0899_state *state)
short int derot_freq = 0, last_derot_freq = 0, derot_limit, next_loop = 3;
int index = 0;
- u8 cfr[2];
+ u8 cfr[2] = {0};
u8 reg;
internal->status = NOCARRIER;
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 48230d5109f0..272945a878b3 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2519,10 +2519,10 @@ static int adv76xx_log_status(struct v4l2_subdev *sd)
const struct adv76xx_chip_info *info = state->info;
struct v4l2_dv_timings timings;
struct stdi_readback stdi;
- u8 reg_io_0x02 = io_read(sd, 0x02);
+ int ret;
+ u8 reg_io_0x02;
u8 edid_enabled;
u8 cable_det;
-
static const char * const csc_coeff_sel_rb[16] = {
"bypassed", "YPbPr601 -> RGB", "reserved", "YPbPr709 -> RGB",
"reserved", "RGB -> YPbPr601", "reserved", "RGB -> YPbPr709",
@@ -2621,13 +2621,21 @@ static int adv76xx_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "-----Color space-----\n");
v4l2_info(sd, "RGB quantization range ctrl: %s\n",
rgb_quantization_range_txt[state->rgb_quantization_range]);
- v4l2_info(sd, "Input color space: %s\n",
- input_color_space_txt[reg_io_0x02 >> 4]);
- v4l2_info(sd, "Output color space: %s %s, alt-gamma %s\n",
- (reg_io_0x02 & 0x02) ? "RGB" : "YCbCr",
- (((reg_io_0x02 >> 2) & 0x01) ^ (reg_io_0x02 & 0x01)) ?
- "(16-235)" : "(0-255)",
- (reg_io_0x02 & 0x08) ? "enabled" : "disabled");
+
+ ret = io_read(sd, 0x02);
+ if (ret < 0) {
+ v4l2_info(sd, "Can't read Input/Output color space\n");
+ } else {
+ reg_io_0x02 = ret;
+
+ v4l2_info(sd, "Input color space: %s\n",
+ input_color_space_txt[reg_io_0x02 >> 4]);
+ v4l2_info(sd, "Output color space: %s %s, alt-gamma %s\n",
+ (reg_io_0x02 & 0x02) ? "RGB" : "YCbCr",
+ (((reg_io_0x02 >> 2) & 0x01) ^ (reg_io_0x02 & 0x01)) ?
+ "(16-235)" : "(0-255)",
+ (reg_io_0x02 & 0x08) ? "enabled" : "disabled");
+ }
v4l2_info(sd, "Color space conversion: %s\n",
csc_coeff_sel_rb[cp_read(sd, info->cp_csc) >> 4]);
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
index fc27238dd4d3..24873149096c 100644
--- a/drivers/media/i2c/ar0521.c
+++ b/drivers/media/i2c/ar0521.c
@@ -255,10 +255,10 @@ static u32 calc_pll(struct ar0521_dev *sensor, u32 freq, u16 *pre_ptr, u16 *mult
continue; /* Minimum value */
if (new_mult > 254)
break; /* Maximum, larger pre won't work either */
- if (sensor->extclk_freq * (u64)new_mult < AR0521_PLL_MIN *
+ if (sensor->extclk_freq * (u64)new_mult < (u64)AR0521_PLL_MIN *
new_pre)
continue;
- if (sensor->extclk_freq * (u64)new_mult > AR0521_PLL_MAX *
+ if (sensor->extclk_freq * (u64)new_mult > (u64)AR0521_PLL_MAX *
new_pre)
break; /* Larger pre won't work either */
new_pll = div64_round_up(sensor->extclk_freq * (u64)new_mult,
diff --git a/drivers/media/pci/mgb4/mgb4_cmt.c b/drivers/media/pci/mgb4/mgb4_cmt.c
index 70dc78ef193c..a25b68403bc6 100644
--- a/drivers/media/pci/mgb4/mgb4_cmt.c
+++ b/drivers/media/pci/mgb4/mgb4_cmt.c
@@ -227,6 +227,8 @@ void mgb4_cmt_set_vin_freq_range(struct mgb4_vin_dev *vindev,
u32 config;
size_t i;
+ freq_range = array_index_nospec(freq_range, ARRAY_SIZE(cmt_vals_in));
+
addr = cmt_addrs_in[vindev->config->id];
reg_set = cmt_vals_in[freq_range];
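
The array_index_nospec() call added above clamps a caller-provided index to the table size in a way the CPU cannot speculate past. Ignoring the speculation barrier, its visible behaviour is that an out-of-range index becomes 0, as the sketch below illustrates with an invented table:

/* Sketch of the bounds clamp performed by array_index_nospec(): a valid
 * index passes through, an out-of-range one becomes 0. The real helper
 * computes this branchlessly so a mispredicted bounds check cannot be
 * used to read beyond the table.
 */
#include <stddef.h>
#include <stdio.h>

static size_t clamp_index_nospec(size_t index, size_t size)
{
	return index < size ? index : 0;
}

int main(void)
{
	const unsigned int cmt_vals[4] = { 0x11, 0x22, 0x33, 0x44 };
	const size_t requests[] = { 2, 9 };	/* 9 is out of range */

	for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		size_t idx = clamp_index_nospec(requests[i], 4);

		printf("freq_range %zu -> entry 0x%x\n", requests[i], cmt_vals[idx]);
	}
	return 0;
}
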
diff --git a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
index d2c4a0178b3c..1db4609b3557 100644
--- a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
@@ -775,11 +775,14 @@ static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
(unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2;
jpeg_buffer.curr = 0;
- word = 0;
-
if (get_word_be(&jpeg_buffer, &word))
return;
- jpeg_buffer.size = (long)word - 2;
+
+ if (word < 2)
+ jpeg_buffer.size = 0;
+ else
+ jpeg_buffer.size = (long)word - 2;
+
jpeg_buffer.data += 2;
jpeg_buffer.curr = 0;
@@ -1058,6 +1061,7 @@ static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word)
if (byte == -1)
return -1;
*word = (unsigned int)byte | temp;
+
return 0;
}
@@ -1145,7 +1149,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
- if (!length)
+ if (length <= 0)
return false;
sof = jpeg_buffer.curr; /* after 0xffc0 */
sof_len = length;
@@ -1176,7 +1180,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
- if (!length)
+ if (length <= 0)
return false;
if (n_dqt >= S5P_JPEG_MAX_MARKER)
return false;
@@ -1189,7 +1193,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
- if (!length)
+ if (length <= 0)
return false;
if (n_dht >= S5P_JPEG_MAX_MARKER)
return false;
@@ -1214,6 +1218,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
+ /* No need to check underflows as skip() does it */
skip(&jpeg_buffer, length);
break;
}
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index 00e0d08af357..4f330f4fc6be 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -910,7 +910,7 @@ static int vivid_create_queue(struct vivid_dev *dev,
* videobuf2-core.c to MAX_BUFFER_INDEX.
*/
if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- q->max_num_buffers = 64;
+ q->max_num_buffers = MAX_VID_CAP_BUFFERS;
if (buf_type == V4L2_BUF_TYPE_SDR_CAPTURE)
q->max_num_buffers = 1024;
if (buf_type == V4L2_BUF_TYPE_VBI_CAPTURE)
diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h
index cc18a3bc6dc0..d2d52763b119 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.h
+++ b/drivers/media/test-drivers/vivid/vivid-core.h
@@ -26,6 +26,8 @@
#define MAX_INPUTS 16
/* The maximum number of outputs */
#define MAX_OUTPUTS 16
+/* The maximum number of video capture buffers */
+#define MAX_VID_CAP_BUFFERS 64
/* The maximum up or down scaling factor is 4 */
#define MAX_ZOOM 4
/* The maximum image width/height are set to 4K DMT */
@@ -481,7 +483,7 @@ struct vivid_dev {
/* video capture */
struct tpg_data tpg;
unsigned ms_vid_cap;
- bool must_blank[VIDEO_MAX_FRAME];
+ bool must_blank[MAX_VID_CAP_BUFFERS];
const struct vivid_fmt *fmt_cap;
struct v4l2_fract timeperframe_vid_cap;
diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c
index 8bb38bc7b8cc..2b5c8fbcd0a2 100644
--- a/drivers/media/test-drivers/vivid/vivid-ctrls.c
+++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c
@@ -553,7 +553,7 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
break;
case VIVID_CID_PERCENTAGE_FILL:
tpg_s_perc_fill(&dev->tpg, ctrl->val);
- for (i = 0; i < VIDEO_MAX_FRAME; i++)
+ for (i = 0; i < MAX_VID_CAP_BUFFERS; i++)
dev->must_blank[i] = ctrl->val < 100;
break;
case VIVID_CID_INSERT_SAV:
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index 69620e0a35a0..6a790ac8cbe6 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -213,7 +213,7 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
dev->vid_cap_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
- for (i = 0; i < VIDEO_MAX_FRAME; i++)
+ for (i = 0; i < MAX_VID_CAP_BUFFERS; i++)
dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
if (dev->start_streaming_error) {
dev->start_streaming_error = false;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
index e5a364efd5e6..95a2202879d8 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -753,9 +753,10 @@ static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
for (i = 0; i < master->ncontrols; i++)
cur_to_new(master->cluster[i]);
ret = call_op(master, g_volatile_ctrl);
- new_to_user(c, ctrl);
+ if (!ret)
+ ret = new_to_user(c, ctrl);
} else {
- cur_to_user(c, ctrl);
+ ret = cur_to_user(c, ctrl);
}
v4l2_ctrl_unlock(master);
return ret;
@@ -770,7 +771,10 @@ int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
if (!ctrl || !ctrl->is_int)
return -EINVAL;
ret = get_ctrl(ctrl, &c);
- control->value = c.value;
+
+ if (!ret)
+ control->value = c.value;
+
return ret;
}
EXPORT_SYMBOL(v4l2_g_ctrl);
@@ -811,10 +815,11 @@ static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
int ret;
v4l2_ctrl_lock(ctrl);
- user_to_new(c, ctrl);
- ret = set_ctrl(fh, ctrl, 0);
+ ret = user_to_new(c, ctrl);
+ if (!ret)
+ ret = set_ctrl(fh, ctrl, 0);
if (!ret)
- cur_to_user(c, ctrl);
+ ret = cur_to_user(c, ctrl);
v4l2_ctrl_unlock(ctrl);
return ret;
}
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 0f81586a19df..68ce4920e01e 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -892,28 +892,40 @@ static void gl9767_disable_ssc_pll(struct pci_dev *pdev)
gl9767_vhs_read(pdev);
}
+static void gl9767_set_low_power_negotiation(struct pci_dev *pdev, bool enable)
+{
+ u32 value;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
+ if (enable)
+ value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ else
+ value |= PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
+
+ gl9767_vhs_read(pdev);
+}
+
static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct mmc_ios *ios = &host->mmc->ios;
struct pci_dev *pdev;
- u32 value;
u16 clk;
pdev = slot->chip->pdev;
host->mmc->actual_clock = 0;
- gl9767_vhs_write(pdev);
-
- pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
- value |= PCIE_GLI_9767_CFG_LOW_PWR_OFF;
- pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
-
+ gl9767_set_low_power_negotiation(pdev, false);
gl9767_disable_ssc_pll(pdev);
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
- if (clock == 0)
+ if (clock == 0) {
+ gl9767_set_low_power_negotiation(pdev, true);
return;
+ }
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
@@ -922,12 +934,7 @@ static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
}
sdhci_enable_clk(host, clk);
-
- pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
- value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF;
- pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
-
- gl9767_vhs_read(pdev);
+ gl9767_set_low_power_negotiation(pdev, true);
}
static void gli_set_9767(struct sdhci_host *host)
@@ -1061,6 +1068,9 @@ static int gl9767_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
sdhci_writew(host, value, SDHCI_CLOCK_CONTROL);
}
+ pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
+ value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
gl9767_vhs_read(pdev);
return 0;
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index c63f7fc1e691..511615dc3341 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -1011,7 +1011,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
/* common for all type of bus errors */
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
@@ -1027,26 +1026,32 @@ static int c_can_handle_bus_err(struct net_device *dev,
case LEC_STUFF_ERROR:
netdev_dbg(dev, "stuff error\n");
cf->data[2] |= CAN_ERR_PROT_STUFF;
+ stats->rx_errors++;
break;
case LEC_FORM_ERROR:
netdev_dbg(dev, "form error\n");
cf->data[2] |= CAN_ERR_PROT_FORM;
+ stats->rx_errors++;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ stats->tx_errors++;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
cf->data[2] |= CAN_ERR_PROT_BIT1;
+ stats->tx_errors++;
break;
case LEC_BIT0_ERROR:
netdev_dbg(dev, "bit0 error\n");
cf->data[2] |= CAN_ERR_PROT_BIT0;
+ stats->tx_errors++;
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ stats->rx_errors++;
break;
default:
break;
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
index 467ef19de1c1..aae25c2f849e 100644
--- a/drivers/net/can/cc770/Kconfig
+++ b/drivers/net/can/cc770/Kconfig
@@ -7,7 +7,7 @@ if CAN_CC770
config CAN_CC770_ISA
tristate "ISA Bus based legacy CC770 driver"
- depends on ISA
+ depends on HAS_IOPORT
help
This driver adds legacy support for CC770 and AN82527 chips
connected to the ISA bus using I/O port, memory mapped or
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index a978b960f1f1..16e9e7d7527d 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1765,7 +1765,8 @@ static int m_can_close(struct net_device *dev)
netif_stop_queue(dev);
m_can_stop(dev);
- free_irq(dev->irq, dev);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
m_can_clean(dev);
diff --git a/drivers/net/can/rockchip/Kconfig b/drivers/net/can/rockchip/Kconfig
index e029e2a3ca4b..d203c530551f 100644
--- a/drivers/net/can/rockchip/Kconfig
+++ b/drivers/net/can/rockchip/Kconfig
@@ -2,7 +2,8 @@
config CAN_ROCKCHIP_CANFD
tristate "Rockchip CAN-FD controller"
- depends on OF || COMPILE_TEST
+ depends on OF
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
select CAN_RX_OFFLOAD
help
Say Y here if you want to use CAN-FD controller found on
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 01168db4c106..2f516cc6d22c 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -87,7 +87,7 @@ config CAN_PLX_PCI
config CAN_SJA1000_ISA
tristate "ISA Bus based legacy SJA1000 driver"
- depends on ISA
+ depends on HAS_IOPORT
help
This driver adds legacy support for SJA1000 chips connected to
the ISA bus using I/O port, memory mapped or indirect access.
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index e684991fa391..7209a831f0f2 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2024 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -483,9 +483,11 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
};
const struct ethtool_coalesce ec = {
.rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
- .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq,
+ .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq == 0 ?
+ 1 : priv->rx_obj_num_coalesce_irq,
.tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
- .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq,
+ .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq == 0 ?
+ 1 : priv->tx_obj_num_coalesce_irq,
};
struct can_ram_layout layout;
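
The mcp251xfd-ring.c change substitutes 1 when the configured number of coalesced frames is 0 (coalescing off), so the value handed to the coalescing setup is never zero. A tiny sketch of the clamp, with an invented helper name:

#include <stdio.h>

/* Report at least 1 frame per IRQ when coalescing is disabled (0). */
static unsigned int coalesced_frames_for_setup(unsigned int obj_num_coalesce)
{
        return obj_num_coalesce == 0 ? 1 : obj_num_coalesce;
}

int main(void)
{
        printf("%u %u\n", coalesced_frames_for_setup(0),
               coalesced_frames_for_setup(8));  /* prints "1 8" */
        return 0;
}
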
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
index f732556d233a..d3ac865933fd 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -16,9 +16,9 @@
#include "mcp251xfd.h"
-static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta)
+static inline bool mcp251xfd_tx_fifo_sta_empty(u32 fifo_sta)
{
- return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
}
static inline int
@@ -122,7 +122,11 @@ mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
if (err)
return err;
- if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) {
+ /* If the chip says the TX-FIFO is empty, but there are no TX
+ * buffers free in the ring, we assume all have been sent.
+ */
+ if (mcp251xfd_tx_fifo_sta_empty(fifo_sta) &&
+ mcp251xfd_get_tx_free(tx_ring) == 0) {
*len_p = tx_ring->obj_num;
return 0;
}
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 31ee477dd131..8283aeee35fb 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -111,6 +111,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
+ struct device *dev = ndev->dev.parent;
unsigned int i;
for (i = 0; i < TX_BD_NUM; i++) {
@@ -140,7 +141,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
stats->tx_bytes += skb->len;
}
- dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
+ dma_unmap_single(dev, dma_unmap_addr(tx_buff, addr),
dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
/* return the sk_buff to system */
@@ -174,6 +175,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
static int arc_emac_rx(struct net_device *ndev, int budget)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
unsigned int work_done;
for (work_done = 0; work_done < budget; work_done++) {
@@ -223,9 +225,9 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
continue;
}
- addr = dma_map_single(&ndev->dev, (void *)skb->data,
+ addr = dma_map_single(dev, (void *)skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, addr)) {
+ if (dma_mapping_error(dev, addr)) {
if (net_ratelimit())
netdev_err(ndev, "cannot map dma buffer\n");
dev_kfree_skb(skb);
@@ -237,7 +239,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
}
/* unmap previously mapped skb */
- dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+ dma_unmap_single(dev, dma_unmap_addr(rx_buff, addr),
dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
pktlen = info & LEN_MASK;
@@ -423,6 +425,7 @@ static int arc_emac_open(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
struct phy_device *phy_dev = ndev->phydev;
+ struct device *dev = ndev->dev.parent;
int i;
phy_dev->autoneg = AUTONEG_ENABLE;
@@ -445,9 +448,9 @@ static int arc_emac_open(struct net_device *ndev)
if (unlikely(!rx_buff->skb))
return -ENOMEM;
- addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+ addr = dma_map_single(dev, (void *)rx_buff->skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, addr)) {
+ if (dma_mapping_error(dev, addr)) {
netdev_err(ndev, "cannot dma map\n");
dev_kfree_skb(rx_buff->skb);
return -ENOMEM;
@@ -548,6 +551,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
static void arc_free_tx_queue(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
unsigned int i;
for (i = 0; i < TX_BD_NUM; i++) {
@@ -555,7 +559,7 @@ static void arc_free_tx_queue(struct net_device *ndev)
struct buffer_state *tx_buff = &priv->tx_buff[i];
if (tx_buff->skb) {
- dma_unmap_single(&ndev->dev,
+ dma_unmap_single(dev,
dma_unmap_addr(tx_buff, addr),
dma_unmap_len(tx_buff, len),
DMA_TO_DEVICE);
@@ -579,6 +583,7 @@ static void arc_free_tx_queue(struct net_device *ndev)
static void arc_free_rx_queue(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
unsigned int i;
for (i = 0; i < RX_BD_NUM; i++) {
@@ -586,7 +591,7 @@ static void arc_free_rx_queue(struct net_device *ndev)
struct buffer_state *rx_buff = &priv->rx_buff[i];
if (rx_buff->skb) {
- dma_unmap_single(&ndev->dev,
+ dma_unmap_single(dev,
dma_unmap_addr(rx_buff, addr),
dma_unmap_len(rx_buff, len),
DMA_FROM_DEVICE);
@@ -679,6 +684,7 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
unsigned int len, *txbd_curr = &priv->txbd_curr;
struct net_device_stats *stats = &ndev->stats;
__le32 *info = &priv->txbd[*txbd_curr].info;
+ struct device *dev = ndev->dev.parent;
dma_addr_t addr;
if (skb_padto(skb, ETH_ZLEN))
@@ -692,10 +698,9 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
- addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
- DMA_TO_DEVICE);
+ addr = dma_map_single(dev, (void *)skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
+ if (unlikely(dma_mapping_error(dev, addr))) {
stats->tx_dropped++;
stats->tx_errors++;
dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 87f40c2ba904..078b1a72c161 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -133,6 +133,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
struct arc_emac_mdio_bus_data *data = &priv->bus_data;
struct device_node *np = priv->dev->of_node;
const char *name = "Synopsys MII Bus";
+ struct device_node *mdio_node;
struct mii_bus *bus;
int error;
@@ -164,7 +165,13 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name);
- error = of_mdiobus_register(bus, priv->dev->of_node);
+ /* Backwards compatibility for EMAC nodes without MDIO subnode. */
+ mdio_node = of_get_child_by_name(np, "mdio");
+ if (!mdio_node)
+ mdio_node = of_node_get(np);
+
+ error = of_mdiobus_register(bus, mdio_node);
+ of_node_put(mdio_node);
if (error) {
mdiobus_free(bus);
return dev_err_probe(priv->dev, error,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
index 6f0e58a2a58a..9e1d44ae92cc 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -56,7 +56,7 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
__entry->fd_format = qm_fd_get_format(fd);
__entry->fd_offset = qm_fd_get_offset(fd);
__entry->fd_length = qm_fd_get_length(fd);
- __entry->fd_status = fd->status;
+ __entry->fd_status = __be32_to_cpu(fd->status);
__assign_str(name);
),
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 8f6b0bf48139..c95a7c083b0f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -665,19 +665,11 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!num_vfs) {
enetc_msg_psi_free(pf);
- kfree(pf->vf_state);
pf->num_vfs = 0;
pci_disable_sriov(pdev);
} else {
pf->num_vfs = num_vfs;
- pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state),
- GFP_KERNEL);
- if (!pf->vf_state) {
- pf->num_vfs = 0;
- return -ENOMEM;
- }
-
err = enetc_msg_psi_init(pf);
if (err) {
dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err);
@@ -696,7 +688,6 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
err_en_sriov:
enetc_msg_psi_free(pf);
err_msg_psi:
- kfree(pf->vf_state);
pf->num_vfs = 0;
return err;
@@ -1286,6 +1277,12 @@ static int enetc_pf_probe(struct pci_dev *pdev,
pf = enetc_si_priv(si);
pf->si = si;
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+ if (pf->total_vfs) {
+ pf->vf_state = kcalloc(pf->total_vfs, sizeof(struct enetc_vf_state),
+ GFP_KERNEL);
+ if (!pf->vf_state)
+ goto err_alloc_vf_state;
+ }
err = enetc_setup_mac_addresses(node, pf);
if (err)
@@ -1363,6 +1360,8 @@ err_alloc_si_res:
free_netdev(ndev);
err_alloc_netdev:
err_setup_mac_addresses:
+ kfree(pf->vf_state);
+err_alloc_vf_state:
enetc_psi_destroy(pdev);
err_psi_create:
return err;
@@ -1389,6 +1388,7 @@ static void enetc_pf_remove(struct pci_dev *pdev)
enetc_free_si_resources(priv);
free_netdev(si->ndev);
+ kfree(pf->vf_state);
enetc_psi_destroy(pdev);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index dfcaac302e24..b15db70769e5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -78,11 +78,18 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct sockaddr *saddr = addr;
+ int err;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- return enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+ err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+ if (err)
+ return err;
+
+ eth_hw_addr_set(ndev, saddr->sa_data);
+
+ return 0;
}
static int enetc_vf_set_features(struct net_device *ndev,
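
The enetc_vf.c change commits the new MAC address to the netdev only after the PF has accepted it over the mailbox, rather than returning the mailbox result directly. A standalone sketch of the same commit-on-success pattern; send_mac_to_pf() is a stand-in, not the driver's API:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Stand-in for the VF->PF mailbox call; pretend the PF rejects broadcast. */
static int send_mac_to_pf(const unsigned char mac[ETH_ALEN])
{
        static const unsigned char bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

        return memcmp(mac, bcast, ETH_ALEN) == 0 ? -1 : 0;
}

/* Only update the locally cached address once the PF has accepted it. */
static int set_mac_addr(unsigned char dev_addr[ETH_ALEN], const unsigned char new_addr[ETH_ALEN])
{
        int err = send_mac_to_pf(new_addr);

        if (err)
                return err;     /* keep the old address on failure */

        memcpy(dev_addr, new_addr, ETH_ALEN);
        return 0;
}

int main(void)
{
        unsigned char dev_addr[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
        unsigned char new_addr[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x02 };

        if (!set_mac_addr(dev_addr, new_addr))
                printf("MAC updated, last byte now %02x\n", dev_addr[5]);
        return 0;
}
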
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 67b0bf310daa..9a63fbc69408 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -25,8 +25,11 @@ void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!pci_id)
continue;
- if (IS_ENABLED(CONFIG_PCI_IOV))
+ if (IS_ENABLED(CONFIG_PCI_IOV)) {
+ device_lock(&ae_dev->pdev->dev);
pci_disable_sriov(ae_dev->pdev);
+ device_unlock(&ae_dev->pdev->dev);
+ }
}
}
EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 841e5af7b2be..807eb3bbb11c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1293,10 +1293,8 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
/* save the buffer addr until the last read operation */
*save_buf = read_buf;
- }
- /* get data ready for the first time to read */
- if (!*ppos) {
+ /* get data ready for the first time to read */
ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
read_buf, hns3_dbg_cmd[index].buf_len);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b09f0cca34dc..4cbc4d069a1f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -11,7 +11,6 @@
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
@@ -381,24 +380,6 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
#define HNS3_INVALID_PTYPE \
ARRAY_SIZE(hns3_rx_ptype_tbl)
-static void hns3_dma_map_sync(struct device *dev, unsigned long iova)
-{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iommu_iotlb_gather iotlb_gather;
- size_t granule;
-
- if (!domain || !iommu_is_dma_domain(domain))
- return;
-
- granule = 1 << __ffs(domain->pgsize_bitmap);
- iova = ALIGN_DOWN(iova, granule);
- iotlb_gather.start = iova;
- iotlb_gather.end = iova + granule - 1;
- iotlb_gather.pgsize = granule;
-
- iommu_iotlb_sync(domain, &iotlb_gather);
-}
-
static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
struct hns3_enet_tqp_vector *tqp_vector = vector;
@@ -1051,8 +1032,6 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
- struct net_device *netdev = ring_to_netdev(ring);
- struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_tx_spare *tx_spare;
struct page *page;
dma_addr_t dma;
@@ -1094,7 +1073,6 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
tx_spare->buf = page_address(page);
tx_spare->len = PAGE_SIZE << order;
ring->tx_spare = tx_spare;
- ring->tx_copybreak = priv->tx_copybreak;
return;
dma_mapping_error:
@@ -1746,9 +1724,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
unsigned int type)
{
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
- struct hnae3_handle *handle = ring->tqp->handle;
struct device *dev = ring_to_dev(ring);
- struct hnae3_ae_dev *ae_dev;
unsigned int size;
dma_addr_t dma;
@@ -1780,13 +1756,6 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
return -ENOMEM;
}
- /* Add a SYNC command to sync io-pgtale to avoid errors in pgtable
- * prefetch
- */
- ae_dev = hns3_get_ae_dev(handle);
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
- hns3_dma_map_sync(dev, dma);
-
desc_cb->priv = priv;
desc_cb->length = size;
desc_cb->dma = dma;
@@ -2483,6 +2452,7 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}
+ netdev->features = features;
return 0;
}
@@ -4898,30 +4868,6 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
devm_kfree(&pdev->dev, priv->tqp_vector);
}
-static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
-{
-#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
-#define HNS3_MAX_PACKET_SIZE (64 * 1024)
-
- struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
- struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
- struct hnae3_handle *handle = priv->ae_handle;
-
- if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
- return;
-
- if (!(domain && iommu_is_dma_domain(domain)))
- return;
-
- priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
- priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
-
- if (priv->tx_copybreak < priv->min_tx_copybreak)
- priv->tx_copybreak = priv->min_tx_copybreak;
- if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
- handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
-}
-
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
unsigned int ring_type)
{
@@ -5155,7 +5101,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
int i, j;
int ret;
- hns3_update_tx_spare_buf_config(priv);
for (i = 0; i < ring_num; i++) {
ret = hns3_alloc_ring_memory(&priv->ring[i]);
if (ret) {
@@ -5360,8 +5305,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
- priv->min_tx_copybreak = 0;
- priv->min_tx_spare_buf_size = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index caf7a4df8585..d36c4ed16d8d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -596,8 +596,6 @@ struct hns3_nic_priv {
struct hns3_enet_coalesce rx_coal;
u32 tx_copybreak;
u32 rx_copybreak;
- u32 min_tx_copybreak;
- u32 min_tx_spare_buf_size;
};
union l3_hdr_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 97eaeec1952b..b1e988347347 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1933,31 +1933,6 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
return ret;
}
-static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
-
- if (copybreak < priv->min_tx_copybreak) {
- netdev_err(netdev, "tx copybreak %u should be no less than %u!\n",
- copybreak, priv->min_tx_copybreak);
- return -EINVAL;
- }
- return 0;
-}
-
-static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
-
- if (buf_size < priv->min_tx_spare_buf_size) {
- netdev_err(netdev,
- "tx spare buf size %u should be no less than %u!\n",
- buf_size, priv->min_tx_spare_buf_size);
- return -EINVAL;
- }
- return 0;
-}
-
static int hns3_set_tunable(struct net_device *netdev,
const struct ethtool_tunable *tuna,
const void *data)
@@ -1974,10 +1949,6 @@ static int hns3_set_tunable(struct net_device *netdev,
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
- ret = hns3_check_tx_copybreak(netdev, *(u32 *)data);
- if (ret)
- return ret;
-
priv->tx_copybreak = *(u32 *)data;
for (i = 0; i < h->kinfo.num_tqps; i++)
@@ -1992,10 +1963,6 @@ static int hns3_set_tunable(struct net_device *netdev,
break;
case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
- ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data);
- if (ret)
- return ret;
-
old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
new_tx_spare_buf_size = *(u32 *)data;
netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 728f4777e51f..bd86efd92a5a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -6,7 +6,6 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -3585,17 +3584,6 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
return ret;
}
-static void hclge_set_reset_pending(struct hclge_dev *hdev,
- enum hnae3_reset_type reset_type)
-{
- /* When an incorrect reset type is executed, the get_reset_level
- * function generates the HNAE3_NONE_RESET flag. As a result, this
- * type do not need to pending.
- */
- if (reset_type != HNAE3_NONE_RESET)
- set_bit(reset_type, &hdev->reset_pending);
-}
-
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
@@ -3616,7 +3604,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
- hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
@@ -3626,7 +3614,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
- hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3781,7 +3769,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- IRQ_NOAUTOEN, hdev->misc_vector.name, hdev);
+ 0, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -4074,7 +4062,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
- hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
+ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
default:
@@ -4108,8 +4096,6 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
clear_bit(HNAE3_FLR_RESET, addr);
}
- clear_bit(HNAE3_NONE_RESET, addr);
-
if (hdev->reset_type != HNAE3_NONE_RESET &&
rst_level < hdev->reset_type)
return HNAE3_NONE_RESET;
@@ -4251,7 +4237,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
return false;
} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->rst_stats.reset_fail_cnt++;
- hclge_set_reset_pending(hdev, hdev->reset_type);
+ set_bit(hdev->reset_type, &hdev->reset_pending);
dev_info(&hdev->pdev->dev,
"re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt);
@@ -4494,20 +4480,8 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
-#define HCLGE_SUPPORT_RESET_TYPE \
- (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
- BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
-
struct hclge_dev *hdev = ae_dev->priv;
- if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
- /* To prevent reset triggered by hclge_reset_event */
- set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
- dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
- rst_type);
- return;
- }
-
set_bit(rst_type, &hdev->default_reset_request);
}
@@ -11917,6 +11891,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_init_rxd_adv_layout(hdev);
+ /* Enable MISC vector(vector0) */
+ hclge_enable_vector(&hdev->misc_vector, true);
+
ret = hclge_init_wol(hdev);
if (ret)
dev_warn(&pdev->dev,
@@ -11929,10 +11906,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
- /* Enable MISC vector(vector0) */
- enable_irq(hdev->misc_vector.vector_irq);
- hclge_enable_vector(&hdev->misc_vector, true);
-
dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -12338,7 +12311,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- disable_irq(hdev->misc_vector.vector_irq);
+ synchronize_irq(hdev->misc_vector.vector_irq);
/* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index bab16c2191b2..5505caea88e9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -58,9 +58,6 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
struct hclge_dev *hdev = vport->back;
struct hclge_ptp *ptp = hdev->ptp;
- if (!ptp)
- return false;
-
if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
ptp->tx_skipped++;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
index 8c057192aae6..43c1c18fa81f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
@@ -510,9 +510,9 @@ out:
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
struct hnae3_knic_private_info *kinfo)
{
+#define HCLGE_RING_REG_OFFSET 0x200
#define HCLGE_RING_INT_REG_OFFSET 0x4
- struct hnae3_queue *tqp;
int i, j, reg_num;
int data_num_sum;
u32 *reg = data;
@@ -533,11 +533,10 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < kinfo->num_tqps; j++) {
reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
- tqp = kinfo->tqp[j];
for (i = 0; i < reg_num; i++)
- *reg++ = readl_relaxed(tqp->io_base -
- HCLGE_TQP_REG_OFFSET +
- ring_reg_addr_list[i]);
+ *reg++ = hclge_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ HCLGE_RING_REG_OFFSET * j);
}
data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 896f1eb172d3..094a7c7b5592 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1395,17 +1395,6 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
return ret;
}
-static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
- enum hnae3_reset_type reset_type)
-{
- /* When an incorrect reset type is executed, the get_reset_level
- * function generates the HNAE3_NONE_RESET flag. As a result, this
- * type do not need to pending.
- */
- if (reset_type != HNAE3_NONE_RESET)
- set_bit(reset_type, &hdev->reset_pending);
-}
-
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1555,7 +1544,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
hdev->rst_stats.rst_fail_cnt);
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
- hclgevf_set_reset_pending(hdev, hdev->reset_type);
+ set_bit(hdev->reset_type, &hdev->reset_pending);
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
@@ -1675,8 +1664,6 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
clear_bit(HNAE3_FLR_RESET, addr);
}
- clear_bit(HNAE3_NONE_RESET, addr);
-
return rst_level;
}
@@ -1686,15 +1673,14 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
struct hclgevf_dev *hdev = ae_dev->priv;
+ dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
+
if (hdev->default_reset_request)
hdev->reset_level =
hclgevf_get_reset_level(&hdev->default_reset_request);
else
hdev->reset_level = HNAE3_VF_FUNC_RESET;
- dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
- hdev->reset_level);
-
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
@@ -1705,20 +1691,8 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
-#define HCLGEVF_SUPPORT_RESET_TYPE \
- (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
- BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
- BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
-
struct hclgevf_dev *hdev = ae_dev->priv;
- if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
- /* To prevent reset triggered by hclge_reset_event */
- set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
- dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
- rst_type);
- return;
- }
set_bit(rst_type, &hdev->default_reset_request);
}
@@ -1875,14 +1849,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
*/
if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
- hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
+ set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
- hclgevf_set_reset_pending(hdev, hdev->reset_level);
+ set_bit(hdev->reset_level, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
hclgevf_reset_task_schedule(hdev);
@@ -2005,7 +1979,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
- hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
+ set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
@@ -2315,7 +2289,6 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
- timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
mutex_init(&hdev->mbx_resp.mbx_mutex);
sema_init(&hdev->reset_sem, 1);
@@ -3015,6 +2988,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
HCLGEVF_DRIVER_NAME);
hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
+ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 7d9d9dbc7560..6db415d8b917 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
void *data)
{
+#define HCLGEVF_RING_REG_OFFSET 0x200
#define HCLGEVF_RING_INT_REG_OFFSET 0x4
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hnae3_queue *tqp;
int i, j, reg_um;
u32 *reg = data;
@@ -147,11 +147,10 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
reg_um = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
- tqp = &hdev->htqp[j].q;
for (i = 0; i < reg_um; i++)
- *reg++ = readl_relaxed(tqp->io_base -
- HCLGEVF_TQP_REG_OFFSET +
- ring_reg_addr_list[i]);
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ HCLGEVF_RING_REG_OFFSET * j);
}
reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index ce227b56cf72..2f9655cf5dd9 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1205,12 +1205,10 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
- if (hw->mac.type != e1000_pch_mtp) {
- ret_val = e1000e_force_smbus(hw);
- if (ret_val) {
- e_dbg("Failed to force SMBUS: %d\n", ret_val);
- goto release;
- }
+ ret_val = e1000e_force_smbus(hw);
+ if (ret_val) {
+ e_dbg("Failed to force SMBUS: %d\n", ret_val);
+ goto release;
}
/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
@@ -1273,13 +1271,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
}
release:
- if (hw->mac.type == e1000_pch_mtp) {
- ret_val = e1000e_force_smbus(hw);
- if (ret_val)
- e_dbg("Failed to force SMBUS over MTL system: %d\n",
- ret_val);
- }
-
hw->phy.ops.release(hw);
out:
if (ret_val)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2089a0e172bf..d4255c2706fa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -755,6 +755,7 @@ enum i40e_filter_state {
I40E_FILTER_ACTIVE, /* Added to switch by FW */
I40E_FILTER_FAILED, /* Rejected by FW */
I40E_FILTER_REMOVE, /* To be removed */
+ I40E_FILTER_NEW_SYNC, /* New, not sent yet, is in i40e_sync_vsi_filters() */
/* There is no 'removed' state; the filter struct is freed */
};
struct i40e_mac_filter {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index abf624d770e6..208c2f0857b6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -89,6 +89,7 @@ static char *i40e_filter_state_string[] = {
"ACTIVE",
"FAILED",
"REMOVE",
+ "NEW_SYNC",
};
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 25295ae370b2..55fb362eb508 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1255,6 +1255,7 @@ int i40e_count_filters(struct i40e_vsi *vsi)
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_NEW ||
+ f->state == I40E_FILTER_NEW_SYNC ||
f->state == I40E_FILTER_ACTIVE)
++cnt;
}
@@ -1441,6 +1442,8 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
new->f = add_head;
new->state = add_head->state;
+ if (add_head->state == I40E_FILTER_NEW)
+ add_head->state = I40E_FILTER_NEW_SYNC;
/* Add the new filter to the tmp list */
hlist_add_head(&new->hlist, tmp_add_list);
@@ -1550,6 +1553,8 @@ static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
return -ENOMEM;
new_mac->f = add_head;
new_mac->state = add_head->state;
+ if (add_head->state == I40E_FILTER_NEW)
+ add_head->state = I40E_FILTER_NEW_SYNC;
/* Add the new filter to the tmp list */
hlist_add_head(&new_mac->hlist, tmp_add_list);
@@ -2437,7 +2442,8 @@ static int
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_mac_filter *f)
{
- bool enable = f->state == I40E_FILTER_NEW;
+ bool enable = f->state == I40E_FILTER_NEW ||
+ f->state == I40E_FILTER_NEW_SYNC;
struct i40e_hw *hw = &vsi->back->hw;
int aq_ret;
@@ -2611,6 +2617,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* Add it to the hash list */
hlist_add_head(&new->hlist, &tmp_add_list);
+ f->state = I40E_FILTER_NEW_SYNC;
}
/* Count the number of active (current and new) VLAN
@@ -2762,7 +2769,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
spin_lock_bh(&vsi->mac_filter_hash_lock);
hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
/* Only update the state if we're still NEW */
- if (new->f->state == I40E_FILTER_NEW)
+ if (new->f->state == I40E_FILTER_NEW ||
+ new->f->state == I40E_FILTER_NEW_SYNC)
new->f->state = new->state;
hlist_del(&new->hlist);
netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index c0b3e70a7ea3..fb527434b58b 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -552,13 +552,14 @@ int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr)
{
ice_eswitch_stop_reprs(pf);
+ repr->ops.rem(repr);
+
xa_erase(&pf->eswitch.reprs, repr->id);
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
ice_eswitch_release_repr(pf, repr);
- repr->ops.rem(repr);
ice_repr_destroy(repr);
if (xa_empty(&pf->eswitch.reprs)) {
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 5412eff8ef23..ee9862ddfe15 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1830,11 +1830,12 @@ static int
ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
struct ice_fdir_fltr *input)
{
- u16 dest_vsi, q_index = 0;
+ s16 q_index = ICE_FDIR_NO_QUEUE_IDX;
u16 orig_q_index = 0;
struct ice_pf *pf;
struct ice_hw *hw;
int flow_type;
+ u16 dest_vsi;
u8 dest_ctl;
if (!vsi || !fsp || !input)
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index ab5b118daa2d..820023c0271f 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -53,6 +53,8 @@
*/
#define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20
+#define ICE_FDIR_NO_QUEUE_IDX -1
+
enum ice_fltr_prgm_desc_dest {
ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX,
@@ -186,7 +188,7 @@ struct ice_fdir_fltr {
u16 flex_fltr;
/* filter control */
- u16 q_index;
+ s16 q_index;
u16 orig_q_index;
u16 dest_vsi;
u8 dest_ctl;
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 2c31ad87587a..66544faab710 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -141,6 +141,7 @@ enum idpf_vport_state {
* @adapter: Adapter back pointer
* @vport: Vport back pointer
* @vport_id: Vport identifier
+ * @link_speed_mbps: Link speed in mbps
* @vport_idx: Relative vport index
* @state: See enum idpf_vport_state
* @netstats: Packet and byte stats
@@ -150,6 +151,7 @@ struct idpf_netdev_priv {
struct idpf_adapter *adapter;
struct idpf_vport *vport;
u32 vport_id;
+ u32 link_speed_mbps;
u16 vport_idx;
enum idpf_vport_state state;
struct rtnl_link_stats64 netstats;
@@ -287,7 +289,6 @@ struct idpf_port_stats {
* @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
- * @link_speed_mbps: Link speed in mbps
* @sw_marker_wq: workqueue for marker packets
*/
struct idpf_vport {
@@ -331,7 +332,6 @@ struct idpf_vport {
struct idpf_port_stats port_stats;
bool link_up;
- u32 link_speed_mbps;
wait_queue_head_t sw_marker_wq;
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 3806ddd3ce4a..59b1a1a09996 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -1296,24 +1296,19 @@ static void idpf_set_msglevel(struct net_device *netdev, u32 data)
static int idpf_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- struct idpf_vport *vport;
-
- idpf_vport_ctrl_lock(netdev);
- vport = idpf_netdev_to_vport(netdev);
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
ethtool_link_ksettings_zero_link_mode(cmd, supported);
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.port = PORT_NONE;
- if (vport->link_up) {
+ if (netif_carrier_ok(netdev)) {
cmd->base.duplex = DUPLEX_FULL;
- cmd->base.speed = vport->link_speed_mbps;
+ cmd->base.speed = np->link_speed_mbps;
} else {
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.speed = SPEED_UNKNOWN;
}
- idpf_vport_ctrl_unlock(netdev);
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 4f20343e49a9..b4fbb99bfad2 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -1786,6 +1786,7 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
*/
err = idpf_vc_core_init(adapter);
if (err) {
+ cancel_delayed_work_sync(&adapter->mbx_task);
idpf_deinit_dflt_mbx(adapter);
goto unlock_mutex;
}
@@ -1860,7 +1861,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
* mess with. Nothing below should use those variables from new_vport
* and should instead always refer to them in vport if they need to.
*/
- memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
+ memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
@@ -1906,7 +1907,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
/* Same comment as above regarding avoiding copying the wait_queues and
* mutexes applies here. We do not want to mess with those if possible.
*/
- memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
+ memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));
if (reset_cause == IDPF_SR_Q_CHANGE)
idpf_vport_alloc_vec_indexes(vport);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 15c00a01f1c0..d46c95f91b0d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -141,7 +141,7 @@ static void idpf_handle_event_link(struct idpf_adapter *adapter,
}
np = netdev_priv(vport->netdev);
- vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+ np->link_speed_mbps = le32_to_cpu(v2e->link_speed);
if (vport->link_up == v2e->link_status)
return;
@@ -3063,7 +3063,6 @@ init_failed:
adapter->state = __IDPF_VER_CHECK;
if (adapter->vcxn_mngr)
idpf_vc_xn_shutdown(adapter->vcxn_mngr);
- idpf_deinit_dflt_mbx(adapter);
set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
msecs_to_jiffies(task_delay));
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index b93791d6b593..f5dc876eb500 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -394,6 +394,7 @@ err_out_free_irqs:
err_out_pci:
ionic_dev_teardown(ionic);
ionic_clear_pci(ionic);
+ ionic_debugfs_del_dev(ionic);
err_out:
mutex_destroy(&ionic->dev_cmd_lock);
ionic_devlink_free(ionic);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 208dbc68aaf9..7bf275f127c9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3780,6 +3780,7 @@ static int stmmac_request_irq_single(struct net_device *dev)
/* Request the Wake IRQ in case of another line
* is used for WoL
*/
+ priv->wol_irq_disabled = true;
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
ret = request_irq(priv->wol_irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 0520e9f4bea7..ba6db61dd227 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -337,9 +337,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
+ struct am65_cpsw_swdata *swdata;
dma_addr_t desc_dma;
dma_addr_t buf_dma;
- void *swdata;
desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
if (!desc_rx) {
@@ -363,7 +363,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- *((void **)swdata) = page_address(page);
+ swdata->page = page;
+ swdata->flow_id = flow_idx;
return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
desc_rx, desc_dma);
@@ -519,36 +520,31 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch
static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
struct page *page,
- bool allow_direct,
- int desc_idx)
+ bool allow_direct)
{
page_pool_put_full_page(flow->page_pool, page, allow_direct);
- flow->pages[desc_idx] = NULL;
}
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
- struct am65_cpsw_rx_flow *flow = data;
+ struct am65_cpsw_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
- struct am65_cpsw_rx_chn *rx_chn;
+ struct am65_cpsw_swdata *swdata;
dma_addr_t buf_dma;
+ struct page *page;
u32 buf_dma_len;
- void *page_addr;
- void **swdata;
- int desc_idx;
+ u32 flow_id;
- rx_chn = &flow->common->rx_chns;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- page_addr = *swdata;
+ page = swdata->page;
+ flow_id = swdata->flow_id;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
- rx_chn->dsize_log2);
- am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx);
+ am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -703,14 +699,13 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
ret = -ENOMEM;
goto fail_rx;
}
- flow->pages[i] = page;
ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
if (ret < 0) {
dev_err(common->dev,
"cannot submit page to rx channel flow %d, error %d\n",
flow_idx, ret);
- am65_cpsw_put_page(flow, page, false, i);
+ am65_cpsw_put_page(flow, page, false);
goto fail_rx;
}
}
@@ -764,8 +759,8 @@ fail_tx:
fail_rx:
for (i = 0; i < common->rx_ch_num_flows; i++)
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
- am65_cpsw_nuss_rx_cleanup, 0);
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
+ am65_cpsw_nuss_rx_cleanup, !!i);
am65_cpsw_destroy_xdp_rxqs(common);
@@ -817,11 +812,11 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
dev_err(common->dev, "rx teardown timeout\n");
}
- for (i = 0; i < common->rx_ch_num_flows; i++) {
+ for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
napi_disable(&rx_chn->flows[i].napi_rx);
hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
- am65_cpsw_nuss_rx_cleanup, 0);
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
+ am65_cpsw_nuss_rx_cleanup, !!i);
}
k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
@@ -1028,7 +1023,7 @@ pool_free:
static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
struct am65_cpsw_port *port,
struct xdp_buff *xdp,
- int desc_idx, int cpu, int *len)
+ int cpu, int *len)
{
struct am65_cpsw_common *common = flow->common;
struct am65_cpsw_ndev_priv *ndev_priv;
@@ -1101,7 +1096,7 @@ drop:
}
page = virt_to_head_page(xdp->data);
- am65_cpsw_put_page(flow, page, true, desc_idx);
+ am65_cpsw_put_page(flow, page, true);
out:
return ret;
@@ -1150,16 +1145,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
struct am65_cpsw_ndev_stats *stats;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
+ struct am65_cpsw_swdata *swdata;
struct page *page, *new_page;
dma_addr_t desc_dma, buf_dma;
struct am65_cpsw_port *port;
- int headroom, desc_idx, ret;
struct net_device *ndev;
u32 flow_idx = flow->id;
struct sk_buff *skb;
struct xdp_buff xdp;
+ int headroom, ret;
void *page_addr;
- void **swdata;
u32 *psdata;
*xdp_state = AM65_CPSW_XDP_PASS;
@@ -1182,8 +1177,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
__func__, flow_idx, &desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- page_addr = *swdata;
- page = virt_to_page(page_addr);
+ page = swdata->page;
+ page_addr = page_address(page);
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -1199,9 +1194,6 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
- rx_chn->dsize_log2);
-
skb = am65_cpsw_build_skb(page_addr, ndev,
AM65_CPSW_MAX_PACKET_SIZE);
if (unlikely(!skb)) {
@@ -1213,7 +1205,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
pkt_len, false);
- *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx,
+ *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
cpu, &pkt_len);
if (*xdp_state != AM65_CPSW_XDP_PASS)
goto allocate;
@@ -1247,10 +1239,8 @@ allocate:
return -ENOMEM;
}
- flow->pages[desc_idx] = new_page;
-
if (netif_dormant(ndev)) {
- am65_cpsw_put_page(flow, new_page, true, desc_idx);
+ am65_cpsw_put_page(flow, new_page, true);
ndev->stats.rx_dropped++;
return 0;
}
@@ -1258,7 +1248,7 @@ allocate:
requeue:
ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
if (WARN_ON(ret < 0)) {
- am65_cpsw_put_page(flow, new_page, true, desc_idx);
+ am65_cpsw_put_page(flow, new_page, true);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -2402,10 +2392,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
for (i = 0; i < common->rx_ch_num_flows; i++) {
flow = &rx_chn->flows[i];
flow->page_pool = NULL;
- flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC,
- sizeof(*flow->pages), GFP_KERNEL);
- if (!flow->pages)
- return -ENOMEM;
}
rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
@@ -2455,10 +2441,12 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
flow = &rx_chn->flows[i];
flow->id = i;
flow->common = common;
+ flow->irq = -EINVAL;
rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
rx_flow_cfg.rx_cfg.size = max_desc_num;
- rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ /* share same FDQ for all flows */
+ rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num;
rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
@@ -2496,6 +2484,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
if (ret) {
dev_err(dev, "failure requesting rx %d irq %u, %d\n",
i, flow->irq, ret);
+ flow->irq = -EINVAL;
goto err;
}
}
@@ -3349,8 +3338,8 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
for (i = 0; i < common->rx_ch_num_flows; i++)
k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
- &rx_chan->flows[i],
- am65_cpsw_nuss_rx_cleanup, 0);
+ rx_chan,
+ am65_cpsw_nuss_rx_cleanup, !!i);
k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index dc8d544230dc..92a27ba4c601 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -101,10 +101,14 @@ struct am65_cpsw_rx_flow {
struct hrtimer rx_hrtimer;
unsigned long rx_pace_timeout;
struct page_pool *page_pool;
- struct page **pages;
char name[32];
};
+struct am65_cpsw_swdata {
+ u32 flow_id;
+ struct page *page;
+};
+
struct am65_cpsw_rx_chn {
struct device *dev;
struct device *dma_dev;
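
The am65-cpsw hunks replace the raw pointer kept in each RX descriptor's software data with a small am65_cpsw_swdata struct carrying the page and the owning flow id, which lets the cleanup path find the right flow without the per-flow pages[] shadow array deleted above. A minimal sketch of the idea; the types and the cleanup helper are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

/* Typed per-descriptor software data instead of an opaque void pointer. */
struct swdata {
        unsigned int flow_id;   /* which RX flow owns this buffer */
        void *page;             /* the buffer itself */
};

struct rx_flow { unsigned int id; unsigned long freed; };

/* Cleanup learns the owning flow from the descriptor itself. */
static void rx_cleanup(struct rx_flow *flows, const struct swdata *sw)
{
        flows[sw->flow_id].freed++;
        free(sw->page);
}

int main(void)
{
        struct rx_flow flows[2] = { { 0, 0 }, { 1, 0 } };
        struct swdata sw = { .flow_id = 1, .page = malloc(4096) };

        rx_cleanup(flows, &sw);
        printf("flow 1 freed %lu buffer(s)\n", flows[1].freed);
        return 0;
}
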
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index a04d4073def9..2c37957478fb 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -222,7 +222,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
struct spi_transfer *xfer = &mses->spi_xfer;
struct spi_message *msg = &mses->spi_msg;
- struct sk_buff *tskb;
+ struct sk_buff *tskb = NULL;
int ret;
netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n",
@@ -235,7 +235,6 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
if (!tskb)
return -ENOMEM;
- dev_kfree_skb(txp);
txp = tskb;
}
@@ -257,6 +256,8 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
mse->stats.xfer_err++;
}
+ dev_kfree_skb(tskb);
+
return ret;
}
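
The mse102x change keeps the caller's skb alive when a padded copy is needed and frees only the copy, once, after the SPI transfer. A standalone sketch of that ownership rule with illustrative buffer handling (no SPI involved):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MIN_LEN 60

/* Transmit buf; if it is too short, work on a zero-padded copy and free
 * only the copy afterwards.  The caller keeps ownership of the original.
 */
static int tx_frame(const unsigned char *buf, size_t len)
{
        unsigned char *copy = NULL;

        if (len < MIN_LEN) {
                copy = calloc(1, MIN_LEN);      /* zero padding */
                if (!copy)
                        return -1;
                memcpy(copy, buf, len);
                buf = copy;
                len = MIN_LEN;
        }

        printf("sending %zu bytes\n", len);     /* stand-in for the transfer */

        free(copy);     /* freeing NULL is a no-op when no copy was made */
        return 0;
}

int main(void)
{
        unsigned char small[10] = { 0 };

        return tx_frame(small, sizeof(small));
}
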
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index d940853acc0b..1fcbcaa85ebd 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -924,13 +924,13 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
skbuf_dma->sg_len = sg_len;
dma_tx_desc->callback_param = lp;
dma_tx_desc->callback_result = axienet_dma_tx_cb;
- dmaengine_submit(dma_tx_desc);
- dma_async_issue_pending(lp->tx_chan);
txq = skb_get_tx_queue(lp->ndev, skb);
netdev_tx_sent_queue(txq, skb->len);
netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
+ dmaengine_submit(dma_tx_desc);
+ dma_async_issue_pending(lp->tx_chan);
return NETDEV_TX_OK;
xmit_error_unmap_sg:
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 937061acfc61..351411f0aa6f 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -147,6 +147,8 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
/* IRQ related */ \
.config_intr = dp83848_config_intr, \
.handle_interrupt = dp83848_handle_interrupt, \
+ \
+ .flags = PHY_RST_AFTER_CLK_EN, \
}
static struct phy_driver dp83848_driver[] = {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 792e9eadbfc3..53a038fcbe99 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -368,15 +368,16 @@ struct receive_queue {
* because table sizes may be differ according to the device configuration.
*/
#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
-#define VIRTIO_NET_RSS_MAX_TABLE_LEN 128
struct virtio_net_ctrl_rss {
u32 hash_types;
u16 indirection_table_mask;
u16 unclassified_queue;
- u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
+ u16 hash_cfg_reserved; /* for HASH_CONFIG (see virtio_net_hash_config for details) */
u16 max_tx_vq;
u8 hash_key_length;
u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
+
+ u16 *indirection_table;
};
/* Control VQ buffers: protected by the rtnl lock */
@@ -512,6 +513,25 @@ static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
struct page *page, void *buf,
int len, int truesize);
+static int rss_indirection_table_alloc(struct virtio_net_ctrl_rss *rss, u16 indir_table_size)
+{
+ if (!indir_table_size) {
+ rss->indirection_table = NULL;
+ return 0;
+ }
+
+ rss->indirection_table = kmalloc_array(indir_table_size, sizeof(u16), GFP_KERNEL);
+ if (!rss->indirection_table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void rss_indirection_table_free(struct virtio_net_ctrl_rss *rss)
+{
+ kfree(rss->indirection_table);
+}
+
static bool is_xdp_frame(void *ptr)
{
return (unsigned long)ptr & VIRTIO_XDP_FLAG;
@@ -3374,15 +3394,59 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
}
+static bool virtnet_commit_rss_command(struct virtnet_info *vi);
+
+static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pairs)
+{
+ u32 indir_val = 0;
+ int i = 0;
+
+ for (; i < vi->rss_indir_table_size; ++i) {
+ indir_val = ethtool_rxfh_indir_default(i, queue_pairs);
+ vi->rss.indirection_table[i] = indir_val;
+ }
+ vi->rss.max_tx_vq = queue_pairs;
+}
+
static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
- struct scatterlist sg;
+ struct virtio_net_ctrl_rss old_rss;
struct net_device *dev = vi->dev;
+ struct scatterlist sg;
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
+ /* First check whether we need to update rss. Do the update only if (1) rss
+ * is enabled and (2) there is no user configuration.
+ *
+ * During rss command processing, device updates queue_pairs using rss.max_tx_vq. That is,
+ * the device updates queue_pairs together with rss, so we can skip the separate queue_pairs
+ * update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly.
+ */
+ if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
+ memcpy(&old_rss, &vi->rss, sizeof(old_rss));
+ if (rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size)) {
+ vi->rss.indirection_table = old_rss.indirection_table;
+ return -ENOMEM;
+ }
+
+ virtnet_rss_update_by_qpairs(vi, queue_pairs);
+
+ if (!virtnet_commit_rss_command(vi)) {
+ /* restore ctrl_rss if commit_rss_command failed */
+ rss_indirection_table_free(&vi->rss);
+ memcpy(&vi->rss, &old_rss, sizeof(old_rss));
+
+ dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n",
+ queue_pairs);
+ return -EINVAL;
+ }
+ rss_indirection_table_free(&old_rss);
+ goto succ;
+ }
+
mq = kzalloc(sizeof(*mq), GFP_KERNEL);
if (!mq)
return -ENOMEM;
@@ -3395,12 +3459,12 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
queue_pairs);
return -EINVAL;
- } else {
- vi->curr_queue_pairs = queue_pairs;
- /* virtnet_open() will refill when device is going to up. */
- if (dev->flags & IFF_UP)
- schedule_delayed_work(&vi->refill, 0);
}
+succ:
+ vi->curr_queue_pairs = queue_pairs;
+ /* virtnet_open() will refill when device is going to up. */
+ if (dev->flags & IFF_UP)
+ schedule_delayed_work(&vi->refill, 0);
return 0;
}
@@ -3828,11 +3892,15 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
/* prepare sgs */
sg_init_table(sgs, 4);
- sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
+ sg_buf_size = offsetof(struct virtio_net_ctrl_rss, hash_cfg_reserved);
sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
- sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1);
- sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
+ if (vi->has_rss) {
+ sg_buf_size = sizeof(uint16_t) * vi->rss_indir_table_size;
+ sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
+ } else {
+ sg_set_buf(&sgs[1], &vi->rss.hash_cfg_reserved, sizeof(uint16_t));
+ }
sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
@@ -3856,21 +3924,14 @@ err:
static void virtnet_init_default_rss(struct virtnet_info *vi)
{
- u32 indir_val = 0;
- int i = 0;
-
vi->rss.hash_types = vi->rss_hash_types_supported;
vi->rss_hash_types_saved = vi->rss_hash_types_supported;
vi->rss.indirection_table_mask = vi->rss_indir_table_size
? vi->rss_indir_table_size - 1 : 0;
vi->rss.unclassified_queue = 0;
- for (; i < vi->rss_indir_table_size; ++i) {
- indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
- vi->rss.indirection_table[i] = indir_val;
- }
+ virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs);
- vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
vi->rss.hash_key_length = vi->rss_key_size;
netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
@@ -6420,10 +6481,19 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_cread16(vdev, offsetof(struct virtio_net_config,
rss_max_indirection_table_length));
}
+ err = rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size);
+ if (err)
+ goto free;
if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_key_size =
virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+ if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
+ dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n",
+ vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE);
+ err = -EINVAL;
+ goto free;
+ }
vi->rss_hash_types_supported =
virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
@@ -6551,6 +6621,15 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
+ if (vi->has_rss || vi->has_rss_hash_report) {
+ if (!virtnet_commit_rss_command(vi)) {
+ dev_warn(&vdev->dev, "RSS disabled because committing failed.\n");
+ dev->hw_features &= ~NETIF_F_RXHASH;
+ vi->has_rss_hash_report = false;
+ vi->has_rss = false;
+ }
+ }
+
virtnet_set_queues(vi, vi->curr_queue_pairs);
/* a random MAC address has been assigned, notify the device.
@@ -6674,6 +6753,8 @@ static void virtnet_remove(struct virtio_device *vdev)
remove_vq_common(vi);
+ rss_indirection_table_free(&vi->rss);
+
free_netdev(vi->dev);
}
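Side note on the new RSS setup path above: ethtool_rxfh_indir_default() reduces to a simple modulo spread (table index % number of RX queues), so virtnet_rss_update_by_qpairs() just round-robins the indirection slots over the active queue pairs into the now heap-allocated table. A minimal userspace sketch of that fill, with hypothetical table size and queue count, and calloc() standing in for rss_indirection_table_alloc():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's ethtool_rxfh_indir_default(): index % n_rx_rings. */
static uint32_t rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
{
        return index % n_rx_rings;
}

int main(void)
{
        const uint16_t indir_table_size = 128;  /* hypothetical rss_max_indirection_table_length */
        const uint16_t queue_pairs = 6;         /* hypothetical active queue pairs */
        uint16_t *table;
        uint32_t i;

        /* Heap table sized by the device-reported limit, as in the driver. */
        table = calloc(indir_table_size, sizeof(*table));
        if (!table)
                return 1;

        for (i = 0; i < indir_table_size; i++)
                table[i] = rxfh_indir_default(i, queue_pairs);

        /* Slots 0..5 map to queues 0..5, slot 6 wraps back to queue 0, and so on. */
        for (i = 0; i < 12; i++)
                printf("slot %2u -> rxq %u\n", i, table[i]);

        free(table);
        return 0;
}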
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index 210d84c67ef9..7a9c09cd4fdc 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -226,7 +226,7 @@ int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
return 0;
err_unmap_skbs:
- while (--i > 0)
+ while (i--)
t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
return ret;
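The t7xx hunk above fixes the classic unwind-loop off-by-one: "while (--i > 0)" stops before index 0, so the first mapped skb was never unmapped on the error path, while "while (i--)" walks from i-1 down to 0. A standalone check of the two loop shapes (the failure index 3 is arbitrary):

#include <stdio.h>

/* Shows which already-initialized indices each unwind loop visits after a
 * failure at index i (here i == 3, so entries 0..2 need cleanup). */
int main(void)
{
        int i;

        i = 3;
        printf("while (--i > 0): ");
        while (--i > 0)
                printf("%d ", i);       /* visits 2 1 -- entry 0 is leaked */
        printf("\n");

        i = 3;
        printf("while (i--):     ");
        while (i--)
                printf("%d ", i);       /* visits 2 1 0 -- full cleanup */
        printf("\n");

        return 0;
}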
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index b149b638453f..855b42c92284 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3795,7 +3795,8 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
int srcu_idx;
srcu_idx = srcu_read_lock(&ctrl->srcu);
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+ srcu_read_lock_held(&ctrl->srcu)) {
if (ns->head->ns_id == nsid) {
if (!nvme_get_ns(ns))
continue;
@@ -4879,7 +4880,8 @@ void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
int srcu_idx;
srcu_idx = srcu_read_lock(&ctrl->srcu);
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
+ list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+ srcu_read_lock_held(&ctrl->srcu))
blk_mark_disk_dead(ns->disk);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
@@ -4891,7 +4893,8 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
int srcu_idx;
srcu_idx = srcu_read_lock(&ctrl->srcu);
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
+ list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+ srcu_read_lock_held(&ctrl->srcu))
blk_mq_unfreeze_queue(ns->queue);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
@@ -4904,7 +4907,8 @@ int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
int srcu_idx;
srcu_idx = srcu_read_lock(&ctrl->srcu);
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+ srcu_read_lock_held(&ctrl->srcu)) {
timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
if (timeout <= 0)
break;
@@ -4920,7 +4924,8 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl)
int srcu_idx;
srcu_idx = srcu_read_lock(&ctrl->srcu);
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
+ list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+ srcu_read_lock_held(&ctrl->srcu))
blk_mq_freeze_queue_wait(ns->queue);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
@@ -4933,7 +4938,8 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
srcu_idx = srcu_read_lock(&ctrl->srcu);
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
+ list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+ srcu_read_lock_held(&ctrl->srcu))
blk_freeze_queue_start(ns->queue);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
@@ -4981,7 +4987,8 @@ void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
int srcu_idx;
srcu_idx = srcu_read_lock(&ctrl->srcu);
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
+ list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+ srcu_read_lock_held(&ctrl->srcu))
blk_sync_queue(ns->queue);
srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 3875abba5a79..0258dd879d64 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -52,6 +52,7 @@ config WMI_BMOF
config HUAWEI_WMI
tristate "Huawei WMI laptop extras driver"
depends on ACPI_BATTERY
+ depends on ACPI_EC
depends on ACPI_WMI
depends on INPUT
select INPUT_SPARSEKMAP
@@ -147,7 +148,7 @@ config YT2_1380
config ACERHDF
tristate "Acer Aspire One temperature and fan driver"
- depends on ACPI && THERMAL
+ depends on ACPI_EC && THERMAL
select THERMAL_GOV_BANG_BANG
help
This is a driver for Acer Aspire One netbooks. It allows to access
@@ -186,6 +187,7 @@ config ACER_WMI
depends on SERIO_I8042
depends on INPUT
depends on RFKILL || RFKILL = n
+ depends on ACPI_EC
depends on ACPI_WMI
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on HWMON
@@ -334,7 +336,7 @@ config MERAKI_MX100
config EEEPC_LAPTOP
tristate "Eee PC Hotkey Driver"
- depends on ACPI
+ depends on ACPI_EC
depends on INPUT
depends on RFKILL || RFKILL = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
@@ -503,7 +505,7 @@ config SENSORS_HDAPS
config THINKPAD_ACPI
tristate "ThinkPad ACPI Laptop Extras"
- depends on ACPI
+ depends on ACPI_EC
depends on ACPI_BATTERY
depends on INPUT
depends on RFKILL || RFKILL = n
@@ -682,7 +684,7 @@ config MEEGOPAD_ANX7428
config MSI_EC
tristate "MSI EC Extras"
- depends on ACPI
+ depends on ACPI_EC
depends on ACPI_BATTERY
help
This driver allows various MSI laptops' functionalities to be
@@ -690,7 +692,7 @@ config MSI_EC
config MSI_LAPTOP
tristate "MSI Laptop Extras"
- depends on ACPI
+ depends on ACPI_EC
depends on BACKLIGHT_CLASS_DEVICE
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on RFKILL
@@ -796,7 +798,7 @@ config SAMSUNG_LAPTOP
config SAMSUNG_Q10
tristate "Samsung Q10 Extras"
- depends on ACPI
+ depends on ACPI_EC
select BACKLIGHT_CLASS_DEVICE
help
This driver provides support for backlight control on Samsung Q10
@@ -804,7 +806,7 @@ config SAMSUNG_Q10
config ACPI_TOSHIBA
tristate "Toshiba Laptop Extras"
- depends on ACPI
+ depends on ACPI_EC
depends on ACPI_BATTERY
depends on ACPI_WMI
select LEDS_CLASS
@@ -904,7 +906,7 @@ config ACPI_CMPC
config COMPAL_LAPTOP
tristate "Compal (and others) Laptop Extras"
- depends on ACPI
+ depends on ACPI_EC
depends on BACKLIGHT_CLASS_DEVICE
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on RFKILL
@@ -949,7 +951,7 @@ config PANASONIC_LAPTOP
config SONY_LAPTOP
tristate "Sony Laptop Extras"
- depends on ACPI
+ depends on ACPI_EC
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
@@ -972,7 +974,7 @@ config SONYPI_COMPAT
config SYSTEM76_ACPI
tristate "System76 ACPI Driver"
- depends on ACPI
+ depends on ACPI_EC
depends on ACPI_BATTERY
depends on HWMON
depends on INPUT
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index bbb8edb62e00..5669f94c3d06 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -998,6 +998,11 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->s2d_msg_id, true);
amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->s2d_msg_id, true);
+ if (!phys_addr_hi && !phys_addr_low) {
+ dev_err(dev->dev, "STB is not enabled on the system; disable enable_stb or contact system vendor\n");
+ return -EINVAL;
+ }
+
stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
/* Clear msg_port for other SMU operation */
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index d6af0ca036f1..347bb43a5f2b 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -261,6 +261,7 @@ int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
dev->mtable_size = sizeof(dev->m_table);
break;
case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
dev->mtable_size = sizeof(dev->m_table_v2);
break;
default:
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index b5183969f9bf..06226eb0eab3 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -86,6 +86,7 @@ static void amd_pmf_get_smu_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_ta
ARRAY_SIZE(dev->m_table.avg_core_c0residency), in);
break;
case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
memcpy(&dev->m_table_v2, dev->buf, dev->mtable_size);
in->ev_info.socket_power = dev->m_table_v2.apu_power + dev->m_table_v2.dgpu_power;
in->ev_info.skin_temperature = dev->m_table_v2.skin_temp;
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 68a49788a396..dc21227dd66e 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -194,6 +194,7 @@ config DELL_WMI
config DELL_WMI_PRIVACY
bool "Dell WMI Hardware Privacy Support"
depends on DELL_WMI
+ depends on ACPI_EC
help
This option adds integration with the "Dell Hardware Privacy"
feature of Dell laptops to the dell-wmi driver.
diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
index 73e41eb69cb5..01c72b91a50d 100644
--- a/drivers/platform/x86/dell/dell-smbios-base.c
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
@@ -576,6 +576,7 @@ static int __init dell_smbios_init(void)
int ret, wmi, smm;
if (!dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Alienware", NULL) &&
!dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "www.dell.com", NULL)) {
pr_err("Unable to run on non-Dell system\n");
return -ENODEV;
diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c
index 24fd7ffadda9..841a5414d28a 100644
--- a/drivers/platform/x86/dell/dell-wmi-base.c
+++ b/drivers/platform/x86/dell/dell-wmi-base.c
@@ -80,6 +80,12 @@ static const struct dmi_system_id dell_wmi_smbios_list[] __initconst = {
static const struct key_entry dell_wmi_keymap_type_0000[] = {
{ KE_IGNORE, 0x003a, { KEY_CAPSLOCK } },
+ /* Meta key lock */
+ { KE_IGNORE, 0xe000, { KEY_RIGHTMETA } },
+
+ /* Meta key unlock */
+ { KE_IGNORE, 0xe001, { KEY_RIGHTMETA } },
+
/* Key code is followed by brightness level */
{ KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
diff --git a/drivers/platform/x86/hp/Kconfig b/drivers/platform/x86/hp/Kconfig
index d776761cd5fd..dd51491b9bcd 100644
--- a/drivers/platform/x86/hp/Kconfig
+++ b/drivers/platform/x86/hp/Kconfig
@@ -37,6 +37,7 @@ config HP_ACCEL
config HP_WMI
tristate "HP WMI extras"
default m
+ depends on ACPI_EC
depends on ACPI_WMI
depends on INPUT
depends on RFKILL || RFKILL = n
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index c64dfc56651d..c908f52ed717 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1294,6 +1294,9 @@ static const struct key_entry ideapad_keymap[] = {
{ KE_KEY, 0x27 | IDEAPAD_WMI_KEY, { KEY_HELP } },
/* Refresh Rate Toggle */
{ KE_KEY, 0x0a | IDEAPAD_WMI_KEY, { KEY_REFRESH_RATE_TOGGLE } },
+ /* Specific to some newer models */
+ { KE_KEY, 0x3e | IDEAPAD_WMI_KEY, { KEY_MICMUTE } },
+ { KE_KEY, 0x3f | IDEAPAD_WMI_KEY, { KEY_RFKILL } },
{ KE_END },
};
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index ad50bbabec61..eb698dcb9af9 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -62,7 +62,7 @@ config INTEL_INT0002_VGPIO
config INTEL_OAKTRAIL
tristate "Intel Oaktrail Platform Extras"
- depends on ACPI
+ depends on ACPI_EC
depends on ACPI_VIDEO || ACPI_VIDEO=n
depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI
help
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 4c1b0553f872..6371a9f765c1 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7936,6 +7936,7 @@ static u8 fan_control_resume_level;
static int fan_watchdog_maxinterval;
static bool fan_with_ns_addr;
+static bool ecfw_with_fan_dec_rpm;
static struct mutex fan_mutex;
@@ -8682,7 +8683,11 @@ static ssize_t fan_fan1_input_show(struct device *dev,
if (res < 0)
return res;
- return sysfs_emit(buf, "%u\n", speed);
+ /* Check for fan speeds displayed in hexadecimal */
+ if (!ecfw_with_fan_dec_rpm)
+ return sysfs_emit(buf, "%u\n", speed);
+ else
+ return sysfs_emit(buf, "%x\n", speed);
}
static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL);
@@ -8699,7 +8704,11 @@ static ssize_t fan_fan2_input_show(struct device *dev,
if (res < 0)
return res;
- return sysfs_emit(buf, "%u\n", speed);
+ /* Check for fan speeds displayed in hexadecimal */
+ if (!ecfw_with_fan_dec_rpm)
+ return sysfs_emit(buf, "%u\n", speed);
+ else
+ return sysfs_emit(buf, "%x\n", speed);
}
static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL);
@@ -8775,6 +8784,7 @@ static const struct attribute_group fan_driver_attr_group = {
#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */
#define TPACPI_FAN_NOFAN 0x0008 /* no fan available */
#define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */
+#define TPACPI_FAN_DECRPM 0x0020 /* For ECFW's with RPM in register as decimal */
static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
@@ -8803,6 +8813,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('R', '1', 'D', TPACPI_FAN_NS), /* 11e Gen5 GL-R */
TPACPI_Q_LNV3('R', '0', 'V', TPACPI_FAN_NS), /* 11e Gen5 KL-Y */
TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
+ TPACPI_Q_LNV3('R', '0', 'Q', TPACPI_FAN_DECRPM),/* L480 */
};
static int __init fan_init(struct ibm_init_struct *iibm)
@@ -8847,6 +8858,13 @@ static int __init fan_init(struct ibm_init_struct *iibm)
tp_features.fan_ctrl_status_undef = 1;
}
+ /* Check for the EC/BIOS with RPM reported in decimal */
+ if (quirks & TPACPI_FAN_DECRPM) {
+ pr_info("ECFW with fan RPM as decimal in EC register\n");
+ ecfw_with_fan_dec_rpm = 1;
+ tp_features.fan_ctrl_status_undef = 1;
+ }
+
if (gfan_handle) {
/* 570, 600e/x, 770e, 770x */
fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
@@ -9067,7 +9085,11 @@ static int fan_read(struct seq_file *m)
if (rc < 0)
return rc;
- seq_printf(m, "speed:\t\t%d\n", speed);
+ /* Check for fan speeds displayed in hexadecimal */
+ if (!ecfw_with_fan_dec_rpm)
+ seq_printf(m, "speed:\t\t%d\n", speed);
+ else
+ seq_printf(m, "speed:\t\t%x\n", speed);
if (fan_status_access_mode == TPACPI_FAN_RD_TPEC_NS) {
/*
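For the TPACPI_FAN_DECRPM quirk above, the EC register appears to hold the RPM as decimal digits packed one per nibble, so the raw value only reads correctly when formatted with "%x". A small illustration with a made-up register value:

#include <stdio.h>

int main(void)
{
        /* Hypothetical EC fan register read on a DECRPM machine: the RPM
         * digits are stored one per nibble, i.e. BCD-style. */
        unsigned int speed = 0x2832;

        printf("printed with %%u: %u RPM (misleading on DECRPM firmware)\n", speed);
        printf("printed with %%x: %x RPM (matches the fan's actual speed)\n", speed);

        return 0;
}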
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index 96ea343856f0..7ee7b65b9b90 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -106,7 +106,9 @@ static int pwm_imx_tpm_round_state(struct pwm_chip *chip,
p->prescale = prescale;
period_count = (clock_unit + ((1 << prescale) >> 1)) >> prescale;
- p->mod = period_count;
+ if (period_count == 0)
+ return -EINVAL;
+ p->mod = period_count - 1;
/* calculate real period HW can support */
tmp = (u64)period_count << prescale;
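The pwm-imx-tpm fix above reflects that the TPM counter runs from 0 to MOD inclusive, so a period of N ticks needs MOD = N - 1, and a rounded count of zero must now be rejected instead of underflowing. A rough sketch of that calculation with arbitrary example numbers (tpm_mod_for_period() is a local helper, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Convert a period in clock units to counter ticks for a given prescaler
 * (rounding to nearest), then derive the MOD value: period is MOD + 1 ticks. */
static int tpm_mod_for_period(uint32_t clock_unit, unsigned int prescale, uint32_t *mod)
{
        uint32_t period_count = (clock_unit + ((1u << prescale) >> 1)) >> prescale;

        if (period_count == 0)
                return -1;              /* period shorter than one counter tick */

        *mod = period_count - 1;        /* counter counts 0..MOD, i.e. period_count ticks */
        return 0;
}

int main(void)
{
        uint32_t mod;

        /* Hypothetical values: 1000 clock units, prescaler 2^3 = 8. */
        if (tpm_mod_for_period(1000, 3, &mod) == 0)
                printf("MOD = %u (125 ticks per period)\n", mod);

        /* A period below one tick is now refused instead of wrapping. */
        if (tpm_mod_for_period(2, 3, &mod) != 0)
                printf("too short: the driver returns -EINVAL here\n");

        return 0;
}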
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 14b60abd6afc..01a8d0487918 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -1379,6 +1379,8 @@ static const struct regulator_desc rk809_reg[] = {
.n_linear_ranges = ARRAY_SIZE(rk817_buck1_voltage_ranges),
.vsel_reg = RK817_BUCK3_ON_VSEL_REG,
.vsel_mask = RK817_BUCK_VSEL_MASK,
+ .apply_reg = RK817_POWER_CONFIG,
+ .apply_bit = RK817_BUCK3_FB_RES_INTER,
.enable_reg = RK817_POWER_EN_REG(0),
.enable_mask = ENABLE_MASK(RK817_ID_DCDC3),
.enable_val = ENABLE_MASK(RK817_ID_DCDC3),
diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c
index a5c126afc648..5925fa7a9a06 100644
--- a/drivers/regulator/rtq2208-regulator.c
+++ b/drivers/regulator/rtq2208-regulator.c
@@ -568,7 +568,7 @@ static int rtq2208_probe(struct i2c_client *i2c)
struct regmap *regmap;
struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX];
struct regulator_dev *rdev;
- struct regulator_config cfg;
+ struct regulator_config cfg = {};
struct rtq2208_rdev_map *rdev_map;
int i, ret = 0, idx, n_regulator = 0;
unsigned int regulator_idx_table[RTQ2208_LDO_MAX],
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 0b2f29006908..d3af1dfa3c7d 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1440,14 +1440,18 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
goto unlock;
ret = wait_event_timeout(channel->intent_req_wq,
- READ_ONCE(channel->intent_req_result) >= 0 &&
- READ_ONCE(channel->intent_received),
+ READ_ONCE(channel->intent_req_result) == 0 ||
+ (READ_ONCE(channel->intent_req_result) > 0 &&
+ READ_ONCE(channel->intent_received)) ||
+ glink->abort_tx,
10 * HZ);
if (!ret) {
dev_err(glink->dev, "intent request timed out\n");
ret = -ETIMEDOUT;
+ } else if (glink->abort_tx) {
+ ret = -ECANCELED;
} else {
- ret = READ_ONCE(channel->intent_req_result) ? 0 : -ECANCELED;
+ ret = READ_ONCE(channel->intent_req_result) ? 0 : -EAGAIN;
}
unlock:
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index ee2b74238758..6ab27f4f4878 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -188,8 +188,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
while (bufsize >= SECTOR_SIZE) {
- buf = __vmalloc(bufsize,
- GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY);
+ buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
if (buf) {
*buflen = bufsize;
return buf;
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 8fa4ffd3a9b5..28bcc65e91be 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -139,6 +139,7 @@ struct qcom_llcc_config {
int size;
bool need_llcc_cfg;
bool no_edac;
+ bool irq_configured;
};
struct qcom_sct_config {
@@ -718,6 +719,7 @@ static const struct qcom_llcc_config x1e80100_cfg[] = {
.need_llcc_cfg = true,
.reg_offset = llcc_v2_1_reg_offset,
.edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ .irq_configured = true,
},
};
@@ -1345,6 +1347,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
drv_data->cfg = llcc_cfg;
drv_data->cfg_size = sz;
drv_data->edac_reg_offset = cfg->edac_reg_offset;
+ drv_data->ecc_irq_configured = cfg->irq_configured;
mutex_init(&drv_data->lock);
platform_set_drvdata(pdev, drv_data);
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index 9606222993fd..baa4ac6704a9 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -4,6 +4,7 @@
* Copyright (c) 2022, Linaro Ltd
*/
#include <linux/auxiliary_bus.h>
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -13,6 +14,8 @@
#include <linux/soc/qcom/pmic_glink.h>
#include <linux/spinlock.h>
+#define PMIC_GLINK_SEND_TIMEOUT (5 * HZ)
+
enum {
PMIC_GLINK_CLIENT_BATT = 0,
PMIC_GLINK_CLIENT_ALTMODE,
@@ -112,13 +115,29 @@ EXPORT_SYMBOL_GPL(pmic_glink_client_register);
int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len)
{
struct pmic_glink *pg = client->pg;
+ bool timeout_reached = false;
+ unsigned long start;
int ret;
mutex_lock(&pg->state_lock);
- if (!pg->ept)
+ if (!pg->ept) {
ret = -ECONNRESET;
- else
- ret = rpmsg_send(pg->ept, data, len);
+ } else {
+ start = jiffies;
+ for (;;) {
+ ret = rpmsg_send(pg->ept, data, len);
+ if (ret != -EAGAIN)
+ break;
+
+ if (timeout_reached) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ usleep_range(1000, 5000);
+ timeout_reached = time_after(jiffies, start + PMIC_GLINK_SEND_TIMEOUT);
+ }
+ }
mutex_unlock(&pg->state_lock);
return ret;
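pmic_glink_send() above now retries rpmsg_send() while the transport reports -EAGAIN, bounded by a jiffies deadline, and deliberately allows one last attempt after the deadline before giving up with -ETIMEDOUT. A userspace approximation of the same loop shape, with fake_send() standing in for rpmsg_send() and plain time()/usleep() for jiffies and usleep_range():

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define SEND_TIMEOUT_SEC 5

/* Hypothetical transport: pretend the remote returns -EAGAIN (-11) a few
 * times before accepting the message. */
static int fake_send(int *attempts)
{
        return (*attempts)++ < 3 ? -11 : 0;
}

int main(void)
{
        bool timeout_reached = false;
        time_t start = time(NULL);
        int attempts = 0;
        int ret;

        for (;;) {
                ret = fake_send(&attempts);
                if (ret != -11)
                        break;

                /* Give up only after the deadline passed *and* one more try failed. */
                if (timeout_reached) {
                        ret = -110;     /* -ETIMEDOUT */
                        break;
                }

                usleep(1000);
                timeout_reached = time(NULL) > start + SEND_TIMEOUT_SEC;
        }

        printf("send returned %d after %d attempts\n", ret, attempts);
        return 0;
}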
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 64fc4f41da77..ecfd3da9d5e8 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -786,10 +786,16 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u",
SOCINFO_MAJOR(le32_to_cpu(info->ver)),
SOCINFO_MINOR(le32_to_cpu(info->ver)));
- if (offsetof(struct socinfo, serial_num) <= item_size)
+ if (!qs->attr.soc_id || !qs->attr.revision)
+ return -ENOMEM;
+
+ if (offsetof(struct socinfo, serial_num) <= item_size) {
qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%u",
le32_to_cpu(info->serial_num));
+ if (!qs->attr.serial_number)
+ return -ENOMEM;
+ }
qs->soc_dev = soc_device_register(&qs->attr);
if (IS_ERR(qs->soc_dev))
diff --git a/drivers/staging/media/av7110/av7110.h b/drivers/staging/media/av7110/av7110.h
index ec461fd187af..b584754f4be0 100644
--- a/drivers/staging/media/av7110/av7110.h
+++ b/drivers/staging/media/av7110/av7110.h
@@ -88,6 +88,8 @@ struct infrared {
u32 ir_config;
};
+#define MAX_CI_SLOTS 2
+
/* place to store all the necessary device information */
struct av7110 {
/* devices */
@@ -163,7 +165,7 @@ struct av7110 {
/* CA */
- struct ca_slot_info ci_slot[2];
+ struct ca_slot_info ci_slot[MAX_CI_SLOTS];
enum av7110_video_mode vidmode;
struct dmxdev dmxdev;
diff --git a/drivers/staging/media/av7110/av7110_ca.c b/drivers/staging/media/av7110/av7110_ca.c
index 6ce212c64e5d..fce4023c9dea 100644
--- a/drivers/staging/media/av7110/av7110_ca.c
+++ b/drivers/staging/media/av7110/av7110_ca.c
@@ -26,23 +26,28 @@
void CI_handle(struct av7110 *av7110, u8 *data, u16 len)
{
+ unsigned slot_num;
+
dprintk(8, "av7110:%p\n", av7110);
if (len < 3)
return;
switch (data[0]) {
case CI_MSG_CI_INFO:
- if (data[2] != 1 && data[2] != 2)
+ if (data[2] != 1 && data[2] != MAX_CI_SLOTS)
break;
+
+ slot_num = array_index_nospec(data[2] - 1, MAX_CI_SLOTS);
+
switch (data[1]) {
case 0:
- av7110->ci_slot[data[2] - 1].flags = 0;
+ av7110->ci_slot[slot_num].flags = 0;
break;
case 1:
- av7110->ci_slot[data[2] - 1].flags |= CA_CI_MODULE_PRESENT;
+ av7110->ci_slot[slot_num].flags |= CA_CI_MODULE_PRESENT;
break;
case 2:
- av7110->ci_slot[data[2] - 1].flags |= CA_CI_MODULE_READY;
+ av7110->ci_slot[slot_num].flags |= CA_CI_MODULE_READY;
break;
}
break;
@@ -262,15 +267,19 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
case CA_GET_SLOT_INFO:
{
struct ca_slot_info *info = (struct ca_slot_info *)parg;
+ unsigned int slot_num;
if (info->num < 0 || info->num > 1) {
mutex_unlock(&av7110->ioctl_mutex);
return -EINVAL;
}
- av7110->ci_slot[info->num].num = info->num;
- av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
- CA_CI_LINK : CA_CI;
- memcpy(info, &av7110->ci_slot[info->num], sizeof(struct ca_slot_info));
+ slot_num = array_index_nospec(info->num, MAX_CI_SLOTS);
+
+ av7110->ci_slot[slot_num].num = info->num;
+ av7110->ci_slot[slot_num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
+ CA_CI_LINK : CA_CI;
+ memcpy(info, &av7110->ci_slot[slot_num],
+ sizeof(struct ca_slot_info));
break;
}
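Both av7110 hunks follow the usual Spectre-v1 hardening pattern: bounds-check the untrusted slot number first, then pass it through array_index_nospec() so the index cannot run past the array under speculation. A hedged userspace sketch of that shape, with a plain clamp standing in for the kernel helper (which additionally constrains speculative execution):

#include <stdio.h>

#define MAX_CI_SLOTS 2

struct slot_info { unsigned int flags; };

static struct slot_info ci_slot[MAX_CI_SLOTS];

/* Userspace stand-in for array_index_nospec(): after the architectural bounds
 * check it is effectively a no-op here. */
static unsigned int index_nospec(unsigned int idx, unsigned int size)
{
        return idx < size ? idx : 0;
}

static int set_slot_flags(unsigned int untrusted_num, unsigned int flags)
{
        unsigned int slot;

        if (untrusted_num >= MAX_CI_SLOTS)      /* validate the untrusted index */
                return -1;

        slot = index_nospec(untrusted_num, MAX_CI_SLOTS);
        ci_slot[slot].flags |= flags;
        return 0;
}

int main(void)
{
        printf("slot 1: %d\n", set_slot_flags(1, 0x1));  /* accepted */
        printf("slot 7: %d\n", set_slot_flags(7, 0x1));  /* rejected */
        return 0;
}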
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 3dbeffc650d3..6c488b1e2624 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -593,7 +593,7 @@ vchiq_platform_init_state(struct vchiq_state *state)
{
struct vchiq_arm_state *platform_state;
- platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
+ platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
if (!platform_state)
return -ENOMEM;
@@ -1731,7 +1731,7 @@ static int vchiq_probe(struct platform_device *pdev)
return -ENOENT;
}
- mgmt = kzalloc(sizeof(*mgmt), GFP_KERNEL);
+ mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
if (!mgmt)
return -ENOMEM;
@@ -1789,8 +1789,6 @@ static void vchiq_remove(struct platform_device *pdev)
arm_state = vchiq_platform_get_arm_state(&mgmt->state);
kthread_stop(arm_state->ka_thread);
-
- kfree(mgmt);
}
static struct platform_driver vchiq_driver = {
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
index 5225b3621a56..d2d49264cf83 100644
--- a/drivers/thermal/qcom/lmh.c
+++ b/drivers/thermal/qcom/lmh.c
@@ -73,7 +73,14 @@ static struct irq_chip lmh_irq_chip = {
static int lmh_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
struct lmh_hw_data *lmh_data = d->host_data;
+ static struct lock_class_key lmh_lock_key;
+ static struct lock_class_key lmh_request_key;
+ /*
+ * This lock class tells lockdep that LMH irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+ irq_set_lockdep_class(irq, &lmh_lock_key, &lmh_request_key);
irq_set_chip_and_handler(irq, &lmh_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, lmh_data);
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index a4caf7899f8e..07e09897165f 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -99,18 +99,15 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n
struct device_node *trips;
int ret, count;
+ *ntrips = 0;
+
trips = of_get_child_by_name(np, "trips");
- if (!trips) {
- pr_err("Failed to find 'trips' node\n");
- return ERR_PTR(-EINVAL);
- }
+ if (!trips)
+ return NULL;
count = of_get_child_count(trips);
- if (!count) {
- pr_err("No trip point defined\n");
- ret = -EINVAL;
- goto out_of_node_put;
- }
+ if (!count)
+ return NULL;
tt = kzalloc(sizeof(*tt) * count, GFP_KERNEL);
if (!tt) {
@@ -133,7 +130,6 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n
out_kfree:
kfree(tt);
- *ntrips = 0;
out_of_node_put:
of_node_put(trips);
@@ -401,11 +397,14 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node *
trips = thermal_of_trips_init(np, &ntrips);
if (IS_ERR(trips)) {
- pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id);
+ pr_err("Failed to parse trip points for %pOFn id=%d\n", sensor, id);
ret = PTR_ERR(trips);
goto out_of_node_put;
}
+ if (!trips)
+ pr_info("No trip points found for %pOFn id=%d\n", sensor, id);
+
ret = thermal_of_monitor_init(np, &delay, &pdelay);
if (ret) {
pr_err("Failed to initialize monitoring delays from %pOFn\n", np);
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 7db9869a9f3f..89d2919d0193 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -532,6 +532,8 @@ int tb_retimer_scan(struct tb_port *port, bool add)
}
ret = 0;
+ if (!IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING))
+ max = min(last_idx, max);
/* Add retimers if they do not exist already */
for (i = 1; i <= max; i++) {
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 0a9b4aeb3fa1..402fdf8b1cde 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -48,7 +48,7 @@ enum usb4_ba_index {
/* Delays in us used with usb4_port_wait_for_bit() */
#define USB4_PORT_DELAY 50
-#define USB4_PORT_SB_DELAY 5000
+#define USB4_PORT_SB_DELAY 1000
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
u32 *metadata, u8 *status,
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index f5846598d80e..abbe7135a977 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -8636,6 +8636,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
ufshcd_init_clk_scaling_sysfs(hba);
}
+ /*
+ * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev
+ * pointer and hence must only be started after the WLUN pointer has
+ * been initialized by ufshcd_scsi_add_wlus().
+ */
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
+
ufs_bsg_probe(hba);
scsi_scan_host(hba->host);
@@ -8795,8 +8803,6 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
ufshcd_force_reset_auto_bkops(hba);
ufshcd_set_timestamp_attr(hba);
- schedule_delayed_work(&hba->ufs_rtc_update_work,
- msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
/* Gear up to HS gear if supported */
if (hba->max_pwr_info.is_valid) {
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 427e5660f87c..98114c2827c0 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -2342,10 +2342,18 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
u32 reg;
int i;
- dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
- DWC3_GUSB2PHYCFG_SUSPHY) ||
- (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) &
- DWC3_GUSB3PIPECTL_SUSPHY);
+ if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) {
+ dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
+ DWC3_GUSB2PHYCFG_SUSPHY) ||
+ (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) &
+ DWC3_GUSB3PIPECTL_SUSPHY);
+ /*
+ * TI AM62 platform requires SUSPHY to be
+ * enabled for system suspend to work.
+ */
+ if (!dwc->susphy_state)
+ dwc3_enable_susphy(dwc, true);
+ }
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_DEVICE:
@@ -2398,15 +2406,6 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
break;
}
- if (!PMSG_IS_AUTO(msg)) {
- /*
- * TI AM62 platform requires SUSPHY to be
- * enabled for system suspend to work.
- */
- if (!dwc->susphy_state)
- dwc3_enable_susphy(dwc, true);
- }
-
return 0;
}
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index d54283fd026b..05b6e7e52e02 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -293,8 +293,6 @@ static int sunxi_musb_exit(struct musb *musb)
if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
sunxi_sram_release(musb->controller->parent);
- devm_usb_put_phy(glue->dev, glue->xceiv);
-
return 0;
}
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index c7d6b5e3f898..28c71d99e857 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -770,11 +770,12 @@ static void edge_bulk_out_data_callback(struct urb *urb)
static void edge_bulk_out_cmd_callback(struct urb *urb)
{
struct edgeport_port *edge_port = urb->context;
+ struct device *dev = &urb->dev->dev;
int status = urb->status;
atomic_dec(&CmdUrbs);
- dev_dbg(&urb->dev->dev, "%s - FREE URB %p (outstanding %d)\n",
- __func__, urb, atomic_read(&CmdUrbs));
+ dev_dbg(dev, "%s - FREE URB %p (outstanding %d)\n", __func__, urb,
+ atomic_read(&CmdUrbs));
/* clean up the transfer buffer */
@@ -784,8 +785,7 @@ static void edge_bulk_out_cmd_callback(struct urb *urb)
usb_free_urb(urb);
if (status) {
- dev_dbg(&urb->dev->dev,
- "%s - nonzero write bulk status received: %d\n",
+ dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
__func__, status);
return;
}
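The io_edgeport cleanup above also stops the second debug print from dereferencing the URB after usb_free_urb(): the device pointer is now cached up front. A generic standalone illustration of the cache-before-free pattern (all names made up):

#include <stdio.h>
#include <stdlib.h>

struct urb_like {
        const char *dev_name;
        int status;
};

int main(void)
{
        struct urb_like *urb = malloc(sizeof(*urb));
        const char *dev_name;
        int status;

        if (!urb)
                return 1;
        urb->dev_name = "usb1-port2";
        urb->status = -71;

        /* Cache what we still need *before* releasing the object ... */
        dev_name = urb->dev_name;
        status = urb->status;

        free(urb);

        /* ... so later logging never touches freed memory. */
        if (status)
                printf("%s: nonzero write bulk status received: %d\n", dev_name, status);

        return 0;
}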
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 4f18f189f309..9ba5584061c8 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121
+#define QUECTEL_PRODUCT_RG650V 0x0122
#define QUECTEL_PRODUCT_EM061K_LTA 0x0123
#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125
@@ -1273,6 +1274,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG916Q, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -2320,6 +2323,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0112, 0xff, 0xff, 0x30) }, /* Fibocom FG132 Diag */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0112, 0xff, 0xff, 0x40) }, /* Fibocom FG132 AT */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0112, 0xff, 0, 0) }, /* Fibocom FG132 NMEA */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */
.driver_info = RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c7de9585feb2..13c664317a05 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
{DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
{DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */
+ {DEVICE_SWI(0x1199, 0x90e4)}, /* Sierra Wireless EM86xx QDL*/
+ {DEVICE_SWI(0x1199, 0x90e5)}, /* Sierra Wireless EM86xx */
{DEVICE_SWI(0x1199, 0xc080)}, /* Sierra Wireless EM7590 QDL */
{DEVICE_SWI(0x1199, 0xc081)}, /* Sierra Wireless EM7590 */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
index 5b7f52b74a40..726423684bae 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
@@ -227,6 +227,10 @@ qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pd
spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
+ hdr_len = sizeof(msg->header);
+ txbuf_len = pd_header_cnt_le(msg->header) * 4;
+ txsize_len = hdr_len + txbuf_len - 1;
+
ret = regmap_read(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG,
&val);
@@ -244,10 +248,6 @@ qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pd
if (ret)
goto done;
- hdr_len = sizeof(msg->header);
- txbuf_len = pd_header_cnt_le(msg->header) * 4;
- txsize_len = hdr_len + txbuf_len - 1;
-
/* Write message header sizeof(u16) to USB_PDPHY_TX_BUFFER_HDR_REG */
ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_HDR_REG,
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index ba58d11907bc..bccfc03b5986 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -482,6 +482,8 @@ static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
port = uc->orig;
new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
+ if (new_cam >= ARRAY_SIZE(uc->updated))
+ return;
new_port = &uc->updated[new_cam];
cam = new_port->linked_idx;
enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);