Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ec.c | 28
-rw-r--r--  drivers/acpi/pptt.c | 4
-rw-r--r--  drivers/android/binder.c | 2
-rw-r--r--  drivers/ata/libata-scsi.c | 25
-rw-r--r--  drivers/base/base.h | 17
-rw-r--r--  drivers/base/bus.c | 2
-rw-r--r--  drivers/base/core.c | 38
-rw-r--r--  drivers/base/dd.c | 7
-rw-r--r--  drivers/block/ublk_drv.c | 214
-rw-r--r--  drivers/char/misc.c | 2
-rw-r--r--  drivers/char/virtio_console.c | 7
-rw-r--r--  drivers/clk/clk.c | 4
-rw-r--r--  drivers/clk/renesas/rzv2h-cpg.c | 12
-rw-r--r--  drivers/comedi/drivers/jr3_pci.c | 2
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 20
-rw-r--r--  drivers/cpufreq/apple-soc-cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 13
-rw-r--r--  drivers/cpufreq/sun50i-cpufreq-nvmem.c | 18
-rw-r--r--  drivers/crypto/atmel-sha204a.c | 6
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 1
-rw-r--r--  drivers/cxl/core/regs.c | 4
-rw-r--r--  drivers/dma-buf/udmabuf.c | 2
-rw-r--r--  drivers/dma/bcm2835-dma.c | 2
-rw-r--r--  drivers/dma/dmatest.c | 6
-rw-r--r--  drivers/firmware/stratix10-svc.c | 14
-rw-r--r--  drivers/gpio/gpiolib-of.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v14_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.h | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_hdmi.c | 29
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.c | 195
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.h | 13
-rw-r--r--  drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c | 4
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_gt_regs.h | 4
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_rtp_test.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_debugfs.c | 11
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_types.h | 10
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine.c | 18
-rw-r--r--  drivers/gpu/drm/xe/xe_reg_whitelist.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_rtp.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_rtp.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_tuning.c | 71
-rw-r--r--  drivers/gpu/drm/xe/xe_tuning.h | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_wa.c | 22
-rw-r--r--  drivers/gpu/drm/xe/xe_wa_oob.rules | 2
-rw-r--r--  drivers/i3c/master/svc-i3c-master.c | 17
-rw-r--r--  drivers/iio/adc/ad4695.c | 34
-rw-r--r--  drivers/iio/adc/ad7768-1.c | 5
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c | 1
-rw-r--r--  drivers/iommu/amd/iommu.c | 2
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c | 2
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 4
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 1
-rw-r--r--  drivers/iommu/iommu.c | 3
-rw-r--r--  drivers/irqchip/irq-gic-v2m.c | 2
-rw-r--r--  drivers/irqchip/irq-renesas-rzv2h.c | 91
-rw-r--r--  drivers/mailbox/pcc.c | 15
-rw-r--r--  drivers/mcb/mcb-parse.c | 2
-rw-r--r--  drivers/md/raid1.c | 26
-rw-r--r--  drivers/media/i2c/Kconfig | 1
-rw-r--r--  drivers/media/i2c/imx214.c | 978
-rw-r--r--  drivers/media/i2c/ov08x40.c | 56
-rw-r--r--  drivers/misc/lkdtm/perms.c | 14
-rw-r--r--  drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c | 8
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 1
-rw-r--r--  drivers/misc/mei/pci-me.c | 1
-rw-r--r--  drivers/misc/mei/vsc-tp.c | 26
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 2
-rw-r--r--  drivers/net/dsa/mt7530.c | 6
-rw-r--r--  drivers/net/ethernet/amd/pds_core/adminq.c | 36
-rw-r--r--  drivers/net/ethernet/amd/pds_core/auxbus.c | 3
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.c | 9
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.h | 4
-rw-r--r--  drivers/net/ethernet/amd/pds_core/devlink.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 45
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 24
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c | 26
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 18
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 62
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 2
-rw-r--r--  drivers/net/phy/dp83822.c | 40
-rw-r--r--  drivers/net/phy/microchip.c | 46
-rw-r--r--  drivers/net/phy/phy_device.c | 53
-rw-r--r--  drivers/net/phy/phy_led_triggers.c | 23
-rw-r--r--  drivers/net/phy/phylink.c | 164
-rw-r--r--  drivers/net/virtio_net.c | 125
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_xdp.c | 2
-rw-r--r--  drivers/net/xen-netfront.c | 17
-rw-r--r--  drivers/ntb/hw/amd/ntb_hw_amd.c | 1
-rw-r--r--  drivers/ntb/hw/idt/ntb_hw_idt.c | 18
-rw-r--r--  drivers/nvme/host/core.c | 9
-rw-r--r--  drivers/nvme/host/multipath.c | 2
-rw-r--r--  drivers/nvme/target/core.c | 3
-rw-r--r--  drivers/nvme/target/fc.c | 25
-rw-r--r--  drivers/nvme/target/pci-epf.c | 14
-rw-r--r--  drivers/of/resolver.c | 37
-rw-r--r--  drivers/pci/msi/msi.c | 38
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-usbdp.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-mcp23s08.c | 23
-rw-r--r--  drivers/pinctrl/renesas/pinctrl-rza2.c | 3
-rw-r--r--  drivers/platform/x86/x86-android-tablets/dmi.c | 14
-rw-r--r--  drivers/platform/x86/x86-android-tablets/other.c | 124
-rw-r--r--  drivers/platform/x86/x86-android-tablets/x86-android-tablets.h | 3
-rw-r--r--  drivers/pwm/core.c | 13
-rw-r--r--  drivers/pwm/pwm-axi-pwmgen.c | 10
-rw-r--r--  drivers/regulator/rk808-regulator.c | 4
-rw-r--r--  drivers/rtc/rtc-pcf85063.c | 19
-rw-r--r--  drivers/s390/char/sclp_con.c | 17
-rw-r--r--  drivers/s390/char/sclp_tty.c | 12
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 20
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 1
-rw-r--r--  drivers/scsi/scsi.c | 36
-rw-r--r--  drivers/scsi/scsi_lib.c | 6
-rw-r--r--  drivers/soc/qcom/ice.c | 48
-rw-r--r--  drivers/spi/spi-imx.c | 5
-rw-r--r--  drivers/spi/spi-tegra210-quad.c | 6
-rw-r--r--  drivers/staging/gpib/agilent_82350b/agilent_82350b.c | 10
-rw-r--r--  drivers/thunderbolt/tb.c | 16
-rw-r--r--  drivers/tty/serial/msm_serial.c | 6
-rw-r--r--  drivers/tty/serial/sifive.c | 6
-rw-r--r--  drivers/tty/vt/selection.c | 5
-rw-r--r--  drivers/ufs/core/ufs-mcq.c | 12
-rw-r--r--  drivers/ufs/core/ufshcd.c | 2
-rw-r--r--  drivers/ufs/host/ufs-exynos.c | 44
-rw-r--r--  drivers/ufs/host/ufs-exynos.h | 1
-rw-r--r--  drivers/ufs/host/ufs-qcom.c | 2
-rw-r--r--  drivers/usb/cdns3/cdns3-gadget.c | 2
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 44
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 21
-rw-r--r--  drivers/usb/core/quirks.c | 9
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 10
-rw-r--r--  drivers/usb/dwc3/dwc3-xilinx.c | 4
-rw-r--r--  drivers/usb/dwc3/gadget.c | 28
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/dev.c | 3
-rw-r--r--  drivers/usb/host/max3421-hcd.c | 7
-rw-r--r--  drivers/usb/host/ohci-pci.c | 23
-rw-r--r--  drivers/usb/host/xhci-hub.c | 30
-rw-r--r--  drivers/usb/host/xhci-mvebu.c | 10
-rw-r--r--  drivers/usb/host/xhci-mvebu.h | 6
-rw-r--r--  drivers/usb/host/xhci-plat.c | 2
-rw-r--r--  drivers/usb/host/xhci-ring.c | 75
-rw-r--r--  drivers/usb/host/xhci.c | 4
-rw-r--r--  drivers/usb/host/xhci.h | 4
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 5
-rw-r--r--  drivers/usb/serial/option.c | 3
-rw-r--r--  drivers/usb/serial/usb-serial-simple.c | 7
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  drivers/usb/typec/class.c | 24
-rw-r--r--  drivers/usb/typec/class.h | 1
-rw-r--r--  drivers/usb/typec/ucsi/cros_ec_ucsi.c | 5
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.c | 19
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.h | 6
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_acpi.c | 5
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_ccg.c | 67
-rw-r--r--  drivers/vhost/scsi.c | 80
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 4
-rw-r--r--  drivers/xen/Kconfig | 2
182 files changed, 2635 insertions, 1512 deletions
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 8db09d81918f..3c5f34892734 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2301,6 +2301,34 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
},
},
+ /*
+ * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3929
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+ }
+ },
{ },
};
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index a35dd0e41c27..f73ce6e13065 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -229,7 +229,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
while ((unsigned long)entry + proc_sz < table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
@@ -270,7 +270,7 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
table_end = (unsigned long)table_hdr + table_hdr->length;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
/* find the processor structure associated with this cpuid */
while ((unsigned long)entry + proc_sz < table_end) {
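[Illustrative aside, not part of the patch.] The two pptt.c hunks swap sizeof(struct acpi_pptt_processor *) for sizeof(struct acpi_pptt_processor): the table-walk bound was being measured against the size of a pointer rather than the size of the structure being read. A standalone user-space sketch of that bug class, using a made-up entry struct instead of the ACPI types:

#include <stdio.h>

/* Hypothetical fixed-size table entry, standing in for struct acpi_pptt_processor. */
struct entry {
	unsigned char type;
	unsigned char length;
	unsigned int flags;
	unsigned int parent;
	unsigned int acpi_id;
};

int main(void)
{
	/*
	 * On a 64-bit build sizeof(struct entry *) is 8 while sizeof(struct
	 * entry) is 16, so a bounds check of the form
	 * "entry + proc_sz < table_end" that uses the pointer size can accept
	 * a truncated record at the end of the table.
	 */
	printf("sizeof(struct entry *) = %zu\n", sizeof(struct entry *));
	printf("sizeof(struct entry)   = %zu\n", sizeof(struct entry));
	return 0;
}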
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 76052006bd87..5fc2c8ee61b1 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -6373,7 +6373,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd offset %lx\n",
buffer->data_size, buffer->offsets_size,
- proc->alloc.vm_start - buffer->user_data);
+ buffer->user_data - proc->alloc.vm_start);
}
static void print_binder_work_ilocked(struct seq_file *m,
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2796c0da8257..c0eb8c67a9ff 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2453,8 +2453,8 @@ static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
*/
put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);
- if (dev->flags & ATA_DFLAG_CDL)
- buf[4] = 0x02; /* Support T2A and T2B pages */
+ if (dev->flags & ATA_DFLAG_CDL_ENABLED)
+ buf[4] = 0x02; /* T2A and T2B pages enabled */
else
buf[4] = 0;
@@ -3886,12 +3886,11 @@ static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
}
/*
- * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
+ * Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode
* page) into a SET FEATURES command.
*/
-static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
- const u8 *buf, int len,
- u16 *fp)
+static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
+ const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
struct ata_taskfile *tf = &qc->tf;
@@ -3909,17 +3908,27 @@ static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
/* Check cdl_ctrl */
switch (buf[0] & 0x03) {
case 0:
- /* Disable CDL */
+ /* Disable CDL if it is enabled */
+ if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
+ return 0;
+ ata_dev_dbg(dev, "Disabling CDL\n");
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
- /* Enable CDL T2A/T2B: NCQ priority must be disabled */
+ /*
+ * Enable CDL if not already enabled. Since this is mutually
+ * exclusive with NCQ priority, allow this only if NCQ priority
+ * is disabled.
+ */
+ if (dev->flags & ATA_DFLAG_CDL_ENABLED)
+ return 0;
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");
return -EINVAL;
}
+ ata_dev_dbg(dev, "Enabling CDL\n");
cdl_action = 1;
dev->flags |= ATA_DFLAG_CDL_ENABLED;
break;
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 0042e4774b0c..123031a757d9 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -73,6 +73,7 @@ static inline void subsys_put(struct subsys_private *sp)
kset_put(&sp->subsys);
}
+struct subsys_private *bus_to_subsys(const struct bus_type *bus);
struct subsys_private *class_to_subsys(const struct class *class);
struct driver_private {
@@ -180,6 +181,22 @@ int driver_add_groups(const struct device_driver *drv, const struct attribute_gr
void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
+static inline void device_set_driver(struct device *dev, const struct device_driver *drv)
+{
+ /*
+ * Majority (all?) read accesses to dev->driver happens either
+ * while holding device lock or in bus/driver code that is only
+ * invoked when the device is bound to a driver and there is no
+ * concern of the pointer being changed while it is being read.
+ * However when reading device's uevent file we read driver pointer
+ * without taking device lock (so we do not block there for
+ * arbitrary amount of time). We use WRITE_ONCE() here to prevent
+ * tearing so that READ_ONCE() can safely be used in uevent code.
+ */
+ // FIXME - this cast should not be needed "soon"
+ WRITE_ONCE(dev->driver, (struct device_driver *)drv);
+}
+
int devres_release_all(struct device *dev);
void device_block_probing(void);
void device_unblock_probing(void);
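[Illustrative aside, not part of the patch.] The device_set_driver() comment above is the crux of the series touching base.h, bus.c, core.c and dd.c: writes to dev->driver go through WRITE_ONCE() so the uevent path may use READ_ONCE() without taking the device lock. A rough user-space analogue, with C11 relaxed atomics standing in for WRITE_ONCE()/READ_ONCE(); the kernel side additionally pins the driver's lifetime with the bus klist lock, which this sketch omits:

#include <stdatomic.h>
#include <stdio.h>

struct driver { const char *name; };

/*
 * One thread publishes or clears the pointer, another reads it without the
 * writer's lock. The relaxed atomic store and load are single, non-torn
 * accesses, so the reader sees either the old or the new pointer, never a
 * mix of the two halves.
 */
static _Atomic(struct driver *) bound_driver;

static void set_driver(struct driver *drv)		/* ~ device_set_driver() */
{
	atomic_store_explicit(&bound_driver, drv, memory_order_relaxed);
}

static void report_driver(void)				/* ~ dev_driver_uevent() */
{
	struct driver *drv = atomic_load_explicit(&bound_driver, memory_order_relaxed);

	if (drv)
		printf("DRIVER=%s\n", drv->name);
}

int main(void)
{
	static struct driver e1000e = { .name = "e1000e" };

	report_driver();	/* nothing printed, no driver bound */
	set_driver(&e1000e);
	report_driver();	/* DRIVER=e1000e */
	set_driver(NULL);
	report_driver();	/* nothing printed again */
	return 0;
}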
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 6b9e65a42cd2..c8c7e0804024 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -57,7 +57,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
* NULL. A call to subsys_put() must be done when finished with the pointer in
* order for it to be properly freed.
*/
-static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
+struct subsys_private *bus_to_subsys(const struct bus_type *bus)
{
struct subsys_private *sp = NULL;
struct kobject *kobj;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2fde698430df..93019bb6998e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2624,6 +2624,35 @@ static const char *dev_uevent_name(const struct kobject *kobj)
return NULL;
}
+/*
+ * Try filling "DRIVER=<name>" uevent variable for a device. Because this
+ * function may race with binding and unbinding the device from a driver,
+ * we need to be careful. Binding is generally safe, at worst we miss the
+ * fact that the device is already bound to a driver (but the driver
+ * information that is delivered through uevents is best-effort, it may
+ * become obsolete as soon as it is generated anyways). Unbinding is more
+ * risky as driver pointer is transitioning to NULL, so READ_ONCE() should
+ * be used to make sure we are dealing with the same pointer, and to
+ * ensure that driver structure is not going to disappear from under us
+ * we take bus' drivers klist lock. The assumption that only registered
+ * driver can be bound to a device, and to unregister a driver bus code
+ * will take the same lock.
+ */
+static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+ if (sp) {
+ scoped_guard(spinlock, &sp->klist_drivers.k_lock) {
+ struct device_driver *drv = READ_ONCE(dev->driver);
+ if (drv)
+ add_uevent_var(env, "DRIVER=%s", drv->name);
+ }
+
+ subsys_put(sp);
+ }
+}
+
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
const struct device *dev = kobj_to_dev(kobj);
@@ -2655,8 +2684,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
if (dev->type && dev->type->name)
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
- if (dev->driver)
- add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+ /* Add "DRIVER=%s" variable if the device is bound to a driver */
+ dev_driver_uevent(dev, env);
/* Add common DT information about the device */
of_device_uevent(dev, env);
@@ -2726,11 +2755,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
- /* Synchronize with really_probe() */
- device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
- device_unlock(dev);
if (retval)
goto out;
@@ -3700,7 +3726,7 @@ done:
device_pm_remove(dev);
dpm_sysfs_remove(dev);
DPMError:
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
bus_remove_device(dev);
BusError:
device_remove_attrs(dev);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index f0e4b4aba885..b526e0e0f52d 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -550,7 +550,7 @@ static void device_unbind_cleanup(struct device *dev)
arch_teardown_dma_ops(dev);
kfree(dev->dma_range_map);
dev->dma_range_map = NULL;
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
@@ -629,8 +629,7 @@ static int really_probe(struct device *dev, const struct device_driver *drv)
}
re_probe:
- // FIXME - this cast should not be needed "soon"
- dev->driver = (struct device_driver *)drv;
+ device_set_driver(dev, drv);
/* If using pinctrl, bind pins now before probing */
ret = pinctrl_bind_pins(dev);
@@ -1014,7 +1013,7 @@ static int __device_attach(struct device *dev, bool allow_async)
if (ret == 0)
ret = 1;
else {
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
ret = 0;
}
} else {
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 971b793dedd0..ab06a7a064fb 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -73,12 +73,24 @@
UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED)
struct ublk_rq_data {
- struct llist_node node;
-
struct kref ref;
};
struct ublk_uring_cmd_pdu {
+ /*
+ * Store requests in same batch temporarily for queuing them to
+ * daemon context.
+ *
+ * It should have been stored to request payload, but we do want
+ * to avoid extra pre-allocation, and uring_cmd payload is always
+ * free for us
+ */
+ struct request *req_list;
+
+ /*
+ * The following two are valid in this cmd whole lifetime, and
+ * setup in ublk uring_cmd handler
+ */
struct ublk_queue *ubq;
u16 tag;
};
@@ -141,8 +153,6 @@ struct ublk_queue {
struct task_struct *ubq_daemon;
char *io_cmd_buf;
- struct llist_head io_cmds;
-
unsigned long io_addr; /* mapped vm address */
unsigned int max_io_sz;
bool force_abort;
@@ -1114,7 +1124,7 @@ static void ublk_fail_rq_fn(struct kref *ref)
}
/*
- * Since __ublk_rq_task_work always fails requests immediately during
+ * Since ublk_rq_task_work_cb always fails requests immediately during
* exiting, __ublk_fail_req() is only called from abort context during
* exiting. So lock is unnecessary.
*
@@ -1163,10 +1173,10 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
blk_mq_end_request(rq, BLK_STS_IOERR);
}
-static inline void __ublk_rq_task_work(struct request *req,
- unsigned issue_flags)
+static void ublk_dispatch_req(struct ublk_queue *ubq,
+ struct request *req,
+ unsigned int issue_flags)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
unsigned int mapped_bytes;
@@ -1242,36 +1252,52 @@ static inline void __ublk_rq_task_work(struct request *req,
ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
- unsigned issue_flags)
-{
- struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
- struct ublk_rq_data *data, *tmp;
-
- io_cmds = llist_reverse_order(io_cmds);
- llist_for_each_entry_safe(data, tmp, io_cmds, node)
- __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
-}
-
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
+ int tag = pdu->tag;
+ struct request *req = blk_mq_tag_to_rq(
+ ubq->dev->tag_set.tags[ubq->q_id], tag);
- ublk_forward_io_cmds(ubq, issue_flags);
+ ublk_dispatch_req(ubq, req, issue_flags);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+ struct ublk_io *io = &ubq->ios[rq->tag];
+
+ io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
+}
- if (llist_add(&data->node, &ubq->io_cmds)) {
- struct ublk_io *io = &ubq->ios[rq->tag];
+static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ struct request *rq = pdu->req_list;
+ struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+ struct request *next;
- io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
+ while (rq) {
+ next = rq->rq_next;
+ rq->rq_next = NULL;
+ ublk_dispatch_req(ubq, rq, issue_flags);
+ rq = next;
}
}
+static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l)
+{
+ struct request *rq = rq_list_peek(l);
+ struct ublk_io *io = &ubq->ios[rq->tag];
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd);
+
+ pdu->req_list = rq;
+ rq_list_init(l);
+ io_uring_cmd_complete_in_task(io->cmd, ublk_cmd_list_tw_cb);
+}
+
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
{
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
@@ -1310,21 +1336,13 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
return BLK_EH_RESET_TIMER;
}
-static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
+ bool check_cancel)
{
- struct ublk_queue *ubq = hctx->driver_data;
- struct request *rq = bd->rq;
blk_status_t res;
- if (unlikely(ubq->fail_io)) {
+ if (unlikely(ubq->fail_io))
return BLK_STS_TARGET;
- }
-
- /* fill iod to slot in io cmd buffer */
- res = ublk_setup_iod(ubq, rq);
- if (unlikely(res != BLK_STS_OK))
- return BLK_STS_IOERR;
/* With recovery feature enabled, force_abort is set in
* ublk_stop_dev() before calling del_gendisk(). We have to
@@ -1338,17 +1356,68 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
return BLK_STS_IOERR;
+ if (check_cancel && unlikely(ubq->canceling))
+ return BLK_STS_IOERR;
+
+ /* fill iod to slot in io cmd buffer */
+ res = ublk_setup_iod(ubq, rq);
+ if (unlikely(res != BLK_STS_OK))
+ return BLK_STS_IOERR;
+
+ blk_mq_start_request(rq);
+ return BLK_STS_OK;
+}
+
+static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct ublk_queue *ubq = hctx->driver_data;
+ struct request *rq = bd->rq;
+ blk_status_t res;
+
+ res = ublk_prep_req(ubq, rq, false);
+ if (res != BLK_STS_OK)
+ return res;
+
+ /*
+ * ->canceling has to be handled after ->force_abort and ->fail_io
+ * is dealt with, otherwise this request may not be failed in case
+ * of recovery, and cause hang when deleting disk
+ */
if (unlikely(ubq->canceling)) {
__ublk_abort_rq(ubq, rq);
return BLK_STS_OK;
}
- blk_mq_start_request(bd->rq);
ublk_queue_cmd(ubq, rq);
-
return BLK_STS_OK;
}
+static void ublk_queue_rqs(struct rq_list *rqlist)
+{
+ struct rq_list requeue_list = { };
+ struct rq_list submit_list = { };
+ struct ublk_queue *ubq = NULL;
+ struct request *req;
+
+ while ((req = rq_list_pop(rqlist))) {
+ struct ublk_queue *this_q = req->mq_hctx->driver_data;
+
+ if (ubq && ubq != this_q && !rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(ubq, &submit_list);
+ ubq = this_q;
+
+ if (ublk_prep_req(ubq, req, true) == BLK_STS_OK)
+ rq_list_add_tail(&submit_list, req);
+ else
+ rq_list_add_tail(&requeue_list, req);
+ }
+
+ if (ubq && !rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(ubq, &submit_list);
+ *rqlist = requeue_list;
+}
+
static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
unsigned int hctx_idx)
{
@@ -1361,6 +1430,7 @@ static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
static const struct blk_mq_ops ublk_mq_ops = {
.queue_rq = ublk_queue_rq,
+ .queue_rqs = ublk_queue_rqs,
.init_hctx = ublk_init_hctx,
.timeout = ublk_timeout,
};
@@ -1462,7 +1532,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
struct request *rq;
/*
- * Either we fail the request or ublk_rq_task_work_fn
+ * Either we fail the request or ublk_rq_task_work_cb
* will do it
*/
rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
@@ -1629,31 +1699,35 @@ static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
static void __ublk_quiesce_dev(struct ublk_device *ub)
{
+ int i;
+
pr_devel("%s: quiesce ub: dev_id %d state %s\n",
__func__, ub->dev_info.dev_id,
ub->dev_info.state == UBLK_S_DEV_LIVE ?
"LIVE" : "QUIESCED");
blk_mq_quiesce_queue(ub->ub_disk->queue);
+ /* mark every queue as canceling */
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->canceling = true;
ublk_wait_tagset_rqs_idle(ub);
ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
}
-static void ublk_unquiesce_dev(struct ublk_device *ub)
+static void ublk_force_abort_dev(struct ublk_device *ub)
{
int i;
- pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
+ pr_devel("%s: force abort ub: dev_id %d state %s\n",
__func__, ub->dev_info.dev_id,
ub->dev_info.state == UBLK_S_DEV_LIVE ?
"LIVE" : "QUIESCED");
- /* quiesce_work has run. We let requeued rqs be aborted
- * before running fallback_wq. "force_abort" must be seen
- * after request queue is unqiuesced. Then del_gendisk()
- * can move on.
- */
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE)
+ ublk_wait_tagset_rqs_idle(ub);
+
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_get_queue(ub, i)->force_abort = true;
-
blk_mq_unquiesce_queue(ub->ub_disk->queue);
/* We may have requeued some rqs in ublk_quiesce_queue() */
blk_mq_kick_requeue_list(ub->ub_disk->queue);
@@ -1681,11 +1755,8 @@ static void ublk_stop_dev(struct ublk_device *ub)
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_DEAD)
goto unlock;
- if (ublk_nosrv_dev_should_queue_io(ub)) {
- if (ub->dev_info.state == UBLK_S_DEV_LIVE)
- __ublk_quiesce_dev(ub);
- ublk_unquiesce_dev(ub);
- }
+ if (ublk_nosrv_dev_should_queue_io(ub))
+ ublk_force_abort_dev(ub);
del_gendisk(ub->ub_disk);
disk = ublk_detach_disk(ub);
put_disk(disk);
@@ -1743,15 +1814,6 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
mutex_unlock(&ub->mutex);
}
-static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
- int tag)
-{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
- struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
-
- ublk_queue_cmd(ubq, req);
-}
-
static inline int ublk_check_cmd_op(u32 cmd_op)
{
u32 ioc_type = _IOC_TYPE(cmd_op);
@@ -1898,8 +1960,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
- break;
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
+ ublk_dispatch_req(ubq, req, issue_flags);
+ return -EIOCBQUEUED;
default:
goto out;
}
@@ -2790,7 +2853,6 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
ubq->ubq_daemon = NULL;
ubq->timeout = false;
- ubq->canceling = false;
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
@@ -2879,20 +2941,18 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
__func__, ublksrv_pid, header->dev_id);
- if (ublk_nosrv_dev_should_queue_io(ub)) {
- ub->dev_info.state = UBLK_S_DEV_LIVE;
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
- pr_devel("%s: queue unquiesced, dev id %d.\n",
- __func__, header->dev_id);
- blk_mq_kick_requeue_list(ub->ub_disk->queue);
- } else {
- blk_mq_quiesce_queue(ub->ub_disk->queue);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
- ublk_get_queue(ub, i)->fail_io = false;
- }
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ ubq->canceling = false;
+ ubq->fail_io = false;
}
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ pr_devel("%s: queue unquiesced, dev id %d.\n",
+ __func__, header->dev_id);
+ blk_mq_kick_requeue_list(ub->ub_disk->queue);
ret = 0;
out_unlock:
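[Illustrative aside, not part of the patch.] The new ublk_queue_rqs()/ublk_queue_cmd_list() pair replaces the old per-request llist with a plain singly linked batch stored in the uring_cmd pdu: consecutive requests for the same queue are collected and handed to the daemon in one task-work callback. A self-contained sketch of that grouping loop with toy request/queue types; dispatch_batch() just prints where ublk would call io_uring_cmd_complete_in_task():

#include <stdio.h>
#include <stddef.h>

/* Toy stand-ins for struct request and its hardware queue id. */
struct req { int tag; int qid; struct req *next; };

static void dispatch_batch(int qid, struct req *head)
{
	/* In ublk this is one io_uring_cmd_complete_in_task() call that runs
	 * ublk_cmd_list_tw_cb() in the daemon's context. */
	printf("queue %d:", qid);
	for (; head; head = head->next)
		printf(" tag %d", head->tag);
	printf("\n");
}

/* Same shape as ublk_queue_rqs(): pop requests, extend the current batch
 * while the queue stays the same, flush when it changes or the list ends. */
static void queue_rqs(struct req *list)
{
	struct req *batch = NULL, **batch_tail = &batch;
	int cur_qid = -1;

	while (list) {
		struct req *r = list;

		list = r->next;
		r->next = NULL;

		if (batch && r->qid != cur_qid) {
			dispatch_batch(cur_qid, batch);
			batch = NULL;
			batch_tail = &batch;
		}
		cur_qid = r->qid;
		*batch_tail = r;
		batch_tail = &r->next;
	}
	if (batch)
		dispatch_batch(cur_qid, batch);
}

int main(void)
{
	struct req r[5] = {
		{ .tag = 0, .qid = 0 }, { .tag = 1, .qid = 0 },
		{ .tag = 2, .qid = 1 }, { .tag = 3, .qid = 1 },
		{ .tag = 4, .qid = 0 },
	};
	int i;

	for (i = 0; i < 4; i++)
		r[i].next = &r[i + 1];
	queue_rqs(&r[0]);
	return 0;
}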
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index f7dd455dd0dd..dda466f9181a 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -315,7 +315,7 @@ static int __init misc_init(void)
goto fail_remove;
err = -EIO;
- if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
+ if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops))
goto fail_printk;
return 0;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 18f92dd44d45..fc698e2b1da1 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1579,8 +1579,8 @@ static void handle_control_message(struct virtio_device *vdev,
break;
case VIRTIO_CONSOLE_RESIZE: {
struct {
- __u16 rows;
- __u16 cols;
+ __virtio16 rows;
+ __virtio16 cols;
} size;
if (!is_console_port(port))
@@ -1588,7 +1588,8 @@ static void handle_control_message(struct virtio_device *vdev,
memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
sizeof(size));
- set_console_size(port, size.rows, size.cols);
+ set_console_size(port, virtio16_to_cpu(vdev, size.rows),
+ virtio16_to_cpu(vdev, size.cols));
port->cons.hvc->irq_requested = 1;
resize_console(port);
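[Illustrative aside, not part of the patch.] The virtio_console hunk stops interpreting the resize message's rows/cols fields in host byte order and converts them with virtio16_to_cpu() instead. A standalone sketch of the underlying problem for the common little-endian wire format (modern virtio is always little-endian; legacy devices can be guest-endian, which virtio16_to_cpu() also handles):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Assemble a 16-bit little-endian value from a byte buffer: the portable
 * equivalent of le16_to_cpu()/virtio16_to_cpu() for a little-endian device.
 * The result is the same whether the host CPU is little- or big-endian.
 */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* A resize control message payload: rows = 24, cols = 80, both LE. */
	const uint8_t payload[4] = { 24, 0, 80, 0 };
	uint16_t rows, cols;

	/* Copying into native u16s only gives 24/80 on little-endian hosts. */
	memcpy(&rows, &payload[0], sizeof(rows));
	memcpy(&cols, &payload[2], sizeof(cols));
	printf("native copy: rows=%u cols=%u (host-endian dependent)\n", rows, cols);

	/* Explicit conversion is correct everywhere. */
	rows = get_le16(&payload[0]);
	cols = get_le16(&payload[2]);
	printf("converted:   rows=%u cols=%u\n", rows, cols);
	return 0;
}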
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index cf7720b9172f..50faafbf5dda 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -5258,6 +5258,10 @@ of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
if (!clkspec)
return ERR_PTR(-EINVAL);
+ /* Check if node in clkspec is in disabled/fail state */
+ if (!of_device_is_available(clkspec->np))
+ return ERR_PTR(-ENOENT);
+
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np) {
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index a4c1e92e1fd7..4e81a0bae022 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -447,8 +447,7 @@ static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
{
unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
- unsigned int index = (mstop_index - 1) * 16;
- atomic_t *mstop = &priv->mstop_count[index];
+ atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
unsigned long flags;
unsigned int i;
u32 val = 0;
@@ -469,8 +468,7 @@ static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
{
unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
- unsigned int index = (mstop_index - 1) * 16;
- atomic_t *mstop = &priv->mstop_count[index];
+ atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
unsigned long flags;
unsigned int i;
u32 val = 0;
@@ -630,8 +628,7 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
} else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) {
unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data);
u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data);
- unsigned int index = (mstop_index - 1) * 16;
- atomic_t *mstop = &priv->mstop_count[index];
+ atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
unsigned long flags;
unsigned int i;
u32 val = 0;
@@ -926,6 +923,9 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
if (!priv->mstop_count)
return -ENOMEM;
+ /* Adjust for CPG_BUS_m_MSTOP starting from m = 1 */
+ priv->mstop_count -= 16;
+
priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
info->num_resets, GFP_KERNEL);
if (!priv->resets)
diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c
index 951c23fa0369..75dce1ff2419 100644
--- a/drivers/comedi/drivers/jr3_pci.c
+++ b/drivers/comedi/drivers/jr3_pci.c
@@ -758,7 +758,7 @@ static void jr3_pci_detach(struct comedi_device *dev)
struct jr3_pci_dev_private *devpriv = dev->private;
if (devpriv)
- del_timer_sync(&devpriv->timer);
+ timer_shutdown_sync(&devpriv->timer);
comedi_pci_detach(dev);
}
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 4f9cb943d945..0d46402e3094 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -76,7 +76,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ
config ARM_BRCMSTB_AVS_CPUFREQ
tristate "Broadcom STB AVS CPUfreq driver"
depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST
- default y
+ default y if ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ
help
Some Broadcom STB SoCs use a co-processor running proprietary firmware
("AVS") to handle voltage and frequency scaling. This driver provides
@@ -88,7 +88,7 @@ config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK || COMPILE_TEST
depends on CPUFREQ_DT && REGULATOR && PL320_MBOX
- default m
+ default m if ARCH_HIGHBANK
help
This adds the CPUFreq driver for Calxeda Highbank SoC
based boards.
@@ -133,7 +133,7 @@ config ARM_MEDIATEK_CPUFREQ
config ARM_MEDIATEK_CPUFREQ_HW
tristate "MediaTek CPUFreq HW driver"
depends on ARCH_MEDIATEK || COMPILE_TEST
- default m
+ default m if ARCH_MEDIATEK
help
Support for the CPUFreq HW driver.
Some MediaTek chipsets have a HW engine to offload the steps
@@ -181,7 +181,7 @@ config ARM_RASPBERRYPI_CPUFREQ
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410 || COMPILE_TEST
- default y
+ default CPU_S3C6410
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -190,7 +190,7 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
depends on CPU_S5PV210 || COMPILE_TEST
- default y
+ default CPU_S5PV210
help
This adds the CPUFreq driver for Samsung S5PV210 and
S5PC110 SoCs.
@@ -214,7 +214,7 @@ config ARM_SCMI_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR || COMPILE_TEST
- default y
+ default PLAT_SPEAR
help
This adds the CPUFreq driver support for SPEAr SOCs.
@@ -233,7 +233,7 @@ config ARM_TEGRA20_CPUFREQ
tristate "Tegra20/30 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
- default y
+ default ARCH_TEGRA
help
This adds the CPUFreq driver support for Tegra20/30 SOCs.
@@ -241,7 +241,7 @@ config ARM_TEGRA124_CPUFREQ
bool "Tegra124 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
- default y
+ default ARCH_TEGRA
help
This adds the CPUFreq driver support for Tegra124 SOCs.
@@ -256,14 +256,14 @@ config ARM_TEGRA194_CPUFREQ
tristate "Tegra194 CPUFreq support"
depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
depends on TEGRA_BPMP
- default y
+ default ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
help
This adds CPU frequency driver support for Tegra194 SOCs.
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
- default y
+ default ARCH_OMAP2PLUS || ARCH_K3
help
This driver enables valid OPPs on the running platform based on
values contained within the SoC in use. Enable this in order to
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 269b18c62d04..82007f6a24d2 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -134,11 +134,17 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct apple_cpu_priv *priv = policy->driver_data;
+ struct cpufreq_policy *policy;
+ struct apple_cpu_priv *priv;
struct cpufreq_frequency_table *p;
unsigned int pstate;
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+
if (priv->info->cur_pstate_mask) {
u32 reg = readl_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 8f512448382f..ba7c16c0e475 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -749,7 +749,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
int ret;
if (!policy)
- return -ENODEV;
+ return 0;
cpu_data = policy->driver_data;
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 914bf2c940a0..9c6eb1238f1b 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -37,11 +37,17 @@ static struct cpufreq_driver scmi_cpufreq_driver;
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct scmi_data *priv = policy->driver_data;
+ struct cpufreq_policy *policy;
+ struct scmi_data *priv;
unsigned long rate;
int ret;
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+
ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
if (ret)
return 0;
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 1f97b949763f..9118856e1736 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops;
static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct scpi_data *priv = policy->driver_data;
- unsigned long rate = clk_get_rate(priv->clk);
+ struct cpufreq_policy *policy;
+ struct scpi_data *priv;
+ unsigned long rate;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+ rate = clk_get_rate(priv->clk);
return rate / 1000;
}
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 47d6840b3489..744312a44279 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -194,7 +194,9 @@ static int sun50i_cpufreq_get_efuse(void)
struct nvmem_cell *speedbin_nvmem;
const struct of_device_id *match;
struct device *cpu_dev;
- u32 *speedbin;
+ void *speedbin_ptr;
+ u32 speedbin = 0;
+ size_t len;
int ret;
cpu_dev = get_cpu_device(0);
@@ -217,14 +219,18 @@ static int sun50i_cpufreq_get_efuse(void)
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
- speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
+ speedbin_ptr = nvmem_cell_read(speedbin_nvmem, &len);
nvmem_cell_put(speedbin_nvmem);
- if (IS_ERR(speedbin))
- return PTR_ERR(speedbin);
+ if (IS_ERR(speedbin_ptr))
+ return PTR_ERR(speedbin_ptr);
- ret = opp_data->efuse_xlate(*speedbin);
+ if (len <= 4)
+ memcpy(&speedbin, speedbin_ptr, len);
+ speedbin = le32_to_cpu(speedbin);
- kfree(speedbin);
+ ret = opp_data->efuse_xlate(speedbin);
+
+ kfree(speedbin_ptr);
return ret;
};
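[Illustrative aside, not part of the patch.] The sun50i hunk stops dereferencing the raw nvmem cell as a u32: it asks nvmem_cell_read() for the length, copies at most four bytes into a zero-initialized value, and converts from little-endian. A standalone sketch of the same bounded read; read_speedbin() and the two-byte cell are made up for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t read_speedbin(const uint8_t *cell, size_t len)
{
	uint8_t raw[4] = { 0, 0, 0, 0 };

	/* A one- or two-byte cell must not make us read past its end. */
	memcpy(raw, cell, len < 4 ? len : 4);

	/* Assemble little-endian, the equivalent of le32_to_cpu() here. */
	return (uint32_t)raw[0] | ((uint32_t)raw[1] << 8) |
	       ((uint32_t)raw[2] << 16) | ((uint32_t)raw[3] << 24);
}

int main(void)
{
	const uint8_t two_byte_cell[2] = { 0x0b, 0x00 };

	printf("speedbin = %u\n",
	       (unsigned int)read_speedbin(two_byte_cell, sizeof(two_byte_cell)));
	return 0;
}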
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 75bebec2c757..0fcf4a39de27 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -163,6 +163,12 @@ static int atmel_sha204a_probe(struct i2c_client *client)
i2c_priv->hwrng.name = dev_name(&client->dev);
i2c_priv->hwrng.read = atmel_sha204a_rng_read;
+ /*
+ * According to review by Bill Cox [1], this HWRNG has very low entropy.
+ * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html
+ */
+ i2c_priv->hwrng.quality = 1;
+
ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 157f9a9ed636..2ebc878da160 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -532,6 +532,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
+ { PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
/* Last entry must be zero */
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 117c2e94c761..5ca7b0eed568 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -581,7 +581,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
resource_size_t rcrb = ri->base;
void __iomem *addr;
u32 bar0, bar1;
- u16 cmd;
u32 id;
if (which == CXL_RCRB_UPSTREAM)
@@ -603,7 +602,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
}
id = readl(addr + PCI_VENDOR_ID);
- cmd = readw(addr + PCI_COMMAND);
bar0 = readl(addr + PCI_BASE_ADDRESS_0);
bar1 = readl(addr + PCI_BASE_ADDRESS_1);
iounmap(addr);
@@ -618,8 +616,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
dev_err(dev, "Failed to access Downstream Port RCRB\n");
return CXL_RESOURCE_NONE;
}
- if (!(cmd & PCI_COMMAND_MEMORY))
- return CXL_RESOURCE_NONE;
/* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */
if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO))
return CXL_RESOURCE_NONE;
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index cc7398cc17d6..e74e36a8ecda 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -393,7 +393,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!ubuf)
return -ENOMEM;
- pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
+ pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
pgoff_t subpgcnt;
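[Illustrative aside, not part of the patch.] The one-line udmabuf change widens size_limit_mb to u64 before the multiply so the page-limit computation cannot wrap in 32-bit arithmetic. A standalone demonstration of the wrap; size_limit_mb and PAGE_SHIFT are stand-ins with illustrative values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* A module parameter like udmabuf's size_limit_mb is a plain int. */
	unsigned int size_limit_mb = 8192;	/* 8 GiB */

	/*
	 * Without a cast the multiplication is done in 32 bits and wraps to 0
	 * for any limit >= 4096 MB; widening one operand first keeps the full
	 * value, which is what the udmabuf change does.
	 */
	uint64_t wrong = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	uint64_t right = ((uint64_t)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;

	printf("32-bit math: %llu pages\n", (unsigned long long)wrong);
	printf("64-bit math: %llu pages\n", (unsigned long long)right);
	return 0;
}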
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 20b10c15c696..0117bb2e8591 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -893,7 +893,7 @@ static int bcm2835_dma_suspend_late(struct device *dev)
}
static const struct dev_pm_ops bcm2835_dma_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(bcm2835_dma_suspend_late, NULL)
+ LATE_SYSTEM_SLEEP_PM_OPS(bcm2835_dma_suspend_late, NULL)
};
static int bcm2835_dma_probe(struct platform_device *pdev)
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 91b2fbc0b864..d891dfca358e 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -841,9 +841,9 @@ static int dmatest_func(void *data)
} else {
dma_async_issue_pending(chan);
- wait_event_freezable_timeout(thread->done_wait,
- done->done,
- msecs_to_jiffies(params->timeout));
+ wait_event_timeout(thread->done_wait,
+ done->done,
+ msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL,
NULL);
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 3c52cb73237a..e3f990d888d7 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1224,22 +1224,28 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
if (!svc->intel_svc_fcs) {
dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
ret = -ENOMEM;
- goto err_unregister_dev;
+ goto err_unregister_rsu_dev;
}
ret = platform_device_add(svc->intel_svc_fcs);
if (ret) {
platform_device_put(svc->intel_svc_fcs);
- goto err_unregister_dev;
+ goto err_unregister_rsu_dev;
}
+ ret = of_platform_default_populate(dev_of_node(dev), NULL, dev);
+ if (ret)
+ goto err_unregister_fcs_dev;
+
dev_set_drvdata(dev, svc);
pr_info("Intel Service Layer Driver Initialized\n");
return 0;
-err_unregister_dev:
+err_unregister_fcs_dev:
+ platform_device_unregister(svc->intel_svc_fcs);
+err_unregister_rsu_dev:
platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
@@ -1253,6 +1259,8 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev)
struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+ of_platform_depopulate(ctrl->dev);
+
platform_device_unregister(svc->intel_svc_fcs);
platform_device_unregister(svc->stratix10_svc_rsu);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 176e9142fd8f..56f13e4fa361 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -259,6 +259,9 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
{ "fsl,imx8qm-fec", "phy-reset-gpios", "phy-reset-active-high" },
{ "fsl,s32v234-fec", "phy-reset-gpios", "phy-reset-active-high" },
#endif
+#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
+ { "atmel,hsmci", "cd-gpios", "cd-inverted" },
+#endif
#if IS_ENABLED(CONFIG_PCI_IMX6)
{ "fsl,imx6q-pcie", "reset-gpio", "reset-gpio-active-high" },
{ "fsl,imx6sx-pcie", "reset-gpio", "reset-gpio-active-high" },
@@ -285,9 +288,6 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
{ "regulator-gpio", "enable-gpio", "enable-active-high" },
{ "regulator-gpio", "enable-gpios", "enable-active-high" },
#endif
-#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
- { "atmel,hsmci", "cd-gpios", "cd-inverted" },
-#endif
};
unsigned int i;
bool active_high;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 69895fccb474..98f0c12df12b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -352,7 +352,6 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
@@ -1119,6 +1118,7 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
+ suspend_state_t last_suspend_state;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 24c255e05079..f2d77bc04e4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2515,8 +2515,20 @@ static int amdgpu_pmops_suspend(struct device *dev)
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
- if (!adev->in_s0ix && !adev->in_s3)
+ if (!adev->in_s0ix && !adev->in_s3) {
+ /* don't allow going deep first time followed by s2idle the next time */
+ if (adev->last_suspend_state != PM_SUSPEND_ON &&
+ adev->last_suspend_state != pm_suspend_target_state) {
+ drm_err_once(drm_dev, "Unsupported suspend state %d\n",
+ pm_suspend_target_state);
+ return -EINVAL;
+ }
return 0;
+ }
+
+ /* cache the state last used for suspend */
+ adev->last_suspend_state = pm_suspend_target_state;
+
return amdgpu_device_suspend(drm_dev, true);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c1f35ded684e..506786784e32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1411,9 +1411,11 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct drm_gpu_scheduler *sched = &ring->sched;
struct drm_sched_entity entity;
+ static atomic_t counter;
struct dma_fence *f;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
+ void *owner;
int i, r;
/* Initialize the scheduler entity */
@@ -1424,9 +1426,15 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
goto err;
}
- r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
- 64, 0,
- &job);
+ /*
+ * Use some unique dummy value as the owner to make sure we execute
+ * the cleaner shader on each submission. The value just need to change
+ * for each submission and is otherwise meaningless.
+ */
+ owner = (void *)(unsigned long)atomic_inc_return(&counter);
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
+ 64, 0, &job);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 1c19a65e6553..ef74259c448d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -678,12 +678,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
uint32_t flush_type, bool all_hub,
uint32_t inst)
{
- u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT :
- adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
- int r;
+ int r, cnt = 0;
uint32_t seq;
/*
@@ -740,10 +738,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+ !amdgpu_reset_pending(adev->reset_domain)) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
dev_err(adev->dev, "timeout waiting for kiq fence\n");
r = -ETIME;
- }
+ } else
+ r = 0;
}
error_unlock_reset:
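[Illustrative aside, not part of the patch.] The amdgpu_gmc change replaces one long fence poll with bounded retries: poll briefly, sleep MAX_KIQ_REG_BAILOUT_INTERVAL, try again up to MAX_KIQ_REG_TRY times, and stop early when a reset is already pending. A minimal user-space sketch of that wait-loop shape; fence_signaled() and reset_pending() are fake predicates standing in for amdgpu_fence_wait_polling() and amdgpu_reset_pending():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define RETRY_INTERVAL_MS	5	/* like MAX_KIQ_REG_BAILOUT_INTERVAL */
#define MAX_TRIES		1000	/* like MAX_KIQ_REG_TRY */

/* Fake predicates: the fence "signals" on the fourth poll, no reset pending. */
static bool fence_signaled(void) { static int polls; return ++polls > 3; }
static bool reset_pending(void) { return false; }

/* Poll in short slices with sleeps in between instead of spinning once for
 * a long timeout, and bail out early if a reset is already under way. */
static int wait_for_fence(void)
{
	int tries = 0;

	while (!fence_signaled() && ++tries < MAX_TRIES && !reset_pending())
		usleep(RETRY_INTERVAL_MS * 1000);

	return tries >= MAX_TRIES ? -1 /* -ETIME in the kernel code */ : 0;
}

int main(void)
{
	printf("wait_for_fence() = %d\n", wait_for_fence());
	return 0;
}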
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 5ba263fe5512..1f32c531f610 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -6044,7 +6044,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -6122,7 +6122,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -6199,7 +6199,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -6574,7 +6574,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index cfb51baa581a..f1f53c768741 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -2391,7 +2391,7 @@ static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2435,7 +2435,7 @@ static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2480,7 +2480,7 @@ static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -3115,7 +3115,7 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
@@ -3333,7 +3333,7 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
@@ -4549,7 +4549,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
if (r)
return r;
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index c21b168f75a7..0c08785099f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -2306,7 +2306,7 @@ static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
@@ -2450,7 +2450,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
@@ -3469,7 +3469,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
if (r)
return r;
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9bedca9a79c6..a88ad9951d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -268,7 +268,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -969,7 +969,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
adev->hdp.funcs->init_registers(adev);
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 72751ab4c766..1eb97117fe7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -229,7 +229,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -896,7 +896,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
return r;
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index c3c144a4f45e..0f136d6bbdc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -297,7 +297,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -881,7 +881,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
return r;
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 291549765c38..5250b470e5ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -2434,7 +2434,7 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
adev->hdp.funcs->init_registers(adev);
/* After HDP is initialized, flush HDP.*/
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 2395f1856962..e77a467af7ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -532,7 +532,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index cc621064610f..afdf8ce3b4c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -610,7 +610,7 @@ static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 4d33c95a5116..89f6c06946c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -488,7 +488,7 @@ static int psp_v14_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index ceb9fb475ef1..62a9a9ccf9bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -2000,7 +2000,8 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
- dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
+ if (!amdgpu_sriov_vf(dev->gpu->adev))
+ dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 80a3cbd2cbe5..76c8e6457175 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3293,16 +3293,16 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
for (k = 0; k < dc_state->stream_count; k++) {
bundle->stream_update.stream = dc_state->streams[k];
- for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+ for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
bundle->surface_updates[m].surface =
- dc_state->stream_status->plane_states[m];
+ dc_state->stream_status[k].plane_states[m];
bundle->surface_updates[m].surface->force_full_update =
true;
}
update_planes_and_stream_adapter(dm->dc,
UPDATE_TYPE_FULL,
- dc_state->stream_status->plane_count,
+ dc_state->stream_status[k].plane_count,
dc_state->streams[k],
&bundle->stream_update,
bundle->surface_updates);
@@ -10901,6 +10901,9 @@ static bool should_reset_plane(struct drm_atomic_state *state,
state->allow_modeset)
return true;
+ if (amdgpu_in_reset(adev) && state->allow_modeset)
+ return true;
+
/* Exit early if we know that we're adding or removing the plane. */
if (old_plane_state->crtc != new_plane_state->crtc)
return true;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index fbd80d8545a8..a2532907c7be 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -912,7 +912,7 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
{
struct drm_connector *connector = data;
struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
- unsigned char start = block * EDID_LENGTH;
+ unsigned short start = block * EDID_LENGTH;
struct edid *edid;
int r;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 81d2ee37e773..49ff9f1f16d3 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -169,7 +169,7 @@ static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = {
/* S805X/S805Y HDMI PLL won't lock for HDMI PHY freq > 1.65GHz */
{
.limits = {
- .max_hdmi_phy_freq = 1650000,
+ .max_hdmi_phy_freq = 1650000000,
},
.attrs = (const struct soc_device_attribute []) {
{ .soc_id = "GXL (S805*)", },
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 3f9345c14f31..be4b0e4df6e1 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -37,7 +37,7 @@ struct meson_drm_match_data {
};
struct meson_drm_soc_limits {
- unsigned int max_hdmi_phy_freq;
+ unsigned long long max_hdmi_phy_freq;
};
struct meson_drm {
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 0593a1cde906..ce8cea5d3a56 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -70,12 +70,12 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
{
struct meson_drm *priv = encoder_hdmi->priv;
int vic = drm_match_cea_mode(mode);
- unsigned int phy_freq;
- unsigned int vclk_freq;
- unsigned int venc_freq;
- unsigned int hdmi_freq;
+ unsigned long long phy_freq;
+ unsigned long long vclk_freq;
+ unsigned long long venc_freq;
+ unsigned long long hdmi_freq;
- vclk_freq = mode->clock;
+ vclk_freq = mode->clock * 1000;
/* For 420, pixel clock is half unlike venc clock */
if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
@@ -107,7 +107,8 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
+ dev_dbg(priv->dev,
+ "vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
@@ -122,10 +123,11 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct meson_drm *priv = encoder_hdmi->priv;
bool is_hdmi2_sink = display_info->hdmi.scdc.supported;
- unsigned int phy_freq;
- unsigned int vclk_freq;
- unsigned int venc_freq;
- unsigned int hdmi_freq;
+ unsigned long long clock = mode->clock * 1000;
+ unsigned long long phy_freq;
+ unsigned long long vclk_freq;
+ unsigned long long venc_freq;
+ unsigned long long hdmi_freq;
int vic = drm_match_cea_mode(mode);
enum drm_mode_status status;
@@ -144,12 +146,12 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
if (status != MODE_OK)
return status;
- return meson_vclk_dmt_supported_freq(priv, mode->clock);
+ return meson_vclk_dmt_supported_freq(priv, clock);
/* Check against supported VIC modes */
} else if (!meson_venc_hdmi_supported_vic(vic))
return MODE_BAD;
- vclk_freq = mode->clock;
+ vclk_freq = clock;
/* For 420, pixel clock is half unlike venc clock */
if (drm_mode_is_420_only(display_info, mode) ||
@@ -179,7 +181,8 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
+ dev_dbg(priv->dev,
+ "%s: vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz\n",
__func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
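
The type changes above follow from the unit change: once mode->clock (kHz) is converted to Hz with the * 1000 factor, rates such as the 5.94 GHz PLL/PHY entries handled in meson_vclk no longer fit in a 32-bit unsigned int, hence the move to unsigned long long. A standalone sketch (illustration only, not part of the patch) of the truncation a 32-bit type would cause:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long long pll_hz = 5940000000ULL;	/* 5.94 GHz HDMI PLL rate in Hz */

	/* UINT_MAX is 4294967295, so this value cannot be stored in unsigned int */
	printf("fits in unsigned int: %s\n", pll_hz <= UINT_MAX ? "yes" : "no");
	/* truncating to 32 bits silently wraps to 1645032704 */
	printf("truncated to 32 bits: %u\n", (unsigned int)pll_hz);
	return 0;
}
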
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 2a942dc6a6dc..3325580d885d 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -110,7 +110,10 @@
#define HDMI_PLL_LOCK BIT(31)
#define HDMI_PLL_LOCK_G12A (3 << 30)
-#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST(_freq * 1000, 1001)
+#define PIXEL_FREQ_1000_1001(_freq) \
+ DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
+#define PHY_FREQ_1000_1001(_freq) \
+ (PIXEL_FREQ_1000_1001(DIV_ROUND_DOWN_ULL(_freq, 10ULL)) * 10)
/* VID PLL Dividers */
enum {
@@ -360,11 +363,11 @@ enum {
};
struct meson_vclk_params {
- unsigned int pll_freq;
- unsigned int phy_freq;
- unsigned int vclk_freq;
- unsigned int venc_freq;
- unsigned int pixel_freq;
+ unsigned long long pll_freq;
+ unsigned long long phy_freq;
+ unsigned long long vclk_freq;
+ unsigned long long venc_freq;
+ unsigned long long pixel_freq;
unsigned int pll_od1;
unsigned int pll_od2;
unsigned int pll_od3;
@@ -372,11 +375,11 @@ struct meson_vclk_params {
unsigned int vclk_div;
} params[] = {
[MESON_VCLK_HDMI_ENCI_54000] = {
- .pll_freq = 4320000,
- .phy_freq = 270000,
- .vclk_freq = 54000,
- .venc_freq = 54000,
- .pixel_freq = 54000,
+ .pll_freq = 4320000000,
+ .phy_freq = 270000000,
+ .vclk_freq = 54000000,
+ .venc_freq = 54000000,
+ .pixel_freq = 54000000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -384,11 +387,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_54000] = {
- .pll_freq = 4320000,
- .phy_freq = 270000,
- .vclk_freq = 54000,
- .venc_freq = 54000,
- .pixel_freq = 27000,
+ .pll_freq = 4320000000,
+ .phy_freq = 270000000,
+ .vclk_freq = 54000000,
+ .venc_freq = 54000000,
+ .pixel_freq = 27000000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -396,11 +399,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_148500] = {
- .pll_freq = 2970000,
- .phy_freq = 742500,
- .vclk_freq = 148500,
- .venc_freq = 148500,
- .pixel_freq = 74250,
+ .pll_freq = 2970000000,
+ .phy_freq = 742500000,
+ .vclk_freq = 148500000,
+ .venc_freq = 148500000,
+ .pixel_freq = 74250000,
.pll_od1 = 4,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -408,11 +411,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_74250] = {
- .pll_freq = 2970000,
- .phy_freq = 742500,
- .vclk_freq = 74250,
- .venc_freq = 74250,
- .pixel_freq = 74250,
+ .pll_freq = 2970000000,
+ .phy_freq = 742500000,
+ .vclk_freq = 74250000,
+ .venc_freq = 74250000,
+ .pixel_freq = 74250000,
.pll_od1 = 2,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -420,11 +423,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_148500] = {
- .pll_freq = 2970000,
- .phy_freq = 1485000,
- .vclk_freq = 148500,
- .venc_freq = 148500,
- .pixel_freq = 148500,
+ .pll_freq = 2970000000,
+ .phy_freq = 1485000000,
+ .vclk_freq = 148500000,
+ .venc_freq = 148500000,
+ .pixel_freq = 148500000,
.pll_od1 = 1,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -432,11 +435,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_297000] = {
- .pll_freq = 5940000,
- .phy_freq = 2970000,
- .venc_freq = 297000,
- .vclk_freq = 297000,
- .pixel_freq = 297000,
+ .pll_freq = 5940000000,
+ .phy_freq = 2970000000,
+ .venc_freq = 297000000,
+ .vclk_freq = 297000000,
+ .pixel_freq = 297000000,
.pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -444,11 +447,11 @@ struct meson_vclk_params {
.vclk_div = 2,
},
[MESON_VCLK_HDMI_594000] = {
- .pll_freq = 5940000,
- .phy_freq = 5940000,
- .venc_freq = 594000,
- .vclk_freq = 594000,
- .pixel_freq = 594000,
+ .pll_freq = 5940000000,
+ .phy_freq = 5940000000,
+ .venc_freq = 594000000,
+ .vclk_freq = 594000000,
+ .pixel_freq = 594000000,
.pll_od1 = 1,
.pll_od2 = 1,
.pll_od3 = 2,
@@ -456,11 +459,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_594000_YUV420] = {
- .pll_freq = 5940000,
- .phy_freq = 2970000,
- .venc_freq = 594000,
- .vclk_freq = 594000,
- .pixel_freq = 297000,
+ .pll_freq = 5940000000,
+ .phy_freq = 2970000000,
+ .venc_freq = 594000000,
+ .vclk_freq = 594000000,
+ .pixel_freq = 297000000,
.pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -617,16 +620,16 @@ static void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
3 << 20, pll_od_to_reg(od3) << 20);
}
-#define XTAL_FREQ 24000
+#define XTAL_FREQ (24 * 1000 * 1000)
static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
- unsigned int pll_freq)
+ unsigned long long pll_freq)
{
/* The GXBB PLL has a /2 pre-multiplier */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
- pll_freq /= 2;
+ pll_freq = DIV_ROUND_DOWN_ULL(pll_freq, 2);
- return pll_freq / XTAL_FREQ;
+ return DIV_ROUND_DOWN_ULL(pll_freq, XTAL_FREQ);
}
#define HDMI_FRAC_MAX_GXBB 4096
@@ -635,12 +638,13 @@ static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
unsigned int m,
- unsigned int pll_freq)
+ unsigned long long pll_freq)
{
- unsigned int parent_freq = XTAL_FREQ;
+ unsigned long long parent_freq = XTAL_FREQ;
unsigned int frac_max = HDMI_FRAC_MAX_GXL;
unsigned int frac_m;
unsigned int frac;
+ u32 remainder;
/* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
@@ -652,11 +656,11 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
frac_max = HDMI_FRAC_MAX_G12A;
/* We can have a perfect match !*/
- if (pll_freq / m == parent_freq &&
- pll_freq % m == 0)
+ if (div_u64_rem(pll_freq, m, &remainder) == parent_freq &&
+ remainder == 0)
return 0;
- frac = div_u64((u64)pll_freq * (u64)frac_max, parent_freq);
+ frac = mul_u64_u64_div_u64(pll_freq, frac_max, parent_freq);
frac_m = m * frac_max;
if (frac_m > frac)
return frac_max;
@@ -666,7 +670,7 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
}
static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
- unsigned int m,
+ unsigned long long m,
unsigned int frac)
{
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
@@ -694,7 +698,7 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
}
static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
- unsigned int freq,
+ unsigned long long freq,
unsigned int *m,
unsigned int *frac,
unsigned int *od)
@@ -706,7 +710,7 @@ static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
continue;
*frac = meson_hdmi_pll_get_frac(priv, *m, freq * *od);
- DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d\n",
+ DRM_DEBUG_DRIVER("PLL params for %lluHz: m=%x frac=%x od=%d\n",
freq, *m, *frac, *od);
if (meson_hdmi_pll_validate_params(priv, *m, *frac))
@@ -718,7 +722,7 @@ static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
/* pll_freq is the frequency after the OD dividers */
enum drm_mode_status
-meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned long long freq)
{
unsigned int od, m, frac;
@@ -741,7 +745,7 @@ EXPORT_SYMBOL_GPL(meson_vclk_dmt_supported_freq);
/* pll_freq is the frequency after the OD dividers */
static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
- unsigned int pll_freq)
+ unsigned long long pll_freq)
{
unsigned int od, m, frac, od1, od2, od3;
@@ -756,7 +760,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
od1 = od / od2;
}
- DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d/%d/%d\n",
+ DRM_DEBUG_DRIVER("PLL params for %lluHz: m=%x frac=%x od=%d/%d/%d\n",
pll_freq, m, frac, od1, od2, od3);
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
@@ -764,17 +768,18 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
return;
}
- DRM_ERROR("Fatal, unable to find parameters for PLL freq %d\n",
+ DRM_ERROR("Fatal, unable to find parameters for PLL freq %lluHz\n",
pll_freq);
}
enum drm_mode_status
-meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
- unsigned int vclk_freq)
+meson_vclk_vic_supported_freq(struct meson_drm *priv,
+ unsigned long long phy_freq,
+ unsigned long long vclk_freq)
{
int i;
- DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
+ DRM_DEBUG_DRIVER("phy_freq = %lluHz vclk_freq = %lluHz\n",
phy_freq, vclk_freq);
/* Check against soc revision/package limits */
@@ -785,19 +790,19 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
}
for (i = 0 ; params[i].pixel_freq ; ++i) {
- DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
+ DRM_DEBUG_DRIVER("i = %d pixel_freq = %lluHz alt = %lluHz\n",
i, params[i].pixel_freq,
- FREQ_1000_1001(params[i].pixel_freq));
- DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+ PIXEL_FREQ_1000_1001(params[i].pixel_freq));
+ DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
i, params[i].phy_freq,
- FREQ_1000_1001(params[i].phy_freq/1000)*1000);
+ PHY_FREQ_1000_1001(params[i].phy_freq));
/* Match strict frequency */
if (phy_freq == params[i].phy_freq &&
vclk_freq == params[i].vclk_freq)
return MODE_OK;
/* Match 1000/1001 variant */
- if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
- vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
+ if (phy_freq == PHY_FREQ_1000_1001(params[i].phy_freq) &&
+ vclk_freq == PIXEL_FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK;
}
@@ -805,8 +810,9 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
}
EXPORT_SYMBOL_GPL(meson_vclk_vic_supported_freq);
-static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
- unsigned int od1, unsigned int od2, unsigned int od3,
+static void meson_vclk_set(struct meson_drm *priv,
+ unsigned long long pll_base_freq, unsigned int od1,
+ unsigned int od2, unsigned int od3,
unsigned int vid_pll_div, unsigned int vclk_div,
unsigned int hdmi_tx_div, unsigned int venc_div,
bool hdmi_use_enci, bool vic_alternate_clock)
@@ -826,15 +832,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
meson_hdmi_pll_generic_set(priv, pll_base_freq);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
switch (pll_base_freq) {
- case 2970000:
+ case 2970000000:
m = 0x3d;
frac = vic_alternate_clock ? 0xd02 : 0xe00;
break;
- case 4320000:
+ case 4320000000:
m = vic_alternate_clock ? 0x59 : 0x5a;
frac = vic_alternate_clock ? 0xe8f : 0;
break;
- case 5940000:
+ case 5940000000:
m = 0x7b;
frac = vic_alternate_clock ? 0xa05 : 0xc00;
break;
@@ -844,15 +850,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
switch (pll_base_freq) {
- case 2970000:
+ case 2970000000:
m = 0x7b;
frac = vic_alternate_clock ? 0x281 : 0x300;
break;
- case 4320000:
+ case 4320000000:
m = vic_alternate_clock ? 0xb3 : 0xb4;
frac = vic_alternate_clock ? 0x347 : 0;
break;
- case 5940000:
+ case 5940000000:
m = 0xf7;
frac = vic_alternate_clock ? 0x102 : 0x200;
break;
@@ -861,15 +867,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
switch (pll_base_freq) {
- case 2970000:
+ case 2970000000:
m = 0x7b;
frac = vic_alternate_clock ? 0x140b4 : 0x18000;
break;
- case 4320000:
+ case 4320000000:
m = vic_alternate_clock ? 0xb3 : 0xb4;
frac = vic_alternate_clock ? 0x1a3ee : 0;
break;
- case 5940000:
+ case 5940000000:
m = 0xf7;
frac = vic_alternate_clock ? 0x8148 : 0x10000;
break;
@@ -1025,14 +1031,14 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int phy_freq, unsigned int vclk_freq,
- unsigned int venc_freq, unsigned int dac_freq,
+ unsigned long long phy_freq, unsigned long long vclk_freq,
+ unsigned long long venc_freq, unsigned long long dac_freq,
bool hdmi_use_enci)
{
bool vic_alternate_clock = false;
- unsigned int freq;
- unsigned int hdmi_tx_div;
- unsigned int venc_div;
+ unsigned long long freq;
+ unsigned long long hdmi_tx_div;
+ unsigned long long venc_div;
if (target == MESON_VCLK_TARGET_CVBS) {
meson_venci_cvbs_clock_config(priv);
@@ -1052,27 +1058,27 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
return;
}
- hdmi_tx_div = vclk_freq / dac_freq;
+ hdmi_tx_div = DIV_ROUND_DOWN_ULL(vclk_freq, dac_freq);
if (hdmi_tx_div == 0) {
- pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
+ pr_err("Fatal Error, invalid HDMI-TX freq %lluHz\n",
dac_freq);
return;
}
- venc_div = vclk_freq / venc_freq;
+ venc_div = DIV_ROUND_DOWN_ULL(vclk_freq, venc_freq);
if (venc_div == 0) {
- pr_err("Fatal Error, invalid HDMI venc freq %d\n",
+ pr_err("Fatal Error, invalid HDMI venc freq %lluHz\n",
venc_freq);
return;
}
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
if ((phy_freq == params[freq].phy_freq ||
- phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
+ phy_freq == PHY_FREQ_1000_1001(params[freq].phy_freq)) &&
(vclk_freq == params[freq].vclk_freq ||
- vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
+ vclk_freq == PIXEL_FREQ_1000_1001(params[freq].vclk_freq))) {
if (vclk_freq != params[freq].vclk_freq)
vic_alternate_clock = true;
else
@@ -1098,7 +1104,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
}
if (!params[freq].pixel_freq) {
- pr_err("Fatal Error, invalid HDMI vclk freq %d\n", vclk_freq);
+ pr_err("Fatal Error, invalid HDMI vclk freq %lluHz\n",
+ vclk_freq);
return;
}
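
The reworked helpers above keep the 1000/1001 ("NTSC") PHY rate an exact multiple of ten times the pixel rate by dividing by 10 before applying the ratio. A standalone sketch (illustration only; DIV_ROUND_CLOSEST_ULL and DIV_ROUND_DOWN_ULL are re-declared here purely for the example) of the values this produces for the 594 MHz / 5.94 GHz entry:

#include <stdio.h>

#define DIV_ROUND_CLOSEST_ULL(x, d)	(((x) + (d) / 2) / (d))
#define DIV_ROUND_DOWN_ULL(x, d)	((x) / (d))

#define PIXEL_FREQ_1000_1001(_freq) \
	DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
#define PHY_FREQ_1000_1001(_freq) \
	(PIXEL_FREQ_1000_1001(DIV_ROUND_DOWN_ULL(_freq, 10ULL)) * 10)

int main(void)
{
	unsigned long long vclk = 594000000ULL;		/* pixel/vclk rate in Hz */
	unsigned long long phy = 5940000000ULL;		/* TMDS/PHY rate in Hz */

	/* 593406593 Hz: the 59.94 Hz variant of the 594 MHz clock */
	printf("pixel 1000/1001: %llu\n", PIXEL_FREQ_1000_1001(vclk));
	/*
	 * 5934065930 Hz: exactly 10x the pixel value above. Applying the
	 * 1000/1001 ratio to the PHY rate directly would round to
	 * 5934065934 instead, breaking the 10x relationship.
	 */
	printf("phy 1000/1001: %llu\n", PHY_FREQ_1000_1001(phy));
	return 0;
}
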
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index 60617aaf18dd..7ac55744e574 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -20,17 +20,18 @@ enum {
};
/* 27MHz is the CVBS Pixel Clock */
-#define MESON_VCLK_CVBS 27000
+#define MESON_VCLK_CVBS (27 * 1000 * 1000)
enum drm_mode_status
-meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned long long freq);
enum drm_mode_status
-meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
- unsigned int vclk_freq);
+meson_vclk_vic_supported_freq(struct meson_drm *priv,
+ unsigned long long phy_freq,
+ unsigned long long vclk_freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int phy_freq, unsigned int vclk_freq,
- unsigned int venc_freq, unsigned int dac_freq,
+ unsigned long long phy_freq, unsigned long long vclk_freq,
+ unsigned long long venc_freq, unsigned long long dac_freq,
bool hdmi_use_enci);
#endif /* __MESON_VCLK_H */
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index 7d68a8acfe2e..eb0f8373258c 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -129,11 +129,11 @@ static int jadard_unprepare(struct drm_panel *panel)
{
struct jadard *jadard = panel_to_jadard(panel);
- gpiod_set_value(jadard->reset, 1);
+ gpiod_set_value(jadard->reset, 0);
msleep(120);
if (jadard->desc->reset_before_power_off_vcioo) {
- gpiod_set_value(jadard->reset, 0);
+ gpiod_set_value(jadard->reset, 1);
usleep_range(1000, 2000);
}
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 162f18e975da..d0ea8a55fd9c 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -475,6 +475,7 @@
#define TDL_TSL_CHICKEN XE_REG_MCR(0xe4c4, XE_REG_OPTION_MASKED)
#define STK_ID_RESTRICT REG_BIT(12)
#define SLM_WMTP_RESTORE REG_BIT(11)
+#define RES_CHK_SPR_DIS REG_BIT(6)
#define ROW_CHICKEN XE_REG_MCR(0xe4f0, XE_REG_OPTION_MASKED)
#define UGM_BACKUP_MODE REG_BIT(13)
@@ -500,6 +501,9 @@
#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
#define DIS_ATOMIC_CHAINING_TYPED_WRITES REG_BIT(3)
+#define TDL_CHICKEN XE_REG_MCR(0xe5f4, XE_REG_OPTION_MASKED)
+#define QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE REG_BIT(12)
+
#define LSC_CHICKEN_BIT_0 XE_REG_MCR(0xe7c8)
#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
#define WR_REQ_CHAINING_DIS REG_BIT(26)
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index 36a3b5420fef..b0254b014fe4 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -320,7 +320,7 @@ static void xe_rtp_process_to_sr_tests(struct kunit *test)
count_rtp_entries++;
xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
- xe_rtp_process_to_sr(&ctx, param->entries, reg_sr);
+ xe_rtp_process_to_sr(&ctx, param->entries, count_rtp_entries, reg_sr);
xa_for_each(&reg_sr->xa, idx, sre) {
if (idx == param->expected_reg.addr)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 8a20e6744836..94eed1315b0f 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -381,6 +381,10 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
+ err = xe_tuning_init(gt);
+ if (err)
+ return err;
+
xe_wa_process_oob(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index e7792858b1e4..2d63a69cbfa3 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -30,6 +30,7 @@
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_sriov.h"
+#include "xe_tuning.h"
#include "xe_uc_debugfs.h"
#include "xe_wa.h"
@@ -217,6 +218,15 @@ static int workarounds(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
+static int tunings(struct xe_gt *gt, struct drm_printer *p)
+{
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_tuning_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return 0;
+}
+
static int pat(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
@@ -300,6 +310,7 @@ static const struct drm_info_list debugfs_list[] = {
{"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
{"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
{"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
+ {"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings},
{"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
{"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
{"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 6e66bf0e8b3f..dd2969a1846d 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -413,6 +413,16 @@ struct xe_gt {
bool oob_initialized;
} wa_active;
+ /** @tuning_active: keep track of active tunings */
+ struct {
+ /** @tuning_active.gt: bitmap with active GT tunings */
+ unsigned long *gt;
+ /** @tuning_active.engine: bitmap with active engine tunings */
+ unsigned long *engine;
+ /** @tuning_active.lrc: bitmap with active LRC tunings */
+ unsigned long *lrc;
+ } tuning_active;
+
/** @user_engines: engines present in GT and available to userspace */
struct {
/**
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index fc447751fe78..b26b6fb5cdb5 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -386,12 +386,6 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
blit_cctl_val,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
- /* Use Fixed slice CCS mode */
- { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
- XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
- XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
- RCU_MODE_FIXED_SLICE_CCS_MODE))
- },
/* Disable WMTP if HW doesn't support it */
{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
@@ -400,10 +394,9 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
},
- {}
};
- xe_rtp_process_to_sr(&ctx, lrc_setup, &hwe->reg_lrc);
+ xe_rtp_process_to_sr(&ctx, lrc_setup, ARRAY_SIZE(lrc_setup), &hwe->reg_lrc);
}
static void
@@ -459,10 +452,15 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
- {}
+ /* Use Fixed slice CCS mode */
+ { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
+ XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
+ XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
+ RCU_MODE_FIXED_SLICE_CCS_MODE))
+ },
};
- xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
+ xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);
}
static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
index edab5d4e3ba5..23f6c81d9994 100644
--- a/drivers/gpu/drm/xe/xe_reg_whitelist.c
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -88,7 +88,6 @@ static const struct xe_rtp_entry_sr register_whitelist[] = {
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4))
},
- {}
};
static void whitelist_apply_to_hwe(struct xe_hw_engine *hwe)
@@ -137,7 +136,8 @@ void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, register_whitelist, &hwe->reg_whitelist);
+ xe_rtp_process_to_sr(&ctx, register_whitelist, ARRAY_SIZE(register_whitelist),
+ &hwe->reg_whitelist);
whitelist_apply_to_hwe(hwe);
}
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 7a1c78fdfc92..13bb62d3e615 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -237,6 +237,7 @@ static void rtp_mark_active(struct xe_device *xe,
* the save-restore argument.
* @ctx: The context for processing the table, with one of device, gt or hwe
* @entries: Table with RTP definitions
+ * @n_entries: Number of entries to process, usually ARRAY_SIZE(entries)
* @sr: Save-restore struct where matching rules execute the action. This can be
 * viewed as the "coalesced view" of the multiple tables. The bits for each
* register set are expected not to collide with previously added entries
@@ -247,6 +248,7 @@ static void rtp_mark_active(struct xe_device *xe,
*/
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry_sr *entries,
+ size_t n_entries,
struct xe_reg_sr *sr)
{
const struct xe_rtp_entry_sr *entry;
@@ -259,7 +261,9 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
if (IS_SRIOV_VF(xe))
return;
- for (entry = entries; entry && entry->name; entry++) {
+ xe_assert(xe, entries);
+
+ for (entry = entries; entry - entries < n_entries; entry++) {
bool match = false;
if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) {
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 38b9f13bba5e..4fe736a11c42 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -430,7 +430,7 @@ void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry_sr *entries,
- struct xe_reg_sr *sr);
+ size_t n_entries, struct xe_reg_sr *sr);
void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry *entries);
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 3c78f3d71559..a61a2917590f 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -7,6 +7,8 @@
#include <kunit/visibility.h>
+#include <drm/drm_managed.h>
+
#include "regs/xe_gt_regs.h"
#include "xe_gt_types.h"
#include "xe_platform_types.h"
@@ -83,8 +85,6 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
XE_RTP_RULES(MEDIA_VERSION(2000)),
XE_RTP_ACTIONS(SET(XE2LPM_SCRATCH3_LBCF, RWFLUSHALLEN))
},
-
- {}
};
static const struct xe_rtp_entry_sr engine_tunings[] = {
@@ -93,7 +93,6 @@ static const struct xe_rtp_entry_sr engine_tunings[] = {
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
- {}
};
static const struct xe_rtp_entry_sr lrc_tunings[] = {
@@ -131,15 +130,47 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = {
XE_RTP_ACTIONS(FIELD_SET(FF_MODE, VS_HIT_MAX_VALUE_MASK,
REG_FIELD_PREP(VS_HIT_MAX_VALUE_MASK, 0x3f)))
},
-
- {}
};
+/**
+ * xe_tuning_init - initialize gt with tunings bookkeeping
+ * @gt: GT instance to initialize
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_tuning_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ size_t n_lrc, n_engine, n_gt, total;
+ unsigned long *p;
+
+ n_gt = BITS_TO_LONGS(ARRAY_SIZE(gt_tunings));
+ n_engine = BITS_TO_LONGS(ARRAY_SIZE(engine_tunings));
+ n_lrc = BITS_TO_LONGS(ARRAY_SIZE(lrc_tunings));
+ total = n_gt + n_engine + n_lrc;
+
+ p = drmm_kzalloc(&xe->drm, sizeof(*p) * total, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ gt->tuning_active.gt = p;
+ p += n_gt;
+ gt->tuning_active.engine = p;
+ p += n_engine;
+ gt->tuning_active.lrc = p;
+
+ return 0;
+}
+ALLOW_ERROR_INJECTION(xe_tuning_init, ERRNO); /* See xe_pci_probe() */
+
void xe_tuning_process_gt(struct xe_gt *gt)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
- xe_rtp_process_to_sr(&ctx, gt_tunings, &gt->reg_sr);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ gt->tuning_active.gt,
+ ARRAY_SIZE(gt_tunings));
+ xe_rtp_process_to_sr(&ctx, gt_tunings, ARRAY_SIZE(gt_tunings), &gt->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_gt);
@@ -147,7 +178,11 @@ void xe_tuning_process_engine(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, engine_tunings, &hwe->reg_sr);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ hwe->gt->tuning_active.engine,
+ ARRAY_SIZE(engine_tunings));
+ xe_rtp_process_to_sr(&ctx, engine_tunings, ARRAY_SIZE(engine_tunings),
+ &hwe->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_engine);
@@ -163,5 +198,25 @@ void xe_tuning_process_lrc(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, lrc_tunings, &hwe->reg_lrc);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ hwe->gt->tuning_active.lrc,
+ ARRAY_SIZE(lrc_tunings));
+ xe_rtp_process_to_sr(&ctx, lrc_tunings, ARRAY_SIZE(lrc_tunings), &hwe->reg_lrc);
+}
+
+void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ size_t idx;
+
+ drm_printf(p, "GT Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.gt, ARRAY_SIZE(gt_tunings))
+ drm_printf_indent(p, 1, "%s\n", gt_tunings[idx].name);
+
+ drm_printf(p, "\nEngine Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.engine, ARRAY_SIZE(engine_tunings))
+ drm_printf_indent(p, 1, "%s\n", engine_tunings[idx].name);
+
+ drm_printf(p, "\nLRC Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.lrc, ARRAY_SIZE(lrc_tunings))
+ drm_printf_indent(p, 1, "%s\n", lrc_tunings[idx].name);
}
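
xe_tuning_init() above records which tunings matched by carving a single zeroed drmm allocation into three bitmaps, one per table, which xe_tuning_dump() later walks with for_each_set_bit(). A standalone sketch (illustration only; the names and userspace allocation are placeholders, not kernel API) of that carving pattern:

#include <stdlib.h>

#define BITS_PER_LONG	 (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct tuning_active {
	unsigned long *gt;
	unsigned long *engine;
	unsigned long *lrc;
};

/* n_gt/n_engine/n_lrc stand in for ARRAY_SIZE(gt_tunings) etc. */
static int tuning_active_init(struct tuning_active *t,
			      size_t n_gt, size_t n_engine, size_t n_lrc)
{
	size_t total = BITS_TO_LONGS(n_gt) + BITS_TO_LONGS(n_engine) +
		       BITS_TO_LONGS(n_lrc);
	unsigned long *p = calloc(total, sizeof(*p));	/* one zeroed allocation */

	if (!p)
		return -1;

	t->gt = p;		/* first BITS_TO_LONGS(n_gt) longs */
	p += BITS_TO_LONGS(n_gt);
	t->engine = p;		/* next BITS_TO_LONGS(n_engine) longs */
	p += BITS_TO_LONGS(n_engine);
	t->lrc = p;		/* remaining longs */
	return 0;
}

int main(void)
{
	struct tuning_active t;

	return tuning_active_init(&t, 12, 3, 7);
}
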
diff --git a/drivers/gpu/drm/xe/xe_tuning.h b/drivers/gpu/drm/xe/xe_tuning.h
index 4f9c3ac3b516..dd0d3ccc9c65 100644
--- a/drivers/gpu/drm/xe/xe_tuning.h
+++ b/drivers/gpu/drm/xe/xe_tuning.h
@@ -6,11 +6,14 @@
#ifndef _XE_TUNING_
#define _XE_TUNING_
+struct drm_printer;
struct xe_gt;
struct xe_hw_engine;
+int xe_tuning_init(struct xe_gt *gt);
void xe_tuning_process_gt(struct xe_gt *gt);
void xe_tuning_process_engine(struct xe_hw_engine *hwe);
void xe_tuning_process_lrc(struct xe_hw_engine *hwe);
+void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 2553accf8c51..65bfb2f894d0 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -279,8 +279,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), RAMDFTUNIT_CLKGATE_DIS)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
-
- {}
};
static const struct xe_rtp_entry_sr engine_was[] = {
@@ -613,8 +611,16 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(FIELD_SET(SAMPLER_MODE, SMP_WAIT_FETCH_MERGING_COUNTER,
SMP_FORCE_128B_OVERFETCH))
},
-
- {}
+ { XE_RTP_NAME("14023061436"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_CHICKEN, QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE))
+ },
+ { XE_RTP_NAME("13012615864"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
+ },
};
static const struct xe_rtp_entry_sr lrc_was[] = {
@@ -807,8 +813,6 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_PARTIAL_AUTOSTRIP |
DIS_AUTOSTRIP))
},
-
- {}
};
static __maybe_unused const struct xe_rtp_entry oob_was[] = {
@@ -850,7 +854,7 @@ void xe_wa_process_gt(struct xe_gt *gt)
xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.gt,
ARRAY_SIZE(gt_was));
- xe_rtp_process_to_sr(&ctx, gt_was, &gt->reg_sr);
+ xe_rtp_process_to_sr(&ctx, gt_was, ARRAY_SIZE(gt_was), &gt->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_wa_process_gt);
@@ -868,7 +872,7 @@ void xe_wa_process_engine(struct xe_hw_engine *hwe)
xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.engine,
ARRAY_SIZE(engine_was));
- xe_rtp_process_to_sr(&ctx, engine_was, &hwe->reg_sr);
+ xe_rtp_process_to_sr(&ctx, engine_was, ARRAY_SIZE(engine_was), &hwe->reg_sr);
}
/**
@@ -885,7 +889,7 @@ void xe_wa_process_lrc(struct xe_hw_engine *hwe)
xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.lrc,
ARRAY_SIZE(lrc_was));
- xe_rtp_process_to_sr(&ctx, lrc_was, &hwe->reg_lrc);
+ xe_rtp_process_to_sr(&ctx, lrc_was, ARRAY_SIZE(lrc_was), &hwe->reg_lrc);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 40438c3d9b72..32d3853b08ec 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -30,8 +30,10 @@
13011645652 GRAPHICS_VERSION(2004)
14022293748 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
22019794406 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index ed7b9d7f688c..0fc03bb5d0a6 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -158,6 +158,10 @@ struct svc_i3c_regs_save {
u32 mdynaddr;
};
+struct svc_i3c_drvdata {
+ u32 quirks;
+};
+
/**
* struct svc_i3c_master - Silvaco I3C Master structure
* @base: I3C master controller
@@ -183,6 +187,7 @@ struct svc_i3c_regs_save {
* @ibi.tbq_slot: To be queued IBI slot
* @ibi.lock: IBI lock
* @lock: Transfer lock, protect between IBI work thread and callbacks from master
+ * @drvdata: Driver data
* @enabled_events: Bit masks for enable events (IBI, HotJoin).
* @mctrl_config: Configuration value in SVC_I3C_MCTRL for setting speed back.
*/
@@ -214,6 +219,7 @@ struct svc_i3c_master {
spinlock_t lock;
} ibi;
struct mutex lock;
+ const struct svc_i3c_drvdata *drvdata;
u32 enabled_events;
u32 mctrl_config;
};
@@ -1817,6 +1823,10 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ master->drvdata = of_device_get_match_data(dev);
+ if (!master->drvdata)
+ return -EINVAL;
+
master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
@@ -1958,8 +1968,13 @@ static const struct dev_pm_ops svc_i3c_pm_ops = {
svc_i3c_runtime_resume, NULL)
};
+static const struct svc_i3c_drvdata npcm845_drvdata = {};
+
+static const struct svc_i3c_drvdata svc_default_drvdata = {};
+
static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
- { .compatible = "silvaco,i3c-master-v1"},
+ { .compatible = "nuvoton,npcm845-i3c", .data = &npcm845_drvdata },
+ { .compatible = "silvaco,i3c-master-v1", .data = &svc_default_drvdata },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
diff --git a/drivers/iio/adc/ad4695.c b/drivers/iio/adc/ad4695.c
index b79d135a5471..22fdc454b0ce 100644
--- a/drivers/iio/adc/ad4695.c
+++ b/drivers/iio/adc/ad4695.c
@@ -92,6 +92,8 @@
#define AD4695_T_REFBUF_MS 100
#define AD4695_T_REGCONFIG_NS 20
#define AD4695_T_SCK_CNV_DELAY_NS 80
+#define AD4695_T_CNVL_NS 80
+#define AD4695_T_CNVH_NS 10
#define AD4695_REG_ACCESS_SCLK_HZ (10 * MEGA)
/* Max number of voltage input channels. */
@@ -364,11 +366,31 @@ static int ad4695_enter_advanced_sequencer_mode(struct ad4695_state *st, u32 n)
*/
static int ad4695_exit_conversion_mode(struct ad4695_state *st)
{
- struct spi_transfer xfer = {
- .tx_buf = &st->cnv_cmd2,
- .len = 1,
- .delay.value = AD4695_T_REGCONFIG_NS,
- .delay.unit = SPI_DELAY_UNIT_NSECS,
+ /*
+ * An extra transfer is needed to trigger a conversion here so
+ * that we can be 100% sure the command will be processed by the
+ * ADC, rather than relying on it to be in the correct state
+ * when this function is called (this chip has a quirk where the
+ * command only works when reading a conversion, and if the
+ * previous conversion was already read then it won't work). The
+ * actual conversion command is then run at the slower
+ * AD4695_REG_ACCESS_SCLK_HZ speed to guarantee this works.
+ */
+ struct spi_transfer xfers[] = {
+ {
+ .delay.value = AD4695_T_CNVL_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ .cs_change = 1,
+ .cs_change_delay.value = AD4695_T_CNVH_NS,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_NSECS,
+ },
+ {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .tx_buf = &st->cnv_cmd2,
+ .len = 1,
+ .delay.value = AD4695_T_REGCONFIG_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ },
};
/*
@@ -377,7 +399,7 @@ static int ad4695_exit_conversion_mode(struct ad4695_state *st)
*/
st->cnv_cmd2 = AD4695_CMD_EXIT_CNV_MODE << 3;
- return spi_sync_transfer(st->spi, &xfer, 1);
+ return spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
}
static int ad4695_set_ref_voltage(struct ad4695_state *st, int vref_mv)
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 6f8816483f1a..157a0df97f97 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -142,7 +142,7 @@ static const struct iio_chan_spec ad7768_channels[] = {
.channel = 0,
.scan_index = 0,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 24,
.storagebits = 32,
.shift = 8,
@@ -370,12 +370,11 @@ static int ad7768_read_raw(struct iio_dev *indio_dev,
return ret;
ret = ad7768_scan_direct(indio_dev);
- if (ret >= 0)
- *val = ret;
iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
+ *val = sign_extend32(ret, chan->scan_type.realbits - 1);
return IIO_VAL_INT;
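
With the scan type now declared signed ('s') above, the raw 24-bit conversion result has to be sign-extended before it is reported, which is what sign_extend32(ret, chan->scan_type.realbits - 1) does. A standalone sketch (illustration only, re-implementing the helper outside the kernel) of what the extension yields for negative and positive 24-bit codes:

#include <stdint.h>
#include <stdio.h>

/* Same idea as the kernel's sign_extend32(): shift the sign bit up to
 * bit 31, then arithmetic-shift back down. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	printf("%d\n", sign_extend32(0xffffff, 24 - 1));	/* -1 */
	printf("%d\n", sign_extend32(0x800000, 24 - 1));	/* -8388608 */
	printf("%d\n", sign_extend32(0x7fffff, 24 - 1));	/* 8388607 */
	return 0;
}
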
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index b27791029fa9..b9f4a2937c3a 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -55,6 +55,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
struct inode *inode = new_inode(dir->i_sb);
if (!inode) {
+ dput(dentry);
error = -EPERM;
goto bail;
}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index cd5116d8c3b2..b3a01b7757ee 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3850,7 +3850,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
* we should not modify the IRTE
*/
if (!dev_data || !dev_data->use_vapic)
- return 0;
+ return -EINVAL;
ir_data->cfg = irqd_cfg(data);
pi_data->ir_data = ir_data;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index 5aa2e7af58b4..34a0be59cd91 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -43,6 +43,8 @@ static void arm_smmu_make_nested_cd_table_ste(
target->data[0] |= nested_domain->ste[0] &
~cpu_to_le64(STRTAB_STE_0_CFG);
target->data[1] |= nested_domain->ste[1];
+ /* Merge events for DoS mitigations on eventq */
+ target->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
}
/*
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 358072b4e293..59749e8180af 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1052,7 +1052,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
- STRTAB_STE_1_EATS);
+ STRTAB_STE_1_EATS | STRTAB_STE_1_MEV);
used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
/*
@@ -1068,7 +1068,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
if (cfg & BIT(1)) {
used_bits[1] |=
cpu_to_le64(STRTAB_STE_1_S2FWB | STRTAB_STE_1_EATS |
- STRTAB_STE_1_SHCFG);
+ STRTAB_STE_1_SHCFG | STRTAB_STE_1_MEV);
used_bits[2] |=
cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index bd9d7c85576a..7290bd4c2bb0 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -266,6 +266,7 @@ static inline u32 arm_smmu_strtab_l2_idx(u32 sid)
#define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4)
#define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6)
+#define STRTAB_STE_1_MEV (1UL << 19)
#define STRTAB_STE_1_S2FWB (1UL << 25)
#define STRTAB_STE_1_S1STALLD (1UL << 27)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index e3df1f06afbe..1efe7cddb4fe 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -508,6 +508,9 @@ static void iommu_deinit_device(struct device *dev)
dev->iommu_group = NULL;
module_put(ops->owner);
dev_iommu_free(dev);
+#ifdef CONFIG_IOMMU_DMA
+ dev->dma_iommu = false;
+#endif
}
DEFINE_MUTEX(iommu_probe_device_lock);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index be35c5349986..a1e370d0200f 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -423,7 +423,7 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
#ifdef CONFIG_ACPI
static int acpi_num_msi;
-static __init struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
+static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
{
struct v2m_data *data;
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
index f6363246a71a..21d01ce2da5c 100644
--- a/drivers/irqchip/irq-renesas-rzv2h.c
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -81,17 +81,27 @@
#define ICU_PB5_TINT 0x55
/**
+ * struct rzv2h_hw_info - Interrupt Control Unit controller hardware info structure.
+ * @t_offs: TINT offset
+ */
+struct rzv2h_hw_info {
+ u16 t_offs;
+};
+
+/**
* struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure.
* @base: Controller's base address
* @irqchip: Pointer to struct irq_chip
* @fwspec: IRQ firmware specific data
* @lock: Lock to serialize access to hardware registers
+ * @info: Pointer to struct rzv2h_hw_info
*/
struct rzv2h_icu_priv {
void __iomem *base;
const struct irq_chip *irqchip;
struct irq_fwspec fwspec[ICU_NUM_IRQ];
raw_spinlock_t lock;
+ const struct rzv2h_hw_info *info;
};
static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data)
@@ -111,7 +121,7 @@ static void rzv2h_icu_eoi(struct irq_data *d)
tintirq_nr = hw_irq - ICU_TINT_START;
bit = BIT(tintirq_nr);
if (!irqd_is_level_type(d))
- writel_relaxed(bit, priv->base + ICU_TSCLR);
+ writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
} else if (hw_irq >= ICU_IRQ_START) {
tintirq_nr = hw_irq - ICU_IRQ_START;
bit = BIT(tintirq_nr);
@@ -139,12 +149,20 @@ static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable)
tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
guard(raw_spinlock)(&priv->lock);
- tssr = readl_relaxed(priv->base + ICU_TSSR(k));
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(k));
if (enable)
tssr |= ICU_TSSR_TIEN(tssel_n);
else
tssr &= ~ICU_TSSR_TIEN(tssel_n);
- writel_relaxed(tssr, priv->base + ICU_TSSR(k));
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(k));
+
+ /*
+ * A glitch in the edge detection circuit can cause a spurious
+ * interrupt. Clear the status flag after setting the ICU_TSSRk
+ * registers, which is recommended by the hardware manual as a
+ * countermeasure.
+ */
+ writel_relaxed(BIT(tint_nr), priv->base + priv->info->t_offs + ICU_TSCLR);
}
static void rzv2h_icu_irq_disable(struct irq_data *d)
@@ -247,8 +265,8 @@ static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq
u32 bit = BIT(tint_nr);
int k = tint_nr / 16;
- tsctr = readl_relaxed(priv->base + ICU_TSCTR);
- titsr = readl_relaxed(priv->base + ICU_TITSR(k));
+ tsctr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSCTR);
+ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(k));
titsel = ICU_TITSR_TITSEL_GET(titsr, titsel_n);
/*
@@ -257,7 +275,7 @@ static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq
*/
if ((tsctr & bit) && ((titsel == ICU_TINT_EDGE_RISING) ||
(titsel == ICU_TINT_EDGE_FALLING)))
- writel_relaxed(bit, priv->base + ICU_TSCLR);
+ writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
}
static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
@@ -308,21 +326,21 @@ static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
guard(raw_spinlock)(&priv->lock);
- tssr = readl_relaxed(priv->base + ICU_TSSR(tssr_k));
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n) | tien);
tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n);
- writel_relaxed(tssr, priv->base + ICU_TSSR(tssr_k));
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
- titsr = readl_relaxed(priv->base + ICU_TITSR(titsr_k));
+ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
titsr &= ~ICU_TITSR_TITSEL_MASK(titsel_n);
titsr |= ICU_TITSR_TITSEL_PREP(sense, titsel_n);
- writel_relaxed(titsr, priv->base + ICU_TITSR(titsr_k));
+ writel_relaxed(titsr, priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
rzv2h_clear_tint_int(priv, hwirq);
- writel_relaxed(tssr | tien, priv->base + ICU_TSSR(tssr_k));
+ writel_relaxed(tssr | tien, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
return 0;
}
@@ -421,7 +439,13 @@ static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device
return 0;
}
-static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+static void rzv2h_icu_put_device(void *data)
+{
+ put_device(data);
+}
+
+static int rzv2h_icu_init_common(struct device_node *node, struct device_node *parent,
+ const struct rzv2h_hw_info *hw_info)
{
struct irq_domain *irq_domain, *parent_domain;
struct rzv2h_icu_priv *rzv2h_icu_data;
@@ -433,43 +457,41 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
if (!pdev)
return -ENODEV;
+ ret = devm_add_action_or_reset(&pdev->dev, rzv2h_icu_put_device,
+ &pdev->dev);
+ if (ret < 0)
+ return ret;
+
parent_domain = irq_find_host(parent);
if (!parent_domain) {
dev_err(&pdev->dev, "cannot find parent domain\n");
- ret = -ENODEV;
- goto put_dev;
+ return -ENODEV;
}
rzv2h_icu_data = devm_kzalloc(&pdev->dev, sizeof(*rzv2h_icu_data), GFP_KERNEL);
- if (!rzv2h_icu_data) {
- ret = -ENOMEM;
- goto put_dev;
- }
+ if (!rzv2h_icu_data)
+ return -ENOMEM;
rzv2h_icu_data->irqchip = &rzv2h_icu_chip;
rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
- if (IS_ERR(rzv2h_icu_data->base)) {
- ret = PTR_ERR(rzv2h_icu_data->base);
- goto put_dev;
- }
+ if (IS_ERR(rzv2h_icu_data->base))
+ return PTR_ERR(rzv2h_icu_data->base);
ret = rzv2h_icu_parse_interrupts(rzv2h_icu_data, node);
if (ret) {
dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
- goto put_dev;
+ return ret;
}
resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(resetn)) {
- ret = PTR_ERR(resetn);
- goto put_dev;
- }
+ if (IS_ERR(resetn))
+ return PTR_ERR(resetn);
ret = reset_control_deassert(resetn);
if (ret) {
dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
- goto put_dev;
+ return ret;
}
pm_runtime_enable(&pdev->dev);
@@ -489,6 +511,8 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
goto pm_put;
}
+ rzv2h_icu_data->info = hw_info;
+
/*
* coccicheck complains about a missing put_device call before returning, but it's a false
* positive. We still need &pdev->dev after successfully returning from this function.
@@ -500,12 +524,19 @@ pm_put:
pm_disable:
pm_runtime_disable(&pdev->dev);
reset_control_assert(resetn);
-put_dev:
- put_device(&pdev->dev);
return ret;
}
+static const struct rzv2h_hw_info rzv2h_hw_params = {
+ .t_offs = 0,
+};
+
+static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+{
+ return rzv2h_icu_init_common(node, parent, &rzv2h_hw_params);
+}
+
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu)
IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_init)
IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu)
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 82102a4c5d68..f8215a8f656a 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -313,6 +313,10 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
int ret;
pchan = chan->con_priv;
+
+ if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
+ return IRQ_NONE;
+
if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE &&
!pchan->chan_in_use)
return IRQ_NONE;
@@ -330,13 +334,16 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
return IRQ_NONE;
}
- if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
- return IRQ_NONE;
-
+ /*
+	 * Clear this flag after updating the interrupt ack register and just
+	 * before mbox_chan_received_data(), which might call pcc_send_data()
+	 * where the flag is set again to start a new transfer. This is
+	 * required to avoid any possible race in updating this flag.
+ */
+ pchan->chan_in_use = false;
mbox_chan_received_data(chan, NULL);
check_and_ack(pchan, chan);
- pchan->chan_in_use = false;
return IRQ_HANDLED;
}
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 02a680c73979..bf0d7d58c8b0 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -96,7 +96,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
ret = mcb_device_register(bus, mdev);
if (ret < 0)
- goto err;
+ return ret;
return 0;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 15829ab192d2..7373dff023d0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2199,14 +2199,9 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
if (!rdev_set_badblocks(rdev, sect, s, 0))
abort = 1;
}
- if (abort) {
- conf->recovery_disabled =
- mddev->recovery_disabled;
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_done_sync(mddev, r1_bio->sectors, 0);
- put_buf(r1_bio);
+ if (abort)
return 0;
- }
+
/* Try next page */
sectors -= s;
sect += s;
@@ -2345,10 +2340,21 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
int disks = conf->raid_disks * 2;
struct bio *wbio;
- if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
- /* ouch - failed to read all of that. */
- if (!fix_sync_read_error(r1_bio))
+ if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
+ /*
+ * ouch - failed to read all of that.
+ * No need to fix read error for check/repair
+ * because all member disks are read.
+ */
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) ||
+ !fix_sync_read_error(r1_bio)) {
+ conf->recovery_disabled = mddev->recovery_disabled;
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_done_sync(mddev, r1_bio->sectors, 0);
+ put_buf(r1_bio);
return;
+ }
+ }
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
process_checks(r1_bio);
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 8ba096b8ebca..85ecb2aeefdb 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -140,6 +140,7 @@ config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
depends on GPIOLIB
select REGMAP_I2C
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the Sony
IMX214 camera.
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 6a393e18267f..ea5e294327e7 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -15,26 +15,152 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <media/media-entity.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-#define IMX214_REG_MODE_SELECT 0x0100
+#define IMX214_REG_MODE_SELECT CCI_REG8(0x0100)
#define IMX214_MODE_STANDBY 0x00
#define IMX214_MODE_STREAMING 0x01
+#define IMX214_REG_FAST_STANDBY_CTRL CCI_REG8(0x0106)
+
#define IMX214_DEFAULT_CLK_FREQ 24000000
-#define IMX214_DEFAULT_LINK_FREQ 480000000
+#define IMX214_DEFAULT_LINK_FREQ 600000000
+/* Keep wrong link frequency for backward compatibility */
+#define IMX214_DEFAULT_LINK_FREQ_LEGACY 480000000
#define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10)
#define IMX214_FPS 30
#define IMX214_MBUS_CODE MEDIA_BUS_FMT_SRGGB10_1X10
+/* V-TIMING internal */
+#define IMX214_REG_FRM_LENGTH_LINES CCI_REG16(0x0340)
+
/* Exposure control */
-#define IMX214_REG_EXPOSURE 0x0202
+#define IMX214_REG_EXPOSURE CCI_REG16(0x0202)
#define IMX214_EXPOSURE_MIN 0
#define IMX214_EXPOSURE_MAX 3184
#define IMX214_EXPOSURE_STEP 1
#define IMX214_EXPOSURE_DEFAULT 3184
+#define IMX214_REG_EXPOSURE_RATIO CCI_REG8(0x0222)
+#define IMX214_REG_SHORT_EXPOSURE CCI_REG16(0x0224)
+
+/* Analog gain control */
+#define IMX214_REG_ANALOG_GAIN CCI_REG16(0x0204)
+#define IMX214_REG_SHORT_ANALOG_GAIN CCI_REG16(0x0216)
+
+/* Digital gain control */
+#define IMX214_REG_DIG_GAIN_GREENR CCI_REG16(0x020e)
+#define IMX214_REG_DIG_GAIN_RED CCI_REG16(0x0210)
+#define IMX214_REG_DIG_GAIN_BLUE CCI_REG16(0x0212)
+#define IMX214_REG_DIG_GAIN_GREENB CCI_REG16(0x0214)
+
+#define IMX214_REG_ORIENTATION CCI_REG8(0x0101)
+
+#define IMX214_REG_MASK_CORR_FRAMES CCI_REG8(0x0105)
+#define IMX214_CORR_FRAMES_TRANSMIT 0
+#define IMX214_CORR_FRAMES_MASK 1
+
+#define IMX214_REG_CSI_DATA_FORMAT CCI_REG16(0x0112)
+#define IMX214_CSI_DATA_FORMAT_RAW8 0x0808
+#define IMX214_CSI_DATA_FORMAT_RAW10 0x0A0A
+#define IMX214_CSI_DATA_FORMAT_COMP6 0x0A06
+#define IMX214_CSI_DATA_FORMAT_COMP8 0x0A08
+
+#define IMX214_REG_CSI_LANE_MODE CCI_REG8(0x0114)
+#define IMX214_CSI_2_LANE_MODE 1
+#define IMX214_CSI_4_LANE_MODE 3
+
+#define IMX214_REG_EXCK_FREQ CCI_REG16(0x0136)
+#define IMX214_EXCK_FREQ(n) ((n) * 256) /* n expressed in MHz */
+
+#define IMX214_REG_TEMP_SENSOR_CONTROL CCI_REG8(0x0138)
+
+#define IMX214_REG_HDR_MODE CCI_REG8(0x0220)
+#define IMX214_HDR_MODE_OFF 0
+#define IMX214_HDR_MODE_ON 1
+
+#define IMX214_REG_HDR_RES_REDUCTION CCI_REG8(0x0221)
+#define IMX214_HDR_RES_REDU_THROUGH 0x11
+#define IMX214_HDR_RES_REDU_2_BINNING 0x22
+
+/* PLL settings */
+#define IMX214_REG_VTPXCK_DIV CCI_REG8(0x0301)
+#define IMX214_REG_VTSYCK_DIV CCI_REG8(0x0303)
+#define IMX214_REG_PREPLLCK_VT_DIV CCI_REG8(0x0305)
+#define IMX214_REG_PLL_VT_MPY CCI_REG16(0x0306)
+#define IMX214_REG_OPPXCK_DIV CCI_REG8(0x0309)
+#define IMX214_REG_OPSYCK_DIV CCI_REG8(0x030b)
+#define IMX214_REG_PLL_MULT_DRIV CCI_REG8(0x0310)
+#define IMX214_PLL_SINGLE 0
+#define IMX214_PLL_DUAL 1
+
+#define IMX214_REG_LINE_LENGTH_PCK CCI_REG16(0x0342)
+#define IMX214_REG_X_ADD_STA CCI_REG16(0x0344)
+#define IMX214_REG_Y_ADD_STA CCI_REG16(0x0346)
+#define IMX214_REG_X_ADD_END CCI_REG16(0x0348)
+#define IMX214_REG_Y_ADD_END CCI_REG16(0x034a)
+#define IMX214_REG_X_OUTPUT_SIZE CCI_REG16(0x034c)
+#define IMX214_REG_Y_OUTPUT_SIZE CCI_REG16(0x034e)
+#define IMX214_REG_X_EVEN_INC CCI_REG8(0x0381)
+#define IMX214_REG_X_ODD_INC CCI_REG8(0x0383)
+#define IMX214_REG_Y_EVEN_INC CCI_REG8(0x0385)
+#define IMX214_REG_Y_ODD_INC CCI_REG8(0x0387)
+
+#define IMX214_REG_SCALE_MODE CCI_REG8(0x0401)
+#define IMX214_SCALE_NONE 0
+#define IMX214_SCALE_HORIZONTAL 1
+#define IMX214_SCALE_FULL 2
+#define IMX214_REG_SCALE_M CCI_REG16(0x0404)
+
+#define IMX214_REG_DIG_CROP_X_OFFSET CCI_REG16(0x0408)
+#define IMX214_REG_DIG_CROP_Y_OFFSET CCI_REG16(0x040a)
+#define IMX214_REG_DIG_CROP_WIDTH CCI_REG16(0x040c)
+#define IMX214_REG_DIG_CROP_HEIGHT CCI_REG16(0x040e)
+
+#define IMX214_REG_REQ_LINK_BIT_RATE CCI_REG32(0x0820)
+#define IMX214_LINK_BIT_RATE_MBPS(n) ((n) << 16)
+
+/* Binning mode */
+#define IMX214_REG_BINNING_MODE CCI_REG8(0x0900)
+#define IMX214_BINNING_NONE 0
+#define IMX214_BINNING_ENABLE 1
+#define IMX214_REG_BINNING_TYPE CCI_REG8(0x0901)
+#define IMX214_REG_BINNING_WEIGHTING CCI_REG8(0x0902)
+#define IMX214_BINNING_AVERAGE 0x00
+#define IMX214_BINNING_SUMMED 0x01
+#define IMX214_BINNING_BAYER 0x02
+
+#define IMX214_REG_SING_DEF_CORR_EN CCI_REG8(0x0b06)
+#define IMX214_SING_DEF_CORR_OFF 0
+#define IMX214_SING_DEF_CORR_ON 1
+
+/* AWB control */
+#define IMX214_REG_ABS_GAIN_GREENR CCI_REG16(0x0b8e)
+#define IMX214_REG_ABS_GAIN_RED CCI_REG16(0x0b90)
+#define IMX214_REG_ABS_GAIN_BLUE CCI_REG16(0x0b92)
+#define IMX214_REG_ABS_GAIN_GREENB CCI_REG16(0x0b94)
+
+#define IMX214_REG_RMSC_NR_MODE CCI_REG8(0x3001)
+#define IMX214_REG_STATS_OUT_EN CCI_REG8(0x3013)
+#define IMX214_STATS_OUT_OFF 0
+#define IMX214_STATS_OUT_ON 1
+
+/* Chroma noise reduction */
+#define IMX214_REG_NML_NR_EN CCI_REG8(0x30a2)
+#define IMX214_NML_NR_OFF 0
+#define IMX214_NML_NR_ON 1
+
+#define IMX214_REG_EBD_SIZE_V CCI_REG8(0x5041)
+#define IMX214_EBD_NO 0
+#define IMX214_EBD_4_LINE 4
+
+#define IMX214_REG_RG_STATS_LMT CCI_REG16(0x6d12)
+#define IMX214_RG_STATS_LMT_10_BIT 0x03FF
+#define IMX214_RG_STATS_LMT_14_BIT 0x3FFF
+
+#define IMX214_REG_ATR_FAST_MOVE CCI_REG8(0x9300)
/* IMX214 native and active pixel array size */
#define IMX214_NATIVE_WIDTH 4224U
@@ -59,8 +185,6 @@ struct imx214 {
struct v4l2_subdev sd;
struct media_pad pad;
- struct v4l2_mbus_framefmt fmt;
- struct v4l2_rect crop;
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *pixel_rate;
@@ -71,353 +195,266 @@ struct imx214 {
struct regulator_bulk_data supplies[IMX214_NUM_SUPPLIES];
struct gpio_desc *enable_gpio;
-
- /*
- * Serialize control access, get/set format, get selection
- * and start streaming.
- */
- struct mutex mutex;
-};
-
-struct reg_8 {
- u16 addr;
- u8 val;
-};
-
-enum {
- IMX214_TABLE_WAIT_MS = 0,
- IMX214_TABLE_END,
- IMX214_MAX_RETRIES,
- IMX214_WAIT_MS
};
/*From imx214_mode_tbls.h*/
-static const struct reg_8 mode_4096x2304[] = {
- {0x0114, 0x03},
- {0x0220, 0x00},
- {0x0221, 0x11},
- {0x0222, 0x01},
- {0x0340, 0x0C},
- {0x0341, 0x7A},
- {0x0342, 0x13},
- {0x0343, 0x90},
- {0x0344, 0x00},
- {0x0345, 0x38},
- {0x0346, 0x01},
- {0x0347, 0x98},
- {0x0348, 0x10},
- {0x0349, 0x37},
- {0x034A, 0x0A},
- {0x034B, 0x97},
- {0x0381, 0x01},
- {0x0383, 0x01},
- {0x0385, 0x01},
- {0x0387, 0x01},
- {0x0900, 0x00},
- {0x0901, 0x00},
- {0x0902, 0x00},
- {0x3000, 0x35},
- {0x3054, 0x01},
- {0x305C, 0x11},
-
- {0x0112, 0x0A},
- {0x0113, 0x0A},
- {0x034C, 0x10},
- {0x034D, 0x00},
- {0x034E, 0x09},
- {0x034F, 0x00},
- {0x0401, 0x00},
- {0x0404, 0x00},
- {0x0405, 0x10},
- {0x0408, 0x00},
- {0x0409, 0x00},
- {0x040A, 0x00},
- {0x040B, 0x00},
- {0x040C, 0x10},
- {0x040D, 0x00},
- {0x040E, 0x09},
- {0x040F, 0x00},
-
- {0x0301, 0x05},
- {0x0303, 0x02},
- {0x0305, 0x03},
- {0x0306, 0x00},
- {0x0307, 0x96},
- {0x0309, 0x0A},
- {0x030B, 0x01},
- {0x0310, 0x00},
-
- {0x0820, 0x12},
- {0x0821, 0xC0},
- {0x0822, 0x00},
- {0x0823, 0x00},
-
- {0x3A03, 0x09},
- {0x3A04, 0x50},
- {0x3A05, 0x01},
-
- {0x0B06, 0x01},
- {0x30A2, 0x00},
-
- {0x30B4, 0x00},
-
- {0x3A02, 0xFF},
-
- {0x3011, 0x00},
- {0x3013, 0x01},
-
- {0x0202, 0x0C},
- {0x0203, 0x70},
- {0x0224, 0x01},
- {0x0225, 0xF4},
-
- {0x0204, 0x00},
- {0x0205, 0x00},
- {0x020E, 0x01},
- {0x020F, 0x00},
- {0x0210, 0x01},
- {0x0211, 0x00},
- {0x0212, 0x01},
- {0x0213, 0x00},
- {0x0214, 0x01},
- {0x0215, 0x00},
- {0x0216, 0x00},
- {0x0217, 0x00},
-
- {0x4170, 0x00},
- {0x4171, 0x10},
- {0x4176, 0x00},
- {0x4177, 0x3C},
- {0xAE20, 0x04},
- {0xAE21, 0x5C},
-
- {IMX214_TABLE_WAIT_MS, 10},
- {0x0138, 0x01},
- {IMX214_TABLE_END, 0x00}
+static const struct cci_reg_sequence mode_4096x2304[] = {
+ { IMX214_REG_HDR_MODE, IMX214_HDR_MODE_OFF },
+ { IMX214_REG_HDR_RES_REDUCTION, IMX214_HDR_RES_REDU_THROUGH },
+ { IMX214_REG_EXPOSURE_RATIO, 1 },
+ { IMX214_REG_FRM_LENGTH_LINES, 3194 },
+ { IMX214_REG_LINE_LENGTH_PCK, 5008 },
+ { IMX214_REG_X_ADD_STA, 56 },
+ { IMX214_REG_Y_ADD_STA, 408 },
+ { IMX214_REG_X_ADD_END, 4151 },
+ { IMX214_REG_Y_ADD_END, 2711 },
+ { IMX214_REG_X_EVEN_INC, 1 },
+ { IMX214_REG_X_ODD_INC, 1 },
+ { IMX214_REG_Y_EVEN_INC, 1 },
+ { IMX214_REG_Y_ODD_INC, 1 },
+ { IMX214_REG_BINNING_MODE, IMX214_BINNING_NONE },
+ { IMX214_REG_BINNING_TYPE, 0 },
+ { IMX214_REG_BINNING_WEIGHTING, IMX214_BINNING_AVERAGE },
+ { CCI_REG8(0x3000), 0x35 },
+ { CCI_REG8(0x3054), 0x01 },
+ { CCI_REG8(0x305C), 0x11 },
+
+ { IMX214_REG_CSI_DATA_FORMAT, IMX214_CSI_DATA_FORMAT_RAW10 },
+ { IMX214_REG_X_OUTPUT_SIZE, 4096 },
+ { IMX214_REG_Y_OUTPUT_SIZE, 2304 },
+ { IMX214_REG_SCALE_MODE, IMX214_SCALE_NONE },
+ { IMX214_REG_SCALE_M, 2 },
+ { IMX214_REG_DIG_CROP_X_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_Y_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_WIDTH, 4096 },
+ { IMX214_REG_DIG_CROP_HEIGHT, 2304 },
+
+ { IMX214_REG_VTPXCK_DIV, 5 },
+ { IMX214_REG_VTSYCK_DIV, 2 },
+ { IMX214_REG_PREPLLCK_VT_DIV, 3 },
+ { IMX214_REG_PLL_VT_MPY, 150 },
+ { IMX214_REG_OPPXCK_DIV, 10 },
+ { IMX214_REG_OPSYCK_DIV, 1 },
+ { IMX214_REG_PLL_MULT_DRIV, IMX214_PLL_SINGLE },
+
+ { IMX214_REG_REQ_LINK_BIT_RATE, IMX214_LINK_BIT_RATE_MBPS(4800) },
+
+ { CCI_REG8(0x3A03), 0x09 },
+ { CCI_REG8(0x3A04), 0x50 },
+ { CCI_REG8(0x3A05), 0x01 },
+
+ { IMX214_REG_SING_DEF_CORR_EN, IMX214_SING_DEF_CORR_ON },
+ { IMX214_REG_NML_NR_EN, IMX214_NML_NR_OFF },
+
+ { CCI_REG8(0x30B4), 0x00 },
+
+ { CCI_REG8(0x3A02), 0xFF },
+
+ { CCI_REG8(0x3011), 0x00 },
+ { IMX214_REG_STATS_OUT_EN, IMX214_STATS_OUT_ON },
+
+ { IMX214_REG_EXPOSURE, IMX214_EXPOSURE_DEFAULT },
+ { IMX214_REG_SHORT_EXPOSURE, 500 },
+
+ { IMX214_REG_ANALOG_GAIN, 0 },
+ { IMX214_REG_DIG_GAIN_GREENR, 256 },
+ { IMX214_REG_DIG_GAIN_RED, 256 },
+ { IMX214_REG_DIG_GAIN_BLUE, 256 },
+ { IMX214_REG_DIG_GAIN_GREENB, 256 },
+ { IMX214_REG_SHORT_ANALOG_GAIN, 0 },
+
+ { CCI_REG8(0x4170), 0x00 },
+ { CCI_REG8(0x4171), 0x10 },
+ { CCI_REG8(0x4176), 0x00 },
+ { CCI_REG8(0x4177), 0x3C },
+ { CCI_REG8(0xAE20), 0x04 },
+ { CCI_REG8(0xAE21), 0x5C },
};
-static const struct reg_8 mode_1920x1080[] = {
- {0x0114, 0x03},
- {0x0220, 0x00},
- {0x0221, 0x11},
- {0x0222, 0x01},
- {0x0340, 0x0C},
- {0x0341, 0x7A},
- {0x0342, 0x13},
- {0x0343, 0x90},
- {0x0344, 0x04},
- {0x0345, 0x78},
- {0x0346, 0x03},
- {0x0347, 0xFC},
- {0x0348, 0x0B},
- {0x0349, 0xF7},
- {0x034A, 0x08},
- {0x034B, 0x33},
- {0x0381, 0x01},
- {0x0383, 0x01},
- {0x0385, 0x01},
- {0x0387, 0x01},
- {0x0900, 0x00},
- {0x0901, 0x00},
- {0x0902, 0x00},
- {0x3000, 0x35},
- {0x3054, 0x01},
- {0x305C, 0x11},
-
- {0x0112, 0x0A},
- {0x0113, 0x0A},
- {0x034C, 0x07},
- {0x034D, 0x80},
- {0x034E, 0x04},
- {0x034F, 0x38},
- {0x0401, 0x00},
- {0x0404, 0x00},
- {0x0405, 0x10},
- {0x0408, 0x00},
- {0x0409, 0x00},
- {0x040A, 0x00},
- {0x040B, 0x00},
- {0x040C, 0x07},
- {0x040D, 0x80},
- {0x040E, 0x04},
- {0x040F, 0x38},
-
- {0x0301, 0x05},
- {0x0303, 0x02},
- {0x0305, 0x03},
- {0x0306, 0x00},
- {0x0307, 0x96},
- {0x0309, 0x0A},
- {0x030B, 0x01},
- {0x0310, 0x00},
-
- {0x0820, 0x12},
- {0x0821, 0xC0},
- {0x0822, 0x00},
- {0x0823, 0x00},
-
- {0x3A03, 0x04},
- {0x3A04, 0xF8},
- {0x3A05, 0x02},
-
- {0x0B06, 0x01},
- {0x30A2, 0x00},
-
- {0x30B4, 0x00},
-
- {0x3A02, 0xFF},
-
- {0x3011, 0x00},
- {0x3013, 0x01},
-
- {0x0202, 0x0C},
- {0x0203, 0x70},
- {0x0224, 0x01},
- {0x0225, 0xF4},
-
- {0x0204, 0x00},
- {0x0205, 0x00},
- {0x020E, 0x01},
- {0x020F, 0x00},
- {0x0210, 0x01},
- {0x0211, 0x00},
- {0x0212, 0x01},
- {0x0213, 0x00},
- {0x0214, 0x01},
- {0x0215, 0x00},
- {0x0216, 0x00},
- {0x0217, 0x00},
-
- {0x4170, 0x00},
- {0x4171, 0x10},
- {0x4176, 0x00},
- {0x4177, 0x3C},
- {0xAE20, 0x04},
- {0xAE21, 0x5C},
-
- {IMX214_TABLE_WAIT_MS, 10},
- {0x0138, 0x01},
- {IMX214_TABLE_END, 0x00}
+static const struct cci_reg_sequence mode_1920x1080[] = {
+ { IMX214_REG_HDR_MODE, IMX214_HDR_MODE_OFF },
+ { IMX214_REG_HDR_RES_REDUCTION, IMX214_HDR_RES_REDU_THROUGH },
+ { IMX214_REG_EXPOSURE_RATIO, 1 },
+ { IMX214_REG_FRM_LENGTH_LINES, 3194 },
+ { IMX214_REG_LINE_LENGTH_PCK, 5008 },
+ { IMX214_REG_X_ADD_STA, 1144 },
+ { IMX214_REG_Y_ADD_STA, 1020 },
+ { IMX214_REG_X_ADD_END, 3063 },
+ { IMX214_REG_Y_ADD_END, 2099 },
+ { IMX214_REG_X_EVEN_INC, 1 },
+ { IMX214_REG_X_ODD_INC, 1 },
+ { IMX214_REG_Y_EVEN_INC, 1 },
+ { IMX214_REG_Y_ODD_INC, 1 },
+ { IMX214_REG_BINNING_MODE, IMX214_BINNING_NONE },
+ { IMX214_REG_BINNING_TYPE, 0 },
+ { IMX214_REG_BINNING_WEIGHTING, IMX214_BINNING_AVERAGE },
+ { CCI_REG8(0x3000), 0x35 },
+ { CCI_REG8(0x3054), 0x01 },
+ { CCI_REG8(0x305C), 0x11 },
+
+ { IMX214_REG_CSI_DATA_FORMAT, IMX214_CSI_DATA_FORMAT_RAW10 },
+ { IMX214_REG_X_OUTPUT_SIZE, 1920 },
+ { IMX214_REG_Y_OUTPUT_SIZE, 1080 },
+ { IMX214_REG_SCALE_MODE, IMX214_SCALE_NONE },
+ { IMX214_REG_SCALE_M, 2 },
+ { IMX214_REG_DIG_CROP_X_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_Y_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_WIDTH, 1920 },
+ { IMX214_REG_DIG_CROP_HEIGHT, 1080 },
+
+ { IMX214_REG_VTPXCK_DIV, 5 },
+ { IMX214_REG_VTSYCK_DIV, 2 },
+ { IMX214_REG_PREPLLCK_VT_DIV, 3 },
+ { IMX214_REG_PLL_VT_MPY, 150 },
+ { IMX214_REG_OPPXCK_DIV, 10 },
+ { IMX214_REG_OPSYCK_DIV, 1 },
+ { IMX214_REG_PLL_MULT_DRIV, IMX214_PLL_SINGLE },
+
+ { IMX214_REG_REQ_LINK_BIT_RATE, IMX214_LINK_BIT_RATE_MBPS(4800) },
+
+ { CCI_REG8(0x3A03), 0x04 },
+ { CCI_REG8(0x3A04), 0xF8 },
+ { CCI_REG8(0x3A05), 0x02 },
+
+ { IMX214_REG_SING_DEF_CORR_EN, IMX214_SING_DEF_CORR_ON },
+ { IMX214_REG_NML_NR_EN, IMX214_NML_NR_OFF },
+
+ { CCI_REG8(0x30B4), 0x00 },
+
+ { CCI_REG8(0x3A02), 0xFF },
+
+ { CCI_REG8(0x3011), 0x00 },
+ { IMX214_REG_STATS_OUT_EN, IMX214_STATS_OUT_ON },
+
+ { IMX214_REG_EXPOSURE, IMX214_EXPOSURE_DEFAULT },
+ { IMX214_REG_SHORT_EXPOSURE, 500 },
+
+ { IMX214_REG_ANALOG_GAIN, 0 },
+ { IMX214_REG_DIG_GAIN_GREENR, 256 },
+ { IMX214_REG_DIG_GAIN_RED, 256 },
+ { IMX214_REG_DIG_GAIN_BLUE, 256 },
+ { IMX214_REG_DIG_GAIN_GREENB, 256 },
+ { IMX214_REG_SHORT_ANALOG_GAIN, 0 },
+
+ { CCI_REG8(0x4170), 0x00 },
+ { CCI_REG8(0x4171), 0x10 },
+ { CCI_REG8(0x4176), 0x00 },
+ { CCI_REG8(0x4177), 0x3C },
+ { CCI_REG8(0xAE20), 0x04 },
+ { CCI_REG8(0xAE21), 0x5C },
};
-static const struct reg_8 mode_table_common[] = {
+static const struct cci_reg_sequence mode_table_common[] = {
/* software reset */
/* software standby settings */
- {0x0100, 0x00},
+ { IMX214_REG_MODE_SELECT, IMX214_MODE_STANDBY },
/* ATR setting */
- {0x9300, 0x02},
+ { IMX214_REG_ATR_FAST_MOVE, 2 },
/* external clock setting */
- {0x0136, 0x18},
- {0x0137, 0x00},
+ { IMX214_REG_EXCK_FREQ, IMX214_EXCK_FREQ(IMX214_DEFAULT_CLK_FREQ / 1000000) },
/* global setting */
/* basic config */
- {0x0101, 0x00},
- {0x0105, 0x01},
- {0x0106, 0x01},
- {0x4550, 0x02},
- {0x4601, 0x00},
- {0x4642, 0x05},
- {0x6227, 0x11},
- {0x6276, 0x00},
- {0x900E, 0x06},
- {0xA802, 0x90},
- {0xA803, 0x11},
- {0xA804, 0x62},
- {0xA805, 0x77},
- {0xA806, 0xAE},
- {0xA807, 0x34},
- {0xA808, 0xAE},
- {0xA809, 0x35},
- {0xA80A, 0x62},
- {0xA80B, 0x83},
- {0xAE33, 0x00},
+ { IMX214_REG_ORIENTATION, 0 },
+ { IMX214_REG_MASK_CORR_FRAMES, IMX214_CORR_FRAMES_MASK },
+ { IMX214_REG_FAST_STANDBY_CTRL, 1 },
+ { CCI_REG8(0x4550), 0x02 },
+ { CCI_REG8(0x4601), 0x00 },
+ { CCI_REG8(0x4642), 0x05 },
+ { CCI_REG8(0x6227), 0x11 },
+ { CCI_REG8(0x6276), 0x00 },
+ { CCI_REG8(0x900E), 0x06 },
+ { CCI_REG8(0xA802), 0x90 },
+ { CCI_REG8(0xA803), 0x11 },
+ { CCI_REG8(0xA804), 0x62 },
+ { CCI_REG8(0xA805), 0x77 },
+ { CCI_REG8(0xA806), 0xAE },
+ { CCI_REG8(0xA807), 0x34 },
+ { CCI_REG8(0xA808), 0xAE },
+ { CCI_REG8(0xA809), 0x35 },
+ { CCI_REG8(0xA80A), 0x62 },
+ { CCI_REG8(0xA80B), 0x83 },
+ { CCI_REG8(0xAE33), 0x00 },
/* analog setting */
- {0x4174, 0x00},
- {0x4175, 0x11},
- {0x4612, 0x29},
- {0x461B, 0x12},
- {0x461F, 0x06},
- {0x4635, 0x07},
- {0x4637, 0x30},
- {0x463F, 0x18},
- {0x4641, 0x0D},
- {0x465B, 0x12},
- {0x465F, 0x11},
- {0x4663, 0x11},
- {0x4667, 0x0F},
- {0x466F, 0x0F},
- {0x470E, 0x09},
- {0x4909, 0xAB},
- {0x490B, 0x95},
- {0x4915, 0x5D},
- {0x4A5F, 0xFF},
- {0x4A61, 0xFF},
- {0x4A73, 0x62},
- {0x4A85, 0x00},
- {0x4A87, 0xFF},
+ { CCI_REG8(0x4174), 0x00 },
+ { CCI_REG8(0x4175), 0x11 },
+ { CCI_REG8(0x4612), 0x29 },
+ { CCI_REG8(0x461B), 0x12 },
+ { CCI_REG8(0x461F), 0x06 },
+ { CCI_REG8(0x4635), 0x07 },
+ { CCI_REG8(0x4637), 0x30 },
+ { CCI_REG8(0x463F), 0x18 },
+ { CCI_REG8(0x4641), 0x0D },
+ { CCI_REG8(0x465B), 0x12 },
+ { CCI_REG8(0x465F), 0x11 },
+ { CCI_REG8(0x4663), 0x11 },
+ { CCI_REG8(0x4667), 0x0F },
+ { CCI_REG8(0x466F), 0x0F },
+ { CCI_REG8(0x470E), 0x09 },
+ { CCI_REG8(0x4909), 0xAB },
+ { CCI_REG8(0x490B), 0x95 },
+ { CCI_REG8(0x4915), 0x5D },
+ { CCI_REG8(0x4A5F), 0xFF },
+ { CCI_REG8(0x4A61), 0xFF },
+ { CCI_REG8(0x4A73), 0x62 },
+ { CCI_REG8(0x4A85), 0x00 },
+ { CCI_REG8(0x4A87), 0xFF },
/* embedded data */
- {0x5041, 0x04},
- {0x583C, 0x04},
- {0x620E, 0x04},
- {0x6EB2, 0x01},
- {0x6EB3, 0x00},
- {0x9300, 0x02},
+ { IMX214_REG_EBD_SIZE_V, IMX214_EBD_4_LINE },
+ { CCI_REG8(0x583C), 0x04 },
+ { CCI_REG8(0x620E), 0x04 },
+ { CCI_REG8(0x6EB2), 0x01 },
+ { CCI_REG8(0x6EB3), 0x00 },
+ { IMX214_REG_ATR_FAST_MOVE, 2 },
/* imagequality */
/* HDR setting */
- {0x3001, 0x07},
- {0x6D12, 0x3F},
- {0x6D13, 0xFF},
- {0x9344, 0x03},
- {0x9706, 0x10},
- {0x9707, 0x03},
- {0x9708, 0x03},
- {0x9E04, 0x01},
- {0x9E05, 0x00},
- {0x9E0C, 0x01},
- {0x9E0D, 0x02},
- {0x9E24, 0x00},
- {0x9E25, 0x8C},
- {0x9E26, 0x00},
- {0x9E27, 0x94},
- {0x9E28, 0x00},
- {0x9E29, 0x96},
+ { IMX214_REG_RMSC_NR_MODE, 0x07 },
+ { IMX214_REG_RG_STATS_LMT, IMX214_RG_STATS_LMT_14_BIT },
+ { CCI_REG8(0x9344), 0x03 },
+ { CCI_REG8(0x9706), 0x10 },
+ { CCI_REG8(0x9707), 0x03 },
+ { CCI_REG8(0x9708), 0x03 },
+ { CCI_REG8(0x9E04), 0x01 },
+ { CCI_REG8(0x9E05), 0x00 },
+ { CCI_REG8(0x9E0C), 0x01 },
+ { CCI_REG8(0x9E0D), 0x02 },
+ { CCI_REG8(0x9E24), 0x00 },
+ { CCI_REG8(0x9E25), 0x8C },
+ { CCI_REG8(0x9E26), 0x00 },
+ { CCI_REG8(0x9E27), 0x94 },
+ { CCI_REG8(0x9E28), 0x00 },
+ { CCI_REG8(0x9E29), 0x96 },
/* CNR parameter setting */
- {0x69DB, 0x01},
+ { CCI_REG8(0x69DB), 0x01 },
/* Moire reduction */
- {0x6957, 0x01},
+ { CCI_REG8(0x6957), 0x01 },
/* image enhancement */
- {0x6987, 0x17},
- {0x698A, 0x03},
- {0x698B, 0x03},
+ { CCI_REG8(0x6987), 0x17 },
+ { CCI_REG8(0x698A), 0x03 },
+ { CCI_REG8(0x698B), 0x03 },
/* white balanace */
- {0x0B8E, 0x01},
- {0x0B8F, 0x00},
- {0x0B90, 0x01},
- {0x0B91, 0x00},
- {0x0B92, 0x01},
- {0x0B93, 0x00},
- {0x0B94, 0x01},
- {0x0B95, 0x00},
+ { IMX214_REG_ABS_GAIN_GREENR, 0x0100 },
+ { IMX214_REG_ABS_GAIN_RED, 0x0100 },
+ { IMX214_REG_ABS_GAIN_BLUE, 0x0100 },
+ { IMX214_REG_ABS_GAIN_GREENB, 0x0100 },
/* ATR setting */
- {0x6E50, 0x00},
- {0x6E51, 0x32},
- {0x9340, 0x00},
- {0x9341, 0x3C},
- {0x9342, 0x03},
- {0x9343, 0xFF},
- {IMX214_TABLE_END, 0x00}
+ { CCI_REG8(0x6E50), 0x00 },
+ { CCI_REG8(0x6E51), 0x32 },
+ { CCI_REG8(0x9340), 0x00 },
+ { CCI_REG8(0x9341), 0x3C },
+ { CCI_REG8(0x9342), 0x03 },
+ { CCI_REG8(0x9343), 0xFF },
};
/*
@@ -427,16 +464,19 @@ static const struct reg_8 mode_table_common[] = {
static const struct imx214_mode {
u32 width;
u32 height;
- const struct reg_8 *reg_table;
+ unsigned int num_of_regs;
+ const struct cci_reg_sequence *reg_table;
} imx214_modes[] = {
{
.width = 4096,
.height = 2304,
+ .num_of_regs = ARRAY_SIZE(mode_4096x2304),
.reg_table = mode_4096x2304,
},
{
.width = 1920,
.height = 1080,
+ .num_of_regs = ARRAY_SIZE(mode_1920x1080),
.reg_table = mode_1920x1080,
},
};
@@ -490,6 +530,22 @@ static int __maybe_unused imx214_power_off(struct device *dev)
return 0;
}
+static void imx214_update_pad_format(struct imx214 *imx214,
+ const struct imx214_mode *mode,
+ struct v4l2_mbus_framefmt *fmt, u32 code)
+{
+ fmt->code = IMX214_MBUS_CODE;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
+ fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
+ fmt->colorspace,
+ fmt->ycbcr_enc);
+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
+}
+
static int imx214_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
@@ -549,52 +605,6 @@ static const struct v4l2_subdev_core_ops imx214_core_ops = {
#endif
};
-static struct v4l2_mbus_framefmt *
-__imx214_get_pad_format(struct imx214 *imx214,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_state_get_format(sd_state, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &imx214->fmt;
- default:
- return NULL;
- }
-}
-
-static int imx214_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct imx214 *imx214 = to_imx214(sd);
-
- mutex_lock(&imx214->mutex);
- format->format = *__imx214_get_pad_format(imx214, sd_state,
- format->pad,
- format->which);
- mutex_unlock(&imx214->mutex);
-
- return 0;
-}
-
-static struct v4l2_rect *
-__imx214_get_pad_crop(struct imx214 *imx214,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad, enum v4l2_subdev_format_whence which)
-{
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_state_get_crop(sd_state, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &imx214->crop;
- default:
- return NULL;
- }
-}
-
static int imx214_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
@@ -604,34 +614,20 @@ static int imx214_set_format(struct v4l2_subdev *sd,
struct v4l2_rect *__crop;
const struct imx214_mode *mode;
- mutex_lock(&imx214->mutex);
-
- __crop = __imx214_get_pad_crop(imx214, sd_state, format->pad,
- format->which);
-
mode = v4l2_find_nearest_size(imx214_modes,
ARRAY_SIZE(imx214_modes), width, height,
format->format.width,
format->format.height);
- __crop->width = mode->width;
- __crop->height = mode->height;
-
- __format = __imx214_get_pad_format(imx214, sd_state, format->pad,
- format->which);
- __format->width = __crop->width;
- __format->height = __crop->height;
- __format->code = IMX214_MBUS_CODE;
- __format->field = V4L2_FIELD_NONE;
- __format->colorspace = V4L2_COLORSPACE_SRGB;
- __format->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(__format->colorspace);
- __format->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
- __format->colorspace, __format->ycbcr_enc);
- __format->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(__format->colorspace);
+ imx214_update_pad_format(imx214, mode, &format->format,
+ format->format.code);
+ __format = v4l2_subdev_state_get_format(sd_state, 0);
- format->format = *__format;
+ *__format = format->format;
- mutex_unlock(&imx214->mutex);
+ __crop = v4l2_subdev_state_get_crop(sd_state, 0);
+ __crop->width = mode->width;
+ __crop->height = mode->height;
return 0;
}
@@ -640,14 +636,9 @@ static int imx214_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
- struct imx214 *imx214 = to_imx214(sd);
-
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- mutex_lock(&imx214->mutex);
- sel->r = *__imx214_get_pad_crop(imx214, sd_state, sel->pad,
- sel->which);
- mutex_unlock(&imx214->mutex);
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
return 0;
case V4L2_SEL_TGT_NATIVE_SIZE:
@@ -687,7 +678,6 @@ static int imx214_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx214 *imx214 = container_of(ctrl->handler,
struct imx214, ctrls);
- u8 vals[2];
int ret;
/*
@@ -699,12 +689,7 @@ static int imx214_set_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_EXPOSURE:
- vals[1] = ctrl->val;
- vals[0] = ctrl->val >> 8;
- ret = regmap_bulk_write(imx214->regmap, IMX214_REG_EXPOSURE, vals, 2);
- if (ret < 0)
- dev_err(imx214->dev, "Error %d\n", ret);
- ret = 0;
+ cci_write(imx214->regmap, IMX214_REG_EXPOSURE, ctrl->val, &ret);
break;
default:
@@ -790,76 +775,52 @@ static int imx214_ctrls_init(struct imx214 *imx214)
return 0;
};
-#define MAX_CMD 4
-static int imx214_write_table(struct imx214 *imx214,
- const struct reg_8 table[])
-{
- u8 vals[MAX_CMD];
- int i;
- int ret;
-
- for (; table->addr != IMX214_TABLE_END ; table++) {
- if (table->addr == IMX214_TABLE_WAIT_MS) {
- usleep_range(table->val * 1000,
- table->val * 1000 + 500);
- continue;
- }
-
- for (i = 0; i < MAX_CMD; i++) {
- if (table[i].addr != (table[0].addr + i))
- break;
- vals[i] = table[i].val;
- }
-
- ret = regmap_bulk_write(imx214->regmap, table->addr, vals, i);
-
- if (ret) {
- dev_err(imx214->dev, "write_table error: %d\n", ret);
- return ret;
- }
-
- table += i - 1;
- }
-
- return 0;
-}
-
static int imx214_start_streaming(struct imx214 *imx214)
{
+ const struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_subdev_state *state;
const struct imx214_mode *mode;
int ret;
- mutex_lock(&imx214->mutex);
- ret = imx214_write_table(imx214, mode_table_common);
+ ret = cci_multi_reg_write(imx214->regmap, mode_table_common,
+ ARRAY_SIZE(mode_table_common), NULL);
if (ret < 0) {
dev_err(imx214->dev, "could not sent common table %d\n", ret);
- goto error;
+ return ret;
}
- mode = v4l2_find_nearest_size(imx214_modes,
- ARRAY_SIZE(imx214_modes), width, height,
- imx214->fmt.width, imx214->fmt.height);
- ret = imx214_write_table(imx214, mode->reg_table);
+ ret = cci_write(imx214->regmap, IMX214_REG_CSI_LANE_MODE,
+ IMX214_CSI_4_LANE_MODE, NULL);
+ if (ret) {
+ dev_err(imx214->dev, "failed to configure lanes\n");
+ return ret;
+ }
+
+ state = v4l2_subdev_get_locked_active_state(&imx214->sd);
+ fmt = v4l2_subdev_state_get_format(state, 0);
+ mode = v4l2_find_nearest_size(imx214_modes, ARRAY_SIZE(imx214_modes),
+ width, height, fmt->width, fmt->height);
+ ret = cci_multi_reg_write(imx214->regmap, mode->reg_table,
+ mode->num_of_regs, NULL);
if (ret < 0) {
dev_err(imx214->dev, "could not sent mode table %d\n", ret);
- goto error;
+ return ret;
}
+
+ usleep_range(10000, 10500);
+
+ cci_write(imx214->regmap, IMX214_REG_TEMP_SENSOR_CONTROL, 0x01, NULL);
+
ret = __v4l2_ctrl_handler_setup(&imx214->ctrls);
if (ret < 0) {
dev_err(imx214->dev, "could not sync v4l2 controls\n");
- goto error;
+ return ret;
}
- ret = regmap_write(imx214->regmap, IMX214_REG_MODE_SELECT, IMX214_MODE_STREAMING);
- if (ret < 0) {
+ ret = cci_write(imx214->regmap, IMX214_REG_MODE_SELECT,
+ IMX214_MODE_STREAMING, NULL);
+ if (ret < 0)
dev_err(imx214->dev, "could not sent start table %d\n", ret);
- goto error;
- }
- mutex_unlock(&imx214->mutex);
- return 0;
-
-error:
- mutex_unlock(&imx214->mutex);
return ret;
}
@@ -867,7 +828,8 @@ static int imx214_stop_streaming(struct imx214 *imx214)
{
int ret;
- ret = regmap_write(imx214->regmap, IMX214_REG_MODE_SELECT, IMX214_MODE_STANDBY);
+ ret = cci_write(imx214->regmap, IMX214_REG_MODE_SELECT,
+ IMX214_MODE_STANDBY, NULL);
if (ret < 0)
dev_err(imx214->dev, "could not sent stop table %d\n", ret);
@@ -877,14 +839,17 @@ static int imx214_stop_streaming(struct imx214 *imx214)
static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
{
struct imx214 *imx214 = to_imx214(subdev);
- int ret;
+ struct v4l2_subdev_state *state;
+ int ret = 0;
if (enable) {
ret = pm_runtime_resume_and_get(imx214->dev);
if (ret < 0)
return ret;
+ state = v4l2_subdev_lock_and_get_active_state(subdev);
ret = imx214_start_streaming(imx214);
+ v4l2_subdev_unlock_state(state);
if (ret < 0)
goto err_rpm_put;
} else {
@@ -948,7 +913,7 @@ static const struct v4l2_subdev_pad_ops imx214_subdev_pad_ops = {
.enum_mbus_code = imx214_enum_mbus_code,
.enum_frame_size = imx214_enum_frame_size,
.enum_frame_interval = imx214_enum_frame_interval,
- .get_fmt = imx214_get_format,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = imx214_set_format,
.get_selection = imx214_get_selection,
.get_frame_interval = imx214_get_frame_interval,
@@ -965,12 +930,6 @@ static const struct v4l2_subdev_internal_ops imx214_internal_ops = {
.init_state = imx214_entity_init_state,
};
-static const struct regmap_config sensor_regmap_config = {
- .reg_bits = 16,
- .val_bits = 8,
- .cache_type = REGCACHE_MAPLE,
-};
-
static int imx214_get_regulators(struct device *dev, struct imx214 *imx214)
{
unsigned int i;
@@ -992,28 +951,42 @@ static int imx214_parse_fwnode(struct device *dev)
int ret;
endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
- if (!endpoint) {
- dev_err(dev, "endpoint node not found\n");
- return -EINVAL;
- }
+ if (!endpoint)
+ return dev_err_probe(dev, -EINVAL, "endpoint node not found\n");
ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &bus_cfg);
if (ret) {
- dev_err(dev, "parsing endpoint node failed\n");
+ dev_err_probe(dev, ret, "parsing endpoint node failed\n");
+ goto done;
+ }
+
+ /* Check the number of MIPI CSI2 data lanes */
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 4) {
+ ret = dev_err_probe(dev, -EINVAL,
+ "only 4 data lanes are currently supported\n");
goto done;
}
- for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
+ if (bus_cfg.nr_of_link_frequencies != 1)
+ dev_warn(dev, "Only one link-frequency supported, please review your DT. Continuing anyway\n");
+
+ for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
if (bus_cfg.link_frequencies[i] == IMX214_DEFAULT_LINK_FREQ)
break;
-
- if (i == bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "link-frequencies %d not supported, Please review your DT\n",
- IMX214_DEFAULT_LINK_FREQ);
- ret = -EINVAL;
- goto done;
+ if (bus_cfg.link_frequencies[i] ==
+ IMX214_DEFAULT_LINK_FREQ_LEGACY) {
+ dev_warn(dev,
+ "link-frequencies %d not supported, please review your DT. Continuing anyway\n",
+ IMX214_DEFAULT_LINK_FREQ);
+ break;
+ }
}
+ if (i == bus_cfg.nr_of_link_frequencies)
+ ret = dev_err_probe(dev, -EINVAL,
+ "link-frequencies %d not supported, please review your DT\n",
+ IMX214_DEFAULT_LINK_FREQ);
+
done:
v4l2_fwnode_endpoint_free(&bus_cfg);
fwnode_handle_put(endpoint);
@@ -1037,34 +1010,28 @@ static int imx214_probe(struct i2c_client *client)
imx214->dev = dev;
imx214->xclk = devm_clk_get(dev, NULL);
- if (IS_ERR(imx214->xclk)) {
- dev_err(dev, "could not get xclk");
- return PTR_ERR(imx214->xclk);
- }
+ if (IS_ERR(imx214->xclk))
+ return dev_err_probe(dev, PTR_ERR(imx214->xclk),
+ "failed to get xclk\n");
ret = clk_set_rate(imx214->xclk, IMX214_DEFAULT_CLK_FREQ);
- if (ret) {
- dev_err(dev, "could not set xclk frequency\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to set xclk frequency\n");
ret = imx214_get_regulators(dev, imx214);
- if (ret < 0) {
- dev_err(dev, "cannot get regulators\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
imx214->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(imx214->enable_gpio)) {
- dev_err(dev, "cannot get enable gpio\n");
- return PTR_ERR(imx214->enable_gpio);
- }
+ if (IS_ERR(imx214->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(imx214->enable_gpio),
+ "failed to get enable gpio\n");
- imx214->regmap = devm_regmap_init_i2c(client, &sensor_regmap_config);
- if (IS_ERR(imx214->regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(imx214->regmap);
- }
+ imx214->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(imx214->regmap))
+ return dev_err_probe(dev, PTR_ERR(imx214->regmap),
+ "failed to initialize CCI\n");
v4l2_i2c_subdev_init(&imx214->sd, client, &imx214_subdev_ops);
imx214->sd.internal_ops = &imx214_internal_ops;
@@ -1079,9 +1046,6 @@ static int imx214_probe(struct i2c_client *client)
if (ret < 0)
goto error_power_off;
- mutex_init(&imx214->mutex);
- imx214->ctrls.lock = &imx214->mutex;
-
imx214->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
imx214->pad.flags = MEDIA_PAD_FL_SOURCE;
imx214->sd.dev = &client->dev;
@@ -1089,32 +1053,40 @@ static int imx214_probe(struct i2c_client *client)
ret = media_entity_pads_init(&imx214->sd.entity, 1, &imx214->pad);
if (ret < 0) {
- dev_err(dev, "could not register media entity\n");
+ dev_err_probe(dev, ret, "failed to init entity pads\n");
goto free_ctrl;
}
- imx214_entity_init_state(&imx214->sd, NULL);
+ imx214->sd.state_lock = imx214->ctrls.lock;
+ ret = v4l2_subdev_init_finalize(&imx214->sd);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "subdev init error\n");
+ goto free_entity;
+ }
pm_runtime_set_active(imx214->dev);
pm_runtime_enable(imx214->dev);
ret = v4l2_async_register_subdev_sensor(&imx214->sd);
if (ret < 0) {
- dev_err(dev, "could not register v4l2 device\n");
- goto free_entity;
+ dev_err_probe(dev, ret,
+ "failed to register sensor sub-device\n");
+ goto error_subdev_cleanup;
}
pm_runtime_idle(imx214->dev);
return 0;
-free_entity:
+error_subdev_cleanup:
pm_runtime_disable(imx214->dev);
pm_runtime_set_suspended(&client->dev);
+ v4l2_subdev_cleanup(&imx214->sd);
+
+free_entity:
media_entity_cleanup(&imx214->sd.entity);
free_ctrl:
- mutex_destroy(&imx214->mutex);
v4l2_ctrl_handler_free(&imx214->ctrls);
error_power_off:
@@ -1129,9 +1101,9 @@ static void imx214_remove(struct i2c_client *client)
struct imx214 *imx214 = to_imx214(sd);
v4l2_async_unregister_subdev(&imx214->sd);
+ v4l2_subdev_cleanup(sd);
media_entity_cleanup(&imx214->sd.entity);
v4l2_ctrl_handler_free(&imx214->ctrls);
- mutex_destroy(&imx214->mutex);
pm_runtime_disable(&client->dev);
if (!pm_runtime_status_suspended(&client->dev)) {
imx214_power_off(imx214->dev);
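As a sanity check on the register-table conversion above, the following user-space sketch (plain C restatement, not kernel code) reproduces the arithmetic behind IMX214_EXCK_FREQ() and IMX214_LINK_BIT_RATE_MBPS() and shows it matches the raw byte pairs the old reg_8 tables programmed (0x18/0x00 at 0x0136-0x0137 for the 24 MHz clock, 0x12 0xC0 0x00 0x00 at 0x0820-0x0823 for 4800 Mbps).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same expressions as the driver macros, restated for a quick check. */
#define IMX214_EXCK_FREQ(n)		((n) * 256)		/* n in MHz */
#define IMX214_LINK_BIT_RATE_MBPS(n)	((uint32_t)(n) << 16)

int main(void)
{
	/* 24 MHz external clock: old table wrote 0x18 to 0x0136 and 0x00 to 0x0137. */
	uint16_t exck = IMX214_EXCK_FREQ(24);
	assert(exck == 0x1800);

	/* 4800 Mbps requested link rate: old table wrote 0x12 0xC0 0x00 0x00. */
	uint32_t rate = IMX214_LINK_BIT_RATE_MBPS(4800);
	assert(rate == 0x12C00000);

	printf("EXCK_FREQ(24) = 0x%04X, LINK_BIT_RATE_MBPS(4800) = 0x%08X\n",
	       exck, rate);
	return 0;
}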
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index 83b49cf114ac..625fbcd39068 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -1937,6 +1937,32 @@ static int ov08x40_stop_streaming(struct ov08x40 *ov08x)
OV08X40_REG_VALUE_08BIT, OV08X40_MODE_STANDBY);
}
+/* Verify chip ID */
+static int ov08x40_identify_module(struct ov08x40 *ov08x)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
+ int ret;
+ u32 val;
+
+ if (ov08x->identified)
+ return 0;
+
+ ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
+ OV08X40_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV08X40_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV08X40_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ ov08x->identified = true;
+
+ return 0;
+}
+
static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov08x40 *ov08x = to_ov08x40(sd);
@@ -1950,6 +1976,10 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
if (ret < 0)
goto err_unlock;
+ ret = ov08x40_identify_module(ov08x);
+ if (ret)
+ goto err_rpm_put;
+
/*
* Apply default & customized values
* and then start streaming.
@@ -1974,32 +2004,6 @@ err_unlock:
return ret;
}
-/* Verify chip ID */
-static int ov08x40_identify_module(struct ov08x40 *ov08x)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
- int ret;
- u32 val;
-
- if (ov08x->identified)
- return 0;
-
- ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
- OV08X40_REG_VALUE_24BIT, &val);
- if (ret)
- return ret;
-
- if (val != OV08X40_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
- OV08X40_CHIP_ID, val);
- return -ENXIO;
- }
-
- ov08x->identified = true;
-
- return 0;
-}
-
static const struct v4l2_subdev_video_ops ov08x40_video_ops = {
.s_stream = ov08x40_set_stream,
};
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 5b861dbff27e..6c24426104ba 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -29,6 +29,13 @@ static const unsigned long rodata = 0xAA55AA55;
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
/*
+ * This is a pointer to do_nothing() which is initialized at runtime rather
+ * than build time to avoid objtool IBT validation warnings caused by an
+ * inlined unrolled memcpy() in execute_location().
+ */
+static void __ro_after_init *do_nothing_ptr;
+
+/*
* This just returns to the caller. It is designed to be copied into
* non-executable memory regions.
*/
@@ -65,13 +72,12 @@ static noinline __nocfi void execute_location(void *dst, bool write)
{
void (*func)(void);
func_desc_t fdesc;
- void *do_nothing_text = dereference_function_descriptor(do_nothing);
- pr_info("attempting ok execution at %px\n", do_nothing_text);
+ pr_info("attempting ok execution at %px\n", do_nothing_ptr);
do_nothing();
if (write == CODE_WRITE) {
- memcpy(dst, do_nothing_text, EXEC_SIZE);
+ memcpy(dst, do_nothing_ptr, EXEC_SIZE);
flush_icache_range((unsigned long)dst,
(unsigned long)dst + EXEC_SIZE);
}
@@ -267,6 +273,8 @@ static void lkdtm_ACCESS_NULL(void)
void __init lkdtm_perms_init(void)
{
+ do_nothing_ptr = dereference_function_descriptor(do_nothing);
+
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
}
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
index 04756302b878..98d3d123004c 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -37,6 +37,7 @@
struct pci1xxxx_gpio {
struct auxiliary_device *aux_dev;
void __iomem *reg_base;
+ raw_spinlock_t wa_lock;
struct gpio_chip gpio;
spinlock_t lock;
int irq_base;
@@ -167,7 +168,7 @@ static void pci1xxxx_gpio_irq_ack(struct irq_data *data)
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
- pci1xxx_assign_bit(priv->reg_base, INTR_STAT_OFFSET(gpio), (gpio % 32), true);
+ writel(BIT(gpio % 32), priv->reg_base + INTR_STAT_OFFSET(gpio));
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -257,6 +258,7 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
struct pci1xxxx_gpio *priv = dev_id;
struct gpio_chip *gc = &priv->gpio;
unsigned long int_status = 0;
+ unsigned long wa_flags;
unsigned long flags;
u8 pincount;
int bit;
@@ -280,7 +282,9 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
spin_unlock_irqrestore(&priv->lock, flags);
irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32)));
- handle_nested_irq(irq);
+ raw_spin_lock_irqsave(&priv->wa_lock, wa_flags);
+ generic_handle_irq(irq);
+ raw_spin_unlock_irqrestore(&priv->wa_lock, wa_flags);
}
}
spin_lock_irqsave(&priv->lock, flags);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index a5f88ec97df7..bc40b940ae21 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -117,6 +117,7 @@
#define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */
+#define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */
#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */
/*
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index d6ff9d82ae94..3f9c60b579ae 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -124,6 +124,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
/* required last entry */
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index 7be1649b1972..fa553d4914b6 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -36,20 +36,24 @@
#define VSC_TP_XFER_TIMEOUT_BYTES 700
#define VSC_TP_PACKET_PADDING_SIZE 1
#define VSC_TP_PACKET_SIZE(pkt) \
- (sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE)
+ (sizeof(struct vsc_tp_packet_hdr) + le16_to_cpu((pkt)->hdr.len) + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_PACKET_SIZE \
- (sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
+ (sizeof(struct vsc_tp_packet_hdr) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_XFER_SIZE \
(VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
#define VSC_TP_NEXT_XFER_LEN(len, offset) \
- (len + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)
+ (len + sizeof(struct vsc_tp_packet_hdr) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)
-struct vsc_tp_packet {
+struct vsc_tp_packet_hdr {
__u8 sync;
__u8 cmd;
__le16 len;
__le32 seq;
- __u8 buf[] __counted_by(len);
+};
+
+struct vsc_tp_packet {
+ struct vsc_tp_packet_hdr hdr;
+ __u8 buf[VSC_TP_MAX_XFER_SIZE - sizeof(struct vsc_tp_packet_hdr)];
};
struct vsc_tp {
@@ -158,12 +162,12 @@ static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len
static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
void *ibuf, u16 ilen)
{
- int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet);
+ int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet_hdr);
int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
u8 *src, *crc_src, *rx_buf = tp->rx_buf;
int count_down = VSC_TP_MAX_XFER_COUNT;
u32 recv_crc = 0, crc = ~0;
- struct vsc_tp_packet ack;
+ struct vsc_tp_packet_hdr ack;
u8 *dst = (u8 *)&ack;
bool synced = false;
@@ -280,10 +284,10 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
guard(mutex)(&tp->mutex);
- pkt->sync = VSC_TP_PACKET_SYNC;
- pkt->cmd = cmd;
- pkt->len = cpu_to_le16(olen);
- pkt->seq = cpu_to_le32(++tp->seq);
+ pkt->hdr.sync = VSC_TP_PACKET_SYNC;
+ pkt->hdr.cmd = cmd;
+ pkt->hdr.len = cpu_to_le16(olen);
+ pkt->hdr.seq = cpu_to_le32(++tp->seq);
memcpy(pkt->buf, obuf, olen);
crc = ~crc32(~0, (u8 *)pkt, sizeof(pkt) + olen);
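A compact sketch of the reworked packet layout follows. It is user-space C with fixed-width stand-ins for __u8/__le16/__le32, a 4-byte CRC assumed for illustration, and the size macro restated with an explicit length instead of a packet pointer; the point is only that the length accounting now counts sizeof(struct vsc_tp_packet_hdr) rather than the full packet buffer.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VSC_TP_CRC_SIZE		sizeof(uint32_t)	/* assumed 4-byte CRC */

struct vsc_tp_packet_hdr {
	uint8_t  sync;
	uint8_t  cmd;
	uint16_t len;	/* little-endian on the wire */
	uint32_t seq;
};

/* On-wire size: header + payload length + CRC (demo takes a plain length). */
#define VSC_TP_PACKET_SIZE(len) \
	(sizeof(struct vsc_tp_packet_hdr) + (len) + VSC_TP_CRC_SIZE)

int main(void)
{
	printf("header: %zu bytes\n", sizeof(struct vsc_tp_packet_hdr));
	printf("packet with 16-byte payload: %zu bytes\n",
	       VSC_TP_PACKET_SIZE((size_t)16));
	return 0;
}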
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index e3d39311fdc7..3fd898647237 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1873,7 +1873,7 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
return 0;
- ice = of_qcom_ice_get(dev);
+ ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 5883eb93efb1..22513f3d56db 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2541,6 +2541,9 @@ mt7531_setup_common(struct dsa_switch *ds)
struct mt7530_priv *priv = ds->priv;
int ret, i;
+ ds->assisted_learning_on_cpu_port = true;
+ ds->mtu_enforcement_ingress = true;
+
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
@@ -2688,9 +2691,6 @@ mt7531_setup(struct dsa_switch *ds)
if (ret)
return ret;
- ds->assisted_learning_on_cpu_port = true;
- ds->mtu_enforcement_ingress = true;
-
return 0;
}
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index c83a0a80d533..506f682d15c1 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -5,11 +5,6 @@
#include "core.h"
-struct pdsc_wait_context {
- struct pdsc_qcq *qcq;
- struct completion wait_completion;
-};
-
static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
{
union pds_core_notifyq_comp *comp;
@@ -109,10 +104,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
q_info = &q->info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- /* Copy out the completion data */
- memcpy(q_info->dest, comp, sizeof(*comp));
-
- complete_all(&q_info->wc->wait_completion);
+ if (!completion_done(&q_info->completion)) {
+ memcpy(q_info->dest, comp, sizeof(*comp));
+ complete(&q_info->completion);
+ }
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
@@ -162,8 +157,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
static int __pdsc_adminq_post(struct pdsc *pdsc,
struct pdsc_qcq *qcq,
union pds_core_adminq_cmd *cmd,
- union pds_core_adminq_comp *comp,
- struct pdsc_wait_context *wc)
+ union pds_core_adminq_comp *comp)
{
struct pdsc_queue *q = &qcq->q;
struct pdsc_q_info *q_info;
@@ -205,9 +199,9 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
/* Post the request */
index = q->head_idx;
q_info = &q->info[index];
- q_info->wc = wc;
q_info->dest = comp;
memcpy(q_info->desc, cmd, sizeof(*cmd));
+ reinit_completion(&q_info->completion);
dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
q->head_idx, q->tail_idx);
@@ -231,16 +225,13 @@ int pdsc_adminq_post(struct pdsc *pdsc,
union pds_core_adminq_comp *comp,
bool fast_poll)
{
- struct pdsc_wait_context wc = {
- .wait_completion =
- COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
- };
unsigned long poll_interval = 1;
unsigned long poll_jiffies;
unsigned long time_limit;
unsigned long time_start;
unsigned long time_done;
unsigned long remaining;
+ struct completion *wc;
int err = 0;
int index;
@@ -250,20 +241,19 @@ int pdsc_adminq_post(struct pdsc *pdsc,
return -ENXIO;
}
- wc.qcq = &pdsc->adminqcq;
- index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
+ index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp);
if (index < 0) {
err = index;
goto err_out;
}
+ wc = &pdsc->adminqcq.q.info[index].completion;
time_start = jiffies;
time_limit = time_start + HZ * pdsc->devcmd_timeout;
do {
/* Timeslice the actual wait to catch IO errors etc early */
poll_jiffies = msecs_to_jiffies(poll_interval);
- remaining = wait_for_completion_timeout(&wc.wait_completion,
- poll_jiffies);
+ remaining = wait_for_completion_timeout(wc, poll_jiffies);
if (remaining)
break;
@@ -292,9 +282,11 @@ int pdsc_adminq_post(struct pdsc *pdsc,
dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
__func__, jiffies_to_msecs(time_done - time_start));
- /* Check the results */
- if (time_after_eq(time_done, time_limit))
+ /* Check the results and clear an un-completed timeout */
+ if (time_after_eq(time_done, time_limit) && !completion_done(wc)) {
err = -ETIMEDOUT;
+ complete(wc);
+ }
dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 2babea110991..b76a9b7e0aed 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -107,9 +107,6 @@ int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
dev_dbg(pf->dev, "%s: %s opcode %d\n",
__func__, dev_name(&padev->aux_dev.dev), req->opcode);
- if (pf->state)
- return -ENXIO;
-
/* Wrap the client's request */
cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
cmd.client_request.client_id = cpu_to_le16(padev->client_id);
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 536635e57727..3c60d4cf9d0e 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -167,8 +167,10 @@ static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
q->base = base;
q->base_pa = base_pa;
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
cur->desc = base + (i * q->desc_size);
+ init_completion(&cur->completion);
+ }
}
static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
@@ -325,10 +327,7 @@ static int pdsc_core_init(struct pdsc *pdsc)
size_t sz;
int err;
- /* Scale the descriptor ring length based on number of CPUs and VFs */
- numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
- numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
- numdescs = roundup_pow_of_two(numdescs);
+ numdescs = PDSC_ADMINQ_MAX_LENGTH;
err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
numdescs,
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 14522d6d5f86..ec637dc4327a 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -16,7 +16,7 @@
#define PDSC_WATCHDOG_SECS 5
#define PDSC_QUEUE_NAME_MAX_SZ 16
-#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
+#define PDSC_ADMINQ_MAX_LENGTH 16 /* must be a power of two */
#define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
#define PDSC_TEARDOWN_RECOVERY false
#define PDSC_TEARDOWN_REMOVING true
@@ -96,7 +96,7 @@ struct pdsc_q_info {
unsigned int bytes;
unsigned int nbufs;
struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS];
- struct pdsc_wait_context *wc;
+ struct completion completion;
void *dest;
};
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 44971e71991f..ca23cde385e6 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -102,7 +102,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
.fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
.fw_control.oper = PDS_CORE_FW_GET_LIST,
};
- struct pds_core_fw_list_info fw_list;
+ struct pds_core_fw_list_info fw_list = {};
struct pdsc *pdsc = devlink_priv(dl);
union pds_core_dev_comp comp;
char buf[32];
@@ -115,8 +115,6 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (!err)
memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
mutex_unlock(&pdsc->devcmd_lock);
- if (err && err != -EIO)
- return err;
listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
for (i = 0; i < listlen; i++) {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 2106861463e4..3ee52f4b1166 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1850,6 +1850,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
}
}
+static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ enetc_flip_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[rx_ring_first]);
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+}
+
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
struct napi_struct *napi, int work_limit,
struct bpf_prog *prog)
@@ -1868,11 +1878,10 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd, *orig_rxbd;
- int orig_i, orig_cleaned_cnt;
struct xdp_buff xdp_buff;
struct sk_buff *skb;
+ int orig_i, err;
u32 bd_status;
- int err;
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -1887,7 +1896,6 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
orig_rxbd = rxbd;
- orig_cleaned_cnt = cleaned_cnt;
orig_i = i;
enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
@@ -1915,15 +1923,21 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
rx_ring->stats.xdp_drops++;
break;
case XDP_PASS:
- rxbd = orig_rxbd;
- cleaned_cnt = orig_cleaned_cnt;
- i = orig_i;
-
- skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
- &i, &cleaned_cnt,
- ENETC_RXB_DMA_SIZE_XDP);
- if (unlikely(!skb))
+ skb = xdp_build_skb_from_buff(&xdp_buff);
+ /* Probably under memory pressure, stop NAPI */
+ if (unlikely(!skb)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_drops++;
goto out;
+ }
+
+ enetc_get_offloads(rx_ring, orig_rxbd, skb);
+
+ /* These buffers are about to be owned by the stack.
+ * Update our buffer cache (the rx_swbd array elements)
+ * with their other page halves.
+ */
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
napi_gro_receive(napi, skb);
break;
@@ -1965,11 +1979,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_redirect_failures++;
} else {
- while (orig_i != i) {
- enetc_flip_rx_buff(rx_ring,
- &rx_ring->rx_swbd[orig_i]);
- enetc_bdr_idx_inc(rx_ring, &orig_i);
- }
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
xdp_redirect_frm_cnt++;
rx_ring->stats.xdp_redirect++;
}
@@ -3362,7 +3372,8 @@ static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
bdr->buffer_offset = ENETC_RXB_PAD;
priv->rx_ring[i] = bdr;
- err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+ err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
+ ENETC_RXB_DMA_SIZE_XDP);
if (err)
goto free_vector;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 0cd1ecacfd29..477b8732b860 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3997,11 +3997,27 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
- /* PSE should not drop port1, port8 and port9 packets */
- mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
+ /* PSE dummy page mechanism */
+ mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
+ PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
+
+ /* PSE free buffer drop threshold */
+ mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
+
+ /* PSE should not drop port8, port9 and port13 packets from
+ * WDMA Tx
+ */
+ mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
+
+ /* PSE should drop packets to port8, port9 and port13 on WDMA Rx
+ * ring full
+ */
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
/* GDM and CDM Threshold */
- mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
/* Disable GDM1 RX CRC stripping */
@@ -4018,7 +4034,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
- mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
+ mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
/* PSE Free Queue Flow Control */
mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 8d7b6818d860..0570623e569d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -151,7 +151,15 @@
#define PSE_FQFC_CFG1 0x100
#define PSE_FQFC_CFG2 0x104
#define PSE_DROP_CFG 0x108
-#define PSE_PPE0_DROP 0x110
+#define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4))
+
+/* PSE Last FreeQ Page Request Control */
+#define PSE_DUMY_REQ 0x10C
+/* PSE_DUMY_REQ is not a typo; the register is named this way in
+ * MediaTek's datasheet as well
+ */
+#define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x))
+#define DUMMY_PAGE_THR 0x1
/* PSE Input Queue Reservation Register*/
#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
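To make the new PSE_DUMY_REQ programming easier to audit, here is a small user-space check (not kernel code; BIT() is restated locally) of the value composed from PSE_DUMMY_WORK_GDM(1..3) and DUMMY_PAGE_THR in the mtk_eth_soc.c hunk above.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1U << (n))
#define PSE_DUMMY_WORK_GDM(x)	BIT(16 + (x))
#define DUMMY_PAGE_THR		0x1

int main(void)
{
	uint32_t val = PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
		       PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR;

	/* Bits 17, 18 and 19 set, plus a dummy-page threshold of 1. */
	assert(val == 0x000E0001);
	printf("PSE_DUMY_REQ value: 0x%08X\n", val);
	return 0;
}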
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index 9f13cea16446..43b2216bc0a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -618,10 +618,6 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
bool use_l4_type;
int err;
- ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
- if (!ttc)
- return ERR_PTR(-ENOMEM);
-
switch (params->ns_type) {
case MLX5_FLOW_NAMESPACE_PORT_SEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
@@ -635,7 +631,16 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(-EINVAL);
}
+ ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+ if (!ttc)
+ return ERR_PTR(-ENOMEM);
+
ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ if (!ns) {
+ kvfree(ttc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
&inner_ttc_groups[TTC_GROUPS_DEFAULT];
@@ -691,10 +696,6 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
bool use_l4_type;
int err;
- ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
- if (!ttc)
- return ERR_PTR(-ENOMEM);
-
switch (params->ns_type) {
case MLX5_FLOW_NAMESPACE_PORT_SEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
@@ -708,7 +709,16 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(-EINVAL);
}
+ ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+ if (!ttc)
+ return ERR_PTR(-ENOMEM);
+
ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ if (!ns) {
+ kvfree(ttc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
&ttc_groups[TTC_GROUPS_DEFAULT];
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 16020b72dec8..ece8588b3b17 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -523,24 +523,6 @@ static int socfpga_dwmac_resume(struct device *dev)
dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
- /* Before the enet controller is suspended, the phy is suspended.
- * This causes the phy clock to be gated. The enet controller is
- * resumed before the phy, so the clock is still gated "off" when
- * the enet controller is resumed. This code makes sure the phy
- * is "resumed" before reinitializing the enet controller since
- * the enet controller depends on an active phy clock to complete
- * a DMA reset. A DMA reset will "time out" if executed
- * with no phy clock input on the Synopsys enet controller.
- * Verified through Synopsys Case #8000711656.
- *
- * Note that the phy clock is also gated when the phy is isolated.
- * Phy "suspend" and "isolate" controls are located in phy basic
- * control register 0, and can be modified by the phy driver
- * framework.
- */
- if (ndev->phydev)
- phy_resume(ndev->phydev);
-
return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 600fea8f712f..2d5bf1de5d2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -331,8 +331,8 @@ enum rtc_control {
/* PTP and timestamping registers */
-#define GMAC3_X_ATSNS GENMASK(19, 16)
-#define GMAC3_X_ATSNS_SHIFT 16
+#define GMAC3_X_ATSNS GENMASK(29, 25)
+#define GMAC3_X_ATSNS_SHIFT 25
#define GMAC_PTP_TCR_ATSFC BIT(24)
#define GMAC_PTP_TCR_ATSEN0 BIT(25)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 96bcda0856ec..11c525b8d269 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -560,7 +560,7 @@ void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
u64 ns;
ns = readl(ptpaddr + GMAC_PTP_ATNR);
- ns += readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC;
+ ns += (u64)readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC;
*ptp_time = ns;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 0f59aa982604..e2840fa241f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -222,7 +222,7 @@ static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
u64 ns;
ns = readl(ptpaddr + PTP_ATNR);
- ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
+ ns += (u64)readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
*ptp_time = ns;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b7c3bfdaa180..b9340f8bd182 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3448,9 +3448,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
if (priv->hw->phylink_pcs)
phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
+ /* Note that clk_rx_i must be running for reset to complete. This
+ * clock may also be required when setting the MAC address.
+ *
+ * Block the receive clock stop for LPI mode at the PHY in case
+ * the link is established with EEE mode active.
+ */
+ phylink_rx_clk_stop_block(priv->phylink);
+
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
+ phylink_rx_clk_stop_unblock(priv->phylink);
netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
__func__);
return ret;
@@ -3458,6 +3467,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Copy the MAC addr into the HW */
stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
/* PS and related bits will be programmed according to the speed */
if (priv->hw->pcs) {
@@ -3568,7 +3578,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -5853,6 +5865,9 @@ static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
* whenever multicast addresses must be enabled/disabled.
* Return value:
* void.
+ *
+ * FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
*/
static void stmmac_set_rx_mode(struct net_device *dev)
{
@@ -5985,7 +6000,9 @@ static int stmmac_set_features(struct net_device *netdev,
else
priv->hw->hw_vlan_en = false;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -6269,7 +6286,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
if (ret)
goto set_mac_error;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
set_mac_error:
pm_runtime_put(priv->device);
@@ -6625,6 +6644,9 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6656,6 +6678,9 @@ err_pm_put:
return ret;
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -7813,13 +7838,11 @@ int stmmac_suspend(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_suspend(priv->phylink, true);
- } else {
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
- phylink_suspend(priv->phylink, false);
- }
+ if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ phylink_speed_down(priv->phylink, false);
+
+ phylink_suspend(priv->phylink,
+ device_may_wakeup(priv->device) && priv->plat->pmt);
rtnl_unlock();
if (stmmac_fpe_supported(priv))
@@ -7909,16 +7932,12 @@ int stmmac_resume(struct device *dev)
}
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_resume(priv->phylink);
- } else {
- phylink_resume(priv->phylink);
- if (device_may_wakeup(priv->device))
- phylink_speed_up(priv->phylink);
- }
- rtnl_unlock();
- rtnl_lock();
+ /* Prepare the PHY to resume, ensuring that its clocks which are
+ * necessary for the MAC DMA reset to complete are running
+ */
+ phylink_prepare_resume(priv->phylink);
+
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -7928,14 +7947,25 @@ int stmmac_resume(struct device *dev)
stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_rx_mode(ndev);
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
stmmac_enable_all_queues(priv);
stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
+
+ /* phylink_resume() must be called after the hardware has been
+ * initialised because it may bring the link up immediately in a
+ * workqueue thread, which will race with initialisation.
+ */
+ phylink_resume(priv->phylink);
+ if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ phylink_speed_up(priv->phylink);
+
rtnl_unlock();
netif_device_attach(ndev);
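The resume ordering used above, distilled into a minimal sketch for a generic MAC driver (only the phylink calls are from the patch; example_priv, example_hw_setup and the surrounding names are hypothetical):

static int example_resume(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	rtnl_lock();

	/* Ensure the PHY's clocks are running before the MAC DMA reset */
	phylink_prepare_resume(priv->phylink);

	example_hw_setup(priv);		/* hypothetical: DMA reset, MAC init */

	/* Only bring the link up once the hardware is fully initialised,
	 * so the resolver cannot race with initialisation.
	 */
	phylink_resume(priv->phylink);

	rtnl_unlock();

	return 0;
}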
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 72177fea1cfb..edc3165f0077 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9064,6 +9064,8 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
msi_vec[i].entry = i;
}
+ pdev->dev_flags |= PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST;
+
num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
if (num_irqs < 0) {
np->flags &= ~NIU_FLAGS_MSIX;
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 6599feca1967..e32013eb0186 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -31,6 +31,7 @@
#define MII_DP83822_RCSR 0x17
#define MII_DP83822_RESET_CTRL 0x1f
#define MII_DP83822_MLEDCR 0x25
+#define MII_DP83822_LDCTRL 0x403
#define MII_DP83822_LEDCFG1 0x460
#define MII_DP83822_IOCTRL1 0x462
#define MII_DP83822_IOCTRL2 0x463
@@ -123,6 +124,9 @@
#define DP83822_IOCTRL1_GPIO1_CTRL GENMASK(2, 0)
#define DP83822_IOCTRL1_GPIO1_CTRL_LED_1 BIT(0)
+/* LDCTRL bits */
+#define DP83822_100BASE_TX_LINE_DRIVER_SWING GENMASK(7, 4)
+
/* IOCTRL2 bits */
#define DP83822_IOCTRL2_GPIO2_CLK_SRC GENMASK(6, 4)
#define DP83822_IOCTRL2_GPIO2_CTRL GENMASK(2, 0)
@@ -197,6 +201,7 @@ struct dp83822_private {
bool set_gpio2_clk_out;
u32 gpio2_clk_out;
bool led_pin_enable[DP83822_MAX_LED_PINS];
+ int tx_amplitude_100base_tx_index;
};
static int dp83822_config_wol(struct phy_device *phydev,
@@ -522,6 +527,12 @@ static int dp83822_config_init(struct phy_device *phydev)
FIELD_PREP(DP83822_IOCTRL2_GPIO2_CLK_SRC,
dp83822->gpio2_clk_out));
+ if (dp83822->tx_amplitude_100base_tx_index >= 0)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_LDCTRL,
+ DP83822_100BASE_TX_LINE_DRIVER_SWING,
+ FIELD_PREP(DP83822_100BASE_TX_LINE_DRIVER_SWING,
+ dp83822->tx_amplitude_100base_tx_index));
+
err = dp83822_config_init_leds(phydev);
if (err)
return err;
@@ -719,7 +730,12 @@ static int dp83822_phy_reset(struct phy_device *phydev)
return phydev->drv->config_init(phydev);
}
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
+static const u32 tx_amplitude_100base_tx_gain[] = {
+ 80, 82, 83, 85, 87, 88, 90, 92,
+ 93, 95, 97, 98, 100, 102, 103, 105,
+};
+
static int dp83822_of_init_leds(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -780,6 +796,8 @@ static int dp83822_of_init(struct phy_device *phydev)
struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
const char *of_val;
+ int i, ret;
+ u32 val;
/* Signal detection for the PHY is only enabled if the FX_EN and the
* SD_EN pins are strapped. Signal detection can only enabled if FX_EN
@@ -815,6 +833,25 @@ static int dp83822_of_init(struct phy_device *phydev)
dp83822->set_gpio2_clk_out = true;
}
+ ret = phy_get_tx_amplitude_gain(phydev, dev,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ &val);
+ if (!ret) {
+ for (i = 0; i < ARRAY_SIZE(tx_amplitude_100base_tx_gain); i++) {
+ if (tx_amplitude_100base_tx_gain[i] == val) {
+ dp83822->tx_amplitude_100base_tx_index = i;
+ break;
+ }
+ }
+
+ if (dp83822->tx_amplitude_100base_tx_index < 0) {
+ phydev_err(phydev,
+ "Invalid value for tx-amplitude-100base-tx-percent property (%u)\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
return dp83822_of_init_leds(phydev);
}
@@ -893,6 +930,7 @@ static int dp8382x_probe(struct phy_device *phydev)
if (!dp83822)
return -ENOMEM;
+ dp83822->tx_amplitude_100base_tx_index = -1;
phydev->priv = dp83822;
return 0;
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 0e17cc458efd..93de88c1c8fd 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -37,47 +37,6 @@ static int lan88xx_write_page(struct phy_device *phydev, int page)
return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
}
-static int lan88xx_phy_config_intr(struct phy_device *phydev)
-{
- int rc;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* unmask all source and clear them before enable */
- rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF);
- rc = phy_read(phydev, LAN88XX_INT_STS);
- rc = phy_write(phydev, LAN88XX_INT_MASK,
- LAN88XX_INT_MASK_MDINTPIN_EN_ |
- LAN88XX_INT_MASK_LINK_CHANGE_);
- } else {
- rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
- if (rc)
- return rc;
-
- /* Ack interrupts after they have been disabled */
- rc = phy_read(phydev, LAN88XX_INT_STS);
- }
-
- return rc < 0 ? rc : 0;
-}
-
-static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev)
-{
- int irq_status;
-
- irq_status = phy_read(phydev, LAN88XX_INT_STS);
- if (irq_status < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_))
- return IRQ_NONE;
-
- phy_trigger_machine(phydev);
-
- return IRQ_HANDLED;
-}
-
static int lan88xx_suspend(struct phy_device *phydev)
{
struct lan88xx_priv *priv = phydev->priv;
@@ -528,8 +487,9 @@ static struct phy_driver microchip_phy_driver[] = {
.config_aneg = lan88xx_config_aneg,
.link_change_notify = lan88xx_link_change_notify,
- .config_intr = lan88xx_phy_config_intr,
- .handle_interrupt = lan88xx_handle_interrupt,
+ /* Interrupt handling is broken, do not define related
+ * functions to force polling.
+ */
.suspend = lan88xx_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 92161af788af..2a01887c5617 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3123,19 +3123,12 @@ void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause)
EXPORT_SYMBOL(phy_get_pause);
#if IS_ENABLED(CONFIG_OF_MDIO)
-static int phy_get_int_delay_property(struct device *dev, const char *name)
+static int phy_get_u32_property(struct device *dev, const char *name, u32 *val)
{
- s32 int_delay;
- int ret;
-
- ret = device_property_read_u32(dev, name, &int_delay);
- if (ret)
- return ret;
-
- return int_delay;
+ return device_property_read_u32(dev, name, val);
}
#else
-static int phy_get_int_delay_property(struct device *dev, const char *name)
+static int phy_get_u32_property(struct device *dev, const char *name, u32 *val)
{
return -EINVAL;
}
@@ -3160,12 +3153,12 @@ static int phy_get_int_delay_property(struct device *dev, const char *name)
s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
const int *delay_values, int size, bool is_rx)
{
- s32 delay;
- int i;
+ int i, ret;
+ u32 delay;
if (is_rx) {
- delay = phy_get_int_delay_property(dev, "rx-internal-delay-ps");
- if (delay < 0 && size == 0) {
+ ret = phy_get_u32_property(dev, "rx-internal-delay-ps", &delay);
+ if (ret < 0 && size == 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
return 1;
@@ -3174,8 +3167,8 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
} else {
- delay = phy_get_int_delay_property(dev, "tx-internal-delay-ps");
- if (delay < 0 && size == 0) {
+ ret = phy_get_u32_property(dev, "tx-internal-delay-ps", &delay);
+ if (ret < 0 && size == 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
return 1;
@@ -3184,8 +3177,8 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
}
- if (delay < 0)
- return delay;
+ if (ret < 0)
+ return ret;
if (size == 0)
return delay;
@@ -3220,6 +3213,30 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
EXPORT_SYMBOL(phy_get_internal_delay);
+/**
+ * phy_get_tx_amplitude_gain - stores tx amplitude gain in @val
+ * @phydev: phy_device struct
+ * @dev: pointer to the device's struct device
+ * @linkmode: linkmode for which the tx amplitude gain should be retrieved
+ * @val: tx amplitude gain
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev,
+ enum ethtool_link_mode_bit_indices linkmode,
+ u32 *val)
+{
+ switch (linkmode) {
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ return phy_get_u32_property(dev,
+ "tx-amplitude-100base-tx-percent",
+ val);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(phy_get_tx_amplitude_gain);
+
static int phy_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index f550576eb9da..6f9d8da76c4d 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -91,9 +91,8 @@ int phy_led_triggers_register(struct phy_device *phy)
if (!phy->phy_num_led_triggers)
return 0;
- phy->led_link_trigger = devm_kzalloc(&phy->mdio.dev,
- sizeof(*phy->led_link_trigger),
- GFP_KERNEL);
+ phy->led_link_trigger = kzalloc(sizeof(*phy->led_link_trigger),
+ GFP_KERNEL);
if (!phy->led_link_trigger) {
err = -ENOMEM;
goto out_clear;
@@ -103,10 +102,9 @@ int phy_led_triggers_register(struct phy_device *phy)
if (err)
goto out_free_link;
- phy->phy_led_triggers = devm_kcalloc(&phy->mdio.dev,
- phy->phy_num_led_triggers,
- sizeof(struct phy_led_trigger),
- GFP_KERNEL);
+ phy->phy_led_triggers = kcalloc(phy->phy_num_led_triggers,
+ sizeof(struct phy_led_trigger),
+ GFP_KERNEL);
if (!phy->phy_led_triggers) {
err = -ENOMEM;
goto out_unreg_link;
@@ -127,11 +125,11 @@ int phy_led_triggers_register(struct phy_device *phy)
out_unreg:
while (i--)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
- devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+ kfree(phy->phy_led_triggers);
out_unreg_link:
phy_led_trigger_unregister(phy->led_link_trigger);
out_free_link:
- devm_kfree(&phy->mdio.dev, phy->led_link_trigger);
+ kfree(phy->led_link_trigger);
phy->led_link_trigger = NULL;
out_clear:
phy->phy_num_led_triggers = 0;
@@ -145,8 +143,13 @@ void phy_led_triggers_unregister(struct phy_device *phy)
for (i = 0; i < phy->phy_num_led_triggers; i++)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
+ kfree(phy->phy_led_triggers);
+ phy->phy_led_triggers = NULL;
- if (phy->led_link_trigger)
+ if (phy->led_link_trigger) {
phy_led_trigger_unregister(phy->led_link_trigger);
+ kfree(phy->led_link_trigger);
+ phy->led_link_trigger = NULL;
+ }
}
EXPORT_SYMBOL_GPL(phy_led_triggers_unregister);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index b00a315de060..5be48eb810ab 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -82,12 +82,15 @@ struct phylink {
unsigned int pcs_state;
bool link_failed;
+ bool suspend_link_up;
+ bool major_config_failed;
bool mac_supports_eee_ops;
bool mac_supports_eee;
bool phy_enable_tx_lpi;
bool mac_enable_tx_lpi;
bool mac_tx_clk_stop;
u32 mac_tx_lpi_timer;
+ u8 mac_rx_clk_stop_blocked;
struct sfp_bus *sfp_bus;
bool sfp_may_have_phy;
@@ -1360,12 +1363,16 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(state->interface));
+ pl->major_config_failed = false;
+
if (pl->mac_ops->mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
if (IS_ERR(pcs)) {
phylink_err(pl,
"mac_select_pcs unexpectedly failed: %pe\n",
pcs);
+
+ pl->major_config_failed = true;
return;
}
@@ -1387,6 +1394,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
if (err < 0) {
phylink_err(pl, "mac_prepare failed: %pe\n",
ERR_PTR(err));
+ pl->major_config_failed = true;
return;
}
}
@@ -1410,8 +1418,15 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_mac_config(pl, state);
- if (pl->pcs)
- phylink_pcs_post_config(pl->pcs, state->interface);
+ if (pl->pcs) {
+ err = phylink_pcs_post_config(pl->pcs, state->interface);
+ if (err < 0) {
+ phylink_err(pl, "pcs_post_config failed: %pe\n",
+ ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
+ }
if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed)
phylink_pcs_enable(pl->pcs);
@@ -1422,11 +1437,12 @@ static void phylink_major_config(struct phylink *pl, bool restart,
err = phylink_pcs_config(pl->pcs, neg_mode, state,
!!(pl->link_config.pause & MLO_PAUSE_AN));
- if (err < 0)
- phylink_err(pl, "pcs_config failed: %pe\n",
- ERR_PTR(err));
- else if (err > 0)
+ if (err < 0) {
+ phylink_err(pl, "pcs_config failed: %pe\n", ERR_PTR(err));
+ pl->major_config_failed = true;
+ } else if (err > 0) {
restart = true;
+ }
if (restart)
phylink_pcs_an_restart(pl);
@@ -1434,16 +1450,22 @@ static void phylink_major_config(struct phylink *pl, bool restart,
if (pl->mac_ops->mac_finish) {
err = pl->mac_ops->mac_finish(pl->config, pl->act_link_an_mode,
state->interface);
- if (err < 0)
+ if (err < 0) {
phylink_err(pl, "mac_finish failed: %pe\n",
ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
}
if (pl->phydev && pl->phy_ib_mode) {
err = phy_config_inband(pl->phydev, pl->phy_ib_mode);
- if (err < 0)
+ if (err < 0) {
phylink_err(pl, "phy_config_inband: %pe\n",
ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
}
if (pl->sfp_bus) {
@@ -1795,6 +1817,12 @@ static void phylink_resolve(struct work_struct *w)
}
}
+ /* If configuration of the interface failed, force the link down
+ * until we get a successful configuration.
+ */
+ if (pl->major_config_failed)
+ link_state.link = false;
+
if (link_state.link != cur_link_state) {
pl->old_link_state = link_state.link;
if (!link_state.link)
@@ -2595,6 +2623,64 @@ void phylink_stop(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_stop);
/**
+ * phylink_rx_clk_stop_block() - block PHY ability to stop receive clock in LPI
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Disable the PHY's ability to stop the receive clock while the receive path
+ * is in EEE LPI state, until the calls to phylink_rx_clk_stop_block() are
+ * balanced by calls to phylink_rx_clk_stop_unblock().
+ */
+void phylink_rx_clk_stop_block(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (pl->mac_rx_clk_stop_blocked == U8_MAX) {
+ phylink_warn(pl, "%s called too many times - ignoring\n",
+ __func__);
+ dump_stack();
+ return;
+ }
+
+ /* Disable PHY receive clock stop if this is the first time this
+ * function has been called and clock-stop was previously enabled.
+ */
+ if (pl->mac_rx_clk_stop_blocked++ == 0 &&
+ pl->mac_supports_eee_ops && pl->phydev &&
+ pl->config->eee_rx_clk_stop_enable)
+ phy_eee_rx_clock_stop(pl->phydev, false);
+}
+EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_block);
+
+/**
+ * phylink_rx_clk_stop_unblock() - unblock PHY ability to stop receive clock
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * All calls to phylink_rx_clk_stop_block() must be balanced with a
+ * corresponding call to phylink_rx_clk_stop_unblock() to restore the PHY's
+ * ability to stop the receive clock when the receive path is in EEE LPI mode.
+ */
+void phylink_rx_clk_stop_unblock(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (pl->mac_rx_clk_stop_blocked == 0) {
+ phylink_warn(pl, "%s called too many times - ignoring\n",
+ __func__);
+ dump_stack();
+ return;
+ }
+
+ /* Re-enable PHY receive clock stop if the number of unblocks matches
+ * the number of calls to the block function above.
+ */
+ if (--pl->mac_rx_clk_stop_blocked == 0 &&
+ pl->mac_supports_eee_ops && pl->phydev &&
+ pl->config->eee_rx_clk_stop_enable)
+ phy_eee_rx_clock_stop(pl->phydev, true);
+}
+EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_unblock);
+
+/**
* phylink_suspend() - handle a network device suspend event
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan
@@ -2619,14 +2705,16 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
/* Stop the resolver bringing the link up */
__set_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state);
- /* Disable the carrier, to prevent transmit timeouts,
- * but one would hope all packets have been sent. This
- * also means phylink_resolve() will do nothing.
- */
- if (pl->netdev)
- netif_carrier_off(pl->netdev);
- else
+ pl->suspend_link_up = phylink_link_is_up(pl);
+ if (pl->suspend_link_up) {
+ /* Disable the carrier, to prevent transmit timeouts,
+ * but one would hope all packets have been sent. This
+ * also means phylink_resolve() will do nothing.
+ */
+ if (pl->netdev)
+ netif_carrier_off(pl->netdev);
pl->old_link_state = false;
+ }
/* We do not call mac_link_down() here as we want the
* link to remain up to receive the WoL packets.
@@ -2639,6 +2727,31 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
EXPORT_SYMBOL_GPL(phylink_suspend);
/**
+ * phylink_prepare_resume() - prepare to resume a network device
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Optional, but if called must be called prior to phylink_resume().
+ *
+ * Prepare to resume a network device, preparing the PHY as necessary.
+ */
+void phylink_prepare_resume(struct phylink *pl)
+{
+ struct phy_device *phydev = pl->phydev;
+
+ ASSERT_RTNL();
+
+ /* IEEE 802.3 22.2.4.1.5 allows PHYs to stop their receive clock
+ * when PDOWN is set. However, some MACs require RXC to be running
+ * in order to resume. If the MAC requires RXC, and we have a PHY,
+ * then resume the PHY. Note that 802.3 allows PHYs 500ms before
+ * the clock meets requirements. We do not implement this delay.
+ */
+ if (pl->config->mac_requires_rxc && phydev && phydev->suspended)
+ phy_resume(phydev);
+}
+EXPORT_SYMBOL_GPL(phylink_prepare_resume);
+
+/**
* phylink_resume() - handle a network device resume event
* @pl: a pointer to a &struct phylink returned from phylink_create()
*
@@ -2652,15 +2765,18 @@ void phylink_resume(struct phylink *pl)
if (test_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state)) {
/* Wake-on-Lan enabled, MAC handling */
- /* Call mac_link_down() so we keep the overall state balanced.
- * Do this under the state_mutex lock for consistency. This
- * will cause a "Link Down" message to be printed during
- * resume, which is harmless - the true link state will be
- * printed when we run a resolve.
- */
- mutex_lock(&pl->state_mutex);
- phylink_link_down(pl);
- mutex_unlock(&pl->state_mutex);
+ if (pl->suspend_link_up) {
+ /* Call mac_link_down() so we keep the overall state
+ * balanced. Do this under the state_mutex lock for
+ * consistency. This will cause a "Link Down" message
+ * to be printed during resume, which is harmless -
+ * the true link state will be printed when we run a
+ * resolve.
+ */
+ mutex_lock(&pl->state_mutex);
+ phylink_link_down(pl);
+ mutex_unlock(&pl->state_mutex);
+ }
/* Re-apply the link parameters so that all the settings get
* restored to the MAC.
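A minimal sketch of how a MAC driver might use the new clock-stop blocking helpers around a register write that needs a running receive clock (example_priv and example_hw_write_mac_addr are hypothetical; the phylink calls are the ones added above):

static int example_set_mac_address(struct net_device *ndev, void *addr)
{
	struct example_priv *priv = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	/* ndo_set_mac_address runs under RTNL, as both helpers require */
	phylink_rx_clk_stop_block(priv->phylink);
	/* hypothetical helper: program the address filter, which needs RXC */
	example_hw_write_mac_addr(priv, ndev->dev_addr);
	phylink_rx_clk_stop_unblock(priv->phylink);

	return 0;
}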
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d1ed544ba03a..3e4896d9537e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2789,7 +2789,8 @@ static void skb_recv_done(struct virtqueue *rvq)
virtqueue_napi_schedule(&rq->napi, rvq);
}
-static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
+static void virtnet_napi_do_enable(struct virtqueue *vq,
+ struct napi_struct *napi)
{
napi_enable(napi);
@@ -2802,10 +2803,16 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
local_bh_enable();
}
-static void virtnet_napi_tx_enable(struct virtnet_info *vi,
- struct virtqueue *vq,
- struct napi_struct *napi)
+static void virtnet_napi_enable(struct receive_queue *rq)
{
+ virtnet_napi_do_enable(rq->vq, &rq->napi);
+}
+
+static void virtnet_napi_tx_enable(struct send_queue *sq)
+{
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct napi_struct *napi = &sq->napi;
+
if (!napi->weight)
return;
@@ -2817,15 +2824,24 @@ static void virtnet_napi_tx_enable(struct virtnet_info *vi,
return;
}
- return virtnet_napi_enable(vq, napi);
+ virtnet_napi_do_enable(sq->vq, napi);
}
-static void virtnet_napi_tx_disable(struct napi_struct *napi)
+static void virtnet_napi_tx_disable(struct send_queue *sq)
{
+ struct napi_struct *napi = &sq->napi;
+
if (napi->weight)
napi_disable(napi);
}
+static void virtnet_napi_disable(struct receive_queue *rq)
+{
+ struct napi_struct *napi = &rq->napi;
+
+ napi_disable(napi);
+}
+
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi =
@@ -2836,9 +2852,9 @@ static void refill_work(struct work_struct *work)
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq->vq, &rq->napi);
+ virtnet_napi_enable(rq);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
@@ -3035,8 +3051,8 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
- virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
- napi_disable(&vi->rq[qp_index].napi);
+ virtnet_napi_tx_disable(&vi->sq[qp_index]);
+ virtnet_napi_disable(&vi->rq[qp_index]);
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}
@@ -3055,8 +3071,8 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
if (err < 0)
goto err_xdp_reg_mem_model;
- virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
- virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+ virtnet_napi_enable(&vi->rq[qp_index]);
+ virtnet_napi_tx_enable(&vi->sq[qp_index]);
return 0;
@@ -3302,25 +3318,72 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+static void __virtnet_rx_pause(struct virtnet_info *vi,
+ struct receive_queue *rq)
{
bool running = netif_running(vi->dev);
if (running) {
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq);
virtnet_cancel_dim(vi, &rq->dim);
}
}
-static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+static void virtnet_rx_pause_all(struct virtnet_info *vi)
+{
+ int i;
+
+ /*
+ * Make sure refill_work does not run concurrently to
+ * avoid napi_disable race which leads to deadlock.
+ */
+ disable_delayed_refill(vi);
+ cancel_delayed_work_sync(&vi->refill);
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ __virtnet_rx_pause(vi, &vi->rq[i]);
+}
+
+static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ /*
+ * Make sure refill_work does not run concurrently to
+ * avoid napi_disable race which leads to deadlock.
+ */
+ disable_delayed_refill(vi);
+ cancel_delayed_work_sync(&vi->refill);
+ __virtnet_rx_pause(vi, rq);
+}
+
+static void __virtnet_rx_resume(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ bool refill)
{
bool running = netif_running(vi->dev);
- if (!try_fill_recv(vi, rq, GFP_KERNEL))
+ if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
if (running)
- virtnet_napi_enable(rq->vq, &rq->napi);
+ virtnet_napi_enable(rq);
+}
+
+static void virtnet_rx_resume_all(struct virtnet_info *vi)
+{
+ int i;
+
+ enable_delayed_refill(vi);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (i < vi->curr_queue_pairs)
+ __virtnet_rx_resume(vi, &vi->rq[i], true);
+ else
+ __virtnet_rx_resume(vi, &vi->rq[i], false);
+ }
+}
+
+static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ enable_delayed_refill(vi);
+ __virtnet_rx_resume(vi, rq, true);
}
static int virtnet_rx_resize(struct virtnet_info *vi,
@@ -3349,7 +3412,7 @@ static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
qindex = sq - vi->sq;
if (running)
- virtnet_napi_tx_disable(&sq->napi);
+ virtnet_napi_tx_disable(sq);
txq = netdev_get_tx_queue(vi->dev, qindex);
@@ -3383,7 +3446,7 @@ static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
__netif_tx_unlock_bh(txq);
if (running)
- virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ virtnet_napi_tx_enable(sq);
}
static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
@@ -5923,12 +5986,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (prog)
bpf_prog_add(prog, vi->max_queue_pairs - 1);
+ virtnet_rx_pause_all(vi);
+
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
- }
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_tx_disable(&vi->sq[i]);
}
if (!prog) {
@@ -5960,14 +6023,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
vi->xdp_enabled = false;
}
+ virtnet_rx_resume_all(vi);
for (i = 0; i < vi->max_queue_pairs; i++) {
if (old_prog)
bpf_prog_put(old_prog);
- if (netif_running(dev)) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ if (netif_running(dev))
+ virtnet_napi_tx_enable(&vi->sq[i]);
}
return 0;
@@ -5979,12 +6040,10 @@ err:
rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
}
+ virtnet_rx_resume_all(vi);
if (netif_running(dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_tx_enable(&vi->sq[i]);
}
if (prog)
bpf_prog_sub(prog, vi->max_queue_pairs - 1);
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
index 616ecc38d172..5f470499e600 100644
--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -397,7 +397,7 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
- rbi->len, false);
+ rcd->len, false);
xdp_buff_clear_frags_flag(&xdp);
xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 63fe51d0e64d..809b407cece1 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -985,20 +985,27 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_TX:
- get_page(pdata);
xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ trace_xdp_exception(queue->info->netdev, prog, act);
+ break;
+ }
+ get_page(pdata);
err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
- if (unlikely(!err))
+ if (unlikely(err <= 0)) {
+ if (err < 0)
+ trace_xdp_exception(queue->info->netdev, prog, act);
xdp_return_frame_rx_napi(xdpf);
- else if (unlikely(err < 0))
- trace_xdp_exception(queue->info->netdev, prog, act);
+ }
break;
case XDP_REDIRECT:
get_page(pdata);
err = xdp_do_redirect(queue->info->netdev, xdp, prog);
*need_xdp_flush = true;
- if (unlikely(err))
+ if (unlikely(err)) {
trace_xdp_exception(queue->info->netdev, prog, act);
+ xdp_return_buff(xdp);
+ }
break;
case XDP_PASS:
case XDP_DROP:
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index d687e8c2cc78..63ceed89b62e 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -1318,6 +1318,7 @@ static const struct pci_device_id amd_ntb_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] },
+ { PCI_VDEVICE(AMD, 0x155a), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
{ 0, }
};
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 544d8a4d2af5..f27df8d7f3b9 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -1041,7 +1041,7 @@ static inline char *idt_get_mw_name(enum idt_mw_type mw_type)
static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
unsigned char *mw_cnt)
{
- struct idt_mw_cfg mws[IDT_MAX_NR_MWS], *ret_mws;
+ struct idt_mw_cfg *mws;
const struct idt_ntb_bar *bars;
enum idt_mw_type mw_type;
unsigned char widx, bidx, en_cnt;
@@ -1049,6 +1049,11 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
int aprt_size;
u32 data;
+ mws = devm_kcalloc(&ndev->ntb.pdev->dev, IDT_MAX_NR_MWS,
+ sizeof(*mws), GFP_KERNEL);
+ if (!mws)
+ return ERR_PTR(-ENOMEM);
+
/* Retrieve the array of the BARs registers */
bars = portdata_tbl[port].bars;
@@ -1103,16 +1108,7 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
}
}
- /* Allocate memory for memory window descriptors */
- ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws),
- GFP_KERNEL);
- if (!ret_mws)
- return ERR_PTR(-ENOMEM);
-
- /* Copy the info of detected memory windows */
- memcpy(ret_mws, mws, (*mw_cnt)*sizeof(*ret_mws));
-
- return ret_mws;
+ return mws;
}
/*
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8359d0aa0e44..150de63b26b2 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4292,6 +4292,15 @@ static void nvme_scan_work(struct work_struct *work)
nvme_scan_ns_sequential(ctrl);
}
mutex_unlock(&ctrl->scan_lock);
+
+ /* Requeue if we have missed AENs */
+ if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events))
+ nvme_queue_scan(ctrl);
+#ifdef CONFIG_NVME_MULTIPATH
+ else if (ctrl->ana_log_buf)
+ /* Re-read the ANA log page to not miss updates */
+ queue_work(nvme_wq, &ctrl->ana_work);
+#endif
}
/*
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2a7635565083..f39823cde62c 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -427,7 +427,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
struct nvme_ns *ns;
if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
- return NULL;
+ return false;
list_for_each_entry_srcu(ns, &head->list, siblings,
srcu_read_lock_held(&head->srcu)) {
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2e741696f371..6ccce0ee5157 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -324,6 +324,9 @@ int nvmet_enable_port(struct nvmet_port *port)
lockdep_assert_held(&nvmet_config_sem);
+ if (port->disc_addr.trtype == NVMF_TRTYPE_MAX)
+ return -EINVAL;
+
ops = nvmet_transports[port->disc_addr.trtype];
if (!ops) {
up_write(&nvmet_config_sem);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 7318b736d414..ef8c5961e10c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1028,33 +1028,24 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
struct nvmet_fc_hostport *newhost, *match = NULL;
unsigned long flags;
+ /*
+ * Caller holds a reference on tgtport.
+ */
+
/* if LLDD not implemented, leave as NULL */
if (!hosthandle)
return NULL;
- /*
- * take reference for what will be the newly allocated hostport if
- * we end up using a new allocation
- */
- if (!nvmet_fc_tgtport_get(tgtport))
- return ERR_PTR(-EINVAL);
-
spin_lock_irqsave(&tgtport->lock, flags);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
spin_unlock_irqrestore(&tgtport->lock, flags);
- if (match) {
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
+ if (match)
return match;
- }
newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
- if (!newhost) {
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
+ if (!newhost)
return ERR_PTR(-ENOMEM);
- }
spin_lock_irqsave(&tgtport->lock, flags);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
@@ -1063,6 +1054,7 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
kfree(newhost);
newhost = match;
} else {
+ nvmet_fc_tgtport_get(tgtport);
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
INIT_LIST_HEAD(&newhost->host_list);
@@ -1097,7 +1089,8 @@ static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
nvmet_fc_tgtport_get(assoc->tgtport);
- queue_work(nvmet_wq, &assoc->del_work);
+ if (!queue_work(nvmet_wq, &assoc->del_work))
+ nvmet_fc_tgtport_put(assoc->tgtport);
}
static bool
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 5c4c4c1f535d..bc1daa9aede9 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -2109,11 +2109,18 @@ out_mempool_exit:
static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
+
+ dev_info(ctrl->dev, "PCI link up\n");
+ ctrl->link_up = true;
+
schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
}
static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
+ dev_info(ctrl->dev, "PCI link down\n");
+ ctrl->link_up = false;
+
cancel_delayed_work_sync(&ctrl->poll_cc);
nvmet_pci_epf_disable_ctrl(ctrl, false);
@@ -2340,10 +2347,8 @@ static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
if (ret)
goto out_clear_bar;
- if (!epc_features->linkup_notifier) {
- ctrl->link_up = true;
+ if (!epc_features->linkup_notifier)
nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);
- }
return 0;
@@ -2359,7 +2364,6 @@ static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf)
struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
- ctrl->link_up = false;
nvmet_pci_epf_destroy_ctrl(ctrl);
nvmet_pci_epf_deinit_dma(nvme_epf);
@@ -2371,7 +2375,6 @@ static int nvmet_pci_epf_link_up(struct pci_epf *epf)
struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
- ctrl->link_up = true;
nvmet_pci_epf_start_ctrl(ctrl);
return 0;
@@ -2382,7 +2385,6 @@ static int nvmet_pci_epf_link_down(struct pci_epf *epf)
struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
- ctrl->link_up = false;
nvmet_pci_epf_stop_ctrl(ctrl);
return 0;
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 779db058c42f..2caad365a665 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -249,25 +249,22 @@ static int adjust_local_phandle_references(const struct device_node *local_fixup
*/
int of_resolve_phandles(struct device_node *overlay)
{
- struct device_node *child, *local_fixups, *refnode;
- struct device_node *tree_symbols, *overlay_fixups;
+ struct device_node *child, *refnode;
+ struct device_node *overlay_fixups;
+ struct device_node __free(device_node) *local_fixups = NULL;
struct property *prop;
const char *refpath;
phandle phandle, phandle_delta;
int err;
- tree_symbols = NULL;
-
if (!overlay) {
pr_err("null overlay\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (!of_node_check_flag(overlay, OF_DETACHED)) {
pr_err("overlay not detached\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
phandle_delta = live_tree_max_phandle() + 1;
@@ -279,7 +276,7 @@ int of_resolve_phandles(struct device_node *overlay)
err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta);
if (err)
- goto out;
+ return err;
overlay_fixups = NULL;
@@ -288,16 +285,13 @@ int of_resolve_phandles(struct device_node *overlay)
overlay_fixups = child;
}
- if (!overlay_fixups) {
- err = 0;
- goto out;
- }
+ if (!overlay_fixups)
+ return 0;
- tree_symbols = of_find_node_by_path("/__symbols__");
+ struct device_node __free(device_node) *tree_symbols = of_find_node_by_path("/__symbols__");
if (!tree_symbols) {
pr_err("no symbols in root of device tree.\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
for_each_property_of_node(overlay_fixups, prop) {
@@ -311,14 +305,12 @@ int of_resolve_phandles(struct device_node *overlay)
if (err) {
pr_err("node label '%s' not found in live devicetree symbols table\n",
prop->name);
- goto out;
+ return err;
}
refnode = of_find_node_by_path(refpath);
- if (!refnode) {
- err = -ENOENT;
- goto out;
- }
+ if (!refnode)
+ return -ENOENT;
phandle = refnode->phandle;
of_node_put(refnode);
@@ -328,11 +320,8 @@ int of_resolve_phandles(struct device_node *overlay)
break;
}
-out:
if (err)
pr_err("overlay phandle fixup failed: %d\n", err);
- of_node_put(tree_symbols);
-
return err;
}
EXPORT_SYMBOL_GPL(of_resolve_phandles);
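For reference, a small sketch of the scope-based cleanup pattern the rework above switches to (__free(device_node) comes from <linux/of.h> via <linux/cleanup.h>; example_count_children is hypothetical):

static int example_count_children(struct device_node *parent)
{
	struct device_node __free(device_node) *np =
		of_get_child_by_name(parent, "ports");

	if (!np)
		return -ENOENT;

	/* of_node_put(np) runs automatically when np goes out of scope */
	return of_get_child_count(np);
}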
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 2f647cac4cae..8b8848788618 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -10,12 +10,12 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include "../pci.h"
#include "msi.h"
int pci_msi_enable = 1;
-int pci_msi_ignore_mask;
/**
* pci_msi_supported - check whether MSI may be enabled on a device
@@ -295,8 +295,7 @@ static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
/* Lies, damned lies, and MSIs */
if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
control |= PCI_MSI_FLAGS_MASKBIT;
- /* Respect XEN's mask disabling */
- if (pci_msi_ignore_mask)
+ if (pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY))
control &= ~PCI_MSI_FLAGS_MASKBIT;
desc.nvec_used = nvec;
@@ -609,12 +608,16 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
desc->pci.msi_attrib.is_64 = 1;
desc->pci.msi_attrib.default_irq = dev->irq;
desc->pci.mask_base = dev->msix_base;
- desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !desc->pci.msi_attrib.is_virtual;
- if (desc->pci.msi_attrib.can_mask) {
+
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY) &&
+ !desc->pci.msi_attrib.is_virtual) {
void __iomem *addr = pci_msix_desc_addr(desc);
+ desc->pci.msi_attrib.can_mask = 1;
+ /* Workaround for SUN NIU insanity, which requires write before read */
+ if (dev->dev_flags & PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST)
+ writel(0, addr + PCI_MSIX_ENTRY_DATA);
desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
}
@@ -659,9 +662,6 @@ static void msix_mask_all(void __iomem *base, int tsize)
u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
int i;
- if (pci_msi_ignore_mask)
- return;
-
for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
@@ -744,15 +744,17 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
/* Disable INTX */
pci_intx_for_msi(dev, 0);
- /*
- * Ensure that all table entries are masked to prevent
- * stale entries from firing in a crash kernel.
- *
- * Done late to deal with a broken Marvell NVME device
- * which takes the MSI-X mask bits into account even
- * when MSI-X is disabled, which prevents MSI delivery.
- */
- msix_mask_all(dev->msix_base, tsize);
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY)) {
+ /*
+ * Ensure that all table entries are masked to prevent
+ * stale entries from firing in a crash kernel.
+ *
+ * Done late to deal with a broken Marvell NVME device
+ * which takes the MSI-X mask bits into account even
+ * when MSI-X is disabled, which prevents MSI delivery.
+ */
+ msix_mask_all(dev->msix_base, tsize);
+ }
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
pcibios_free_irq(dev);
diff --git a/drivers/phy/rockchip/phy-rockchip-usbdp.c b/drivers/phy/rockchip/phy-rockchip-usbdp.c
index 5b1e8a3806ed..c04cf64f8a35 100644
--- a/drivers/phy/rockchip/phy-rockchip-usbdp.c
+++ b/drivers/phy/rockchip/phy-rockchip-usbdp.c
@@ -1045,7 +1045,6 @@ static int rk_udphy_dp_phy_init(struct phy *phy)
mutex_lock(&udphy->mutex);
udphy->dp_in_use = true;
- rk_udphy_dp_hpd_event_trigger(udphy, udphy->dp_sink_hpd_cfg);
mutex_unlock(&udphy->mutex);
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index b96e6368a956..4d1f41488017 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -382,6 +382,7 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
{
struct mcp23s08 *mcp = data;
int intcap, intcon, intf, i, gpio, gpio_orig, intcap_mask, defval, gpinten;
+ bool need_unmask = false;
unsigned long int enabled_interrupts;
unsigned int child_irq;
bool intf_set, intcap_changed, gpio_bit_changed,
@@ -396,9 +397,6 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
goto unlock;
}
- if (mcp_read(mcp, MCP_INTCAP, &intcap))
- goto unlock;
-
if (mcp_read(mcp, MCP_INTCON, &intcon))
goto unlock;
@@ -408,6 +406,16 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
if (mcp_read(mcp, MCP_DEFVAL, &defval))
goto unlock;
+ /* Mask level interrupts to avoid their immediate reactivation after clearing */
+ if (intcon) {
+ need_unmask = true;
+ if (mcp_write(mcp, MCP_GPINTEN, gpinten & ~intcon))
+ goto unlock;
+ }
+
+ if (mcp_read(mcp, MCP_INTCAP, &intcap))
+ goto unlock;
+
/* This clears the interrupt(configurable on S18) */
if (mcp_read(mcp, MCP_GPIO, &gpio))
goto unlock;
@@ -470,9 +478,18 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
}
}
+ if (need_unmask) {
+ mutex_lock(&mcp->lock);
+ goto unlock;
+ }
+
return IRQ_HANDLED;
unlock:
+ if (need_unmask)
+ if (mcp_write(mcp, MCP_GPINTEN, gpinten))
+ dev_err(mcp->chip.parent, "can't unmask GPINTEN\n");
+
mutex_unlock(&mcp->lock);
return IRQ_HANDLED;
}
diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
index 8b36161c7c50..3b5812963850 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
@@ -246,6 +246,9 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv)
int ret;
chip.label = devm_kasprintf(priv->dev, GFP_KERNEL, "%pOFn", np);
+ if (!chip.label)
+ return -ENOMEM;
+
chip.parent = priv->dev;
chip.ngpio = priv->npins;
diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c
index 3e5fa3b6e2fd..278c6d151dc4 100644
--- a/drivers/platform/x86/x86-android-tablets/dmi.c
+++ b/drivers/platform/x86/x86-android-tablets/dmi.c
@@ -180,6 +180,18 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
.driver_data = (void *)&peaq_c1010_info,
},
{
+ /* Vexia Edu Atla 10 tablet 5V version */
+ .matches = {
+ /* Having all 3 of these not set is somewhat unique */
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
+ },
+ .driver_data = (void *)&vexia_edu_atla10_5v_info,
+ },
+ {
/* Vexia Edu Atla 10 tablet 9V version */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -187,7 +199,7 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
/* Above strings are too generic, also match on BIOS date */
DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
},
- .driver_data = (void *)&vexia_edu_atla10_info,
+ .driver_data = (void *)&vexia_edu_atla10_9v_info,
},
{
/* Whitelabel (sold as various brands) TM800A550L */
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index 1d93d9edb23f..f7bd9f863c85 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -599,62 +599,122 @@ const struct x86_dev_info whitelabel_tm800a550l_info __initconst = {
};
/*
- * Vexia EDU ATLA 10 tablet, Android 4.2 / 4.4 + Guadalinex Ubuntu tablet
+ * Vexia EDU ATLA 10 tablet 5V, Android 4.4 + Guadalinex Ubuntu tablet
+ * distributed to schools in the Spanish Andalucía region.
+ */
+static const struct property_entry vexia_edu_atla10_5v_touchscreen_props[] = {
+ PROPERTY_ENTRY_U32("hid-descr-addr", 0x0000),
+ PROPERTY_ENTRY_U32("post-reset-deassert-delay-ms", 120),
+ { }
+};
+
+static const struct software_node vexia_edu_atla10_5v_touchscreen_node = {
+ .properties = vexia_edu_atla10_5v_touchscreen_props,
+};
+
+static const struct x86_i2c_client_info vexia_edu_atla10_5v_i2c_clients[] __initconst = {
+ {
+ /* kxcjk1013 accelerometer */
+ .board_info = {
+ .type = "kxcjk1013",
+ .addr = 0x0f,
+ .dev_name = "kxcjk1013",
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ }, {
+ /* touchscreen controller */
+ .board_info = {
+ .type = "hid-over-i2c",
+ .addr = 0x38,
+ .dev_name = "FTSC1000",
+ .swnode = &vexia_edu_atla10_5v_touchscreen_node,
+ },
+ .adapter_path = "\\_SB_.I2C4",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x44,
+ .trigger = ACPI_LEVEL_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }
+};
+
+static struct gpiod_lookup_table vexia_edu_atla10_5v_ft5416_gpios = {
+ .dev_id = "i2c-FTSC1000",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table * const vexia_edu_atla10_5v_gpios[] = {
+ &vexia_edu_atla10_5v_ft5416_gpios,
+ NULL
+};
+
+const struct x86_dev_info vexia_edu_atla10_5v_info __initconst = {
+ .i2c_client_info = vexia_edu_atla10_5v_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_5v_i2c_clients),
+ .gpiod_lookup_tables = vexia_edu_atla10_5v_gpios,
+};
+
+/*
+ * Vexia EDU ATLA 10 tablet 9V, Android 4.2 + Guadalinex Ubuntu tablet
* distributed to schools in the Spanish Andalucía region.
*/
static const char * const crystal_cove_pwrsrc_psy[] = { "crystal_cove_pwrsrc" };
-static const struct property_entry vexia_edu_atla10_ulpmc_props[] = {
+static const struct property_entry vexia_edu_atla10_9v_ulpmc_props[] = {
PROPERTY_ENTRY_STRING_ARRAY("supplied-from", crystal_cove_pwrsrc_psy),
{ }
};
-static const struct software_node vexia_edu_atla10_ulpmc_node = {
- .properties = vexia_edu_atla10_ulpmc_props,
+static const struct software_node vexia_edu_atla10_9v_ulpmc_node = {
+ .properties = vexia_edu_atla10_9v_ulpmc_props,
};
-static const char * const vexia_edu_atla10_accel_mount_matrix[] = {
+static const char * const vexia_edu_atla10_9v_accel_mount_matrix[] = {
"0", "-1", "0",
"1", "0", "0",
"0", "0", "1"
};
-static const struct property_entry vexia_edu_atla10_accel_props[] = {
- PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", vexia_edu_atla10_accel_mount_matrix),
+static const struct property_entry vexia_edu_atla10_9v_accel_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", vexia_edu_atla10_9v_accel_mount_matrix),
{ }
};
-static const struct software_node vexia_edu_atla10_accel_node = {
- .properties = vexia_edu_atla10_accel_props,
+static const struct software_node vexia_edu_atla10_9v_accel_node = {
+ .properties = vexia_edu_atla10_9v_accel_props,
};
-static const struct property_entry vexia_edu_atla10_touchscreen_props[] = {
+static const struct property_entry vexia_edu_atla10_9v_touchscreen_props[] = {
PROPERTY_ENTRY_U32("hid-descr-addr", 0x0000),
PROPERTY_ENTRY_U32("post-reset-deassert-delay-ms", 120),
{ }
};
-static const struct software_node vexia_edu_atla10_touchscreen_node = {
- .properties = vexia_edu_atla10_touchscreen_props,
+static const struct software_node vexia_edu_atla10_9v_touchscreen_node = {
+ .properties = vexia_edu_atla10_9v_touchscreen_props,
};
-static const struct property_entry vexia_edu_atla10_pmic_props[] = {
+static const struct property_entry vexia_edu_atla10_9v_pmic_props[] = {
PROPERTY_ENTRY_BOOL("linux,register-pwrsrc-power_supply"),
{ }
};
-static const struct software_node vexia_edu_atla10_pmic_node = {
- .properties = vexia_edu_atla10_pmic_props,
+static const struct software_node vexia_edu_atla10_9v_pmic_node = {
+ .properties = vexia_edu_atla10_9v_pmic_props,
};
-static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initconst = {
+static const struct x86_i2c_client_info vexia_edu_atla10_9v_i2c_clients[] __initconst = {
{
/* I2C attached embedded controller, used to access fuel-gauge */
.board_info = {
.type = "vexia_atla10_ec",
.addr = 0x76,
.dev_name = "ulpmc",
- .swnode = &vexia_edu_atla10_ulpmc_node,
+ .swnode = &vexia_edu_atla10_9v_ulpmc_node,
},
.adapter_path = "0000:00:18.1",
}, {
@@ -679,7 +739,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
.type = "kxtj21009",
.addr = 0x0f,
.dev_name = "kxtj21009",
- .swnode = &vexia_edu_atla10_accel_node,
+ .swnode = &vexia_edu_atla10_9v_accel_node,
},
.adapter_path = "0000:00:18.5",
}, {
@@ -688,7 +748,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
.type = "hid-over-i2c",
.addr = 0x38,
.dev_name = "FTSC1000",
- .swnode = &vexia_edu_atla10_touchscreen_node,
+ .swnode = &vexia_edu_atla10_9v_touchscreen_node,
},
.adapter_path = "0000:00:18.6",
.irq_data = {
@@ -703,7 +763,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
.type = "intel_soc_pmic_crc",
.addr = 0x6e,
.dev_name = "intel_soc_pmic_crc",
- .swnode = &vexia_edu_atla10_pmic_node,
+ .swnode = &vexia_edu_atla10_9v_pmic_node,
},
.adapter_path = "0000:00:18.7",
.irq_data = {
@@ -715,7 +775,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
}
};
-static const struct x86_serdev_info vexia_edu_atla10_serdevs[] __initconst = {
+static const struct x86_serdev_info vexia_edu_atla10_9v_serdevs[] __initconst = {
{
.ctrl.pci.devfn = PCI_DEVFN(0x1e, 3),
.ctrl_devname = "serial0",
@@ -723,7 +783,7 @@ static const struct x86_serdev_info vexia_edu_atla10_serdevs[] __initconst = {
},
};
-static struct gpiod_lookup_table vexia_edu_atla10_ft5416_gpios = {
+static struct gpiod_lookup_table vexia_edu_atla10_9v_ft5416_gpios = {
.dev_id = "i2c-FTSC1000",
.table = {
GPIO_LOOKUP("INT33FC:00", 60, "reset", GPIO_ACTIVE_LOW),
@@ -731,12 +791,12 @@ static struct gpiod_lookup_table vexia_edu_atla10_ft5416_gpios = {
},
};
-static struct gpiod_lookup_table * const vexia_edu_atla10_gpios[] = {
- &vexia_edu_atla10_ft5416_gpios,
+static struct gpiod_lookup_table * const vexia_edu_atla10_9v_gpios[] = {
+ &vexia_edu_atla10_9v_ft5416_gpios,
NULL
};
-static int __init vexia_edu_atla10_init(struct device *dev)
+static int __init vexia_edu_atla10_9v_init(struct device *dev)
{
struct pci_dev *pdev;
int ret;
@@ -760,13 +820,13 @@ static int __init vexia_edu_atla10_init(struct device *dev)
return 0;
}
-const struct x86_dev_info vexia_edu_atla10_info __initconst = {
- .i2c_client_info = vexia_edu_atla10_i2c_clients,
- .i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_i2c_clients),
- .serdev_info = vexia_edu_atla10_serdevs,
- .serdev_count = ARRAY_SIZE(vexia_edu_atla10_serdevs),
- .gpiod_lookup_tables = vexia_edu_atla10_gpios,
- .init = vexia_edu_atla10_init,
+const struct x86_dev_info vexia_edu_atla10_9v_info __initconst = {
+ .i2c_client_info = vexia_edu_atla10_9v_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_9v_i2c_clients),
+ .serdev_info = vexia_edu_atla10_9v_serdevs,
+ .serdev_count = ARRAY_SIZE(vexia_edu_atla10_9v_serdevs),
+ .gpiod_lookup_tables = vexia_edu_atla10_9v_gpios,
+ .init = vexia_edu_atla10_9v_init,
.use_pci = true,
};
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 63a38a0069ba..dcf8d49e3b5f 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -127,7 +127,8 @@ extern const struct x86_dev_info nextbook_ares8_info;
extern const struct x86_dev_info nextbook_ares8a_info;
extern const struct x86_dev_info peaq_c1010_info;
extern const struct x86_dev_info whitelabel_tm800a550l_info;
-extern const struct x86_dev_info vexia_edu_atla10_info;
+extern const struct x86_dev_info vexia_edu_atla10_5v_info;
+extern const struct x86_dev_info vexia_edu_atla10_9v_info;
extern const struct x86_dev_info xiaomi_mipad2_info;
extern const struct dmi_system_id x86_android_tablet_ids[];
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index ccd54c089bab..0e4df71c2cef 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -322,7 +322,7 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
const struct pwm_ops *ops = chip->ops;
char wfhw[WFHWSIZE];
struct pwm_waveform wf_rounded;
- int err;
+ int err, ret_tohw;
BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
@@ -332,16 +332,16 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
if (!pwm_wf_valid(wf))
return -EINVAL;
- err = __pwm_round_waveform_tohw(chip, pwm, wf, &wfhw);
- if (err)
- return err;
+ ret_tohw = __pwm_round_waveform_tohw(chip, pwm, wf, &wfhw);
+ if (ret_tohw < 0)
+ return ret_tohw;
if ((IS_ENABLED(CONFIG_PWM_DEBUG) || exact) && wf->period_length_ns) {
err = __pwm_round_waveform_fromhw(chip, pwm, &wfhw, &wf_rounded);
if (err)
return err;
- if (IS_ENABLED(CONFIG_PWM_DEBUG) && !pwm_check_rounding(wf, &wf_rounded))
+ if (IS_ENABLED(CONFIG_PWM_DEBUG) && ret_tohw == 0 && !pwm_check_rounding(wf, &wf_rounded))
dev_err(&chip->dev, "Wrong rounding: requested %llu/%llu [+%llu], result %llu/%llu [+%llu]\n",
wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns,
wf_rounded.duty_length_ns, wf_rounded.period_length_ns, wf_rounded.duty_offset_ns);
@@ -382,7 +382,8 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
wf_rounded.duty_length_ns, wf_rounded.period_length_ns, wf_rounded.duty_offset_ns,
wf_set.duty_length_ns, wf_set.period_length_ns, wf_set.duty_offset_ns);
}
- return 0;
+
+ return ret_tohw;
}
/**
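Note: the two PWM hunks above change the round_waveform_tohw() contract: a negative return is still an error, 0 means the request could be represented by rounding down, and 1 means the hardware had to round the period up (here a too-short period clamped to one clock tick), in which case __pwm_set_waveform() skips the rounding check and propagates the 1 to its caller. A minimal standalone sketch of that three-way convention follows; the names are illustrative, not the kernel API.

#include <stdio.h>

/* Sketch: 0 = exact/rounded-down, 1 = had to round the period up, <0 = error. */
static int round_tohw(unsigned long period_ns, unsigned long tick_ns,
		      unsigned long *period_ticks)
{
	if (tick_ns == 0)
		return -22;			/* -EINVAL analogue */

	*period_ticks = period_ns / tick_ns;	/* rounds down */
	if (*period_ticks == 0) {
		*period_ticks = 1;		/* smallest possible period */
		return 1;			/* tell the caller we rounded up */
	}
	return 0;
}

int main(void)
{
	unsigned long ticks;
	int ret = round_tohw(30, 100, &ticks);	/* 30 ns request, 100 ns tick */

	if (ret < 0)
		return 1;
	if (ret == 1)
		printf("period rounded up to %lu tick(s)\n", ticks);
	else
		printf("period is %lu tick(s)\n", ticks);
	return 0;
}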
diff --git a/drivers/pwm/pwm-axi-pwmgen.c b/drivers/pwm/pwm-axi-pwmgen.c
index 4259a0db9ff4..4337c8f5acf0 100644
--- a/drivers/pwm/pwm-axi-pwmgen.c
+++ b/drivers/pwm/pwm-axi-pwmgen.c
@@ -75,6 +75,7 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip,
{
struct axi_pwmgen_waveform *wfhw = _wfhw;
struct axi_pwmgen_ddata *ddata = axi_pwmgen_ddata_from_chip(chip);
+ int ret = 0;
if (wf->period_length_ns == 0) {
*wfhw = (struct axi_pwmgen_waveform){
@@ -91,12 +92,15 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip,
if (wfhw->period_cnt == 0) {
/*
* The specified period is too short for the hardware.
- * Let's round .duty_cycle down to 0 to get a (somewhat)
- * valid result.
+ * So round up .period_cnt to 1 (i.e. the smallest
+ * possible period). With .duty_cycle and .duty_offset
+ * being less than or equal to .period, their rounded
+ * value must be 0.
*/
wfhw->period_cnt = 1;
wfhw->duty_cycle_cnt = 0;
wfhw->duty_offset_cnt = 0;
+ ret = 1;
} else {
wfhw->duty_cycle_cnt = min_t(u64,
mul_u64_u32_div(wf->duty_length_ns, ddata->clk_rate_hz, NSEC_PER_SEC),
@@ -111,7 +115,7 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip,
pwm->hwpwm, wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns,
ddata->clk_rate_hz, wfhw->period_cnt, wfhw->duty_cycle_cnt, wfhw->duty_offset_cnt);
- return 0;
+ return ret;
}
static int axi_pwmgen_round_waveform_fromhw(struct pwm_chip *chip, struct pwm_device *pwm,
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 7d82bd1b36df..1e8142479656 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -270,8 +270,8 @@ static const unsigned int rk817_buck1_4_ramp_table[] = {
static int rk806_set_mode_dcdc(struct regulator_dev *rdev, unsigned int mode)
{
- int rid = rdev_get_id(rdev);
- int ctr_bit, reg;
+ unsigned int rid = rdev_get_id(rdev);
+ unsigned int ctr_bit, reg;
reg = RK806_POWER_FPWM_EN0 + rid / 8;
ctr_bit = rid % 8;
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 905986c61655..73848f764559 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -35,6 +35,7 @@
#define PCF85063_REG_CTRL1_CAP_SEL BIT(0)
#define PCF85063_REG_CTRL1_STOP BIT(5)
#define PCF85063_REG_CTRL1_EXT_TEST BIT(7)
+#define PCF85063_REG_CTRL1_SWR 0x58
#define PCF85063_REG_CTRL2 0x01
#define PCF85063_CTRL2_AF BIT(6)
@@ -589,7 +590,7 @@ static int pcf85063_probe(struct i2c_client *client)
i2c_set_clientdata(client, pcf85063);
- err = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL1, &tmp);
+ err = regmap_read(pcf85063->regmap, PCF85063_REG_SC, &tmp);
if (err) {
dev_err(&client->dev, "RTC chip is not present\n");
return err;
@@ -599,6 +600,22 @@ static int pcf85063_probe(struct i2c_client *client)
if (IS_ERR(pcf85063->rtc))
return PTR_ERR(pcf85063->rtc);
+ /*
+ * If a power loss is detected, issue a software reset to the device.
+ * From PCF85063A datasheet:
+ * There is a low probability that some devices will have corruption
+ * of the registers after the automatic power-on reset...
+ */
+ if (tmp & PCF85063_REG_SC_OS) {
+ dev_warn(&client->dev,
+ "POR issue detected, sending a SW reset\n");
+ err = regmap_write(pcf85063->regmap, PCF85063_REG_CTRL1,
+ PCF85063_REG_CTRL1_SWR);
+ if (err < 0)
+ dev_warn(&client->dev,
+ "SW reset failed, trying to continue\n");
+ }
+
err = pcf85063_load_capacitance(pcf85063, client->dev.of_node,
config->force_cap_7000 ? 7000 : 0);
if (err < 0)
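Note: the rtc-pcf85063 hunk reads the seconds register at probe and, when the oscillator-stop (OS) flag is set, writes the software-reset pattern 0x58 to CTRL1 to recover from the power-on-reset corruption the datasheet describes. Below is a standalone sketch of that check-then-reset flow; the register names follow the driver, while the I/O stubs standing in for regmap are hypothetical.

#include <stdio.h>

#define REG_CTRL1      0x00
#define REG_SC         0x04	/* seconds register */
#define REG_SC_OS      0x80	/* oscillator-stop / power-loss flag */
#define CTRL1_SWR      0x58	/* software reset pattern */

/* Hypothetical bus accessors standing in for regmap_read()/regmap_write(). */
static unsigned int fake_regs[8] = { [REG_SC] = REG_SC_OS | 0x30 };
static int reg_read(unsigned int reg, unsigned int *val) { *val = fake_regs[reg]; return 0; }
static int reg_write(unsigned int reg, unsigned int val) { fake_regs[reg] = val; return 0; }

int main(void)
{
	unsigned int sec;

	if (reg_read(REG_SC, &sec))
		return 1;			/* chip not present */

	if (sec & REG_SC_OS) {
		fprintf(stderr, "POR issue detected, sending a SW reset\n");
		if (reg_write(REG_CTRL1, CTRL1_SWR))
			fprintf(stderr, "SW reset failed, trying to continue\n");
	}
	return 0;
}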
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index e5d947c763ea..6a030ba38bf3 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -264,6 +264,19 @@ static struct console sclp_console =
};
/*
+ * Release allocated pages.
+ */
+static void __init __sclp_console_free_pages(void)
+{
+ struct list_head *page, *p;
+
+ list_for_each_safe(page, p, &sclp_con_pages) {
+ list_del(page);
+ free_page((unsigned long)page);
+ }
+}
+
+/*
* called by console_init() in drivers/char/tty_io.c at boot-time.
*/
static int __init
@@ -282,6 +295,10 @@ sclp_console_init(void)
/* Allocate pages for output buffering */
for (i = 0; i < sclp_console_pages; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!page) {
+ __sclp_console_free_pages();
+ return -ENOMEM;
+ }
list_add_tail(page, &sclp_con_pages);
}
sclp_conbuf = NULL;
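Note: both sclp hunks fix the same leak: when get_zeroed_page() fails partway through filling the buffer list, the pages already queued must be walked and freed before returning -ENOMEM. A standalone sketch of the allocate-all-or-free-all pattern, using plain malloc and a singly linked list instead of the kernel page allocator and list_head:

#include <stdlib.h>

struct page_node {
	struct page_node *next;
	char buf[4096];
};

static void free_all(struct page_node *head)
{
	while (head) {
		struct page_node *next = head->next;

		free(head);
		head = next;
	}
}

/* Allocate n buffers; on any failure release everything allocated so far. */
static struct page_node *alloc_all(int n)
{
	struct page_node *head = NULL;

	for (int i = 0; i < n; i++) {
		struct page_node *p = calloc(1, sizeof(*p));

		if (!p) {
			free_all(head);		/* undo the partial allocation */
			return NULL;
		}
		p->next = head;
		head = p;
	}
	return head;
}

int main(void)
{
	struct page_node *pages = alloc_all(6);

	if (!pages)
		return 1;	/* -ENOMEM analogue */
	free_all(pages);
	return 0;
}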
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 892c18d2f87e..d3edacb6ee14 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -490,6 +490,17 @@ static const struct tty_operations sclp_ops = {
.flush_buffer = sclp_tty_flush_buffer,
};
+/* Release allocated pages. */
+static void __init __sclp_tty_free_pages(void)
+{
+ struct list_head *page, *p;
+
+ list_for_each_safe(page, p, &sclp_tty_pages) {
+ list_del(page);
+ free_page((unsigned long)page);
+ }
+}
+
static int __init
sclp_tty_init(void)
{
@@ -516,6 +527,7 @@ sclp_tty_init(void)
for (i = 0; i < MAX_KMEM_PAGES; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (page == NULL) {
+ __sclp_tty_free_pages();
tty_driver_kref_put(driver);
return -ENOMEM;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 3596414d970b..7a484ad0f9ab 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -935,8 +935,28 @@ static void hisi_sas_phyup_work_common(struct work_struct *work,
container_of(work, typeof(*phy), works[event]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct asd_sas_port *sas_port = sas_phy->port;
+ struct hisi_sas_port *port = phy->port;
+ struct device *dev = hisi_hba->dev;
+ struct domain_device *port_dev;
int phy_no = sas_phy->id;
+ if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) &&
+ sas_port && port && (port->id != phy->port_id)) {
+ dev_info(dev, "phy%d's hw port id changed from %d to %llu\n",
+ phy_no, port->id, phy->port_id);
+ port_dev = sas_port->port_dev;
+ if (port_dev && !dev_is_expander(port_dev->dev_type)) {
+ /*
+ * Set the device state to gone to block
+ * sending IO to the device.
+ */
+ set_bit(SAS_DEV_GONE, &port_dev->state);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ return;
+ }
+ }
+
phy->wait_phyup_cnt = 0;
if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index ec5b1ab28717..c0a372868e1d 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -563,7 +563,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
reply_qidx);
- atomic_dec(&op_reply_q->pend_ios);
+
if (reply_dma)
mpi3mr_repost_reply_buf(mrioc, reply_dma);
num_op_reply++;
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 183ce00aa671..f7067878b34f 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -766,6 +766,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
+ pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
pm8001_free_dev(pm8001_dev);
} else {
pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a77e0499b738..9d2db5bc8ee7 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -695,26 +695,23 @@ void scsi_cdl_check(struct scsi_device *sdev)
*/
int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
{
- struct scsi_mode_data data;
- struct scsi_sense_hdr sshdr;
- struct scsi_vpd *vpd;
- bool is_ata = false;
char buf[64];
+ bool is_ata;
int ret;
if (!sdev->cdl_supported)
return -EOPNOTSUPP;
rcu_read_lock();
- vpd = rcu_dereference(sdev->vpd_pg89);
- if (vpd)
- is_ata = true;
+ is_ata = rcu_dereference(sdev->vpd_pg89);
rcu_read_unlock();
/*
* For ATA devices, CDL needs to be enabled with a SET FEATURES command.
*/
if (is_ata) {
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
char *buf_data;
int len;
@@ -723,16 +720,30 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
if (ret)
return -EINVAL;
- /* Enable CDL using the ATA feature page */
+ /* Enable or disable CDL using the ATA feature page */
len = min_t(size_t, sizeof(buf),
data.length - data.header_length -
data.block_descriptor_length);
buf_data = buf + data.header_length +
data.block_descriptor_length;
- if (enable)
- buf_data[4] = 0x02;
- else
- buf_data[4] = 0;
+
+ /*
+ * If we want to enable CDL and CDL is already enabled on the
+ * device, do nothing. This avoids needlessly resetting the CDL
+ * statistics on the device as that is implied by the CDL enable
+ * action. Similarly, there is no need to do anything if we want
+ * to disable CDL and CDL is already disabled.
+ */
+ if (enable) {
+ if ((buf_data[4] & 0x03) == 0x02)
+ goto out;
+ buf_data[4] &= ~0x03;
+ buf_data[4] |= 0x02;
+ } else {
+ if ((buf_data[4] & 0x03) == 0x00)
+ goto out;
+ buf_data[4] &= ~0x03;
+ }
ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3,
&data, &sshdr);
@@ -744,6 +755,7 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
}
}
+out:
sdev->cdl_enable = enable;
return 0;
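Note: the scsi_cdl_enable() hunk narrows the MODE SELECT to the cases where the CDL enable field (the low two bits of byte 4 of the ATA feature control mode page) actually needs to change: 0x2 means enabled, 0x0 disabled, and a matching state short-circuits to the out label so the device's CDL statistics are not reset for nothing. A small standalone sketch of just that field update, with everything around it stubbed away:

#include <stdbool.h>
#include <stdio.h>

/* Returns true when the mode page byte changed and must be written back. */
static bool cdl_update_field(unsigned char *byte4, bool enable)
{
	unsigned char cur = *byte4 & 0x03;

	if (enable) {
		if (cur == 0x02)
			return false;		/* already enabled, skip MODE SELECT */
		*byte4 = (*byte4 & ~0x03) | 0x02;
	} else {
		if (cur == 0x00)
			return false;		/* already disabled */
		*byte4 &= ~0x03;
	}
	return true;
}

int main(void)
{
	unsigned char byte4 = 0x02;		/* device reports CDL enabled */

	printf("enable again -> write needed: %d\n", cdl_update_field(&byte4, true));
	printf("disable      -> write needed: %d\n", cdl_update_field(&byte4, false));
	return 0;
}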
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f1cfe0bb89b2..7a31dae9aa82 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1253,8 +1253,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request);
*/
static void scsi_cleanup_rq(struct request *rq)
{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ cmd->flags = 0;
+
if (rq->rq_flags & RQF_DONTPREP) {
- scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
+ scsi_mq_uninit_cmd(cmd);
rq->rq_flags &= ~RQF_DONTPREP;
}
}
diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
index 393d2d1d275f..79e04bff3e33 100644
--- a/drivers/soc/qcom/ice.c
+++ b/drivers/soc/qcom/ice.c
@@ -11,6 +11,7 @@
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -324,6 +325,53 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
}
EXPORT_SYMBOL_GPL(of_qcom_ice_get);
+static void qcom_ice_put(const struct qcom_ice *ice)
+{
+ struct platform_device *pdev = to_platform_device(ice->dev);
+
+ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"))
+ platform_device_put(pdev);
+}
+
+static void devm_of_qcom_ice_put(struct device *dev, void *res)
+{
+ qcom_ice_put(*(struct qcom_ice **)res);
+}
+
+/**
+ * devm_of_qcom_ice_get() - Devres managed helper to get an ICE instance from
+ * a DT node.
+ * @dev: device pointer for the consumer device.
+ *
+ * This function provides an ICE instance either by creating one for the
+ * consumer device when its DT node supplies the 'ice' reg range and the
+ * 'ice' clock (legacy DT style), or, when the consumer points to an ICE DT
+ * node via the 'qcom,ice' phandle property, by returning the instance that
+ * has already been created for that node.
+ *
+ * Return: ICE pointer on success, NULL if there is no ICE data provided by the
+ * consumer or ERR_PTR() on error.
+ */
+struct qcom_ice *devm_of_qcom_ice_get(struct device *dev)
+{
+ struct qcom_ice *ice, **dr;
+
+ dr = devres_alloc(devm_of_qcom_ice_put, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ice = of_qcom_ice_get(dev);
+ if (!IS_ERR_OR_NULL(ice)) {
+ *dr = ice;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return ice;
+}
+EXPORT_SYMBOL_GPL(devm_of_qcom_ice_get);
+
static int qcom_ice_probe(struct platform_device *pdev)
{
struct qcom_ice *engine;
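Note: devm_of_qcom_ice_get() above follows the usual devres recipe: allocate the devres node first, try to acquire the resource, and only attach the node (and with it the release callback) when acquisition succeeded, so teardown happens automatically, and in reverse order, when the consumer device goes away. A compact standalone model of the release-list side of that idea follows; the node allocation is folded into devres_add here for brevity, and this is a toy model, not the kernel devres implementation.

#include <stdio.h>
#include <stdlib.h>

struct devres { struct devres *next; void (*release)(void *); void *data; };
struct device { struct devres *res; };

static int devres_add(struct device *dev, void (*release)(void *), void *data)
{
	struct devres *dr = malloc(sizeof(*dr));

	if (!dr)
		return -1;
	dr->release = release;
	dr->data = data;
	dr->next = dev->res;	/* newest first => released in reverse order */
	dev->res = dr;
	return 0;
}

static void device_teardown(struct device *dev)
{
	while (dev->res) {
		struct devres *dr = dev->res;

		dev->res = dr->next;
		dr->release(dr->data);
		free(dr);
	}
}

static void put_resource(void *data) { printf("releasing %s\n", (char *)data); }

int main(void)
{
	struct device dev = { 0 };

	/* acquisition succeeded, so register the matching release action */
	if (devres_add(&dev, put_resource, "ice") < 0)
		return 1;
	device_teardown(&dev);	/* prints "releasing ice" */
	return 0;
}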
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eeb7d082c247..c43fb496da95 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1695,9 +1695,12 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *transfer)
{
+ int ret;
struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
- spi_imx_setupxfer(spi, transfer);
+ ret = spi_imx_setupxfer(spi, transfer);
+ if (ret < 0)
+ return ret;
transfer->effective_speed_hz = spi_imx->spi_bus_clk;
/* flush rxfifo before transfer */
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 08e49a876894..64e1b2f8a000 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -1117,9 +1117,9 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
(&tqspi->xfer_completion,
QSPI_DMA_TIMEOUT);
- if (WARN_ON(ret == 0)) {
- dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
- ret);
+ if (WARN_ON_ONCE(ret == 0)) {
+ dev_err_ratelimited(tqspi->dev,
+ "QSPI Transfer failed with timeout\n");
if (tqspi->is_curr_dma_xfer &&
(tqspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
index c62407077d37..cd7fe7d814ce 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
@@ -66,10 +66,7 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
int j;
int count;
- if (num_fifo_bytes - i < agilent_82350b_fifo_size)
- block_size = num_fifo_bytes - i;
- else
- block_size = agilent_82350b_fifo_size;
+ block_size = min(num_fifo_bytes - i, agilent_82350b_fifo_size);
set_transfer_counter(a_priv, block_size);
writeb(ENABLE_TI_TO_SRAM | DIRECTION_GPIB_TO_HOST,
a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
@@ -200,10 +197,7 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
for (i = 1; i < fifotransferlength;) {
clear_bit(WRITE_READY_BN, &tms_priv->state);
- if (fifotransferlength - i < agilent_82350b_fifo_size)
- block_size = fifotransferlength - i;
- else
- block_size = agilent_82350b_fifo_size;
+ block_size = min(fifotransferlength - i, agilent_82350b_fifo_size);
set_transfer_counter(a_priv, block_size);
for (j = 0; j < block_size; ++j, ++i) {
// load data into board's sram
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 390abcfe7188..8c527af98927 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1305,12 +1305,16 @@ static void tb_scan_port(struct tb_port *port)
goto out_rpm_put;
}
- tb_retimer_scan(port, true);
-
sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
tb_downstream_route(port));
if (IS_ERR(sw)) {
/*
+ * Make the downstream retimers available even if there
+ * is no router connected.
+ */
+ tb_retimer_scan(port, true);
+
+ /*
* If there is an error accessing the connected switch
* it may be connected to another domain. Also we allow
* the other domain to be connected to a max depth switch.
@@ -1360,6 +1364,14 @@ static void tb_scan_port(struct tb_port *port)
tb_configure_link(port, upstream_port, sw);
/*
+ * Scan for downstream retimers. We only scan them after the
+ * router has been enumerated to avoid issues with certain
+ * Pluggable devices that expect the host to enumerate them
+ * within a certain timeout.
+ */
+ tb_retimer_scan(port, true);
+
+ /*
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported.
*/
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 1b137e068444..3449945493ce 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1746,6 +1746,12 @@ msm_serial_early_console_setup_dm(struct earlycon_device *device,
if (!device->port.membase)
return -ENODEV;
+ /* Disable DM / single-character modes */
+ msm_write(&device->port, 0, UARTDM_DMEN);
+ msm_write(&device->port, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR);
+ msm_write(&device->port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR);
+ msm_write(&device->port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR);
+
device->con->write = msm_serial_early_write_dm;
return 0;
}
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 5904a2d4cefa..054a8e630ace 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -563,8 +563,11 @@ static void sifive_serial_break_ctl(struct uart_port *port, int break_state)
static int sifive_serial_startup(struct uart_port *port)
{
struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+ unsigned long flags;
+ uart_port_lock_irqsave(&ssp->port, &flags);
__ssp_enable_rxwm(ssp);
+ uart_port_unlock_irqrestore(&ssp->port, flags);
return 0;
}
@@ -572,9 +575,12 @@ static int sifive_serial_startup(struct uart_port *port)
static void sifive_serial_shutdown(struct uart_port *port)
{
struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+ unsigned long flags;
+ uart_port_lock_irqsave(&ssp->port, &flags);
__ssp_disable_rxwm(ssp);
__ssp_disable_txwm(ssp);
+ uart_port_unlock_irqrestore(&ssp->port, flags);
}
/**
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 0bd6544e30a6..791e2f1f7c0b 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -193,13 +193,12 @@ int set_selection_user(const struct tiocl_selection __user *sel,
return -EFAULT;
/*
- * TIOCL_SELCLEAR, TIOCL_SELPOINTER and TIOCL_SELMOUSEREPORT are OK to
- * use without CAP_SYS_ADMIN as they do not modify the selection.
+ * TIOCL_SELCLEAR and TIOCL_SELPOINTER are OK to use without
+ * CAP_SYS_ADMIN as they do not modify the selection.
*/
switch (v.sel_mode) {
case TIOCL_SELCLEAR:
case TIOCL_SELPOINTER:
- case TIOCL_SELMOUSEREPORT:
break;
default:
if (!capable(CAP_SYS_ADMIN))
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 240ce135bbfb..f1294c29f484 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -677,13 +677,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int err;
- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
- dev_err(hba->dev,
- "%s: skip abort. cmd at tag %d already completed.\n",
- __func__, tag);
- return FAILED;
- }
-
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
@@ -692,6 +685,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq) {
+ dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
+ __func__, tag);
+ return FAILED;
+ }
if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
/*
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 464f13da259a..128e35a848b7 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -5658,6 +5658,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
continue;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq)
+ continue;
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 5ea3f9beb1bd..2436b9454480 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -34,7 +34,7 @@
* Exynos's Vendor specific registers for UFSHCI
*/
#define HCI_TXPRDT_ENTRY_SIZE 0x00
-#define PRDT_PREFECT_EN BIT(31)
+#define PRDT_PREFETCH_EN BIT(31)
#define HCI_RXPRDT_ENTRY_SIZE 0x04
#define HCI_1US_TO_CNT_VAL 0x0C
#define CNT_VAL_1US_MASK 0x3FF
@@ -1060,9 +1060,14 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
exynos_ufs_set_unipro_pclk_div(ufs);
+ exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
+
/* unipro */
exynos_ufs_config_unipro(ufs);
+ if (ufs->drv_data->pre_link)
+ ufs->drv_data->pre_link(ufs);
+
/* m-phy */
exynos_ufs_phy_init(ufs);
if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
@@ -1070,11 +1075,6 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
exynos_ufs_config_phy_cap_attr(ufs);
}
- exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
-
- if (ufs->drv_data->pre_link)
- ufs->drv_data->pre_link(ufs);
-
return 0;
}
@@ -1098,12 +1098,17 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ u32 val = ilog2(DATA_UNIT_SIZE);
exynos_ufs_establish_connt(ufs);
exynos_ufs_fit_aggr_timeout(ufs);
hci_writel(ufs, 0xa, HCI_DATA_REORDER);
- hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE);
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ val |= PRDT_PREFETCH_EN;
+ hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
+
hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
@@ -1517,6 +1522,14 @@ out:
return ret;
}
+static void exynos_ufs_exit(struct ufs_hba *hba)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ phy_power_off(ufs->phy);
+ phy_exit(ufs->phy);
+}
+
static int exynos_ufs_host_reset(struct ufs_hba *hba)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -1687,6 +1700,12 @@ static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
}
}
+static int gs101_ufs_suspend(struct exynos_ufs *ufs)
+{
+ hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
+ return 0;
+}
+
static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@@ -1695,6 +1714,9 @@ static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (status == PRE_CHANGE)
return 0;
+ if (ufs->drv_data->suspend)
+ ufs->drv_data->suspend(ufs);
+
if (!ufshcd_is_link_active(hba))
phy_power_off(ufs->phy);
@@ -1972,6 +1994,7 @@ static int gs101_ufs_pre_pwr_change(struct exynos_ufs *ufs,
static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
+ .exit = exynos_ufs_exit,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
.link_startup_notify = exynos_ufs_link_startup_notify,
.pwr_change_notify = exynos_ufs_pwr_change_notify,
@@ -2010,13 +2033,7 @@ static int exynos_ufs_probe(struct platform_device *pdev)
static void exynos_ufs_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
ufshcd_pltfrm_remove(pdev);
-
- phy_power_off(ufs->phy);
- phy_exit(ufs->phy);
}
static struct exynos_ufs_uic_attr exynos7_uic_attr = {
@@ -2162,6 +2179,7 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
.pre_link = gs101_ufs_pre_link,
.post_link = gs101_ufs_post_link,
.pre_pwr_change = gs101_ufs_pre_pwr_change,
+ .suspend = gs101_ufs_suspend,
};
static const struct of_device_id exynos_ufs_of_match[] = {
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index d0b3df221503..3c6fe5132190 100644
--- a/drivers/ufs/host/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -192,6 +192,7 @@ struct exynos_ufs_drv_data {
struct ufs_pa_layer_attr *pwr);
int (*pre_hce_enable)(struct exynos_ufs *ufs);
int (*post_hce_enable)(struct exynos_ufs *ufs);
+ int (*suspend)(struct exynos_ufs *ufs);
};
struct ufs_phy_time_cfg {
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 23b9f6efa047..a455a95f65fc 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -125,7 +125,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
int err;
int i;
- ice = of_qcom_ice_get(dev);
+ ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index fd1beb10bba7..19101ff1cf1b 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -1963,6 +1963,7 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
unsigned int bit;
unsigned long reg;
+ local_bh_disable();
spin_lock_irqsave(&priv_dev->lock, flags);
reg = readl(&priv_dev->regs->usb_ists);
@@ -2004,6 +2005,7 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
irqend:
writel(~0, &priv_dev->regs->ep_ien);
spin_unlock_irqrestore(&priv_dev->lock, flags);
+ local_bh_enable();
return ret;
}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 1a7fc638213e..4f8bfd242b59 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -336,6 +336,13 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
return ret;
}
+static void ci_hdrc_imx_disable_regulator(void *arg)
+{
+ struct ci_hdrc_imx_data *data = arg;
+
+ regulator_disable(data->hsic_pad_regulator);
+}
+
static int ci_hdrc_imx_probe(struct platform_device *pdev)
{
struct ci_hdrc_imx_data *data;
@@ -394,6 +401,13 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
"Failed to enable HSIC pad regulator\n");
goto err_put;
}
+ ret = devm_add_action_or_reset(dev,
+ ci_hdrc_imx_disable_regulator, data);
+ if (ret) {
+ dev_err(dev,
+ "Failed to add regulator devm action\n");
+ goto err_put;
+ }
}
}
@@ -432,11 +446,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
ret = imx_get_clks(dev);
if (ret)
- goto disable_hsic_regulator;
+ goto qos_remove_request;
ret = imx_prepare_enable_clks(dev);
if (ret)
- goto disable_hsic_regulator;
+ goto qos_remove_request;
ret = clk_prepare_enable(data->clk_wakeup);
if (ret)
@@ -470,7 +484,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI) {
pdata.flags |= CI_HDRC_OVERRIDE_PHY_CONTROL;
data->override_phy_control = true;
- usb_phy_init(pdata.usb_phy);
+ ret = usb_phy_init(pdata.usb_phy);
+ if (ret) {
+ dev_err(dev, "Failed to init phy\n");
+ goto err_clk;
+ }
}
if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM)
@@ -479,7 +497,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
ret = imx_usbmisc_init(data->usbmisc_data);
if (ret) {
dev_err(dev, "usbmisc init failed, ret=%d\n", ret);
- goto err_clk;
+ goto phy_shutdown;
}
data->ci_pdev = ci_hdrc_add_device(dev,
@@ -488,7 +506,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
if (IS_ERR(data->ci_pdev)) {
ret = PTR_ERR(data->ci_pdev);
dev_err_probe(dev, ret, "ci_hdrc_add_device failed\n");
- goto err_clk;
+ goto phy_shutdown;
}
if (data->usbmisc_data) {
@@ -522,19 +540,20 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
disable_device:
ci_hdrc_remove_device(data->ci_pdev);
+phy_shutdown:
+ if (data->override_phy_control)
+ usb_phy_shutdown(data->phy);
err_clk:
clk_disable_unprepare(data->clk_wakeup);
err_wakeup_clk:
imx_disable_unprepare_clks(dev);
-disable_hsic_regulator:
- if (data->hsic_pad_regulator)
- /* don't overwrite original ret (cf. EPROBE_DEFER) */
- regulator_disable(data->hsic_pad_regulator);
+qos_remove_request:
if (pdata.flags & CI_HDRC_PMQOS)
cpu_latency_qos_remove_request(&data->pm_qos_req);
data->ci_pdev = NULL;
err_put:
- put_device(data->usbmisc_data->dev);
+ if (data->usbmisc_data)
+ put_device(data->usbmisc_data->dev);
return ret;
}
@@ -556,10 +575,9 @@ static void ci_hdrc_imx_remove(struct platform_device *pdev)
clk_disable_unprepare(data->clk_wakeup);
if (data->plat_data->flags & CI_HDRC_PMQOS)
cpu_latency_qos_remove_request(&data->pm_qos_req);
- if (data->hsic_pad_regulator)
- regulator_disable(data->hsic_pad_regulator);
}
- put_device(data->usbmisc_data->dev);
+ if (data->usbmisc_data)
+ put_device(data->usbmisc_data->dev);
}
static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
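Note: the ci_hdrc_imx probe hunks rework the error path: the HSIC pad regulator is now undone by a devm action registered right after it is enabled, the remaining goto labels are renamed so each label undoes exactly the step acquired before the failing one, and a new phy_shutdown label between err_clk and device removal shuts the PHY down when later steps fail. A tiny standalone sketch of that goto-unwind shape, with toy acquire/release helpers rather than the driver's real resources:

#include <stdio.h>
#include <stdlib.h>

static int step;		/* counts acquisitions */
static int fail_at;		/* which acquisition should fail */

static void *acquire(const char *what)
{
	if (++step == fail_at) {
		printf("acquire %s: failed\n", what);
		return NULL;
	}
	printf("acquire %s\n", what);
	return malloc(1);
}

static void release(void *res, const char *what)
{
	printf("release %s\n", what);
	free(res);
}

/* Each label unwinds everything acquired before the step that failed. */
static int probe(void)
{
	void *clk, *phy, *child;

	clk = acquire("clk");
	if (!clk)
		goto err;

	phy = acquire("phy");
	if (!phy)
		goto err_clk;

	child = acquire("child device");
	if (!child)
		goto phy_shutdown;

	release(child, "child device");
	release(phy, "phy");
	release(clk, "clk");
	return 0;

phy_shutdown:
	release(phy, "phy");
err_clk:
	release(clk, "clk");
err:
	return -1;
}

int main(void)
{
	fail_at = 3;		/* make the third acquisition fail */
	return probe() == -1 ? 0 : 1;
}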
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 86ee39db013f..16e7fa4d488d 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -726,7 +726,7 @@ static int wdm_open(struct inode *inode, struct file *file)
rv = -EBUSY;
goto out;
}
-
+ smp_rmb(); /* ordered against wdm_wwan_port_stop() */
rv = usb_autopm_get_interface(desc->intf);
if (rv < 0) {
dev_err(&desc->intf->dev, "Error autopm - %d\n", rv);
@@ -829,6 +829,7 @@ static struct usb_class_driver wdm_class = {
static int wdm_wwan_port_start(struct wwan_port *port)
{
struct wdm_device *desc = wwan_port_get_drvdata(port);
+ int rv;
/* The interface is both exposed via the WWAN framework and as a
* legacy usbmisc chardev. If chardev is already open, just fail
@@ -848,7 +849,15 @@ static int wdm_wwan_port_start(struct wwan_port *port)
wwan_port_txon(port);
/* Start getting events */
- return usb_submit_urb(desc->validity, GFP_KERNEL);
+ rv = usb_submit_urb(desc->validity, GFP_KERNEL);
+ if (rv < 0) {
+ wwan_port_txoff(port);
+ desc->manage_power(desc->intf, 0);
+ /* this must be last lest we race with chardev open */
+ clear_bit(WDM_WWAN_IN_USE, &desc->flags);
+ }
+
+ return rv;
}
static void wdm_wwan_port_stop(struct wwan_port *port)
@@ -859,8 +868,10 @@ static void wdm_wwan_port_stop(struct wwan_port *port)
poison_urbs(desc);
desc->manage_power(desc->intf, 0);
clear_bit(WDM_READ, &desc->flags);
- clear_bit(WDM_WWAN_IN_USE, &desc->flags);
unpoison_urbs(desc);
+ smp_wmb(); /* ordered against wdm_open() */
+ /* this must be last lest we open a poisoned device */
+ clear_bit(WDM_WWAN_IN_USE, &desc->flags);
}
static void wdm_wwan_port_tx_complete(struct urb *urb)
@@ -868,7 +879,7 @@ static void wdm_wwan_port_tx_complete(struct urb *urb)
struct sk_buff *skb = urb->context;
struct wdm_device *desc = skb_shinfo(skb)->destructor_arg;
- usb_autopm_put_interface(desc->intf);
+ usb_autopm_put_interface_async(desc->intf);
wwan_port_txon(desc->wwanp);
kfree_skb(skb);
}
@@ -898,7 +909,7 @@ static int wdm_wwan_port_tx(struct wwan_port *port, struct sk_buff *skb)
req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
req->wValue = 0;
- req->wIndex = desc->inum;
+ req->wIndex = desc->inum; /* already converted */
req->wLength = cpu_to_le16(skb->len);
skb_shinfo(skb)->destructor_arg = desc;
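Note: the cdc-wdm hunks order the WDM_WWAN_IN_USE flag against the URB poisoning: the stop path unpoisons first, issues smp_wmb(), and only then clears the flag, while the open path re-checks state after smp_rmb(), so an opener that sees the flag clear is guaranteed to also see the unpoisoned URBs. The same publish/observe idea in a standalone C11 sketch, using release/acquire atomics instead of the kernel barriers; this is illustrative only, not the driver's exact primitives.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool in_use = true;	/* WWAN port currently owns the device */
static atomic_int  poisoned = 1;	/* 1 = URBs poisoned, 0 = usable */

/* Stop path: make the device usable again, then publish "not in use". */
static void port_stop(void)
{
	atomic_store_explicit(&poisoned, 0, memory_order_relaxed);
	atomic_store_explicit(&in_use, false, memory_order_release);
}

/* Open path: claim the device; the acquire pairs with the release above. */
static int chardev_open(void)
{
	if (atomic_exchange_explicit(&in_use, true, memory_order_acquire))
		return -16;	/* -EBUSY analogue: still owned by the WWAN port */

	/* guaranteed to observe poisoned == 0, written before the release */
	printf("open ok, poisoned=%d\n",
	       atomic_load_explicit(&poisoned, memory_order_relaxed));
	return 0;
}

int main(void)
{
	port_stop();
	return chardev_open();
}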
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 8efbacc5bc34..36d3df7d040c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -369,6 +369,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
+ /* SanDisk Corp. SanDisk 3.2Gen1 */
+ { USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Realforce 87U Keyboard */
{ USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
@@ -383,6 +386,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0904, 0x6103), .driver_info =
USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ /* Silicon Motion Flash Drive */
+ { USB_DEVICE(0x090c, 0x1000), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Sound Devices USBPre2 */
{ USB_DEVICE(0x0926, 0x0202), .driver_info =
USB_QUIRK_ENDPOINT_IGNORE },
@@ -539,6 +545,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x2040, 0x7200), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* VLI disk */
+ { USB_DEVICE(0x2109, 0x0711), .driver_info = USB_QUIRK_NO_LPM },
+
/* Raydium Touchscreen */
{ USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 052852f80146..54a4ee2b90b7 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -148,11 +148,21 @@ static const struct property_entry dwc3_pci_intel_byt_properties[] = {
{}
};
+/*
+ * Intel Merrifield SoC uses these endpoints for tracing, and they cannot
+ * be re-allocated while in use because the side band flow control signals
+ * are hard wired to certain endpoints:
+ * - 1 High BW Bulk IN (IN#1) (RTIT)
+ * - 1 1KB BW Bulk IN (IN#8) + 1 1KB BW Bulk OUT (Run Control) (OUT#8)
+ */
+static const u8 dwc3_pci_mrfld_reserved_endpoints[] = { 3, 16, 17 };
+
static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_U8_ARRAY("snps,reserved-endpoints", dwc3_pci_mrfld_reserved_endpoints),
PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index a33a42ba0249..4ca7f6240d07 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -207,15 +207,13 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
skip_usb3_phy:
/* ulpi reset via gpio-modepin or gpio-framework driver */
- reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(reset_gpio)) {
return dev_err_probe(dev, PTR_ERR(reset_gpio),
"Failed to request reset GPIO\n");
}
if (reset_gpio) {
- /* Toggle ulpi to reset the phy. */
- gpiod_set_value_cansleep(reset_gpio, 1);
usleep_range(5000, 10000);
gpiod_set_value_cansleep(reset_gpio, 0);
usleep_range(5000, 10000);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 89a4dc8ebf94..c6761fe89cfa 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -547,6 +547,7 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
{
struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_ep *dep;
u32 cmd;
int i;
int ret;
@@ -563,8 +564,13 @@ int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
return ret;
/* Reset resource allocation flags */
- for (i = resource_index; i < dwc->num_eps && dwc->eps[i]; i++)
- dwc->eps[i]->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
+ for (i = resource_index; i < dwc->num_eps; i++) {
+ dep = dwc->eps[i];
+ if (!dep)
+ continue;
+
+ dep->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
+ }
return 0;
}
@@ -751,9 +757,11 @@ void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
dwc->last_fifo_depth = fifo_depth;
/* Clear existing TXFIFO for all IN eps except ep0 */
- for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
- num += 2) {
+ for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM); num += 2) {
dep = dwc->eps[num];
+ if (!dep)
+ continue;
+
/* Don't change TXFRAMNUM on usb31 version */
size = DWC3_IP_IS(DWC3) ? 0 :
dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
@@ -3703,6 +3711,8 @@ out:
for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
dep = dwc->eps[i];
+ if (!dep)
+ continue;
if (!(dep->flags & DWC3_EP_ENABLED))
continue;
@@ -3852,6 +3862,10 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
u8 epnum = event->endpoint_number;
dep = dwc->eps[epnum];
+ if (!dep) {
+ dev_warn(dwc->dev, "spurious event, endpoint %u is not allocated\n", epnum);
+ return;
+ }
if (!(dep->flags & DWC3_EP_ENABLED)) {
if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED))
@@ -4564,6 +4578,12 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
if (!count)
return IRQ_NONE;
+ if (count > evt->length) {
+ dev_err_ratelimited(dwc->dev, "invalid count(%u) > evt->length(%u)\n",
+ count, evt->length);
+ return IRQ_NONE;
+ }
+
evt->count = count;
evt->flags |= DWC3_EVENT_PENDING;
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
index 573109ca5b79..a09f72772e6e 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
@@ -548,6 +548,9 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx)
d->vhub = vhub;
d->index = idx;
d->name = devm_kasprintf(parent, GFP_KERNEL, "port%d", idx+1);
+ if (!d->name)
+ return -ENOMEM;
+
d->regs = vhub->regs + 0x100 + 0x10 * idx;
ast_vhub_init_ep0(vhub, &d->ep0, d);
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 0881fdd1823e..dcf31a592f5d 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1946,6 +1946,12 @@ max3421_remove(struct spi_device *spi)
usb_put_hcd(hcd);
}
+static const struct spi_device_id max3421_spi_ids[] = {
+ { "max3421" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, max3421_spi_ids);
+
static const struct of_device_id max3421_of_match_table[] = {
{ .compatible = "maxim,max3421", },
{},
@@ -1955,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, max3421_of_match_table);
static struct spi_driver max3421_driver = {
.probe = max3421_probe,
.remove = max3421_remove,
+ .id_table = max3421_spi_ids,
.driver = {
.name = "max3421-hcd",
.of_match_table = max3421_of_match_table,
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 900ea0d368e0..9f0a6b27e47c 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -165,6 +165,25 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
return 0;
}
+static int ohci_quirk_loongson(struct usb_hcd *hcd)
+{
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+
+ /*
+ * Loongson's LS7A OHCI controller (rev 0x02) has a
+ * flaw: the MMIO registers at offsets 0x60/0x64 are
+ * treated as a legacy PS/2-compatible keyboard/mouse
+ * interface. Since OHCI only uses a 4KB BAR resource,
+ * the LS7A's 32KB BAR wraps around (the 2nd 4KB of the
+ * BAR is internally the same as the 1st 4KB). So add a
+ * 4KB offset (0x1000) to the OHCI registers as a quirk.
+ */
+ if (pdev->revision == 0x2)
+ hcd->regs += SZ_4K; /* SZ_4K = 0x1000 */
+
+ return 0;
+}
+
static int ohci_quirk_qemu(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
@@ -225,6 +244,10 @@ static const struct pci_device_id ohci_pci_quirks[] = {
.driver_data = (unsigned long)ohci_quirk_amd700,
},
{
+ PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x7a24),
+ .driver_data = (unsigned long)ohci_quirk_loongson,
+ },
+ {
.vendor = PCI_VENDOR_ID_APPLE,
.device = 0x003f,
.subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 69c278b64084..71e4c4ca6ad5 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1878,9 +1878,10 @@ int xhci_bus_resume(struct usb_hcd *hcd)
int max_ports, port_index;
int sret;
u32 next_state;
- u32 temp, portsc;
+ u32 portsc;
struct xhci_hub *rhub;
struct xhci_port **ports;
+ bool disabled_irq = false;
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
@@ -1896,17 +1897,20 @@ int xhci_bus_resume(struct usb_hcd *hcd)
return -ESHUTDOWN;
}
- /* delay the irqs */
- temp = readl(&xhci->op_regs->command);
- temp &= ~CMD_EIE;
- writel(temp, &xhci->op_regs->command);
-
/* bus specific resume for ports we suspended at bus_suspend */
- if (hcd->speed >= HCD_USB3)
+ if (hcd->speed >= HCD_USB3) {
next_state = XDEV_U0;
- else
+ } else {
next_state = XDEV_RESUME;
-
+ if (bus_state->bus_suspended) {
+ /*
+ * prevent port event interrupts from interfering
+ * with usb2 port resume process
+ */
+ xhci_disable_interrupter(xhci->interrupters[0]);
+ disabled_irq = true;
+ }
+ }
port_index = max_ports;
while (port_index--) {
portsc = readl(ports[port_index]->addr);
@@ -1974,11 +1978,9 @@ int xhci_bus_resume(struct usb_hcd *hcd)
(void) readl(&xhci->op_regs->command);
bus_state->next_statechange = jiffies + msecs_to_jiffies(5);
- /* re-enable irqs */
- temp = readl(&xhci->op_regs->command);
- temp |= CMD_EIE;
- writel(temp, &xhci->op_regs->command);
- temp = readl(&xhci->op_regs->command);
+ /* re-enable interrupter */
+ if (disabled_irq)
+ xhci_enable_interrupter(xhci->interrupters[0]);
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 87f1597a0e5a..257e4d79971f 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -73,13 +73,3 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
-
-int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
-{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
- /* Without reset on resume, the HC won't work at all */
- xhci->quirks |= XHCI_RESET_ON_RESUME;
-
- return 0;
-}
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index 3be021793cc8..9d26e22c4842 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -12,16 +12,10 @@ struct usb_hcd;
#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
-int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
#else
static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
{
return 0;
}
-
-static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
-{
- return 0;
-}
#endif
#endif /* __LINUX_XHCI_MVEBU_H */
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index d85ffa9ffaa7..ff813dca2d1d 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -106,7 +106,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = {
};
static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
- .init_quirk = xhci_mvebu_a3700_init_quirk,
+ .quirks = XHCI_RESET_ON_RESUME,
};
static const struct xhci_plat_priv xhci_plat_brcm = {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5e89e9cdcec2..5a0e361818c2 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1198,16 +1198,19 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
* Stopped state, but it will soon change to Running.
*
* Assume this bug on unexpected Stop Endpoint failures.
- * Keep retrying until the EP starts and stops again, on
- * chips where this is known to help. Wait for 100ms.
+ * Keep retrying until the EP starts and stops again.
*/
- if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
- break;
fallthrough;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
GET_EP_CTX_STATE(ep_ctx));
+ /*
+ * Don't retry forever if we guessed wrong or a defective HC never starts
+ * the EP or says 'Running' but fails the command. We must give back TDs.
+ */
+ if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
+ break;
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
@@ -2644,6 +2647,22 @@ static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_
return 0;
}
+static bool xhci_spurious_success_tx_event(struct xhci_hcd *xhci,
+ struct xhci_ring *ring)
+{
+ switch (ring->old_trb_comp_code) {
+ case COMP_SHORT_PACKET:
+ return xhci->quirks & XHCI_SPURIOUS_SUCCESS;
+ case COMP_USB_TRANSACTION_ERROR:
+ case COMP_BABBLE_DETECTED_ERROR:
+ case COMP_ISOCH_BUFFER_OVERRUN:
+ return xhci->quirks & XHCI_ETRON_HOST &&
+ ring->type == TYPE_ISOC;
+ default:
+ return false;
+ }
+}
+
/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -2664,6 +2683,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
+ bool ring_xrun_event = false;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
@@ -2697,8 +2717,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
case COMP_SUCCESS:
if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
trb_comp_code = COMP_SHORT_PACKET;
- xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
- slot_id, ep_index, ep_ring->last_td_was_short);
+ xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td comp code %d\n",
+ slot_id, ep_index, ep_ring->old_trb_comp_code);
}
break;
case COMP_SHORT_PACKET:
@@ -2770,14 +2790,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* Underrun Event for OUT Isoch endpoint.
*/
xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index);
- if (ep->skip)
- break;
- return 0;
+ ring_xrun_event = true;
+ break;
case COMP_RING_OVERRUN:
xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index);
- if (ep->skip)
- break;
- return 0;
+ ring_xrun_event = true;
+ break;
case COMP_MISSED_SERVICE_ERROR:
/*
* When encounter missed service error, one or more isoc tds
@@ -2789,7 +2807,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_dbg(xhci,
"Miss service interval error for slot %u ep %u, set skip flag\n",
slot_id, ep_index);
- return 0;
+ break;
case COMP_NO_PING_RESPONSE_ERROR:
ep->skip = true;
xhci_dbg(xhci,
@@ -2837,6 +2855,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_dequeue_td(xhci, td, ep_ring, td->status);
}
+ /* Missed TDs will be skipped on the next event */
+ if (trb_comp_code == COMP_MISSED_SERVICE_ERROR)
+ return 0;
+
if (list_empty(&ep_ring->td_list)) {
/*
* Don't print warnings if ring is empty due to a stopped endpoint generating an
@@ -2846,7 +2868,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
if (trb_comp_code != COMP_STOPPED &&
trb_comp_code != COMP_STOPPED_LENGTH_INVALID &&
- !ep_ring->last_td_was_short) {
+ !ring_xrun_event &&
+ !xhci_spurious_success_tx_event(xhci, ep_ring)) {
xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n",
slot_id, ep_index);
}
@@ -2880,6 +2903,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
goto check_endpoint_halted;
}
+ /* TD was queued after xrun, maybe xrun was on a link, don't panic yet */
+ if (ring_xrun_event)
+ return 0;
+
/*
* Skip the Force Stopped Event. The 'ep_trb' of FSE is not in the current
* TD pointed by 'ep_ring->dequeue' because that the hardware dequeue
@@ -2894,11 +2921,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/*
* Some hosts give a spurious success event after a short
- * transfer. Ignore it.
+ * transfer or error on last TRB. Ignore it.
*/
- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
- ep_ring->last_td_was_short) {
- ep_ring->last_td_was_short = false;
+ if (xhci_spurious_success_tx_event(xhci, ep_ring)) {
+ xhci_dbg(xhci, "Spurious event dma %pad, comp_code %u after %u\n",
+ &ep_trb_dma, trb_comp_code, ep_ring->old_trb_comp_code);
+ ep_ring->old_trb_comp_code = 0;
return 0;
}
@@ -2926,10 +2954,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
} while (ep->skip);
- if (trb_comp_code == COMP_SHORT_PACKET)
- ep_ring->last_td_was_short = true;
- else
- ep_ring->last_td_was_short = false;
+ ep_ring->old_trb_comp_code = trb_comp_code;
+
+ /* Get out if a TD was queued at enqueue after the xrun occurred */
+ if (ring_xrun_event)
+ return 0;
ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb, ep_trb_dma);
@@ -3780,7 +3809,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
* enqueue a No Op TRB; this prevents the Setup and Data Stage
* TRBs from being split by the Link TRB.
*/
- if (trb_is_link(ep_ring->enqueue + 1)) {
+ if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue + 1)) {
field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state;
queue_trb(xhci, ep_ring, false, 0, 0,
TRB_INTR_TARGET(0), field);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1a90ebc8a30e..72070f7e6a76 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -322,7 +322,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
xhci_info(xhci, "Fault detected\n");
}
-static int xhci_enable_interrupter(struct xhci_interrupter *ir)
+int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
u32 iman;
@@ -335,7 +335,7 @@ static int xhci_enable_interrupter(struct xhci_interrupter *ir)
return 0;
}
-static int xhci_disable_interrupter(struct xhci_interrupter *ir)
+int xhci_disable_interrupter(struct xhci_interrupter *ir)
{
u32 iman;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 59c6c1c701b9..2c394cba120f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1371,7 +1371,7 @@ struct xhci_ring {
unsigned int num_trbs_free; /* used only by xhci DbC */
unsigned int bounce_buf_len;
enum xhci_ring_type type;
- bool last_td_was_short;
+ u32 old_trb_comp_code;
struct radix_tree_root *trb_address_map;
};
@@ -1890,6 +1890,8 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
struct usb_tt *tt, gfp_t mem_flags);
int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
u32 imod_interval);
+int xhci_enable_interrupter(struct xhci_interrupter *ir);
+int xhci_disable_interrupter(struct xhci_interrupter *ir);
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 9b34e23b7091..6ac7a0a5cf07 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1093,6 +1093,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) },
{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) },
{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) },
+ /* Abacus Electrics */
+ { USB_DEVICE(FTDI_VID, ABACUS_OPTICAL_PROBE_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 52be47d684ea..9acb6f837327 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -443,6 +443,11 @@
#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
/*
+ * Abacus Electrics
+ */
+#define ABACUS_OPTICAL_PROBE_PID 0xf458 /* ABACUS ELECTRICS Optical Probe */
+
+/*
* Oceanic product ids
*/
#define FTDI_OCEANIC_PID 0xF460 /* Oceanic dive instrument */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5cd26dac2069..27879cc57536 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -611,6 +611,7 @@ static void option_instat_callback(struct urb *urb);
/* Sierra Wireless products */
#define SIERRA_VENDOR_ID 0x1199
#define SIERRA_PRODUCT_EM9191 0x90d3
+#define SIERRA_PRODUCT_EM9291 0x90e3
/* UNISOC (Spreadtrum) products */
#define UNISOC_VENDOR_ID 0x1782
@@ -2432,6 +2433,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 2c12449ff60c..a0afaf254d12 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -100,6 +100,11 @@ DEVICE(nokia, NOKIA_IDS);
{ USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
DEVICE_N(novatel_gps, NOVATEL_IDS, 3);
+/* OWON electronic test and measurement equipment driver */
+#define OWON_IDS() \
+ { USB_DEVICE(0x5345, 0x1234) } /* HDS200 oscilloscopes and others */
+DEVICE(owon, OWON_IDS);
+
/* Siemens USB/MPI adapter */
#define SIEMENS_IDS() \
{ USB_DEVICE(0x908, 0x0004) }
@@ -134,6 +139,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
&motorola_tetra_device,
&nokia_device,
&novatel_gps_device,
+ &owon_device,
&siemens_mpi_device,
&suunto_device,
&vivopay_device,
@@ -153,6 +159,7 @@ static const struct usb_device_id id_table[] = {
MOTOROLA_TETRA_IDS(),
NOKIA_IDS(),
NOVATEL_IDS(),
+ OWON_IDS(),
SIEMENS_IDS(),
SUUNTO_IDS(),
VIVOPAY_IDS(),
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 1f8c9b16a0fb..d460d71b4257 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -83,6 +83,13 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_LUNS),
+/* Reported-by: Oliver Neukum <oneukum@suse.com> */
+UNUSUAL_DEV(0x125f, 0xa94a, 0x0160, 0x0160,
+ "ADATA",
+ "Portable HDD CH94",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 9c76c3d0c6cf..67a533e35150 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1052,9 +1052,11 @@ struct typec_partner *typec_register_partner(struct typec_port *port,
partner->usb_mode = USB_MODE_USB3;
}
+ mutex_lock(&port->partner_link_lock);
ret = device_register(&partner->dev);
if (ret) {
dev_err(&port->dev, "failed to register partner (%d)\n", ret);
+ mutex_unlock(&port->partner_link_lock);
put_device(&partner->dev);
return ERR_PTR(ret);
}
@@ -1063,6 +1065,7 @@ struct typec_partner *typec_register_partner(struct typec_port *port,
typec_partner_link_device(partner, port->usb2_dev);
if (port->usb3_dev)
typec_partner_link_device(partner, port->usb3_dev);
+ mutex_unlock(&port->partner_link_lock);
return partner;
}
@@ -1083,12 +1086,18 @@ void typec_unregister_partner(struct typec_partner *partner)
port = to_typec_port(partner->dev.parent);
- if (port->usb2_dev)
+ mutex_lock(&port->partner_link_lock);
+ if (port->usb2_dev) {
typec_partner_unlink_device(partner, port->usb2_dev);
- if (port->usb3_dev)
+ port->usb2_dev = NULL;
+ }
+ if (port->usb3_dev) {
typec_partner_unlink_device(partner, port->usb3_dev);
+ port->usb3_dev = NULL;
+ }
device_unregister(&partner->dev);
+ mutex_unlock(&port->partner_link_lock);
}
EXPORT_SYMBOL_GPL(typec_unregister_partner);
@@ -2041,10 +2050,11 @@ static struct typec_partner *typec_get_partner(struct typec_port *port)
static void typec_partner_attach(struct typec_connector *con, struct device *dev)
{
struct typec_port *port = container_of(con, struct typec_port, con);
- struct typec_partner *partner = typec_get_partner(port);
+ struct typec_partner *partner;
struct usb_device *udev = to_usb_device(dev);
enum usb_mode usb_mode;
+ mutex_lock(&port->partner_link_lock);
if (udev->speed < USB_SPEED_SUPER) {
usb_mode = USB_MODE_USB2;
port->usb2_dev = dev;
@@ -2053,18 +2063,22 @@ static void typec_partner_attach(struct typec_connector *con, struct device *dev
port->usb3_dev = dev;
}
+ partner = typec_get_partner(port);
if (partner) {
typec_partner_set_usb_mode(partner, usb_mode);
typec_partner_link_device(partner, dev);
put_device(&partner->dev);
}
+ mutex_unlock(&port->partner_link_lock);
}
static void typec_partner_deattach(struct typec_connector *con, struct device *dev)
{
struct typec_port *port = container_of(con, struct typec_port, con);
- struct typec_partner *partner = typec_get_partner(port);
+ struct typec_partner *partner;
+ mutex_lock(&port->partner_link_lock);
+ partner = typec_get_partner(port);
if (partner) {
typec_partner_unlink_device(partner, dev);
put_device(&partner->dev);
@@ -2074,6 +2088,7 @@ static void typec_partner_deattach(struct typec_connector *con, struct device *d
port->usb2_dev = NULL;
else if (port->usb3_dev == dev)
port->usb3_dev = NULL;
+ mutex_unlock(&port->partner_link_lock);
}
/**
@@ -2614,6 +2629,7 @@ struct typec_port *typec_register_port(struct device *parent,
ida_init(&port->mode_ids);
mutex_init(&port->port_type_lock);
+ mutex_init(&port->partner_link_lock);
port->id = id;
port->ops = cap->ops;
diff --git a/drivers/usb/typec/class.h b/drivers/usb/typec/class.h
index b3076a24ad2e..db2fe96c48ff 100644
--- a/drivers/usb/typec/class.h
+++ b/drivers/usb/typec/class.h
@@ -59,6 +59,7 @@ struct typec_port {
enum typec_port_type port_type;
enum usb_mode usb_mode;
struct mutex port_type_lock;
+ struct mutex partner_link_lock;
enum typec_orientation orientation;
struct typec_switch *sw;
diff --git a/drivers/usb/typec/ucsi/cros_ec_ucsi.c b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
index c605c8616726..744f0709a40e 100644
--- a/drivers/usb/typec/ucsi/cros_ec_ucsi.c
+++ b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
@@ -105,12 +105,13 @@ static int cros_ucsi_async_control(struct ucsi *ucsi, u64 cmd)
return 0;
}
-static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd)
+static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd, u32 *cci,
+ void *data, size_t size)
{
struct cros_ucsi_data *udata = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_sync_control_common(ucsi, cmd);
+ ret = ucsi_sync_control_common(ucsi, cmd, cci, data, size);
switch (ret) {
case -EBUSY:
/* EC may return -EBUSY if CCI.busy is set.
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 2a2915b0a645..e8c7e9dc4930 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -55,7 +55,8 @@ void ucsi_notify_common(struct ucsi *ucsi, u32 cci)
}
EXPORT_SYMBOL_GPL(ucsi_notify_common);
-int ucsi_sync_control_common(struct ucsi *ucsi, u64 command)
+int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size)
{
bool ack = UCSI_COMMAND(command) == UCSI_ACK_CC_CI;
int ret;
@@ -80,6 +81,13 @@ out_clear_bit:
else
clear_bit(COMMAND_PENDING, &ucsi->flags);
+ if (!ret && cci)
+ ret = ucsi->ops->read_cci(ucsi, cci);
+
+ if (!ret && data &&
+ (*cci & UCSI_CCI_COMMAND_COMPLETE))
+ ret = ucsi->ops->read_message_in(ucsi, data, size);
+
return ret;
}
EXPORT_SYMBOL_GPL(ucsi_sync_control_common);
@@ -95,7 +103,7 @@ static int ucsi_acknowledge(struct ucsi *ucsi, bool conn_ack)
ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
}
- return ucsi->ops->sync_control(ucsi, ctrl);
+ return ucsi->ops->sync_control(ucsi, ctrl, NULL, NULL, 0);
}
static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
@@ -108,9 +116,7 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
if (size > UCSI_MAX_DATA_LENGTH(ucsi))
return -EINVAL;
- ret = ucsi->ops->sync_control(ucsi, command);
- if (ucsi->ops->read_cci(ucsi, cci))
- return -EIO;
+ ret = ucsi->ops->sync_control(ucsi, command, cci, data, size);
if (*cci & UCSI_CCI_BUSY)
return ucsi_run_command(ucsi, UCSI_CANCEL, cci, NULL, 0, false) ?: -EBUSY;
@@ -127,9 +133,6 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
else
err = 0;
- if (!err && data && UCSI_CCI_LENGTH(*cci))
- err = ucsi->ops->read_message_in(ucsi, data, size);
-
/*
* Don't ACK connection change if there was an error.
*/
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 28780acc4af2..892bcf8dbcd5 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -79,7 +79,8 @@ struct ucsi_operations {
int (*read_cci)(struct ucsi *ucsi, u32 *cci);
int (*poll_cci)(struct ucsi *ucsi, u32 *cci);
int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len);
- int (*sync_control)(struct ucsi *ucsi, u64 command);
+ int (*sync_control)(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size);
int (*async_control)(struct ucsi *ucsi, u64 command);
bool (*update_altmodes)(struct ucsi *ucsi, struct ucsi_altmode *orig,
struct ucsi_altmode *updated);
@@ -531,7 +532,8 @@ void ucsi_altmode_update_active(struct ucsi_connector *con);
int ucsi_resume(struct ucsi *ucsi);
void ucsi_notify_common(struct ucsi *ucsi, u32 cci);
-int ucsi_sync_control_common(struct ucsi *ucsi, u64 command);
+int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
int ucsi_register_port_psy(struct ucsi_connector *con);
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index ac1ebb5d9527..0ac6e5ce4a28 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -128,12 +128,13 @@ static int ucsi_gram_read_message_in(struct ucsi *ucsi, void *val, size_t val_le
return ret;
}
-static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command)
+static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_sync_control_common(ucsi, command);
+ ret = ucsi_sync_control_common(ucsi, command, cci, data, size);
if (ret < 0)
return ret;
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 511dd1b224ae..c1d776c82fc2 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -222,7 +222,6 @@ struct ucsi_ccg {
u16 fw_build;
struct work_struct pm_work;
- u64 last_cmd_sent;
bool has_multiple_dp;
struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
@@ -538,9 +537,10 @@ static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
* first and then vdo=0x3
*/
static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
- struct ucsi_altmode *alt)
+ struct ucsi_altmode *alt,
+ u64 command)
{
- switch (UCSI_ALTMODE_OFFSET(uc->last_cmd_sent)) {
+ switch (UCSI_ALTMODE_OFFSET(command)) {
case NVIDIA_FTB_DP_OFFSET:
if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
@@ -578,37 +578,11 @@ static int ucsi_ccg_read_cci(struct ucsi *ucsi, u32 *cci)
static int ucsi_ccg_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
- struct ucsi_capability *cap;
- struct ucsi_altmode *alt;
spin_lock(&uc->op_lock);
memcpy(val, uc->op_data.message_in, val_len);
spin_unlock(&uc->op_lock);
- switch (UCSI_COMMAND(uc->last_cmd_sent)) {
- case UCSI_GET_CURRENT_CAM:
- if (uc->has_multiple_dp)
- ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
- break;
- case UCSI_GET_ALTERNATE_MODES:
- if (UCSI_ALTMODE_RECIPIENT(uc->last_cmd_sent) ==
- UCSI_RECIPIENT_SOP) {
- alt = val;
- if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
- ucsi_ccg_nvidia_altmode(uc, alt);
- }
- break;
- case UCSI_GET_CAPABILITY:
- if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
- cap = val;
- cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
- }
- break;
- default:
- break;
- }
- uc->last_cmd_sent = 0;
-
return 0;
}
@@ -628,7 +602,8 @@ static int ucsi_ccg_async_control(struct ucsi *ucsi, u64 command)
return ccg_write(uc, reg, (u8 *)&command, sizeof(command));
}
-static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
+static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
struct ucsi_connector *con;
@@ -638,11 +613,9 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
mutex_lock(&uc->lock);
pm_runtime_get_sync(uc->dev);
- uc->last_cmd_sent = command;
-
- if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM &&
+ if (UCSI_COMMAND(command) == UCSI_SET_NEW_CAM &&
uc->has_multiple_dp) {
- con_index = (uc->last_cmd_sent >> 16) &
+ con_index = (command >> 16) &
UCSI_CMD_CONNECTOR_MASK;
if (con_index == 0) {
ret = -EINVAL;
@@ -652,7 +625,31 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
}
- ret = ucsi_sync_control_common(ucsi, command);
+ ret = ucsi_sync_control_common(ucsi, command, cci, data, size);
+
+ switch (UCSI_COMMAND(command)) {
+ case UCSI_GET_CURRENT_CAM:
+ if (uc->has_multiple_dp)
+ ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)data);
+ break;
+ case UCSI_GET_ALTERNATE_MODES:
+ if (UCSI_ALTMODE_RECIPIENT(command) == UCSI_RECIPIENT_SOP) {
+ struct ucsi_altmode *alt = data;
+
+ if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
+ ucsi_ccg_nvidia_altmode(uc, alt, command);
+ }
+ break;
+ case UCSI_GET_CAPABILITY:
+ if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
+ struct ucsi_capability *cap = data;
+
+ cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
+ }
+ break;
+ default:
+ break;
+ }
err_put:
pm_runtime_put_sync(uc->dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 7aeff435c1d8..35a03306d134 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -630,7 +630,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
tag = sbitmap_get(&svq->scsi_tags);
if (tag < 0) {
- pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
+ pr_warn_once("Guest sent too many cmds. Returning TASK_SET_FULL.\n");
return ERR_PTR(-ENOMEM);
}
@@ -930,24 +930,69 @@ static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
}
static void
-vhost_scsi_send_bad_target(struct vhost_scsi *vs,
- struct vhost_virtqueue *vq,
- int head, unsigned out)
+vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc, u8 status)
{
- struct virtio_scsi_cmd_resp __user *resp;
struct virtio_scsi_cmd_resp rsp;
+ struct iov_iter iov_iter;
int ret;
memset(&rsp, 0, sizeof(rsp));
- rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
- resp = vq->iov[out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
- vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+ rsp.status = status;
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
+ sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+
+ if (likely(ret == sizeof(rsp)))
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
+#define TYPE_IO_CMD 0
+#define TYPE_CTRL_TMF 1
+#define TYPE_CTRL_AN 2
+
+static void
+vhost_scsi_send_bad_target(struct vhost_scsi *vs,
+ struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc, int type)
+{
+ union {
+ struct virtio_scsi_cmd_resp cmd;
+ struct virtio_scsi_ctrl_tmf_resp tmf;
+ struct virtio_scsi_ctrl_an_resp an;
+ } rsp;
+ struct iov_iter iov_iter;
+ size_t rsp_size;
+ int ret;
+
+ memset(&rsp, 0, sizeof(rsp));
+
+ if (type == TYPE_IO_CMD) {
+ rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+ rsp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else if (type == TYPE_CTRL_TMF) {
+ rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+ rsp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else {
+ rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+ rsp.an.response = VIRTIO_SCSI_S_BAD_TARGET;
+ }
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
+ rsp_size);
+
+ ret = copy_to_iter(&rsp, rsp_size, &iov_iter);
+
+ if (likely(ret == rsp_size))
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+ else
+ pr_err("Faulted on virtio scsi type=%d\n", type);
+}
+
static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
@@ -1216,8 +1261,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
exp_data_len + prot_bytes,
data_direction);
if (IS_ERR(cmd)) {
- vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
- PTR_ERR(cmd));
+ ret = PTR_ERR(cmd);
+ vq_err(vq, "vhost_scsi_get_tag failed %d\n", ret);
goto err;
}
cmd->tvc_vhost = vs;
@@ -1254,11 +1299,15 @@ err:
* EINVAL: Invalid response buffer, drop the request
* EIO: Respond with bad target
* EAGAIN: Pending request
+ * ENOMEM: Could not allocate resources for request
*/
if (ret == -ENXIO)
break;
else if (ret == -EIO)
- vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+ vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
+ else if (ret == -ENOMEM)
+ vhost_scsi_send_status(vs, vq, &vc,
+ SAM_STAT_TASK_SET_FULL);
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
@@ -1488,7 +1537,10 @@ err:
if (ret == -ENXIO)
break;
else if (ret == -EIO)
- vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+ vhost_scsi_send_bad_target(vs, vq, &vc,
+ v_req.type == VIRTIO_SCSI_T_TMF ?
+ TYPE_CTRL_TMF :
+ TYPE_CTRL_AN);
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 5eaade757860..d50fe030d825 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -247,7 +247,7 @@ virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev)
sg_init_one(&data_sg, get_data, sizeof(*get_data));
sg_init_one(&result_sg, result, sizeof(*result));
cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET);
- cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
cmd.data_sg = &data_sg;
cmd.result_sg = &result_sg;
ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
@@ -305,7 +305,7 @@ static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev)
sg_init_one(&result_sg, data, sizeof(*data));
cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY);
- cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
cmd.result_sg = &result_sg;
ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index f7d6f47971fd..24f485827e03 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -278,7 +278,7 @@ config XEN_PRIVCMD_EVENTFD
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
- depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
+ depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
default m
help
This ACPI processor uploads Power Management information to the Xen