Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accel/qaic/qaic_drv.c  4
-rw-r--r--  drivers/acpi/video_detect.c  16
-rw-r--r--  drivers/acpi/x86/utils.c  79
-rw-r--r--  drivers/base/arch_numa.c  4
-rw-r--r--  drivers/base/cacheinfo.c  14
-rw-r--r--  drivers/base/regmap/internal.h  1
-rw-r--r--  drivers/base/regmap/regcache-maple.c  3
-rw-r--r--  drivers/base/regmap/regmap.c  13
-rw-r--r--  drivers/block/zram/zram_drv.c  27
-rw-r--r--  drivers/bluetooth/btusb.c  26
-rw-r--r--  drivers/clk/clk-en7523.c  4
-rw-r--r--  drivers/clk/qcom/Kconfig  4
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.c  11
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.h  1
-rw-r--r--  drivers/clk/qcom/clk-rcg.h  1
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c  48
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c  13
-rw-r--r--  drivers/clk/qcom/dispcc-sm8550.c  18
-rw-r--r--  drivers/clk/qcom/tcsrcc-sm8550.c  18
-rw-r--r--  drivers/dma-buf/dma-fence-array.c  28
-rw-r--r--  drivers/dma-buf/dma-fence-unwrap.c  126
-rw-r--r--  drivers/firmware/qcom/qcom_scm.c  2
-rw-r--r--  drivers/gpio/gpio-grgpio.c  26
-rw-r--r--  drivers/gpio/gpiolib.c  41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h  44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm  153
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c  27
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c  18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c  40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c  57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c  29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c  20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_status.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c  23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c  13
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c  6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c  150
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h  15
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c  166
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c  167
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c  168
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c  41
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c  43
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c  167
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c  138
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c  168
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c  25
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h  4
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6505.c  1
-rw-r--r--  drivers/gpu/drm/display/drm_dp_dual_mode_helper.c  4
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c  55
-rw-r--r--  drivers/gpu/drm/drm_panel_orientation_quirks.c  18
-rw-r--r--  drivers/gpu/drm/drm_panic.c  10
-rw-r--r--  drivers/gpu/drm/mcde/mcde_drv.c  1
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c  28
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c  2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  8
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.c  2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_perfmon.c  2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c  2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c  11
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_engine_regs.h  1
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_gt_regs.h  3
-rw-r--r--  drivers/gpu/drm/xe/xe_devcoredump.c  146
-rw-r--r--  drivers/gpu/drm/xe/xe_devcoredump.h  6
-rw-r--r--  drivers/gpu/drm/xe/xe_devcoredump_types.h  3
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c  1
-rw-r--r--  drivers/gpu/drm/xe/xe_force_wake.h  16
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_topology.c  14
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct.c  18
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_log.c  40
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c  2
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine.c  1
-rw-r--r--  drivers/gpu/drm/xe/xe_pci.c  2
-rw-r--r--  drivers/gpu/drm/xe/xe_query.c  42
-rw-r--r--  drivers/gpu/drm/xe/xe_wa.c  47
-rw-r--r--  drivers/gpu/drm/xe/xe_wa_oob.rules  2
-rw-r--r--  drivers/hid/hid-core.c  5
-rw-r--r--  drivers/hid/hid-generic.c  3
-rw-r--r--  drivers/hid/hid-ids.h  1
-rw-r--r--  drivers/hid/hid-magicmouse.c  56
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c  20
-rw-r--r--  drivers/hid/wacom_sys.c  3
-rw-r--r--  drivers/hwmon/nct6775-platform.c  2
-rw-r--r--  drivers/i2c/busses/Kconfig  1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c  6
-rw-r--r--  drivers/i3c/master.c  85
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dma.c  2
-rw-r--r--  drivers/iio/adc/ad7192.c  3
-rw-r--r--  drivers/iio/light/ltr501.c  2
-rw-r--r--  drivers/iio/magnetometer/af8133j.c  3
-rw-r--r--  drivers/iio/magnetometer/yamaha-yas530.c  13
-rw-r--r--  drivers/iommu/amd/io_pgtable.c  11
-rw-r--r--  drivers/iommu/iommufd/fault.c  2
-rw-r--r--  drivers/irqchip/Kconfig  2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c  50
-rw-r--r--  drivers/leds/led-class.c  14
-rw-r--r--  drivers/mailbox/pcc.c  61
-rw-r--r--  drivers/md/bcache/super.c  2
-rw-r--r--  drivers/media/pci/intel/ipu6/Kconfig  2
-rw-r--r--  drivers/media/pci/intel/ipu6/ipu6-isys-queue.c  66
-rw-r--r--  drivers/media/pci/intel/ipu6/ipu6-isys-queue.h  1
-rw-r--r--  drivers/media/pci/intel/ipu6/ipu6-isys.c  19
-rw-r--r--  drivers/media/usb/cx231xx/cx231xx-cards.c  2
-rw-r--r--  drivers/media/usb/uvc/uvc_driver.c  31
-rw-r--r--  drivers/misc/eeprom/eeprom_93cx6.c  10
-rw-r--r--  drivers/mmc/core/block.c  26
-rw-r--r--  drivers/mmc/core/bus.c  6
-rw-r--r--  drivers/mmc/core/card.h  10
-rw-r--r--  drivers/mmc/core/core.c  3
-rw-r--r--  drivers/mmc/core/quirks.h  9
-rw-r--r--  drivers/mmc/core/sd.c  30
-rw-r--r--  drivers/mmc/core/sd.h  2
-rw-r--r--  drivers/mmc/core/sdio.c  2
-rw-r--r--  drivers/mmc/host/mtk-sd.c  64
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c  6
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c  72
-rw-r--r--  drivers/mmc/host/sdhci-pci.h  1
-rw-r--r--  drivers/net/can/c_can/c_can_main.c  26
-rw-r--r--  drivers/net/can/dev/dev.c  2
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c  58
-rw-r--r--  drivers/net/can/m_can/m_can.c  33
-rw-r--r--  drivers/net/can/sja1000/sja1000.c  67
-rw-r--r--  drivers/net/can/spi/hi311x.c  50
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c  29
-rw-r--r--  drivers/net/can/sun4i_can.c  22
-rw-r--r--  drivers/net/can/usb/ems_usb.c  58
-rw-r--r--  drivers/net/can/usb/f81604.c  10
-rw-r--r--  drivers/net/can/usb/gs_usb.c  25
-rw-r--r--  drivers/net/dsa/qca/qca8k-8xxx.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  8
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c  3
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx_phy.c  2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c  1
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.h  3
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c  5
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c  2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c  25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c  8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c  3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h  5
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c  1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ipsec.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c  13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c  72
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c  1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c  4
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c  14
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h  5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c  5
-rw-r--r--  drivers/net/geneve.c  2
-rw-r--r--  drivers/net/phy/microchip.c  21
-rw-r--r--  drivers/net/phy/sfp.c  3
-rw-r--r--  drivers/net/virtio_net.c  12
-rw-r--r--  drivers/net/wireless/ath/ath10k/sdio.c  6
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.c  18
-rw-r--r--  drivers/net/wireless/ath/ath5k/pci.c  2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c  2
-rw-r--r--  drivers/net/wireless/intel/ipw2x00/libipw_rx.c  8
-rw-r--r--  drivers/net/wireless/realtek/rtw88/sdio.c  6
-rw-r--r--  drivers/net/wireless/realtek/rtw88/usb.c  5
-rw-r--r--  drivers/net/wireless/realtek/rtw89/fw.c  3
-rw-r--r--  drivers/nvdimm/dax_devs.c  4
-rw-r--r--  drivers/nvdimm/nd.h  7
-rw-r--r--  drivers/nvme/host/core.c  25
-rw-r--r--  drivers/nvme/host/rdma.c  8
-rw-r--r--  drivers/nvme/host/tcp.c  2
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c  1
-rw-r--r--  drivers/pci/controller/plda/pcie-starfive.c  10
-rw-r--r--  drivers/pci/controller/vmd.c  17
-rw-r--r--  drivers/pci/pci-sysfs.c  26
-rw-r--r--  drivers/pci/pci.c  2
-rw-r--r--  drivers/pci/pci.h  1
-rw-r--r--  drivers/pci/probe.c  30
-rw-r--r--  drivers/pci/quirks.c  15
-rw-r--r--  drivers/pinctrl/core.c  3
-rw-r--r--  drivers/pinctrl/core.h  1
-rw-r--r--  drivers/pinctrl/freescale/Kconfig  2
-rw-r--r--  drivers/pinctrl/pinmux.c  173
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-spmi-gpio.c  2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-spmi-mpp.c  1
-rw-r--r--  drivers/platform/x86/asus-wmi.c  11
-rw-r--r--  drivers/pmdomain/core.c  37
-rw-r--r--  drivers/pmdomain/imx/gpcv2.c  2
-rw-r--r--  drivers/ptp/ptp_clock.c  3
-rw-r--r--  drivers/regulator/qcom-rpmh-regulator.c  83
-rw-r--r--  drivers/remoteproc/qcom_q6v5_pas.c  1
-rw-r--r--  drivers/rtc/rtc-cmos.c  31
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c  100
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c  21
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  36
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  41
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c  1
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c  124
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c  1
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  15
-rw-r--r--  drivers/scsi/scsi_debug.c  2
-rw-r--r--  drivers/scsi/sg.c  2
-rw-r--r--  drivers/scsi/st.c  31
-rw-r--r--  drivers/soc/imx/soc-imx8m.c  107
-rw-r--r--  drivers/soc/qcom/llcc-qcom.c  2644
-rw-r--r--  drivers/soc/qcom/qcom_pd_mapper.c  1
-rw-r--r--  drivers/spi/spi-fsl-lpspi.c  7
-rw-r--r--  drivers/spi/spi-mpc52xx.c  1
-rw-r--r--  drivers/thermal/qcom/tsens-v1.c  21
-rw-r--r--  drivers/thermal/qcom/tsens.c  3
-rw-r--r--  drivers/thermal/qcom/tsens.h  2
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c  5
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c  6
-rw-r--r--  drivers/ufs/core/ufs_bsg.c  2
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h  1
-rw-r--r--  drivers/ufs/core/ufshcd.c  59
-rw-r--r--  drivers/ufs/host/cdns-pltfrm.c  4
-rw-r--r--  drivers/ufs/host/tc-dwc-g210-pltfrm.c  5
-rw-r--r--  drivers/ufs/host/ufs-exynos.c  3
-rw-r--r--  drivers/ufs/host/ufs-hisi.c  4
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c  5
-rw-r--r--  drivers/ufs/host/ufs-qcom.c  7
-rw-r--r--  drivers/ufs/host/ufs-renesas.c  13
-rw-r--r--  drivers/ufs/host/ufs-sprd.c  5
-rw-r--r--  drivers/ufs/host/ufshcd-pltfrm.c  16
-rw-r--r--  drivers/ufs/host/ufshcd-pltfrm.h  1
-rw-r--r--  drivers/usb/chipidea/ci.h  2
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c  1
-rw-r--r--  drivers/usb/chipidea/core.c  2
-rw-r--r--  drivers/usb/chipidea/udc.c  156
-rw-r--r--  drivers/usb/chipidea/udc.h  2
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_acpi.c  56
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_glink.c  10
-rw-r--r--  drivers/vfio/pci/mlx5/cmd.c  47
-rw-r--r--  drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c  6
-rw-r--r--  drivers/watchdog/apple_wdt.c  2
-rw-r--r--  drivers/watchdog/iTCO_wdt.c  21
-rw-r--r--  drivers/watchdog/mtk_wdt.c  6
-rw-r--r--  drivers/watchdog/rti_wdt.c  3
-rw-r--r--  drivers/watchdog/xilinx_wwdt.c  75
283 files changed, 6619 insertions(+), 1927 deletions(-)
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index bf10156c334e..f139c564eadf 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -34,6 +34,7 @@
MODULE_IMPORT_NS(DMA_BUF);
+#define PCI_DEV_AIC080 0xa080
#define PCI_DEV_AIC100 0xa100
#define QAIC_NAME "qaic"
#define QAIC_DESC "Qualcomm Cloud AI Accelerators"
@@ -365,7 +366,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
return NULL;
qdev->dev_state = QAIC_OFFLINE;
- if (id->device == PCI_DEV_AIC100) {
+ if (id->device == PCI_DEV_AIC080 || id->device == PCI_DEV_AIC100) {
qdev->num_dbc = 16;
qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
if (!qdev->dbc)
@@ -607,6 +608,7 @@ static struct mhi_driver qaic_mhi_driver = {
};
static const struct pci_device_id qaic_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC080), },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
{ }
};
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 015bd8e66c1c..d507d5e08435 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -551,6 +551,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Apple MacBook Air 7,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir7,2"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Apple MacBook Air 9,1 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -566,6 +574,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ .callback = video_detect_force_native,
+ /* Apple MacBook Pro 11,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro11,2"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
/* Apple MacBook Pro 12,1 */
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 6af546b21574..cb45ef5240da 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -12,6 +12,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
@@ -295,6 +296,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/*
* 2. Devices which also have the skip i2c/serdev quirks and which
* need the x86-android-tablets module to properly work.
+ * Sorted alphabetically.
*/
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
{
@@ -308,6 +310,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
+ /* Acer Iconia One 8 A1-840 (non FHD version) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "04/01/2014"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
+ /* Asus ME176C tablet */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
@@ -318,23 +333,24 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
- /* Lenovo Yoga Book X90F/L */
+ /* Asus TF103C transformer 2-in-1 */
.matches = {
- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
- DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_UART1_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
+ /* Lenovo Yoga Book X90F/L */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_UART1_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
@@ -392,6 +408,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
+ /* Vexia Edu Atla 10 tablet 9V version */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_UART1_SKIP |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
/* Whitelabel (sold as various brands) TM800A550L */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -411,6 +440,7 @@ static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
{ "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
{ "10EC5651", 0 }, /* RealTek ALC5651 audio codec */
{ "INT33F4", 0 }, /* X-Powers AXP288 PMIC */
+ { "INT33F5", 0 }, /* TI Dollar Cove PMIC */
{ "INT33FD", 0 }, /* Intel Crystal Cove PMIC */
{ "INT34D3", 0 }, /* Intel Whiskey Cove PMIC */
{ "NPCE69A", 0 }, /* Asus Transformer keyboard dock */
@@ -439,18 +469,35 @@ static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bo
struct acpi_device *adev = ACPI_COMPANION(controller_parent);
const struct dmi_system_id *dmi_id;
long quirks = 0;
- u64 uid;
- int ret;
+ u64 uid = 0;
- ret = acpi_dev_uid_to_integer(adev, &uid);
- if (ret)
+ dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ if (!dmi_id)
return 0;
- dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
- if (dmi_id)
- quirks = (unsigned long)dmi_id->driver_data;
+ quirks = (unsigned long)dmi_id->driver_data;
+
+ /* uid is left at 0 on errors and 0 is not a valid UART UID */
+ acpi_dev_uid_to_integer(adev, &uid);
+
+ /* For PCI UARTs without an UID */
+ if (!uid && dev_is_pci(controller_parent)) {
+ struct pci_dev *pdev = to_pci_dev(controller_parent);
+
+ /*
+ * Devfn values for PCI UARTs on Bay Trail SoCs, which are
+ * the only devices where this fallback is necessary.
+ */
+ if (pdev->devfn == PCI_DEVFN(0x1e, 3))
+ uid = 1;
+ else if (pdev->devfn == PCI_DEVFN(0x1e, 4))
+ uid = 2;
+ }
+
+ if (!uid)
+ return 0;
- if (!dev_is_platform(controller_parent)) {
+ if (!dev_is_platform(controller_parent) && !dev_is_pci(controller_parent)) {
/* PNP enumerated UARTs */
if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1)
*skip = true;
@@ -505,7 +552,7 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
* Set skip to true so that the tty core creates a serdev ctrl device.
* The backlight driver will manually create the serdev client device.
*/
- if (acpi_dev_hid_match(adev, "DELL0501")) {
+ if (adev && acpi_dev_hid_match(adev, "DELL0501")) {
*skip = true;
/*
* Create a platform dev for dell-uart-backlight to bind to.
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index e18701676426..c99f2ab105e5 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -208,6 +208,10 @@ static int __init numa_register_nodes(void)
{
int nid;
+ /* Check the validity of the memblock/node mapping */
+ if (!memblock_validate_numa_coverage(0))
+ return -EINVAL;
+
/* Finally register nodes. */
for_each_node_mask(nid, numa_nodes_parsed) {
unsigned long start_pfn, end_pfn;
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 7a7609298e18..89410127089b 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -58,7 +58,7 @@ bool last_level_cache_is_valid(unsigned int cpu)
{
struct cacheinfo *llc;
- if (!cache_leaves(cpu))
+ if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
return false;
llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
@@ -463,11 +463,9 @@ int __weak populate_cache_leaves(unsigned int cpu)
return -ENOENT;
}
-static inline
-int allocate_cache_info(int cpu)
+static inline int allocate_cache_info(int cpu)
{
- per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
- sizeof(struct cacheinfo), GFP_ATOMIC);
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
if (!per_cpu_cacheinfo(cpu)) {
cache_leaves(cpu) = 0;
return -ENOMEM;
@@ -539,7 +537,11 @@ static inline int init_level_allocate_ci(unsigned int cpu)
*/
ci_cacheinfo(cpu)->early_ci_levels = false;
- if (cache_leaves(cpu) <= early_leaves)
+ /*
+ * Some architectures (e.g., x86) do not use early initialization.
+ * Allocate memory now in such case.
+ */
+ if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
return 0;
kfree(per_cpu_cacheinfo(cpu));
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 83acccdc1008..bdb450436cbc 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -59,6 +59,7 @@ struct regmap {
unsigned long raw_spinlock_flags;
};
};
+ struct lock_class_key *lock_key;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 8d27d3653ea3..23da7b31d715 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -355,6 +355,9 @@ static int regcache_maple_init(struct regmap *map)
mt_init(mt);
+ if (!mt_external_lock(mt) && map->lock_key)
+ lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);
+
if (!map->num_reg_defaults)
return 0;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 4ded93687c1f..e3e2afc2c83c 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -598,6 +598,17 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
+static int dev_get_regmap_match(struct device *dev, void *res, void *data);
+
+static int regmap_detach_dev(struct device *dev, struct regmap *map)
+{
+ if (!dev)
+ return 0;
+
+ return devres_release(dev, dev_get_regmap_release,
+ dev_get_regmap_match, (void *)map->name);
+}
+
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
const struct regmap_config *config)
{
@@ -745,6 +756,7 @@ struct regmap *__regmap_init(struct device *dev,
lock_key, lock_name);
}
map->lock_arg = map;
+ map->lock_key = lock_key;
}
/*
@@ -1444,6 +1456,7 @@ void regmap_exit(struct regmap *map)
{
struct regmap_async *async;
+ regmap_detach_dev(map->dev, map);
regcache_exit(map);
regmap_debugfs_exit(map);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d6a1ba969266..d0432b1707ce 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -298,17 +298,30 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
/*
* Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
* See the comment in writeback_store.
+ *
+ * Also do not mark ZRAM_SAME slots as ZRAM_IDLE, because no
+ * post-processing (recompress, writeback) happens to the
+ * ZRAM_SAME slot.
+ *
+ * And ZRAM_WB slots simply cannot be ZRAM_IDLE.
*/
zram_slot_lock(zram, index);
- if (zram_allocated(zram, index) &&
- !zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
+ if (!zram_allocated(zram, index) ||
+ zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME)) {
+ zram_slot_unlock(zram, index);
+ continue;
+ }
+
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
- is_idle = !cutoff || ktime_after(cutoff,
- zram->table[index].ac_time);
+ is_idle = !cutoff ||
+ ktime_after(cutoff, zram->table[index].ac_time);
#endif
- if (is_idle)
- zram_set_flag(zram, index, ZRAM_IDLE);
- }
+ if (is_idle)
+ zram_set_flag(zram, index, ZRAM_IDLE);
+ else
+ zram_clear_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
}
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 4ccaddb46ddd..11755cb1eb16 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -524,6 +524,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe123), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -563,6 +565,16 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Additional MediaTek MT7920 Bluetooth devices */
+ { USB_DEVICE(0x0489, 0xe134), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3620), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3621), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3622), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Additional MediaTek MT7921 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
@@ -630,12 +642,24 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7925 Bluetooth devices */
+ { USB_DEVICE(0x0489, 0xe111), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe118), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe11e), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe124), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe139), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14f), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe150), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe151), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK |
@@ -3897,6 +3921,8 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT, &hdev->quirks);
}
if (!reset)
diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
index fdd8ea989ed2..bc21b2921449 100644
--- a/drivers/clk/clk-en7523.c
+++ b/drivers/clk/clk-en7523.c
@@ -508,6 +508,8 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
u32 rate;
int i;
+ clk_data->num = EN7523_NUM_CLOCKS;
+
for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
const struct en_clk_desc *desc = &en7523_base_clks[i];
u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;
@@ -529,8 +531,6 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
hw = en7523_register_pcie_clk(dev, np_base);
clk_data->hws[EN7523_CLK_PCIE] = hw;
-
- clk_data->num = EN7523_NUM_CLOCKS;
}
static int en7523_clk_hw_init(struct platform_device *pdev,
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 4444dafa4e3d..9ba675f229b1 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -959,10 +959,10 @@ config SM_DISPCC_8450
config SM_DISPCC_8550
tristate "SM8550 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
- depends on SM_GCC_8550 || SM_GCC_8650
+ depends on SM_GCC_8550 || SM_GCC_8650 || SAR_GCC_2130P
help
Support for the display clock controller on Qualcomm Technologies, Inc
- SM8550 or SM8650 devices.
+ SAR2130P, SM8550 or SM8650 devices.
Say Y if you want to support display devices and functionality such as
splash screen.
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index be9bee6ab65f..49687512184b 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -267,6 +267,17 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x30,
[PLL_OFF_STATUS] = 0x3c,
},
+ [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_TEST_CTL] = 0x0c,
+ [PLL_OFF_TEST_CTL_U] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL_U] = 0x1c,
+ [PLL_OFF_STATUS] = 0x20,
+ },
+
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 55eca04b23a1..c6d1b8429f95 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -32,6 +32,7 @@ enum {
CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
CLK_ALPHA_PLL_TYPE_STROMER,
CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
+ CLK_ALPHA_PLL_TYPE_NSS_HUAYRA,
CLK_ALPHA_PLL_TYPE_MAX,
};
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 8e0f3372dc7a..80f1f4fcd52a 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -198,6 +198,7 @@ extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops;
extern const struct clk_ops clk_rcg2_shared_ops;
+extern const struct clk_ops clk_rcg2_shared_floor_ops;
extern const struct clk_ops clk_rcg2_shared_no_init_park_ops;
extern const struct clk_ops clk_dp_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index bf26c5448f00..bf6406f5279a 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1186,15 +1186,23 @@ clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
return clk_rcg2_clear_force_enable(hw);
}
-static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+static int __clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate,
+ enum freq_policy policy)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f;
- f = qcom_find_freq(rcg->freq_tbl, rate);
- if (!f)
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(rcg->freq_tbl, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ break;
+ default:
return -EINVAL;
+ }
/*
* In case clock is disabled, update the M, N and D registers, cache
@@ -1207,10 +1215,28 @@ static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
return clk_rcg2_shared_force_enable_clear(hw, f);
}
+static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
+}
+
static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate, u8 index)
{
- return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
+ return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
+}
+
+static int clk_rcg2_shared_set_floor_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
+}
+
+static int clk_rcg2_shared_set_floor_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
}
static int clk_rcg2_shared_enable(struct clk_hw *hw)
@@ -1348,6 +1374,18 @@ const struct clk_ops clk_rcg2_shared_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
+const struct clk_ops clk_rcg2_shared_floor_ops = {
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_shared_get_parent,
+ .set_parent = clk_rcg2_shared_set_parent,
+ .recalc_rate = clk_rcg2_shared_recalc_rate,
+ .determine_rate = clk_rcg2_determine_floor_rate,
+ .set_rate = clk_rcg2_shared_set_floor_rate,
+ .set_rate_and_parent = clk_rcg2_shared_set_floor_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_floor_ops);
+
static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 4acde937114a..eefc322ce367 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -389,6 +389,18 @@ DEFINE_CLK_RPMH_BCM(ipa, "IP0");
DEFINE_CLK_RPMH_BCM(pka, "PKA0");
DEFINE_CLK_RPMH_BCM(qpic_clk, "QP0");
+static struct clk_hw *sar2130p_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div1.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div1_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sar2130p = {
+ .clks = sar2130p_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sar2130p_rpmh_clocks),
+};
+
static struct clk_hw *sdm845_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
[RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
@@ -880,6 +892,7 @@ static int clk_rpmh_probe(struct platform_device *pdev)
static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
{ .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p},
+ { .compatible = "qcom,sar2130p-rpmh-clk", .data = &clk_rpmh_sar2130p},
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp},
diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
index 7f9021ca0ecb..e41d4104d770 100644
--- a/drivers/clk/qcom/dispcc-sm8550.c
+++ b/drivers/clk/qcom/dispcc-sm8550.c
@@ -75,7 +75,7 @@ static struct pll_vco lucid_ole_vco[] = {
{ 249600000, 2000000000, 0 },
};
-static const struct alpha_pll_config disp_cc_pll0_config = {
+static struct alpha_pll_config disp_cc_pll0_config = {
.l = 0xd,
.alpha = 0x6492,
.config_ctl_val = 0x20485699,
@@ -106,7 +106,7 @@ static struct clk_alpha_pll disp_cc_pll0 = {
},
};
-static const struct alpha_pll_config disp_cc_pll1_config = {
+static struct alpha_pll_config disp_cc_pll1_config = {
.l = 0x1f,
.alpha = 0x4000,
.config_ctl_val = 0x20485699,
@@ -594,6 +594,13 @@ static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sar2130p[] = {
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sm8650[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
@@ -1750,6 +1757,7 @@ static struct qcom_cc_desc disp_cc_sm8550_desc = {
};
static const struct of_device_id disp_cc_sm8550_match_table[] = {
+ { .compatible = "qcom,sar2130p-dispcc" },
{ .compatible = "qcom,sm8550-dispcc" },
{ .compatible = "qcom,sm8650-dispcc" },
{ }
@@ -1780,6 +1788,12 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev)
disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sm8650;
disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr.hw.init->parent_hws[0] =
&disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw;
+ } else if (of_device_is_compatible(pdev->dev.of_node, "qcom,sar2130p-dispcc")) {
+ disp_cc_pll0_config.l = 0x1f;
+ disp_cc_pll0_config.alpha = 0x4000;
+ disp_cc_pll0_config.user_ctl_val = 0x1;
+ disp_cc_pll1_config.user_ctl_val = 0x1;
+ disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sar2130p;
}
clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
diff --git a/drivers/clk/qcom/tcsrcc-sm8550.c b/drivers/clk/qcom/tcsrcc-sm8550.c
index e5e8f2e82b94..41d73f92a000 100644
--- a/drivers/clk/qcom/tcsrcc-sm8550.c
+++ b/drivers/clk/qcom/tcsrcc-sm8550.c
@@ -129,6 +129,13 @@ static struct clk_branch tcsr_usb3_clkref_en = {
},
};
+static struct clk_regmap *tcsr_cc_sar2130p_clocks[] = {
+ [TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
+ [TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
+ [TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr,
+ [TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr,
+};
+
static struct clk_regmap *tcsr_cc_sm8550_clocks[] = {
[TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
[TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
@@ -146,6 +153,12 @@ static const struct regmap_config tcsr_cc_sm8550_regmap_config = {
.fast_io = true,
};
+static const struct qcom_cc_desc tcsr_cc_sar2130p_desc = {
+ .config = &tcsr_cc_sm8550_regmap_config,
+ .clks = tcsr_cc_sar2130p_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_sar2130p_clocks),
+};
+
static const struct qcom_cc_desc tcsr_cc_sm8550_desc = {
.config = &tcsr_cc_sm8550_regmap_config,
.clks = tcsr_cc_sm8550_clocks,
@@ -153,7 +166,8 @@ static const struct qcom_cc_desc tcsr_cc_sm8550_desc = {
};
static const struct of_device_id tcsr_cc_sm8550_match_table[] = {
- { .compatible = "qcom,sm8550-tcsr" },
+ { .compatible = "qcom,sar2130p-tcsr", .data = &tcsr_cc_sar2130p_desc },
+ { .compatible = "qcom,sm8550-tcsr", .data = &tcsr_cc_sm8550_desc },
{ }
};
MODULE_DEVICE_TABLE(of, tcsr_cc_sm8550_match_table);
@@ -162,7 +176,7 @@ static int tcsr_cc_sm8550_probe(struct platform_device *pdev)
{
struct regmap *regmap;
- regmap = qcom_cc_map(pdev, &tcsr_cc_sm8550_desc);
+ regmap = qcom_cc_map(pdev, of_device_get_match_data(&pdev->dev));
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 8a08ffde31e7..6657d4b30af9 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
static bool dma_fence_array_signaled(struct dma_fence *fence)
{
struct dma_fence_array *array = to_dma_fence_array(fence);
+ int num_pending;
+ unsigned int i;
- if (atomic_read(&array->num_pending) > 0)
+ /*
+ * We need to read num_pending before checking the enable_signal bit
+ * to avoid racing with the enable_signaling() implementation, which
+ * might decrement the counter, and cause a partial check.
+ * atomic_read_acquire() pairs with atomic_dec_and_test() in
+ * dma_fence_array_enable_signaling()
+ *
+ * The !--num_pending check is here to account for the any_signaled case
+ * if we race with enable_signaling(), that means the !num_pending check
+ * in the is_signalling_enabled branch might be outdated (num_pending
+ * might have been decremented), but that's fine. The user will get the
+ * right value when testing again later.
+ */
+ num_pending = atomic_read_acquire(&array->num_pending);
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) {
+ if (num_pending <= 0)
+ goto signal;
return false;
+ }
+
+ for (i = 0; i < array->num_fences; ++i) {
+ if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
+ goto signal;
+ }
+ return false;
+signal:
dma_fence_array_clear_pending_error(array);
return true;
}
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index 628af51c81af..6345062731f1 100644
--- a/drivers/dma-buf/dma-fence-unwrap.c
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -12,6 +12,7 @@
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>
+#include <linux/sort.h>
/* Internal helper to start new array iteration, don't use directly */
static struct dma_fence *
@@ -59,6 +60,25 @@ struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
+
+static int fence_cmp(const void *_a, const void *_b)
+{
+ struct dma_fence *a = *(struct dma_fence **)_a;
+ struct dma_fence *b = *(struct dma_fence **)_b;
+
+ if (a->context < b->context)
+ return -1;
+ else if (a->context > b->context)
+ return 1;
+
+ if (dma_fence_is_later(b, a))
+ return 1;
+ else if (dma_fence_is_later(a, b))
+ return -1;
+
+ return 0;
+}
+
/* Implementation for the dma_fence_merge() marco, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
@@ -67,8 +87,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence_array *result;
struct dma_fence *tmp, **array;
ktime_t timestamp;
- unsigned int i;
- size_t count;
+ int i, j, count;
count = 0;
timestamp = ns_to_ktime(0);
@@ -96,78 +115,55 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
if (!array)
return NULL;
- /*
- * This trashes the input fence array and uses it as position for the
- * following merge loop. This works because the dma_fence_merge()
- * wrapper macro is creating this temporary array on the stack together
- * with the iterators.
- */
- for (i = 0; i < num_fences; ++i)
- fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
-
count = 0;
- do {
- unsigned int sel;
-
-restart:
- tmp = NULL;
- for (i = 0; i < num_fences; ++i) {
- struct dma_fence *next;
-
- while (fences[i] && dma_fence_is_signaled(fences[i]))
- fences[i] = dma_fence_unwrap_next(&iter[i]);
-
- next = fences[i];
- if (!next)
- continue;
-
- /*
- * We can't guarantee that inpute fences are ordered by
- * context, but it is still quite likely when this
- * function is used multiple times. So attempt to order
- * the fences by context as we pass over them and merge
- * fences with the same context.
- */
- if (!tmp || tmp->context > next->context) {
- tmp = next;
- sel = i;
-
- } else if (tmp->context < next->context) {
- continue;
-
- } else if (dma_fence_is_later(tmp, next)) {
- fences[i] = dma_fence_unwrap_next(&iter[i]);
- goto restart;
+ for (i = 0; i < num_fences; ++i) {
+ dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
+ if (!dma_fence_is_signaled(tmp)) {
+ array[count++] = dma_fence_get(tmp);
} else {
- fences[sel] = dma_fence_unwrap_next(&iter[sel]);
- goto restart;
+ ktime_t t = dma_fence_timestamp(tmp);
+
+ if (ktime_after(t, timestamp))
+ timestamp = t;
}
}
+ }
- if (tmp) {
- array[count++] = dma_fence_get(tmp);
- fences[sel] = dma_fence_unwrap_next(&iter[sel]);
- }
- } while (tmp);
+ if (count == 0 || count == 1)
+ goto return_fastpath;
- if (count == 0) {
- tmp = dma_fence_allocate_private_stub(ktime_get());
- goto return_tmp;
- }
+ sort(array, count, sizeof(*array), fence_cmp, NULL);
- if (count == 1) {
- tmp = array[0];
- goto return_tmp;
+ /*
+ * Only keep the most recent fence for each context.
+ */
+ j = 0;
+ for (i = 1; i < count; i++) {
+ if (array[i]->context == array[j]->context)
+ dma_fence_put(array[i]);
+ else
+ array[++j] = array[i];
}
-
- result = dma_fence_array_create(count, array,
- dma_fence_context_alloc(1),
- 1, false);
- if (!result) {
- tmp = NULL;
- goto return_tmp;
+ count = ++j;
+
+ if (count > 1) {
+ result = dma_fence_array_create(count, array,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!result) {
+ for (i = 0; i < count; i++)
+ dma_fence_put(array[i]);
+ tmp = NULL;
+ goto return_tmp;
+ }
+ return &result->base;
}
- return &result->base;
+
+return_fastpath:
+ if (count == 0)
+ tmp = dma_fence_allocate_private_stub(timestamp);
+ else
+ tmp = array[0];
return_tmp:
kfree(array);
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 2e4260ba5f79..14afd68664a9 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1742,9 +1742,11 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
+ any potential issues with this, only allow validated machines for now.
*/
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
+ { .compatible = "dell,xps13-9345" },
{ .compatible = "lenovo,flex-5g" },
{ .compatible = "lenovo,thinkpad-t14s" },
{ .compatible = "lenovo,thinkpad-x13s", },
+ { .compatible = "lenovo,yoga-slim7x" },
{ .compatible = "microsoft,romulus13", },
{ .compatible = "microsoft,romulus15", },
{ .compatible = "qcom,sc8180x-primus" },
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 017c7170eb57..620793740c66 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -328,6 +328,7 @@ static const struct irq_domain_ops grgpio_irq_domain_ops = {
static int grgpio_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
+ struct device *dev = &ofdev->dev;
void __iomem *regs;
struct gpio_chip *gc;
struct grgpio_priv *priv;
@@ -337,7 +338,7 @@ static int grgpio_probe(struct platform_device *ofdev)
int size;
int i;
- priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -346,28 +347,31 @@ static int grgpio_probe(struct platform_device *ofdev)
return PTR_ERR(regs);
gc = &priv->gc;
- err = bgpio_init(gc, &ofdev->dev, 4, regs + GRGPIO_DATA,
+ err = bgpio_init(gc, dev, 4, regs + GRGPIO_DATA,
regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL,
BGPIOF_BIG_ENDIAN_BYTE_ORDER);
if (err) {
- dev_err(&ofdev->dev, "bgpio_init() failed\n");
+ dev_err(dev, "bgpio_init() failed\n");
return err;
}
priv->regs = regs;
priv->imask = gc->read_reg(regs + GRGPIO_IMASK);
- priv->dev = &ofdev->dev;
+ priv->dev = dev;
gc->owner = THIS_MODULE;
gc->to_irq = grgpio_to_irq;
- gc->label = devm_kasprintf(&ofdev->dev, GFP_KERNEL, "%pOF", np);
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
gc->base = -1;
err = of_property_read_u32(np, "nbits", &prop);
if (err || prop <= 0 || prop > GRGPIO_MAX_NGPIO) {
gc->ngpio = GRGPIO_MAX_NGPIO;
- dev_dbg(&ofdev->dev,
- "No or invalid nbits property: assume %d\n", gc->ngpio);
+ dev_dbg(dev, "No or invalid nbits property: assume %d\n",
+ gc->ngpio);
} else {
gc->ngpio = prop;
}
@@ -379,7 +383,7 @@ static int grgpio_probe(struct platform_device *ofdev)
irqmap = (s32 *)of_get_property(np, "irqmap", &size);
if (irqmap) {
if (size < gc->ngpio) {
- dev_err(&ofdev->dev,
+ dev_err(dev,
"irqmap shorter than ngpio (%d < %d)\n",
size, gc->ngpio);
return -EINVAL;
@@ -389,7 +393,7 @@ static int grgpio_probe(struct platform_device *ofdev)
&grgpio_irq_domain_ops,
priv);
if (!priv->domain) {
- dev_err(&ofdev->dev, "Could not add irq domain\n");
+ dev_err(dev, "Could not add irq domain\n");
return -EINVAL;
}
@@ -419,13 +423,13 @@ static int grgpio_probe(struct platform_device *ofdev)
err = gpiochip_add_data(gc, priv);
if (err) {
- dev_err(&ofdev->dev, "Could not add gpiochip\n");
+ dev_err(dev, "Could not add gpiochip\n");
if (priv->domain)
irq_domain_remove(priv->domain);
return err;
}
- dev_info(&ofdev->dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
+ dev_info(dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off");
return 0;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 2b02655abb56..44372f8647d5 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -14,6 +14,7 @@
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
@@ -713,6 +714,45 @@ bool gpiochip_line_is_valid(const struct gpio_chip *gc,
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
+static void gpiod_free_irqs(struct gpio_desc *desc)
+{
+ int irq = gpiod_to_irq(desc);
+ struct irq_desc *irqd = irq_to_desc(irq);
+ void *cookie;
+
+ for (;;) {
+ /*
+ * Make sure the action doesn't go away while we're
+ * dereferencing it. Retrieve and store the cookie value.
+ * If the irq is freed after we release the lock, that's
+ * alright - the underlying maple tree lookup will return NULL
+ * and nothing will happen in free_irq().
+ */
+ scoped_guard(mutex, &irqd->request_mutex) {
+ if (!irq_desc_has_action(irqd))
+ return;
+
+ cookie = irqd->action->dev_id;
+ }
+
+ free_irq(irq, cookie);
+ }
+}
+
+/*
+ * The chip is going away but there may be users who had requested interrupts
+ * on its GPIO lines who have no idea about its removal and have no way of
+ * being notified about it. We need to free any interrupts still in use here or
+ * we'll leak memory and resources (like procfs files).
+ */
+static void gpiochip_free_remaining_irqs(struct gpio_chip *gc)
+{
+ struct gpio_desc *desc;
+
+ for_each_gpio_desc_with_flag(gc, desc, FLAG_USED_AS_IRQ)
+ gpiod_free_irqs(desc);
+}
+
static void gpiodev_release(struct device *dev)
{
struct gpio_device *gdev = to_gpio_device(dev);
@@ -1125,6 +1165,7 @@ void gpiochip_remove(struct gpio_chip *gc)
/* FIXME: should the legacy sysfs handling be moved to gpio_device? */
gpiochip_sysfs_unregister(gdev);
gpiochip_free_hogs(gc);
+ gpiochip_free_remaining_irqs(gc);
scoped_guard(mutex, &gpio_devices_lock)
list_del_rcu(&gdev->list);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 7dd55ed57c1d..b8d4e07d2043 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -800,6 +800,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
return -EIO;
}
+ kfree(info);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1f08cb88d51b..51904906545e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3666,7 +3666,7 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
*
* @adev: amdgpu_device pointer
*
- * First resume function for hardware IPs. The list of all the hardware
+ * Second resume function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked and the resume callbacks are run for
* all blocks except COMMON, GMC, and IH. resume puts the hardware into a
* functional state after a suspend and updates the software state as
@@ -3684,6 +3684,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
r = adev->ip_blocks[i].version->funcs->resume(adev);
@@ -3699,6 +3700,36 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
}
/**
+ * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Third resume function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the resume callbacks are run for
+ * all DCE. resume puts the hardware into a functional state after a suspend
+ * and updates the software state as necessary. This function is also used
+ * for restoring the GPU after a GPU reset.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
+ continue;
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+/**
* amdgpu_device_ip_resume - run resume for hardware IPs
*
* @adev: amdgpu_device pointer
@@ -3727,6 +3758,13 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ if (r)
+ return r;
+
+ amdgpu_fence_driver_hw_init(adev);
+
+ r = amdgpu_device_ip_resume_phase3(adev);
+
return r;
}
@@ -4809,7 +4847,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
goto exit;
}
- amdgpu_fence_driver_hw_init(adev);
if (!adev->in_s0ix) {
r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
@@ -5431,6 +5468,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
+ r = amdgpu_device_ip_resume_phase3(tmp_adev);
+ if (r)
+ goto out;
+
if (vram_lost)
amdgpu_device_fill_reset_magic(tmp_adev);
@@ -6344,6 +6385,9 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
struct amdgpu_device *adev = drm_to_adev(dev);
int r;
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
r = pci_save_state(pdev);
if (!r) {
kfree(adev->pci_state);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 74adb983ab03..9f922ec50ea2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -812,7 +812,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
/* Map SG to device */
r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
if (r)
- goto release_sg;
+ goto release_sg_table;
/* convert SG to linear array of pages and dma addresses */
drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
@@ -820,6 +820,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
return 0;
+release_sg_table:
+ sg_free_table(ttm->sg);
release_sg:
kfree(ttm->sg);
ttm->sg = NULL;
@@ -1849,6 +1851,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
+ dma_set_max_seg_size(adev->dev, UINT_MAX);
/* No others user of address space so set it to 0 */
r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 785a343a95f0..e7cd51c95141 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2223,6 +2223,18 @@ static int gfx_v9_0_sw_init(void *handle)
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 2):
+ adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
+ if (adev->gfx.mec_fw_version >= 88) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
index 36c0292b5110..0b6bd09b7529 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,3 +24,45 @@
static const u32 __maybe_unused gfx_9_0_cleaner_shader_hex[] = {
/* Add the cleaner shader code here */
};
+
+/* Define the cleaner shader gfx_9_4_2 */
+static const u32 gfx_9_4_2_cleaner_shader_hex[] = {
+ 0xbf068100, 0xbf84003b,
+ 0xbf8a0000, 0xb07c0000,
+ 0xbe8200ff, 0x00000078,
+ 0xbf110802, 0x7e000280,
+ 0x7e020280, 0x7e040280,
+ 0x7e060280, 0x7e080280,
+ 0x7e0a0280, 0x7e0c0280,
+ 0x7e0e0280, 0x80828802,
+ 0xbe803202, 0xbf84fff5,
+ 0xbf9c0000, 0xbe8200ff,
+ 0x80000000, 0x86020102,
+ 0xbf840011, 0xbefe00c1,
+ 0xbeff00c1, 0xd28c0001,
+ 0x0001007f, 0xd28d0001,
+ 0x0002027e, 0x10020288,
+ 0xbe8200bf, 0xbefc00c1,
+ 0xd89c2000, 0x00020201,
+ 0xd89c6040, 0x00040401,
+ 0x320202ff, 0x00000400,
+ 0x80828102, 0xbf84fff8,
+ 0xbefc00ff, 0x0000005c,
+ 0xbf800000, 0xbe802c80,
+ 0xbe812c80, 0xbe822c80,
+ 0xbe832c80, 0x80fc847c,
+ 0xbf84fffa, 0xbee60080,
+ 0xbee70080, 0xbeea0180,
+ 0xbeec0180, 0xbeee0180,
+ 0xbef00180, 0xbef20180,
+ 0xbef40180, 0xbef60180,
+ 0xbef80180, 0xbefa0180,
+ 0xbf810000, 0xbf8d0001,
+ 0xbefc00ff, 0x0000005c,
+ 0xbf800000, 0xbe802c80,
+ 0xbe812c80, 0xbe822c80,
+ 0xbe832c80, 0x80fc847c,
+ 0xbf84fffa, 0xbee60080,
+ 0xbee70080, 0xbeea01ff,
+ 0x000000ee, 0xbf810000,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
new file mode 100644
index 000000000000..35b8cf9070bd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 Dwords (256 bytes) of the 192-Dword cleaner shader.
+// To turn this shader program on for compilation, change this to main and rename the shader main lower in the file to main_1
+
+// MI200 : Clear SGPRs, VGPRs and LDS
+// Uses two kernels launched separately:
+// 1. Clean VGPRs, LDS, and lower SGPRs
+// Launches one workgroup per CU, each workgroup with 4x wave64 per SIMD in the CU
+// Waves are "wave64" and have 128 VGPRs each, which uses all 512 VGPRs per SIMD
+// Waves in the workgroup share the 64KB of LDS
+// Each wave clears SGPRs 0 - 95. Because there are 4 waves/SIMD, this is physical SGPRs 0-383
+// Each wave clears 128 VGPRs, so all 512 in the SIMD
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+// 2. Clean remaining SGPRs
+// Launches a workgroup with 24 waves per workgroup, yielding 6 waves per SIMD in each CU
+// Waves are allocating 96 SGPRs
+// CP sets up SPI_RESOURCE_RESERVE_* registers to prevent these waves from allocating SGPRs 0-223.
+// As such, these 6 waves per SIMD are allocated physical SGPRs 224-799
+// Barriers do not work for >16 waves per workgroup, so we cannot start with S_BARRIER
+// Instead, the shader starts with an S_SETHALT 1. Once all waves are launched CP will send unhalt command
+// The shader then clears all SGPRs allocated to it, cleaning out physical SGPRs 224-799
+
+shader main
+ asic(MI200)
+ type(CS)
+ wave_size(64)
+// Note: original source code from SQ team
+
+// (theoretical fastest = ~512 clks vgpr + 1536 lds + ~128 sgpr = 2176 clks)
+
+	s_cmp_eq_u32 s0, 1				// Bit0 of sgpr0 is set by FW via COMPUTE_USER_DATA_3 to request a VGPR/LDS clear
+	s_cbranch_scc0 label_0023			// Clean VGPRs and LDS if sgpr0 of wave is set, scc = (s0 == 1)
+ S_BARRIER
+
+ s_movk_i32 m0, 0x0000
+ s_mov_b32 s2, 0x00000078 // Loop 128/8=16 times (loop unrolled for performance)
+ //
+ // CLEAR VGPRs
+ //
+ s_set_gpr_idx_on s2, 0x8 // enable Dest VGPR indexing
+label_0005:
+ v_mov_b32 v0, 0
+ v_mov_b32 v1, 0
+ v_mov_b32 v2, 0
+ v_mov_b32 v3, 0
+ v_mov_b32 v4, 0
+ v_mov_b32 v5, 0
+ v_mov_b32 v6, 0
+ v_mov_b32 v7, 0
+ s_sub_u32 s2, s2, 8
+ s_set_gpr_idx_idx s2
+ s_cbranch_scc0 label_0005
+ s_set_gpr_idx_off
+
+ //
+ //
+
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s1 // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+ s_cbranch_scc0 label_clean_sgpr_1 // Clean LDS if its first wave of ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8byte)
+ s_mov_b32 s2, 0x00000003f // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+ //
+ // CLEAR SGPRs
+ //
+label_clean_sgpr_1:
+ s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance)
+ s_nop 0
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc, flat scratch
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0 //clear vcc
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+s_endpgm
+
+label_0023:
+
+ s_sethalt 1
+
+ s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance)
+ s_nop 0
+label_sgpr_loop1:
+
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop1
+
+ //clear vcc, flat scratch
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0xee //clear vcc
+
+s_endpgm
+end
+
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index e019249883fb..194026e9be33 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -40,10 +40,12 @@
static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (!ring || !ring->funcs->emit_wreg)
+ if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
+ RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ }
}
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
@@ -54,11 +56,13 @@ static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 5))
return;
- if (!ring || !ring->funcs->emit_wreg)
+ if (!ring || !ring->funcs->emit_wreg) {
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
- else
+ RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
+ } else {
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+ }
}
static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index ed7facacf2fe..d3962d469088 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -31,10 +31,12 @@
static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (!ring || !ring->funcs->emit_wreg)
+ if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
+ RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ }
}
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
@@ -42,6 +44,7 @@ static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+ RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
} else {
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index 29c3484ae1f1..f52552c5fa27 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -31,13 +31,15 @@
static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (!ring || !ring->funcs->emit_wreg)
+ if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
- else
+ RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
+ }
}
static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index 33736d361dd0..6948fe9956ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -34,10 +34,12 @@
static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (!ring || !ring->funcs->emit_wreg)
+ if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
+ RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ }
}
static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 1c99bb09e2a1..63820329f67e 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -31,10 +31,12 @@
static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (!ring || !ring->funcs->emit_wreg)
+ if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
+ RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ }
}
static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 0fda70336300..6fca2915ea8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -116,6 +116,20 @@ static int vcn_v4_0_3_early_init(void *handle)
return amdgpu_vcn_early_init(adev);
}
+static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = 1;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
+
+ return 0;
+}
+
/**
* vcn_v4_0_3_sw_init - sw init for VCN block
*
@@ -148,8 +162,6 @@ static int vcn_v4_0_3_sw_init(void *handle)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
-
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -172,12 +184,7 @@ static int vcn_v4_0_3_sw_init(void *handle)
if (r)
return r;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
- fw_shared->sq.is_enabled = true;
-
- if (amdgpu_vcnfw_log)
- amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ vcn_v4_0_3_fw_shared_init(adev, i);
}
if (amdgpu_sriov_vf(adev)) {
@@ -273,6 +280,8 @@ static int vcn_v4_0_3_hw_init(void *handle)
}
} else {
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -296,6 +305,11 @@ static int vcn_v4_0_3_hw_init(void *handle)
regVCN_RB1_DB_CTRL);
}
+ /* Re-init fw_shared when RAS fatal error occurred */
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ if (!fw_shared->sq.is_enabled)
+ vcn_v4_0_3_fw_shared_init(adev, i);
+
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index ac439f0565e3..16f5561fb86e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -114,6 +114,33 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
+ if (enable) {
+ /* Unset the CLEAR_OVERFLOW bit to make sure the next step
+ * is switching the bit from 0 to 1
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ /* Clear RB_OVERFLOW bit */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ }
+
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 48caecf7e72e..8de61cc524c9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1509,6 +1509,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gfx.config.gc_tcp_size_per_cu) {
pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu;
pcache_info[i].cache_level = 1;
+ /* Cacheline size not available in IP discovery for gc943,gc944 */
+ pcache_info[i].cache_line_size = 128;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1520,6 +1522,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
pcache_info[i].cache_size =
adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
pcache_info[i].cache_level = 1;
+ pcache_info[i].cache_line_size = 64;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1530,6 +1533,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) {
pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
pcache_info[i].cache_level = 1;
+ pcache_info[i].cache_line_size = 64;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1540,6 +1544,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gfx.config.gc_tcc_size) {
pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size;
pcache_info[i].cache_level = 2;
+ pcache_info[i].cache_line_size = 128;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1550,6 +1555,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gmc.mall_size) {
pcache_info[i].cache_size = adev->gmc.mall_size / 1024;
pcache_info[i].cache_level = 3;
+ pcache_info[i].cache_line_size = 64;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index fad1c8f2bc83..b05be24531e1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -235,6 +235,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
*/
kfd->device_info.needs_pci_atomics = true;
kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
+ } else if (gc_version < IP_VERSION(13, 0, 0)) {
+ kfd->device_info.needs_pci_atomics = true;
+ kfd->device_info.no_atomic_fw_version = 2090;
} else {
kfd->device_info.needs_pci_atomics = true;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 24fbde7dd1c4..ad3a3aa72b51 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1910,7 +1910,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
else
init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
} else {
- init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3))
+ init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1);
+ else
+ init_data.flags.gpu_vm_support =
+ (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
}
adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
@@ -7337,10 +7341,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
enum dc_status dc_result = DC_OK;
+ uint8_t bpc_limit = 6;
if (!dm_state)
return NULL;
+ if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ bpc_limit = 8;
+
do {
stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
@@ -7361,11 +7370,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
if (dc_result != DC_OK) {
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
drm_mode->hdisplay,
drm_mode->vdisplay,
drm_mode->clock,
- dc_result,
+ dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
+ dc_color_depth_to_str(stream->timing.display_color_depth),
dc_status_to_str(dc_result));
dc_stream_release(stream);
@@ -7373,10 +7383,13 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
requested_bpc -= 2; /* lower bpc to retry validation */
}
- } while (stream == NULL && requested_bpc >= 6);
+ } while (stream == NULL && requested_bpc >= bpc_limit);
- if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
- DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
+ if ((dc_result == DC_FAIL_ENC_VALIDATE ||
+ dc_result == DC_EXCEED_DONGLE_CAP) &&
+ !aconnector->force_yuv420_output) {
+ DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
+ __func__, __LINE__);
aconnector->force_yuv420_output = true;
stream = create_validate_stream_for_sink(aconnector, drm_mode,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index b46a3afe48ca..3bd0d46c1701 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -132,6 +132,8 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+ struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dccg *dccg = clk_mgr_internal->dccg;
struct pipe_ctx *pipe = safe_to_lower
? &context->res_ctx.pipe_ctx[i]
: &dc->current_state->res_ctx.pipe_ctx[i];
@@ -148,8 +150,13 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
new_pipe->stream_res.stream_enc &&
new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled &&
new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc);
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
- !pipe->stream->link_enc) && !stream_changed_otg_dig_on) {
+ bool has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) && dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe);
+
+ if (!has_active_hpo && !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe) &&
+ (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+ !pipe->stream->link_enc) && !stream_changed_otg_dig_on)) {
+
+
/* This w/a should not trigger when we have a dig active */
if (disable) {
if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
@@ -257,11 +264,11 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
uint32_t host_router_bw_kbps[MAX_HOST_ROUTERS_NUM] = { 0 };
int i;
-
for (i = 0; i < context->stream_count; ++i) {
const struct dc_stream_state *stream = context->streams[i];
const struct dc_link *link = stream->link;
- uint8_t lowest_dpia_index = 0, hr_index = 0;
+ uint8_t lowest_dpia_index = 0;
+ unsigned int hr_index = 0;
if (!link)
continue;
@@ -271,6 +278,8 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_
continue;
hr_index = (link->link_index - lowest_dpia_index) / 2;
+ if (hr_index >= MAX_HOST_ROUTERS_NUM)
+ continue;
host_router_bw_kbps[hr_index] += dc_bandwidth_in_kbps_from_timing(
&stream->timing, dc_link_get_highest_encoding_format(link));
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index a6911bb2cf0c..9f570d447c20 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -6006,3 +6006,21 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
return profile;
}
+
+/*
+ **********************************************************************************
+ * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state
+ *
+ * Called when DM wants to log detile buffer size from dc_state
+ *
+ **********************************************************************************
+ */
+unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
+{
+ struct dc *dc = context->clk_mgr->ctx->dc;
+
+ if (dc->res_pool->funcs->get_det_buffer_size)
+ return dc->res_pool->funcs->get_det_buffer_size(context);
+ else
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 801cdbc8117d..e255c204b7e8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -434,3 +434,43 @@ char *dc_status_to_str(enum dc_status status)
return "Unexpected status error";
}
+
+char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding)
+{
+ switch (pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ return "RGB";
+ case PIXEL_ENCODING_YCBCR422:
+ return "YUV422";
+ case PIXEL_ENCODING_YCBCR444:
+ return "YUV444";
+ case PIXEL_ENCODING_YCBCR420:
+ return "YUV420";
+ default:
+ return "Unknown";
+ }
+}
+
+char *dc_color_depth_to_str(enum dc_color_depth color_depth)
+{
+ switch (color_depth) {
+ case COLOR_DEPTH_666:
+ return "6-bpc";
+ case COLOR_DEPTH_888:
+ return "8-bpc";
+ case COLOR_DEPTH_101010:
+ return "10-bpc";
+ case COLOR_DEPTH_121212:
+ return "12-bpc";
+ case COLOR_DEPTH_141414:
+ return "14-bpc";
+ case COLOR_DEPTH_161616:
+ return "16-bpc";
+ case COLOR_DEPTH_999:
+ return "9-bpc";
+ case COLOR_DEPTH_111111:
+ return "11-bpc";
+ default:
+ return "Unknown";
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index c7599c40d4be..d915020a4295 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -765,25 +765,6 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
-/*
- * This is a preliminary vp size calculation to allow us to check taps support.
- * The result is completely overridden afterwards.
- */
-static void calculate_viewport_size(struct pipe_ctx *pipe_ctx)
-{
- struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
-
- data->viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz, data->recout.width));
- data->viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert, data->recout.height));
- data->viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz_c, data->recout.width));
- data->viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert_c, data->recout.height));
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
- swap(data->viewport.width, data->viewport.height);
- swap(data->viewport_c.width, data->viewport_c.height);
- }
-}
-
static struct rect intersect_rec(const struct rect *r0, const struct rect *r1)
{
struct rect rec;
@@ -1468,6 +1449,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
const struct rect odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx);
+ struct scaling_taps temp = {0};
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -1519,14 +1501,16 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
res = spl_calculate_scaler_params(spl_in, spl_out);
// Convert respective out params from SPL to scaler data
translate_SPL_out_params_to_pipe_ctx(pipe_ctx, spl_out);
+
+ /* Ignore scaler failure if pipe context plane is phantom plane */
+ if (!res && plane_state->is_phantom)
+ res = true;
} else {
#endif
/* depends on h_active */
calculate_recout(pipe_ctx);
/* depends on pixel format */
calculate_scaling_ratios(pipe_ctx);
- /* depends on scaling ratios and recout, does not calculate offset yet */
- calculate_viewport_size(pipe_ctx);
/*
* LB calculations depend on vp size, h/v_active and scaling ratios
@@ -1547,6 +1531,24 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
+	// Get the tap count using 100x100 dummy viewport data for maximum scaling quality;
+	// override below if a different scaling quality is required
+ pipe_ctx->plane_res.scl_data.viewport.width = 100;
+ pipe_ctx->plane_res.scl_data.viewport.height = 100;
+ pipe_ctx->plane_res.scl_data.viewport_c.width = 100;
+ pipe_ctx->plane_res.scl_data.viewport_c.height = 100;
+ if (pipe_ctx->plane_res.xfm != NULL)
+ res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+ if (pipe_ctx->plane_res.dpp != NULL)
+ res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+ temp = pipe_ctx->plane_res.scl_data.taps;
+
+ calculate_inits_and_viewports(pipe_ctx);
+
if (pipe_ctx->plane_res.xfm != NULL)
res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
@@ -1573,11 +1575,14 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
&plane_state->scaling_quality);
}
- /*
- * Depends on recout, scaling ratios, h_active and taps
- * May need to re-check lb size after this in some obscure scenario
- */
- if (res)
+ /* Ignore scaler failure if pipe context plane is phantom plane */
+ if (!res && plane_state->is_phantom)
+ res = true;
+
+ if (res && (pipe_ctx->plane_res.scl_data.taps.v_taps != temp.v_taps ||
+ pipe_ctx->plane_res.scl_data.taps.h_taps != temp.h_taps ||
+ pipe_ctx->plane_res.scl_data.taps.v_taps_c != temp.v_taps_c ||
+ pipe_ctx->plane_res.scl_data.taps.h_taps_c != temp.h_taps_c))
calculate_inits_and_viewports(pipe_ctx);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 9a406d74c0dd..3d93efdc1026 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -819,12 +819,12 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
stream->dst.height,
stream->output_color_space);
DC_LOG_DC(
- "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
+ "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixel_encoding:%s, color_depth:%s\n",
stream->timing.pix_clk_100hz / 10,
stream->timing.h_total,
stream->timing.v_total,
- stream->timing.pixel_encoding,
- stream->timing.display_color_depth);
+ dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
+ dc_color_depth_to_str(stream->timing.display_color_depth));
DC_LOG_DC(
"\tlink: %d\n",
stream->link->link_index);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3992ad73165b..7c163aa7e8bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -285,6 +285,7 @@ struct dc_caps {
uint16_t subvp_vertical_int_margin_us;
bool seamless_odm;
uint32_t max_v_total;
+ bool vtotal_limited_by_fp2;
uint32_t max_disp_clock_khz_at_vmin;
uint8_t subvp_drr_vblank_start_margin_us;
bool cursor_not_scaled;
@@ -2543,6 +2544,8 @@ struct dc_power_profile {
struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context);
+unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
+
/* DSC Interfaces */
#include "dc_dsc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 1e7de0f03290..ec5009f411eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -1294,6 +1294,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
memset(&new_signals, 0, sizeof(new_signals));
+ new_signals.bits.allow_idle = 1; /* always set */
+
if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
new_signals.bits.allow_pg = 1;
@@ -1389,7 +1391,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
*/
dc_dmub_srv->needs_idle_wake = false;
- if (prev_driver_signals.bits.allow_ips2 &&
+ if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
(!dc->debug.optimize_ips_handshake ||
ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
DC_LOG_IPS(
@@ -1450,7 +1452,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
}
dc_dmub_srv_notify_idle(dc, false);
- if (prev_driver_signals.bits.allow_ips1) {
+ if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
DC_LOG_IPS(
"wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
index 5b343f745cf3..ae81451a3a72 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
@@ -83,6 +83,15 @@ void enc314_disable_fifo(struct stream_encoder *enc)
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
}
+static bool enc314_is_fifo_enabled(struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t reset_val;
+
+ REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val);
+ return (reset_val != 0);
+}
+
void enc314_dp_set_odm_combine(
struct stream_encoder *enc,
bool odm_combine)
@@ -468,6 +477,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
.enable_fifo = enc314_enable_fifo,
.disable_fifo = enc314_disable_fifo,
+ .is_fifo_enabled = enc314_is_fifo_enabled,
.set_input_mode = enc314_set_dig_input_mode,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index d851c081e376..8dabb1ac0b68 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -1222,6 +1222,7 @@ static dml_bool_t CalculatePrefetchSchedule(struct display_mode_lib_scratch_st *
s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + dml_max(p->TWait + p->TCalc, *p->Tdmdl)) / s->LineTime - (*p->DSTYAfterScaler + (dml_float_t) *p->DSTXAfterScaler / (dml_float_t)p->myPipe->HTotal);
+ s->dst_y_prefetch_equ = dml_min(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
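Side note, not part of the patch: the 63.75 cap above follows from DST_Y_PREFETCH being a U6.2 fixed-point register field, i.e. 6 integer bits plus 2 fractional bits, so the largest encodable value is (2^8 - 1) / 2^2 = 255 / 4 = 63.75 lines.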
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 8697eac1e1f7..8dee0d397e03 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -339,11 +339,22 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
// }
}
+static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
+{
+ unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total;
+
+ if (stream->ctx->dc->caps.vtotal_limited_by_fp2) {
+ max_hw_v_total -= stream->timing.v_front_porch + 1;
+ }
+
+ return max_hw_v_total;
+}
+
static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing,
struct dc_stream_state *stream,
struct dml2_context *dml_ctx)
{
- unsigned int hblank_start, vblank_start;
+ unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz;
timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
@@ -371,11 +382,23 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
- stream->timing.v_border_top - stream->timing.v_border_bottom;
timing->drr_config.enabled = stream->ignore_msa_timing_param;
- timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
timing->drr_config.drr_active_variable = stream->vrr_active_variable;
timing->drr_config.drr_active_fixed = stream->vrr_active_fixed;
timing->drr_config.disallowed = !stream->allow_freesync;
+ /* limit min refresh rate to DC cap */
+ min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz;
+ if (stream->ctx->dc->caps.max_v_total != 0) {
+ min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
+ (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
+ }
+
+ if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) {
+ timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
+ } else {
+ timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz;
+ }
+
if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
timing->drr_config.max_instant_vtotal_delta = dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase(stream, false);
@@ -859,7 +882,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->immediate_flip = plane_state->flip_immediate;
plane->composition.rect_out_height_spans_vactive =
- plane_state->dst_rect.height >= stream->timing.v_addressable &&
+ plane_state->dst_rect.height >= stream->src.height &&
stream->dst.height >= stream->timing.v_addressable;
}
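The clamp added to populate_dml21_timing_config_from_stream_state() above (and mirrored in mod_freesync_build_vrr_params() further down) is easiest to see with numbers. A minimal standalone sketch, not part of the patch and using invented timing values, of how max_v_total bounds the lowest refresh rate the hardware can hold:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented example timing: 148.5 MHz pixel clock, h_total = 2200 */
	uint64_t pix_clk_100hz = 1485000;		/* pixel clock in 100 Hz units */
	uint64_t h_total = 2200;
	uint64_t max_hw_v_total = (1 << 15) - 1;	/* 32767, before any fp2 adjustment */

	/* refresh (uHz) = pixel clock (Hz) * 1e6 / (h_total * v_total) */
	uint64_t min_hw_refresh_uhz = (pix_clk_100hz * 100000000ULL) /
				      (h_total * max_hw_v_total);

	/* Prints ~2060000 uHz, i.e. ~2.06 Hz; a smaller requested minimum would be raised to this */
	printf("lowest refresh the hardware can hold: %llu uHz\n",
	       (unsigned long long)min_hw_refresh_uhz);
	return 0;
}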
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 4e93eeedfc1b..efcc1a6b364c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -355,6 +355,20 @@ void dcn314_calculate_pix_rate_divider(
}
}
+static bool dcn314_is_pipe_dig_fifo_on(struct pipe_ctx *pipe)
+{
+ return pipe && pipe->stream
+ // Check dig's otg instance.
+ && pipe->stream_res.stream_enc
+ && pipe->stream_res.stream_enc->funcs->dig_source_otg
+ && pipe->stream_res.tg->inst == pipe->stream_res.stream_enc->funcs->dig_source_otg(pipe->stream_res.stream_enc)
+ && pipe->stream->link && pipe->stream->link->link_enc
+ && pipe->stream->link->link_enc->funcs->is_dig_enabled
+ && pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc)
+ && pipe->stream_res.stream_enc->funcs->is_fifo_enabled
+ && pipe->stream_res.stream_enc->funcs->is_fifo_enabled(pipe->stream_res.stream_enc);
+}
+
void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx)
{
unsigned int i;
@@ -371,7 +385,11 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) &&
+ !pipe->stream->apply_seamless_boot_optimization &&
+ !pipe->stream->apply_edp_fast_boot_optimization) {
+ if (dcn314_is_pipe_dig_fifo_on(pipe))
+ continue;
pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
otg_disabled[i] = true;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index fa5edd03d004..b5afd8c3103d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -60,5 +60,7 @@ enum dc_status {
};
char *dc_status_to_str(enum dc_status status);
+char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding);
+char *dc_color_depth_to_str(enum dc_color_depth color_depth);
#endif /* _CORE_STATUS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index bfb8b8502d20..e1e3142cdc00 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -215,6 +215,7 @@ struct resource_funcs {
void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx);
+ unsigned int (*get_det_buffer_size)(const struct dc_state *context);
};
struct audio_support{
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index eea2b3b307cd..45e4de8d5cff 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -1511,6 +1511,7 @@ bool dcn20_split_stream_for_odm(
if (prev_odm_pipe->plane_state) {
struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
+ struct output_pixel_processor *opp = next_odm_pipe->stream_res.opp;
int new_width;
/* HACTIVE halved for odm combine */
@@ -1544,7 +1545,28 @@ bool dcn20_split_stream_for_odm(
sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
sd->ratios.horz_c, sd->h_active - sd->recout.x));
sd->recout.x = 0;
+
+		/*
+		 * When ODM is used with the YCbCr422 or YCbCr420 colour space, a split
+		 * screen would be seen with the calculations above, since the extra left
+		 * edge pixel is accounted for in fmt but not in the viewport.
+		 *
+		 * The adjustments below fix the split by correcting the calculations
+		 * when there is an extra left edge pixel.
+		 */
+ if (opp && opp->funcs->opp_get_left_edge_extra_pixel_count
+ && opp->funcs->opp_get_left_edge_extra_pixel_count(
+ opp, next_odm_pipe->stream->timing.pixel_encoding,
+ resource_is_pipe_type(next_odm_pipe, OTG_MASTER)) == 1) {
+ sd->h_active += 1;
+ sd->recout.width += 1;
+ sd->viewport.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
+ sd->viewport_c.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
+ sd->viewport_c.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
+ sd->viewport.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
+ }
}
+
if (!next_odm_pipe->top_pipe)
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
else
@@ -2133,6 +2155,7 @@ bool dcn20_fast_validate_bw(
ASSERT(0);
}
}
+
/* Actual dsc count per stream dsc validation*/
if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 347e6aaea582..14b28841657d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -1298,7 +1298,7 @@ static struct link_encoder *dcn21_link_encoder_create(
kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc21)
+ if (!enc21 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 5040a4c6ed18..75cc84473a57 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -2354,6 +2354,7 @@ static bool dcn30_resource_construct(
dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* read VBIOS LTTPR caps */
{
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 5791b5cc2875..320b040d591d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -1234,6 +1234,7 @@ static bool dcn302_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 63f0f882c861..297cf4b5600d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -1179,6 +1179,7 @@ static bool dcn303_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index ac8cb20e2e3b..80386f698ae4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1721,6 +1721,12 @@ int dcn31_populate_dml_pipes_from_context(
return pipe_cnt;
}
+unsigned int dcn31_get_det_buffer_size(
+ const struct dc_state *context)
+{
+ return context->bw_ctx.dml.ip.det_buffer_size_kbytes;
+}
+
void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -1843,6 +1849,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.update_bw_bounding_box = dcn31_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn31_get_panel_config_defaults,
+ .get_det_buffer_size = dcn31_get_det_buffer_size,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index 901436591ed4..551ad912f7be 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
@@ -63,6 +63,9 @@ struct resource_pool *dcn31_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
+unsigned int dcn31_get_det_buffer_size(
+ const struct dc_state *context);
+
/*temp: B0 specific before switch to dcn313 headers*/
#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 169924d0a839..01d95108ce66 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -1778,6 +1778,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
+ .get_det_buffer_size = dcn31_get_det_buffer_size,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 3f4b9dba4112..f2ce687c0e03 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1840,6 +1840,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.update_bw_bounding_box = dcn315_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn315_get_panel_config_defaults,
+ .get_det_buffer_size = dcn31_get_det_buffer_size,
};
static bool dcn315_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 5fd52c5fcee4..af82e13029c9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -1720,6 +1720,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.update_bw_bounding_box = dcn316_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn316_get_panel_config_defaults,
+ .get_det_buffer_size = dcn31_get_det_buffer_size,
};
static bool dcn316_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index a124ad9bd108..6b889c8be0ca 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -2186,6 +2186,7 @@ static bool dcn32_resource_construct(
dc->caps.dmcub_support = true;
dc->caps.seamless_odm = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 827a94f84f10..74113c578bac 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1743,6 +1743,7 @@ static bool dcn321_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 893a9d9ee870..d0c4693c1224 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1779,6 +1779,7 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia,
+ .get_det_buffer_size = dcn31_get_det_buffer_size,
};
static bool dcn35_resource_construct(
@@ -1850,6 +1851,7 @@ static bool dcn35_resource_construct(
dc->caps.zstate_support = true;
dc->caps.ips_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 70abd32ce2ad..575c0aa12229 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1758,6 +1758,7 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
+ .get_det_buffer_size = dcn31_get_det_buffer_size,
};
static bool dcn351_resource_construct(
@@ -1829,6 +1830,7 @@ static bool dcn351_resource_construct(
dc->caps.zstate_support = true;
dc->caps.ips_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 9d56fbdcd06a..4aa975418fb1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -1826,6 +1826,7 @@ static bool dcn401_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev))
dc->caps.dcc_plane_width_limit = 7680;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index ebcf68bfae2b..7835100b37c4 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -747,7 +747,8 @@ union dmub_shared_state_ips_driver_signals {
uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */
uint32_t allow_ips2 : 1; /**< 1 is IPS1 is allowed */
uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
- uint32_t reserved_bits : 28; /**< Reversed bits */
+ uint32_t allow_idle : 1; /**< 1 if driver is allowing idle */
+	uint32_t reserved_bits : 27; /**< Reserved bits */
} bits;
uint32_t all;
};
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index bbd259cea4f4..ab62a76d48cf 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -121,6 +121,17 @@ static unsigned int calc_duration_in_us_from_v_total(
return duration_in_us;
}
+static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
+{
+ unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total;
+
+ if (stream->ctx->dc->caps.vtotal_limited_by_fp2) {
+ max_hw_v_total -= stream->timing.v_front_porch + 1;
+ }
+
+ return max_hw_v_total;
+}
+
unsigned int mod_freesync_calc_v_total_from_refresh(
const struct dc_stream_state *stream,
unsigned int refresh_in_uhz)
@@ -1002,7 +1013,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
if (stream->ctx->dc->caps.max_v_total != 0 && stream->timing.h_total != 0) {
min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
- (stream->timing.h_total * (long long)stream->ctx->dc->caps.max_v_total));
+ (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
}
/* Limit minimum refresh rate to what can be supported by hardware */
min_refresh_in_uhz = min_hardware_refresh_in_uhz > in_config->min_refresh_in_uhz ?
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index d5d6ab484e5a..0fa6fbee1978 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1409,7 +1409,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
* create a custom set of heuristics, write a string of numbers to the file
* starting with the number of the custom profile along with a setting
* for each heuristic parameter. Due to differences across asic families
- * the heuristic parameters vary from family to family.
+ * the heuristic parameters vary from family to family. Additionally,
+ * you can apply the custom heuristics to different clock domains. Each
 + * clock domain is considered a distinct operation, so if you modify the
 + * gfxclk heuristics and then the memclk heuristics, all of the
+ * custom heuristics will be retained until you switch to another profile.
*
*/
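A rough userspace sketch of the interface described in the comment above; this is not part of the patch, the sysfs path and the numbers written are placeholders, and the CUSTOM profile index and heuristic parameter list are ASIC-specific (read pp_power_profile_mode first to see the table your hardware exposes):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder path; the card index depends on the system */
	const char *node = "/sys/class/drm/card0/device/pp_power_profile_mode";
	/* "<custom profile number> <heuristic parameters ...>" -- values are illustrative only */
	const char *custom = "6 0 30 60 1 30 90";
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, custom, strlen(custom)) < 0)
		perror("write");
	close(fd);
	return 0;
}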
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 32bdeac2676b..0c0b9aa44dfa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -72,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
+static void smu_power_profile_mode_get(struct smu_context *smu,
+ enum PP_SMC_POWER_PROFILE profile_mode);
+static void smu_power_profile_mode_put(struct smu_context *smu,
+ enum PP_SMC_POWER_PROFILE profile_mode);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -1257,35 +1261,19 @@ static int smu_sw_init(void *handle)
INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
atomic64_set(&smu->throttle_int_counter, 0);
smu->watermarks_bitmap = 0;
- smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
- smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
- smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
- smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
- smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
- smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
- smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
- smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
- smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
-
if (smu->is_apu ||
!smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
else
- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
-
- smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
- smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
- smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
- smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
- smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
- smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
- smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
+ smu->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ smu_power_profile_mode_get(smu, smu->power_profile_mode);
+
smu->display_config = &adev->pm.pm_display_cfg;
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
@@ -1338,6 +1326,11 @@ static int smu_sw_fini(void *handle)
return ret;
}
+ if (smu->custom_profile_params) {
+ kfree(smu->custom_profile_params);
+ smu->custom_profile_params = NULL;
+ }
+
smu_fini_microcode(smu);
return 0;
@@ -2117,6 +2110,9 @@ static int smu_suspend(void *handle)
if (!ret)
adev->gfx.gfx_off_entrycount = count;
+ /* clear this on suspend so it will get reprogrammed on resume */
+ smu->workload_mask = 0;
+
return 0;
}
@@ -2229,25 +2225,49 @@ static int smu_enable_umd_pstate(void *handle,
}
static int smu_bump_power_profile_mode(struct smu_context *smu,
- long *param,
- uint32_t param_size)
+ long *custom_params,
+ u32 custom_params_max_idx)
{
- int ret = 0;
+ u32 workload_mask = 0;
+ int i, ret = 0;
+
+ for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
+ if (smu->workload_refcount[i])
+ workload_mask |= 1 << i;
+ }
+
+ if (smu->workload_mask == workload_mask)
+ return 0;
if (smu->ppt_funcs->set_power_profile_mode)
- ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
+ ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
+ custom_params,
+ custom_params_max_idx);
+
+ if (!ret)
+ smu->workload_mask = workload_mask;
return ret;
}
+static void smu_power_profile_mode_get(struct smu_context *smu,
+ enum PP_SMC_POWER_PROFILE profile_mode)
+{
+ smu->workload_refcount[profile_mode]++;
+}
+
+static void smu_power_profile_mode_put(struct smu_context *smu,
+ enum PP_SMC_POWER_PROFILE profile_mode)
+{
+ if (smu->workload_refcount[profile_mode])
+ smu->workload_refcount[profile_mode]--;
+}
+
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
- bool skip_display_settings,
- bool init)
+ bool skip_display_settings)
{
int ret = 0;
- int index = 0;
- long workload[1];
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (!skip_display_settings) {
@@ -2284,14 +2304,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
- smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
- index = fls(smu->workload_mask);
- index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- workload[0] = smu->workload_setting[index];
-
- if (init || smu->power_profile_mode != workload[0])
- smu_bump_power_profile_mode(smu, workload, 0);
- }
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+ smu_bump_power_profile_mode(smu, NULL, 0);
return ret;
}
@@ -2310,13 +2324,13 @@ static int smu_handle_task(struct smu_context *smu,
ret = smu_pre_display_config_changed(smu);
if (ret)
return ret;
- ret = smu_adjust_power_state_dynamic(smu, level, false, false);
+ ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
- ret = smu_adjust_power_state_dynamic(smu, level, true, true);
+ ret = smu_adjust_power_state_dynamic(smu, level, true);
break;
case AMD_PP_TASK_READJUST_POWER_STATE:
- ret = smu_adjust_power_state_dynamic(smu, level, true, false);
+ ret = smu_adjust_power_state_dynamic(smu, level, true);
break;
default:
break;
@@ -2338,12 +2352,11 @@ static int smu_handle_dpm_task(void *handle,
static int smu_switch_power_profile(void *handle,
enum PP_SMC_POWER_PROFILE type,
- bool en)
+ bool enable)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- long workload[1];
- uint32_t index;
+ int ret;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -2351,21 +2364,21 @@ static int smu_switch_power_profile(void *handle,
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
- if (!en) {
- smu->workload_mask &= ~(1 << smu->workload_prority[type]);
- index = fls(smu->workload_mask);
- index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- workload[0] = smu->workload_setting[index];
- } else {
- smu->workload_mask |= (1 << smu->workload_prority[type]);
- index = fls(smu->workload_mask);
- index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- workload[0] = smu->workload_setting[index];
- }
-
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
- smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
- smu_bump_power_profile_mode(smu, workload, 0);
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ if (enable)
+ smu_power_profile_mode_get(smu, type);
+ else
+ smu_power_profile_mode_put(smu, type);
+ ret = smu_bump_power_profile_mode(smu, NULL, 0);
+ if (ret) {
+ if (enable)
+ smu_power_profile_mode_put(smu, type);
+ else
+ smu_power_profile_mode_get(smu, type);
+ return ret;
+ }
+ }
return 0;
}
@@ -3053,12 +3066,35 @@ static int smu_set_power_profile_mode(void *handle,
uint32_t param_size)
{
struct smu_context *smu = handle;
+ bool custom = false;
+ int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->set_power_profile_mode)
return -EOPNOTSUPP;
- return smu_bump_power_profile_mode(smu, param, param_size);
+ if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
+ custom = true;
+ /* clear frontend mask so custom changes propagate */
+ smu->workload_mask = 0;
+ }
+
+ if ((param[param_size] != smu->power_profile_mode) || custom) {
+ /* clear the old user preference */
+ smu_power_profile_mode_put(smu, smu->power_profile_mode);
+ /* set the new user preference */
+ smu_power_profile_mode_get(smu, param[param_size]);
+ ret = smu_bump_power_profile_mode(smu,
+ custom ? param : NULL,
+ custom ? param_size : 0);
+ if (ret)
+ smu_power_profile_mode_put(smu, param[param_size]);
+ else
+ /* store the user's preference */
+ smu->power_profile_mode = param[param_size];
+ }
+
+ return ret;
}
static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
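The core of this rework: each requester takes a reference on a profile through smu_power_profile_mode_get()/put(), and smu_bump_power_profile_mode() collapses the non-zero refcounts into a single bitmask before it is handed to the firmware. A standalone sketch of that collapse; the enum names and array size are illustrative, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

enum { PROFILE_DEFAULT, PROFILE_FULLSCREEN3D, PROFILE_VIDEO, PROFILE_COMPUTE, PROFILE_COUNT };

static unsigned int refcount[PROFILE_COUNT];

static void profile_get(int p) { refcount[p]++; }
static void profile_put(int p) { if (refcount[p]) refcount[p]--; }

/* mirrors the mask-building loop in smu_bump_power_profile_mode() */
static uint32_t build_mask(void)
{
        uint32_t mask = 0;
        int i;

        for (i = 0; i < PROFILE_COUNT; i++)
                if (refcount[i])
                        mask |= 1u << i;
        return mask;
}

int main(void)
{
        profile_get(PROFILE_DEFAULT);   /* user/default preference */
        profile_get(PROFILE_COMPUTE);   /* e.g. a compute client via smu_switch_power_profile() */
        printf("mask 0x%08x\n", build_mask());
        profile_put(PROFILE_COMPUTE);
        printf("mask 0x%08x\n", build_mask());
        return 0;
}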
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index b44a185d07e8..2b8a18ce25d9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -556,11 +556,13 @@ struct smu_context {
uint32_t hard_min_uclk_req_from_dal;
bool disable_uclk_switch;
+ /* asic agnostic workload mask */
uint32_t workload_mask;
- uint32_t workload_prority[WORKLOAD_POLICY_MAX];
- uint32_t workload_setting[WORKLOAD_POLICY_MAX];
+ /* default/user workload preference */
uint32_t power_profile_mode;
- uint32_t default_power_profile_mode;
+ uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
+ /* backend specific custom workload settings */
+ long *custom_profile_params;
bool pm_enabled;
bool is_apu;
@@ -731,9 +733,12 @@ struct pptable_funcs {
* @set_power_profile_mode: Set a power profile mode. Also used to
* create/set custom power profile modes.
* &input: Power profile mode parameters.
- * &size: Size of &input.
+ * &workload_mask: mask of workloads to enable
+ * &custom_params: custom profile parameters
+ * &custom_params_max_idx: max valid idx into custom_params
*/
- int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+ int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask,
+ long *custom_params, u32 custom_params_max_idx);
/**
* @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index d52512f5f1bd..fc1297fecc62 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1445,98 +1445,120 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
return size;
}
-static int arcturus_set_power_profile_mode(struct smu_context *smu,
- long *input,
- uint32_t size)
+#define ARCTURUS_CUSTOM_PARAMS_COUNT 10
+#define ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT 2
+#define ARCTURUS_CUSTOM_PARAMS_SIZE (ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT * ARCTURUS_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int arcturus_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
- int workload_type = 0;
- uint32_t profile_mode = input[size];
- int ret = 0;
+ int ret, idx;
- if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
- return -EINVAL;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor),
+ false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
}
+ idx = 0 * ARCTURUS_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Gfxclk */
+ activity_monitor.Gfx_FPS = input[idx + 1];
+ activity_monitor.Gfx_UseRlcBusy = input[idx + 2];
+ activity_monitor.Gfx_MinActiveFreqType = input[idx + 3];
+ activity_monitor.Gfx_MinActiveFreq = input[idx + 4];
+ activity_monitor.Gfx_BoosterFreqType = input[idx + 5];
+ activity_monitor.Gfx_BoosterFreq = input[idx + 6];
+ activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7];
+ activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9];
+ }
+ idx = 1 * ARCTURUS_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Uclk */
+ activity_monitor.Mem_FPS = input[idx + 1];
+ activity_monitor.Mem_UseRlcBusy = input[idx + 2];
+ activity_monitor.Mem_MinActiveFreqType = input[idx + 3];
+ activity_monitor.Mem_MinActiveFreq = input[idx + 4];
+ activity_monitor.Mem_BoosterFreqType = input[idx + 5];
+ activity_monitor.Mem_BoosterFreq = input[idx + 6];
+ activity_monitor.Mem_PD_Data_limit_c = input[idx + 7];
+ activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9];
+ }
- if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
- (smu->smc_fw_version >= 0x360d00)) {
- if (size != 10)
- return -EINVAL;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor),
+ true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor),
- false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
+ return ret;
+}
- switch (input[0]) {
- case 0: /* Gfxclk */
- activity_monitor.Gfx_FPS = input[1];
- activity_monitor.Gfx_UseRlcBusy = input[2];
- activity_monitor.Gfx_MinActiveFreqType = input[3];
- activity_monitor.Gfx_MinActiveFreq = input[4];
- activity_monitor.Gfx_BoosterFreqType = input[5];
- activity_monitor.Gfx_BoosterFreq = input[6];
- activity_monitor.Gfx_PD_Data_limit_c = input[7];
- activity_monitor.Gfx_PD_Data_error_coeff = input[8];
- activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
- break;
- case 1: /* Uclk */
- activity_monitor.Mem_FPS = input[1];
- activity_monitor.Mem_UseRlcBusy = input[2];
- activity_monitor.Mem_MinActiveFreqType = input[3];
- activity_monitor.Mem_MinActiveFreq = input[4];
- activity_monitor.Mem_BoosterFreqType = input[5];
- activity_monitor.Mem_BoosterFreq = input[6];
- activity_monitor.Mem_PD_Data_limit_c = input[7];
- activity_monitor.Mem_PD_Data_error_coeff = input[8];
- activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
- break;
- default:
+static int arcturus_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
+{
+ u32 backend_workload_mask = 0;
+ int ret, idx = -1, i;
+
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
+
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+ if (smu->smc_fw_version < 0x360d00)
return -EINVAL;
+ if (!smu->custom_profile_params) {
+ smu->custom_profile_params =
+ kzalloc(ARCTURUS_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+ if (!smu->custom_profile_params)
+ return -ENOMEM;
}
-
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor),
- true);
+ if (custom_params && custom_params_max_idx) {
+ if (custom_params_max_idx != ARCTURUS_CUSTOM_PARAMS_COUNT)
+ return -EINVAL;
+ if (custom_params[0] >= ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT)
+ return -EINVAL;
+ idx = custom_params[0] * ARCTURUS_CUSTOM_PARAMS_COUNT;
+ smu->custom_profile_params[idx] = 1;
+ for (i = 1; i < custom_params_max_idx; i++)
+ smu->custom_profile_params[idx + i] = custom_params[i];
+ }
+ ret = arcturus_set_power_profile_mode_coeff(smu,
+ smu->custom_profile_params);
if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
return ret;
}
- }
-
- /*
- * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
- * Not all profile modes are supported on arcturus.
- */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- profile_mode);
- if (workload_type < 0) {
- dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode);
- return -EINVAL;
+ } else if (smu->custom_profile_params) {
+ memset(smu->custom_profile_params, 0, ARCTURUS_CUSTOM_PARAMS_SIZE);
}
ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetWorkloadMask,
- 1 << workload_type,
- NULL);
+ SMU_MSG_SetWorkloadMask,
+ backend_workload_mask,
+ NULL);
if (ret) {
- dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
+ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+ workload_mask);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
return ret;
}
- smu->power_profile_mode = profile_mode;
-
- return 0;
+ return ret;
}
static int arcturus_set_performance_level(struct smu_context *smu,
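The same caching pattern repeats in every backend below: smu->custom_profile_params is a flat array with one PARAMS_COUNT-sized block of longs per clock domain, element 0 of each block flags whether that domain has been configured, and a failed firmware update clears only the block that was just written. A small sketch of the copy-in step under those assumptions (names and sizes are placeholders, not the driver's):

#include <stdio.h>

#define PARAMS_PER_CLOCK 10   /* e.g. ARCTURUS_CUSTOM_PARAMS_COUNT */
#define CLOCK_DOMAINS     2   /* e.g. gfxclk and uclk */

static long cache[CLOCK_DOMAINS * PARAMS_PER_CLOCK];

/* params[0] selects the clock domain, params[1..count-1] are the values */
static int store_custom_params(const long *params, unsigned int count)
{
        long domain, idx;
        unsigned int i;

        if (count != PARAMS_PER_CLOCK)
                return -1;
        domain = params[0];
        if (domain < 0 || domain >= CLOCK_DOMAINS)
                return -1;

        idx = domain * PARAMS_PER_CLOCK;
        cache[idx] = 1;         /* mark this domain as configured */
        for (i = 1; i < count; i++)
                cache[idx + i] = params[i];
        return 0;
}

int main(void)
{
        long gfx[PARAMS_PER_CLOCK] = { 0, 30, 1, 2, 300, 1, 1000, 100, 10, 50 };

        printf("store: %d, gfx configured: %ld\n",
               store_custom_params(gfx, PARAMS_PER_CLOCK), cache[0]);
        return 0;
}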
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 16af1a329621..27c1892b2c74 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2004,87 +2004,122 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
return size;
}
-static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+#define NAVI10_CUSTOM_PARAMS_COUNT 10
+#define NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT 3
+#define NAVI10_CUSTOM_PARAMS_SIZE (NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT * NAVI10_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int navi10_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
- int workload_type, ret = 0;
+ int ret, idx;
- smu->power_profile_mode = input[size];
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor), false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
- if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
- return -EINVAL;
+ idx = 0 * NAVI10_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Gfxclk */
+ activity_monitor.Gfx_FPS = input[idx + 1];
+ activity_monitor.Gfx_MinFreqStep = input[idx + 2];
+ activity_monitor.Gfx_MinActiveFreqType = input[idx + 3];
+ activity_monitor.Gfx_MinActiveFreq = input[idx + 4];
+ activity_monitor.Gfx_BoosterFreqType = input[idx + 5];
+ activity_monitor.Gfx_BoosterFreq = input[idx + 6];
+ activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7];
+ activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9];
+ }
+ idx = 1 * NAVI10_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Socclk */
+ activity_monitor.Soc_FPS = input[idx + 1];
+ activity_monitor.Soc_MinFreqStep = input[idx + 2];
+ activity_monitor.Soc_MinActiveFreqType = input[idx + 3];
+ activity_monitor.Soc_MinActiveFreq = input[idx + 4];
+ activity_monitor.Soc_BoosterFreqType = input[idx + 5];
+ activity_monitor.Soc_BoosterFreq = input[idx + 6];
+ activity_monitor.Soc_PD_Data_limit_c = input[idx + 7];
+ activity_monitor.Soc_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor.Soc_PD_Data_error_rate_coeff = input[idx + 9];
+ }
+ idx = 2 * NAVI10_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Memclk */
+ activity_monitor.Mem_FPS = input[idx + 1];
+ activity_monitor.Mem_MinFreqStep = input[idx + 2];
+ activity_monitor.Mem_MinActiveFreqType = input[idx + 3];
+ activity_monitor.Mem_MinActiveFreq = input[idx + 4];
+ activity_monitor.Mem_BoosterFreqType = input[idx + 5];
+ activity_monitor.Mem_BoosterFreq = input[idx + 6];
+ activity_monitor.Mem_PD_Data_limit_c = input[idx + 7];
+ activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9];
+ }
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor), true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
}
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size != 10)
- return -EINVAL;
+ return ret;
+}
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor), false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
+static int navi10_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
+{
+ u32 backend_workload_mask = 0;
+ int ret, idx = -1, i;
- switch (input[0]) {
- case 0: /* Gfxclk */
- activity_monitor.Gfx_FPS = input[1];
- activity_monitor.Gfx_MinFreqStep = input[2];
- activity_monitor.Gfx_MinActiveFreqType = input[3];
- activity_monitor.Gfx_MinActiveFreq = input[4];
- activity_monitor.Gfx_BoosterFreqType = input[5];
- activity_monitor.Gfx_BoosterFreq = input[6];
- activity_monitor.Gfx_PD_Data_limit_c = input[7];
- activity_monitor.Gfx_PD_Data_error_coeff = input[8];
- activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
- break;
- case 1: /* Socclk */
- activity_monitor.Soc_FPS = input[1];
- activity_monitor.Soc_MinFreqStep = input[2];
- activity_monitor.Soc_MinActiveFreqType = input[3];
- activity_monitor.Soc_MinActiveFreq = input[4];
- activity_monitor.Soc_BoosterFreqType = input[5];
- activity_monitor.Soc_BoosterFreq = input[6];
- activity_monitor.Soc_PD_Data_limit_c = input[7];
- activity_monitor.Soc_PD_Data_error_coeff = input[8];
- activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
- break;
- case 2: /* Memclk */
- activity_monitor.Mem_FPS = input[1];
- activity_monitor.Mem_MinFreqStep = input[2];
- activity_monitor.Mem_MinActiveFreqType = input[3];
- activity_monitor.Mem_MinActiveFreq = input[4];
- activity_monitor.Mem_BoosterFreqType = input[5];
- activity_monitor.Mem_BoosterFreq = input[6];
- activity_monitor.Mem_PD_Data_limit_c = input[7];
- activity_monitor.Mem_PD_Data_error_coeff = input[8];
- activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
- break;
- default:
- return -EINVAL;
- }
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor), true);
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+ if (!smu->custom_profile_params) {
+ smu->custom_profile_params = kzalloc(NAVI10_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+ if (!smu->custom_profile_params)
+ return -ENOMEM;
+ }
+ if (custom_params && custom_params_max_idx) {
+ if (custom_params_max_idx != NAVI10_CUSTOM_PARAMS_COUNT)
+ return -EINVAL;
+ if (custom_params[0] >= NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT)
+ return -EINVAL;
+ idx = custom_params[0] * NAVI10_CUSTOM_PARAMS_COUNT;
+ smu->custom_profile_params[idx] = 1;
+ for (i = 1; i < custom_params_max_idx; i++)
+ smu->custom_profile_params[idx + i] = custom_params[i];
+ }
+ ret = navi10_set_power_profile_mode_coeff(smu,
+ smu->custom_profile_params);
if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
return ret;
}
+ } else if (smu->custom_profile_params) {
+ memset(smu->custom_profile_params, 0, NAVI10_CUSTOM_PARAMS_SIZE);
}
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- smu->power_profile_mode);
- if (workload_type < 0)
- return -EINVAL;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type, NULL);
- if (ret)
- dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+ backend_workload_mask, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+ workload_mask);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 9c3c48297cba..1af90990d05c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1706,90 +1706,126 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
return size;
}
-static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+#define SIENNA_CICHLID_CUSTOM_PARAMS_COUNT 10
+#define SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT 3
+#define SIENNA_CICHLID_CUSTOM_PARAMS_SIZE (SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int sienna_cichlid_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input)
{
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
- int workload_type, ret = 0;
+ int ret, idx;
- smu->power_profile_mode = input[size];
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external), false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
- if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
- return -EINVAL;
+ idx = 0 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Gfxclk */
+ activity_monitor->Gfx_FPS = input[idx + 1];
+ activity_monitor->Gfx_MinFreqStep = input[idx + 2];
+ activity_monitor->Gfx_MinActiveFreqType = input[idx + 3];
+ activity_monitor->Gfx_MinActiveFreq = input[idx + 4];
+ activity_monitor->Gfx_BoosterFreqType = input[idx + 5];
+ activity_monitor->Gfx_BoosterFreq = input[idx + 6];
+ activity_monitor->Gfx_PD_Data_limit_c = input[idx + 7];
+ activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 9];
+ }
+ idx = 1 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Socclk */
+ activity_monitor->Fclk_FPS = input[idx + 1];
+ activity_monitor->Fclk_MinFreqStep = input[idx + 2];
+ activity_monitor->Fclk_MinActiveFreqType = input[idx + 3];
+ activity_monitor->Fclk_MinActiveFreq = input[idx + 4];
+ activity_monitor->Fclk_BoosterFreqType = input[idx + 5];
+ activity_monitor->Fclk_BoosterFreq = input[idx + 6];
+ activity_monitor->Fclk_PD_Data_limit_c = input[idx + 7];
+ activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 9];
+ }
+ idx = 2 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Memclk */
+ activity_monitor->Mem_FPS = input[idx + 1];
+ activity_monitor->Mem_MinFreqStep = input[idx + 2];
+ activity_monitor->Mem_MinActiveFreqType = input[idx + 3];
+ activity_monitor->Mem_MinActiveFreq = input[idx + 4];
+ activity_monitor->Mem_BoosterFreqType = input[idx + 5];
+ activity_monitor->Mem_BoosterFreq = input[idx + 6];
+ activity_monitor->Mem_PD_Data_limit_c = input[idx + 7];
+ activity_monitor->Mem_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor->Mem_PD_Data_error_rate_coeff = input[idx + 9];
}
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size != 10)
- return -EINVAL;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external), true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external), false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
+ return ret;
+}
- switch (input[0]) {
- case 0: /* Gfxclk */
- activity_monitor->Gfx_FPS = input[1];
- activity_monitor->Gfx_MinFreqStep = input[2];
- activity_monitor->Gfx_MinActiveFreqType = input[3];
- activity_monitor->Gfx_MinActiveFreq = input[4];
- activity_monitor->Gfx_BoosterFreqType = input[5];
- activity_monitor->Gfx_BoosterFreq = input[6];
- activity_monitor->Gfx_PD_Data_limit_c = input[7];
- activity_monitor->Gfx_PD_Data_error_coeff = input[8];
- activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9];
- break;
- case 1: /* Socclk */
- activity_monitor->Fclk_FPS = input[1];
- activity_monitor->Fclk_MinFreqStep = input[2];
- activity_monitor->Fclk_MinActiveFreqType = input[3];
- activity_monitor->Fclk_MinActiveFreq = input[4];
- activity_monitor->Fclk_BoosterFreqType = input[5];
- activity_monitor->Fclk_BoosterFreq = input[6];
- activity_monitor->Fclk_PD_Data_limit_c = input[7];
- activity_monitor->Fclk_PD_Data_error_coeff = input[8];
- activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9];
- break;
- case 2: /* Memclk */
- activity_monitor->Mem_FPS = input[1];
- activity_monitor->Mem_MinFreqStep = input[2];
- activity_monitor->Mem_MinActiveFreqType = input[3];
- activity_monitor->Mem_MinActiveFreq = input[4];
- activity_monitor->Mem_BoosterFreqType = input[5];
- activity_monitor->Mem_BoosterFreq = input[6];
- activity_monitor->Mem_PD_Data_limit_c = input[7];
- activity_monitor->Mem_PD_Data_error_coeff = input[8];
- activity_monitor->Mem_PD_Data_error_rate_coeff = input[9];
- break;
- default:
- return -EINVAL;
+static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
+{
+ u32 backend_workload_mask = 0;
+ int ret, idx = -1, i;
+
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
+
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+ if (!smu->custom_profile_params) {
+ smu->custom_profile_params =
+ kzalloc(SIENNA_CICHLID_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+ if (!smu->custom_profile_params)
+ return -ENOMEM;
}
-
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external), true);
+ if (custom_params && custom_params_max_idx) {
+ if (custom_params_max_idx != SIENNA_CICHLID_CUSTOM_PARAMS_COUNT)
+ return -EINVAL;
+ if (custom_params[0] >= SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT)
+ return -EINVAL;
+ idx = custom_params[0] * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
+ smu->custom_profile_params[idx] = 1;
+ for (i = 1; i < custom_params_max_idx; i++)
+ smu->custom_profile_params[idx + i] = custom_params[i];
+ }
+ ret = sienna_cichlid_set_power_profile_mode_coeff(smu,
+ smu->custom_profile_params);
if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
return ret;
}
+ } else if (smu->custom_profile_params) {
+ memset(smu->custom_profile_params, 0, SIENNA_CICHLID_CUSTOM_PARAMS_SIZE);
}
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- smu->power_profile_mode);
- if (workload_type < 0)
- return -EINVAL;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type, NULL);
- if (ret)
- dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+ backend_workload_mask, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+ workload_mask);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 1fe020f1f4db..9bca748ac2e9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1054,42 +1054,27 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
return size;
}
-static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+static int vangogh_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
{
- int workload_type, ret;
- uint32_t profile_mode = input[size];
+ u32 backend_workload_mask = 0;
+ int ret;
- if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
- return -EINVAL;
- }
-
- if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
- profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
- return 0;
-
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- profile_mode);
- if (workload_type < 0) {
- dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
- profile_mode);
- return -EINVAL;
- }
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
- 1 << workload_type,
- NULL);
+ backend_workload_mask,
+ NULL);
if (ret) {
- dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
- workload_type);
+ dev_err_once(smu->adev->dev, "Fail to set workload mask 0x%08x\n",
+ workload_mask);
return ret;
}
- smu->power_profile_mode = profile_mode;
-
- return 0;
+ return ret;
}
static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index cc0504b063fa..1a8a42b176e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -862,44 +862,27 @@ static int renoir_force_clk_levels(struct smu_context *smu,
return ret;
}
-static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+static int renoir_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
{
- int workload_type, ret;
- uint32_t profile_mode = input[size];
+ int ret;
+ u32 backend_workload_mask = 0;
- if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
- return -EINVAL;
- }
-
- if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
- profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
- return 0;
-
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- profile_mode);
- if (workload_type < 0) {
- /*
- * TODO: If some case need switch to powersave/default power mode
- * then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving.
- */
- dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode);
- return -EINVAL;
- }
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
- 1 << workload_type,
- NULL);
+ backend_workload_mask,
+ NULL);
if (ret) {
- dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
+ dev_err_once(smu->adev->dev, "Failed to set workload mask 0x08%x\n",
+ workload_mask);
return ret;
}
- smu->power_profile_mode = profile_mode;
-
- return 0;
+ return ret;
}
static int renoir_set_peak_clock_by_device(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index d53e162dcd8d..a93739688071 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2477,82 +2477,76 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
return size;
}
-static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
- long *input,
- uint32_t size)
+#define SMU_13_0_0_CUSTOM_PARAMS_COUNT 9
+#define SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT 2
+#define SMU_13_0_0_CUSTOM_PARAMS_SIZE (SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_0_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int smu_v13_0_0_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input)
{
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
- int workload_type, ret = 0;
- u32 workload_mask, selected_workload_mask;
-
- smu->power_profile_mode = input[size];
+ int ret, idx;
- if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
- return -EINVAL;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
}
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size != 9)
- return -EINVAL;
-
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external),
- false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
-
- switch (input[0]) {
- case 0: /* Gfxclk */
- activity_monitor->Gfx_FPS = input[1];
- activity_monitor->Gfx_MinActiveFreqType = input[2];
- activity_monitor->Gfx_MinActiveFreq = input[3];
- activity_monitor->Gfx_BoosterFreqType = input[4];
- activity_monitor->Gfx_BoosterFreq = input[5];
- activity_monitor->Gfx_PD_Data_limit_c = input[6];
- activity_monitor->Gfx_PD_Data_error_coeff = input[7];
- activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
- break;
- case 1: /* Fclk */
- activity_monitor->Fclk_FPS = input[1];
- activity_monitor->Fclk_MinActiveFreqType = input[2];
- activity_monitor->Fclk_MinActiveFreq = input[3];
- activity_monitor->Fclk_BoosterFreqType = input[4];
- activity_monitor->Fclk_BoosterFreq = input[5];
- activity_monitor->Fclk_PD_Data_limit_c = input[6];
- activity_monitor->Fclk_PD_Data_error_coeff = input[7];
- activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
- break;
- default:
- return -EINVAL;
- }
+ idx = 0 * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Gfxclk */
+ activity_monitor->Gfx_FPS = input[idx + 1];
+ activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
+ activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
+ activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
+ activity_monitor->Gfx_BoosterFreq = input[idx + 5];
+ activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
+ activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
+ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
+ }
+ idx = 1 * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Fclk */
+ activity_monitor->Fclk_FPS = input[idx + 1];
+ activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
+ activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
+ activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
+ activity_monitor->Fclk_BoosterFreq = input[idx + 5];
+ activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
+ activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
+ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
+ }
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external),
- true);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
- return ret;
- }
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
}
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- smu->power_profile_mode);
+ return ret;
+}
- if (workload_type < 0)
- return -EINVAL;
+static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
+{
+ u32 backend_workload_mask = 0;
+ int workload_type, ret, idx = -1, i;
- selected_workload_mask = workload_mask = 1 << workload_type;
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
@@ -2564,15 +2558,48 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
CMN2ASIC_MAPPING_WORKLOAD,
PP_SMC_POWER_PROFILE_POWERSAVING);
if (workload_type >= 0)
- workload_mask |= 1 << workload_type;
+ backend_workload_mask |= 1 << workload_type;
+ }
+
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+ if (!smu->custom_profile_params) {
+ smu->custom_profile_params =
+ kzalloc(SMU_13_0_0_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+ if (!smu->custom_profile_params)
+ return -ENOMEM;
+ }
+ if (custom_params && custom_params_max_idx) {
+ if (custom_params_max_idx != SMU_13_0_0_CUSTOM_PARAMS_COUNT)
+ return -EINVAL;
+ if (custom_params[0] >= SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT)
+ return -EINVAL;
+ idx = custom_params[0] * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
+ smu->custom_profile_params[idx] = 1;
+ for (i = 1; i < custom_params_max_idx; i++)
+ smu->custom_profile_params[idx + i] = custom_params[i];
+ }
+ ret = smu_v13_0_0_set_power_profile_mode_coeff(smu,
+ smu->custom_profile_params);
+ if (ret) {
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
+ } else if (smu->custom_profile_params) {
+ memset(smu->custom_profile_params, 0, SMU_13_0_0_CUSTOM_PARAMS_SIZE);
}
ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetWorkloadMask,
- workload_mask,
- NULL);
- if (!ret)
- smu->workload_mask = selected_workload_mask;
+ SMU_MSG_SetWorkloadMask,
+ backend_workload_mask,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+ workload_mask);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index ceaf4572db25..d0e6d051e9cf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2436,78 +2436,110 @@ out:
return result;
}
-static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+#define SMU_13_0_7_CUSTOM_PARAMS_COUNT 8
+#define SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT 2
+#define SMU_13_0_7_CUSTOM_PARAMS_SIZE (SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_7_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int smu_v13_0_7_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input)
{
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
- int workload_type, ret = 0;
+ int ret, idx;
- smu->power_profile_mode = input[size];
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external), false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
- if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
- return -EINVAL;
+ idx = 0 * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Gfxclk */
+ activity_monitor->Gfx_ActiveHystLimit = input[idx + 1];
+ activity_monitor->Gfx_IdleHystLimit = input[idx + 2];
+ activity_monitor->Gfx_FPS = input[idx + 3];
+ activity_monitor->Gfx_MinActiveFreqType = input[idx + 4];
+ activity_monitor->Gfx_BoosterFreqType = input[idx + 5];
+ activity_monitor->Gfx_MinActiveFreq = input[idx + 6];
+ activity_monitor->Gfx_BoosterFreq = input[idx + 7];
+ }
+ idx = 1 * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Fclk */
+ activity_monitor->Fclk_ActiveHystLimit = input[idx + 1];
+ activity_monitor->Fclk_IdleHystLimit = input[idx + 2];
+ activity_monitor->Fclk_FPS = input[idx + 3];
+ activity_monitor->Fclk_MinActiveFreqType = input[idx + 4];
+ activity_monitor->Fclk_BoosterFreqType = input[idx + 5];
+ activity_monitor->Fclk_MinActiveFreq = input[idx + 6];
+ activity_monitor->Fclk_BoosterFreq = input[idx + 7];
}
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size != 8)
- return -EINVAL;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external), true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external), false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
+ return ret;
+}
- switch (input[0]) {
- case 0: /* Gfxclk */
- activity_monitor->Gfx_ActiveHystLimit = input[1];
- activity_monitor->Gfx_IdleHystLimit = input[2];
- activity_monitor->Gfx_FPS = input[3];
- activity_monitor->Gfx_MinActiveFreqType = input[4];
- activity_monitor->Gfx_BoosterFreqType = input[5];
- activity_monitor->Gfx_MinActiveFreq = input[6];
- activity_monitor->Gfx_BoosterFreq = input[7];
- break;
- case 1: /* Fclk */
- activity_monitor->Fclk_ActiveHystLimit = input[1];
- activity_monitor->Fclk_IdleHystLimit = input[2];
- activity_monitor->Fclk_FPS = input[3];
- activity_monitor->Fclk_MinActiveFreqType = input[4];
- activity_monitor->Fclk_BoosterFreqType = input[5];
- activity_monitor->Fclk_MinActiveFreq = input[6];
- activity_monitor->Fclk_BoosterFreq = input[7];
- break;
- default:
- return -EINVAL;
+static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
+{
+ u32 backend_workload_mask = 0;
+ int ret, idx = -1, i;
+
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
+
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+ if (!smu->custom_profile_params) {
+ smu->custom_profile_params =
+ kzalloc(SMU_13_0_7_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+ if (!smu->custom_profile_params)
+ return -ENOMEM;
}
-
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external), true);
+ if (custom_params && custom_params_max_idx) {
+ if (custom_params_max_idx != SMU_13_0_7_CUSTOM_PARAMS_COUNT)
+ return -EINVAL;
+ if (custom_params[0] >= SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT)
+ return -EINVAL;
+ idx = custom_params[0] * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
+ smu->custom_profile_params[idx] = 1;
+ for (i = 1; i < custom_params_max_idx; i++)
+ smu->custom_profile_params[idx + i] = custom_params[i];
+ }
+ ret = smu_v13_0_7_set_power_profile_mode_coeff(smu,
+ smu->custom_profile_params);
if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
return ret;
}
+ } else if (smu->custom_profile_params) {
+ memset(smu->custom_profile_params, 0, SMU_13_0_7_CUSTOM_PARAMS_SIZE);
}
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- smu->power_profile_mode);
- if (workload_type < 0)
- return -EINVAL;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type, NULL);
+ backend_workload_mask, NULL);
- if (ret)
- dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
- else
- smu->workload_mask = (1 << workload_type);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+ workload_mask);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 82aef8626afa..b22fb7eafcd3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1751,90 +1751,120 @@ static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
return size;
}
-static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
- long *input,
- uint32_t size)
+#define SMU_14_0_2_CUSTOM_PARAMS_COUNT 9
+#define SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT 2
+#define SMU_14_0_2_CUSTOM_PARAMS_SIZE (SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT * SMU_14_0_2_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input)
{
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
- int workload_type, ret = 0;
- uint32_t current_profile_mode = smu->power_profile_mode;
- smu->power_profile_mode = input[size];
+ int ret, idx;
- if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
- return -EINVAL;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
}
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size != 9)
- return -EINVAL;
+ idx = 0 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Gfxclk */
+ activity_monitor->Gfx_FPS = input[idx + 1];
+ activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
+ activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
+ activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
+ activity_monitor->Gfx_BoosterFreq = input[idx + 5];
+ activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
+ activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
+ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
+ }
+ idx = 1 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Fclk */
+ activity_monitor->Fclk_FPS = input[idx + 1];
+ activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
+ activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
+ activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
+ activity_monitor->Fclk_BoosterFreq = input[idx + 5];
+ activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
+ activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
+ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
+ }
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external),
- false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
- switch (input[0]) {
- case 0: /* Gfxclk */
- activity_monitor->Gfx_FPS = input[1];
- activity_monitor->Gfx_MinActiveFreqType = input[2];
- activity_monitor->Gfx_MinActiveFreq = input[3];
- activity_monitor->Gfx_BoosterFreqType = input[4];
- activity_monitor->Gfx_BoosterFreq = input[5];
- activity_monitor->Gfx_PD_Data_limit_c = input[6];
- activity_monitor->Gfx_PD_Data_error_coeff = input[7];
- activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
- break;
- case 1: /* Fclk */
- activity_monitor->Fclk_FPS = input[1];
- activity_monitor->Fclk_MinActiveFreqType = input[2];
- activity_monitor->Fclk_MinActiveFreq = input[3];
- activity_monitor->Fclk_BoosterFreqType = input[4];
- activity_monitor->Fclk_BoosterFreq = input[5];
- activity_monitor->Fclk_PD_Data_limit_c = input[6];
- activity_monitor->Fclk_PD_Data_error_coeff = input[7];
- activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
- break;
- default:
- return -EINVAL;
- }
+ return ret;
+}
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external),
- true);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
- return ret;
- }
- }
+static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
+{
+ u32 backend_workload_mask = 0;
+ int ret, idx = -1, i;
+
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
+ /* disable deep sleep if compute is enabled */
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE))
smu_v14_0_deep_sleep_control(smu, false);
- else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
+ else
smu_v14_0_deep_sleep_control(smu, true);
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- smu->power_profile_mode);
- if (workload_type < 0)
- return -EINVAL;
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+ if (!smu->custom_profile_params) {
+ smu->custom_profile_params =
+ kzalloc(SMU_14_0_2_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+ if (!smu->custom_profile_params)
+ return -ENOMEM;
+ }
+ if (custom_params && custom_params_max_idx) {
+ if (custom_params_max_idx != SMU_14_0_2_CUSTOM_PARAMS_COUNT)
+ return -EINVAL;
+ if (custom_params[0] >= SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT)
+ return -EINVAL;
+ idx = custom_params[0] * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
+ smu->custom_profile_params[idx] = 1;
+ for (i = 1; i < custom_params_max_idx; i++)
+ smu->custom_profile_params[idx + i] = custom_params[i];
+ }
+ ret = smu_v14_0_2_set_power_profile_mode_coeff(smu,
+ smu->custom_profile_params);
+ if (ret) {
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
+ } else if (smu->custom_profile_params) {
+ memset(smu->custom_profile_params, 0, SMU_14_0_2_CUSTOM_PARAMS_SIZE);
+ }
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetWorkloadMask,
- 1 << workload_type,
- NULL);
- if (!ret)
- smu->workload_mask = 1 << workload_type;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ backend_workload_mask, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+ workload_mask);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 91ad434bcdae..0d71db7be325 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1215,3 +1215,28 @@ void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
{
policy->desc = &xgmi_plpd_policy_desc;
}
+
+void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
+ u32 workload_mask,
+ u32 *backend_workload_mask)
+{
+ int workload_type;
+ u32 profile_mode;
+
+ *backend_workload_mask = 0;
+
+ for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
+ if (!(workload_mask & (1 << profile_mode)))
+ continue;
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ profile_mode);
+
+ if (workload_type < 0)
+ continue;
+
+ *backend_workload_mask |= 1 << workload_type;
+ }
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 1de685defe85..a020277dec3e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -147,5 +147,9 @@ bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);
void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);
+void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
+ u32 workload_mask,
+ u32 *backend_workload_mask);
+
#endif
#endif
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 65b57de20203..008d86cc562a 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -3507,6 +3507,7 @@ static const struct of_device_id it6505_of_match[] = {
{ .compatible = "ite,it6505" },
{ }
};
+MODULE_DEVICE_TABLE(of, it6505_of_match);
static struct i2c_driver it6505_i2c_driver = {
.driver = {
diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
index 14a2a8473682..c491e3203bf1 100644
--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
@@ -160,11 +160,11 @@ EXPORT_SYMBOL(drm_dp_dual_mode_write);
static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
{
- static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
+ static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] =
"DP-HDMI ADAPTOR\x04";
return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
- sizeof(dp_dual_mode_hdmi_id)) == 0;
+ DP_DUAL_MODE_HDMI_ID_LEN) == 0;
}
static bool is_type1_adaptor(uint8_t adaptor_id)
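The change above exists because the literal "DP-HDMI ADAPTOR\x04" is 16 ID bytes plus an implicit NUL: sizing the array at exactly DP_DUAL_MODE_HDMI_ID_LEN drops the terminator (which newer compilers warn about), while growing it by one makes sizeof() 17, so the comparison has to be pinned to the 16 ID bytes. A small standalone illustration, assuming the macro's value is 16:

#include <stdio.h>
#include <string.h>

#define HDMI_ID_LEN 16  /* assumed value of DP_DUAL_MODE_HDMI_ID_LEN */

int main(void)
{
        /* one extra byte so the literal's implicit NUL terminator fits */
        static const char hdmi_id[HDMI_ID_LEN + 1] = "DP-HDMI ADAPTOR\x04";

        printf("sizeof = %zu, bytes compared = %d, match = %d\n",
               sizeof(hdmi_id), HDMI_ID_LEN,
               memcmp(hdmi_id, "DP-HDMI ADAPTOR\x04", HDMI_ID_LEN) == 0);
        return 0;
}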
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index ac90118b9e7a..bcf3a33123be 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -320,6 +320,9 @@ static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr
hdr->broadcast = (buf[idx] >> 7) & 0x1;
hdr->path_msg = (buf[idx] >> 6) & 0x1;
hdr->msg_len = buf[idx] & 0x3f;
+ if (hdr->msg_len < 1) /* min space for body CRC */
+ return false;
+
idx++;
hdr->somt = (buf[idx] >> 7) & 0x1;
hdr->eomt = (buf[idx] >> 6) & 0x1;
@@ -3697,8 +3700,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
ret = 0;
mgr->payload_id_table_cleared = false;
- memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
- memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
+ mgr->reset_rx_state = true;
}
out_unlock:
@@ -3856,6 +3858,11 @@ out_fail:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
+static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg)
+{
+ memset(msg, 0, sizeof(*msg));
+}
+
static bool
drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
struct drm_dp_mst_branch **mstb)
@@ -3934,6 +3941,34 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
return true;
}
+static int get_msg_request_type(u8 data)
+{
+ return data & 0x7f;
+}
+
+static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
+ const struct drm_dp_sideband_msg_tx *txmsg,
+ const struct drm_dp_sideband_msg_rx *rxmsg)
+{
+ const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr;
+ const struct drm_dp_mst_branch *mstb = txmsg->dst;
+ int tx_req_type = get_msg_request_type(txmsg->msg[0]);
+ int rx_req_type = get_msg_request_type(rxmsg->msg[0]);
+ char rad_str[64];
+
+ if (tx_req_type == rx_req_type)
+ return true;
+
+ drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str));
+ drm_dbg_kms(mgr->dev,
+ "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n",
+ mstb, hdr->seqno, mstb->lct, rad_str,
+ drm_dp_mst_req_type_str(rx_req_type), rx_req_type,
+ drm_dp_mst_req_type_str(tx_req_type), tx_req_type);
+
+ return false;
+}
+
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
@@ -3963,6 +3998,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
goto out_clear_reply;
}
+ if (!verify_rx_request_type(mgr, txmsg, msg))
+ goto out_clear_reply;
+
drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
@@ -4138,6 +4176,17 @@ out:
return 0;
}
+static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->lock);
+ if (mgr->reset_rx_state) {
+ mgr->reset_rx_state = false;
+ reset_msg_rx_state(&mgr->down_rep_recv);
+ reset_msg_rx_state(&mgr->up_req_recv);
+ }
+ mutex_unlock(&mgr->lock);
+}
+
/**
* drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
* @mgr: manager to notify irq for.
@@ -4172,6 +4221,8 @@ int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u
*handled = true;
}
+ update_msg_rx_state(mgr);
+
if (esi[1] & DP_DOWN_REP_MSG_RDY) {
ret = drm_dp_mst_handle_down_rep(mgr);
*handled = true;
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 2d84d7ea1ab7..4a73821b81f6 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -184,6 +184,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO AYANEO 2 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* AYA NEO 2021 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
@@ -196,6 +202,18 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
},
.driver_data = (void *)&lcd1080x1920_leftside_up,
+ }, { /* AYA NEO Founder */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AYA NEO Founder"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO GEEK */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GEEK"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* AYA NEO NEXT */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index 74412b7bf936..0a9ecc1380d2 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -209,6 +209,14 @@ static u32 convert_xrgb8888_to_argb2101010(u32 pix)
return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
}
+static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
+{
+ pix = ((pix & 0x00FF0000) >> 14) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x000000FF) << 22);
+ return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+}
+
/*
* convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
* @color: input color, in xrgb8888 format
@@ -242,6 +250,8 @@ static u32 convert_from_xrgb8888(u32 color, u32 format)
return convert_xrgb8888_to_xrgb2101010(color);
case DRM_FORMAT_ARGB2101010:
return convert_xrgb8888_to_argb2101010(color);
+ case DRM_FORMAT_ABGR2101010:
+ return convert_xrgb8888_to_abgr2101010(color);
default:
WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
return 0;
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 10c06440c7e7..f1bb38f4e673 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -473,6 +473,7 @@ static const struct of_device_id mcde_of_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, mcde_of_match);
static struct platform_driver mcde_driver = {
.driver = {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 86735430462f..06381c628209 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -4565,6 +4565,31 @@ static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct drm_display_mode mchp_ac69t88a_mode = {
+ .clock = 25000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 88,
+ .hsync_end = 800 + 88 + 5,
+ .htotal = 800 + 88 + 5 + 40,
+ .vdisplay = 480,
+ .vsync_start = 480 + 23,
+ .vsync_end = 480 + 23 + 5,
+ .vtotal = 480 + 23 + 5 + 1,
+};
+
+static const struct panel_desc mchp_ac69t88a = {
+ .modes = &mchp_ac69t88a_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 65,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode arm_rtsm_mode[] = {
{
.clock = 65000,
@@ -5049,6 +5074,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "yes-optoelectronics,ytc700tlag-05-201c",
.data = &yes_optoelectronics_ytc700tlag_05_201c,
}, {
+ .compatible = "microchip,ac69t88a",
+ .data = &mchp_ac69t88a,
+ }, {
/* Must be the last entry */
.compatible = "panel-dpi",
.data = &panel_dpi,
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 1b2d31c4d77c..ac77d1246b94 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2104,7 +2104,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = radeon_get_ib_value(p, idx+1) << 8;
+ offset = (u64)radeon_get_ib_value(p, idx+1) << 8;
if (offset != track->vgt_strmout_bo_offset[idx_value]) {
DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
offset, track->vgt_strmout_bo_offset[idx_value]);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index e97c6c60bc96..416590ea0dc3 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -803,6 +803,14 @@ int drm_sched_job_init(struct drm_sched_job *job,
return -EINVAL;
}
+ /*
+ * We don't know for sure how the user has allocated the job struct.
+ * Thus, zero it so that any disallowed (i.e., too early) use of
+ * pointers this function does not set is guaranteed to lead to a
+ * NULL pointer dereference instead of undefined behaviour.
+ */
+ memset(job, 0, sizeof(*job));
+
job->entity = entity;
job->credits = credits;
job->s_fence = drm_sched_fence_alloc(entity, owner);
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 7e5f14646625..06c1b81912f7 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -137,7 +137,7 @@ static void mixer_dbg_crb(struct seq_file *s, int val)
}
}
-static void mixer_dbg_mxn(struct seq_file *s, void *addr)
+static void mixer_dbg_mxn(struct seq_file *s, void __iomem *addr)
{
int i;
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index 00cd081d7873..6ee56cbd3f1b 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -254,9 +254,9 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon)
V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel);
}
+ V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask);
V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask);
V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask);
- V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask);
v3d->active_perfmon = perfmon;
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 2d7d3e90f3be..7e0a5ea7ab85 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1924,7 +1924,7 @@ static int vc4_hdmi_audio_startup(struct device *dev, void *data)
}
if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
- ret = -ENODEV;
+ ret = -ENOTSUPP;
goto out_dev_exit;
}
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 863539e1f7e0..c389e82463bf 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -974,6 +974,17 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1);
dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2);
+ /* Set AXI panic mode.
+ * VC4 panics when fewer than 2 lines are in the FIFO.
+ * VC5 panics when fewer than 1 line is in the FIFO.
+ */
+ dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK |
+ SCALER_DISPCTRL_PANIC1_MASK |
+ SCALER_DISPCTRL_PANIC2_MASK);
+ dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0);
+ dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1);
+ dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2);
+
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
/* Recompute Composite Output Buffer (COB) allocations for the displays
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 81b71903675e..7c78496e6213 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -186,6 +186,7 @@
#define VDBOX_CGCTL3F10(base) XE_REG((base) + 0x3f10)
#define IECPUNIT_CLKGATE_DIS REG_BIT(22)
+#define RAMDFTUNIT_CLKGATE_DIS REG_BIT(9)
#define VDBOX_CGCTL3F18(base) XE_REG((base) + 0x3f18)
#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index bd604b9f08e4..5404de2aea54 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -286,6 +286,9 @@
#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
#define LTCDD_CLKGATE_DIS REG_BIT(10)
+#define UNSLCGCTL9454 XE_REG(0x9454)
+#define LSCFE_CLKGATE_DIS REG_BIT(4)
+
#define XEHP_SLICE_UNIT_LEVEL_CLKGATE XE_REG_MCR(0x94d4)
#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
#define L3_CLKGATE_DIS REG_BIT(16)
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index bdb76e834e4c..5221ee3f1214 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -6,6 +6,7 @@
#include "xe_devcoredump.h"
#include "xe_devcoredump_types.h"
+#include <linux/ascii85.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
@@ -85,9 +86,9 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
p = drm_coredump_printer(&iter);
- drm_printf(&p, "**** Xe Device Coredump ****\n");
- drm_printf(&p, "kernel: " UTS_RELEASE "\n");
- drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+ drm_puts(&p, "**** Xe Device Coredump ****\n");
+ drm_puts(&p, "kernel: " UTS_RELEASE "\n");
+ drm_puts(&p, "module: " KBUILD_MODNAME "\n");
ts = ktime_to_timespec64(ss->snapshot_time);
drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
@@ -96,20 +97,25 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
drm_printf(&p, "Process: %s\n", ss->process_name);
xe_device_snapshot_print(xe, &p);
- drm_printf(&p, "\n**** GuC CT ****\n");
- xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p);
- xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p);
+ drm_printf(&p, "\n**** GT #%d ****\n", ss->gt->info.id);
+ drm_printf(&p, "\tTile: %d\n", ss->gt->tile->id);
- drm_printf(&p, "\n**** Job ****\n");
- xe_sched_job_snapshot_print(coredump->snapshot.job, &p);
+ drm_puts(&p, "\n**** GuC CT ****\n");
+ xe_guc_ct_snapshot_print(ss->ct, &p);
- drm_printf(&p, "\n**** HW Engines ****\n");
+ drm_puts(&p, "\n**** Contexts ****\n");
+ xe_guc_exec_queue_snapshot_print(ss->ge, &p);
+
+ drm_puts(&p, "\n**** Job ****\n");
+ xe_sched_job_snapshot_print(ss->job, &p);
+
+ drm_puts(&p, "\n**** HW Engines ****\n");
for (i = 0; i < XE_NUM_HW_ENGINES; i++)
- if (coredump->snapshot.hwe[i])
- xe_hw_engine_snapshot_print(coredump->snapshot.hwe[i],
- &p);
- drm_printf(&p, "\n**** VM state ****\n");
- xe_vm_snapshot_print(coredump->snapshot.vm, &p);
+ if (ss->hwe[i])
+ xe_hw_engine_snapshot_print(ss->hwe[i], &p);
+
+ drm_puts(&p, "\n**** VM state ****\n");
+ xe_vm_snapshot_print(ss->vm, &p);
return count - iter.remain;
}
@@ -141,13 +147,15 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
{
struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
+ unsigned int fw_ref;
/* keep going if fw fails as we still want to save the memory and SW data */
- if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL))
+ fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
xe_vm_snapshot_capture_delayed(ss->vm);
xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
- xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
/* Calculate devcoredump size */
ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
@@ -220,8 +228,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
u32 width_mask = (0x1 << q->width) - 1;
const char *process_name = "no process";
- int i;
+ unsigned int fw_ref;
bool cookie;
+ int i;
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
@@ -244,26 +253,25 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
}
/* keep going if fw fails as we still want to save the memory and SW data */
- if (xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL))
- xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+ fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
- coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
- coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
- coredump->snapshot.job = xe_sched_job_snapshot_capture(job);
- coredump->snapshot.vm = xe_vm_snapshot_capture(q->vm);
+ ss->ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
+ ss->ge = xe_guc_exec_queue_snapshot_capture(q);
+ ss->job = xe_sched_job_snapshot_capture(job);
+ ss->vm = xe_vm_snapshot_capture(q->vm);
for_each_hw_engine(hwe, q->gt, id) {
if (hwe->class != q->hwe->class ||
!(BIT(hwe->logical_instance) & adj_logical_mask)) {
- coredump->snapshot.hwe[id] = NULL;
+ ss->hwe[id] = NULL;
continue;
}
- coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
+ ss->hwe[id] = xe_hw_engine_snapshot_capture(hwe);
}
queue_work(system_unbound_wq, &ss->work);
- xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
dma_fence_end_signalling(cookie);
}
@@ -310,3 +318,89 @@ int xe_devcoredump_init(struct xe_device *xe)
}
#endif
+
+/**
+ * xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
+ *
+ * The output is split into multiple lines because some print targets, e.g.
+ * dmesg, cannot handle arbitrarily long lines. Note also that printing to
+ * dmesg in piecemeal fashion is not possible: each separate call to drm_puts()
+ * gets a line feed automatically appended. Therefore, the entire output line
+ * must be constructed in a local buffer first, then printed in one atomic
+ * output call.
+ *
+ * There is also a scheduler yield call to prevent the 'task has been stuck for
+ * 120s' kernel hang check feature from firing when printing to a slow target
+ * such as dmesg over a serial port.
+ *
+ * TODO: Add compression prior to the ASCII85 encoding to shrink huge buffers down.
+ *
+ * @p: the printer object to output to
+ * @prefix: optional prefix to add to output string
+ * @blob: the Binary Large OBject to dump out
+ * @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
+ * @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
+ */
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+ const void *blob, size_t offset, size_t size)
+{
+ const u32 *blob32 = (const u32 *)blob;
+ char buff[ASCII85_BUFSZ], *line_buff;
+ size_t line_pos = 0;
+
+#define DMESG_MAX_LINE_LEN 800
+#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */
+
+ if (size & 3)
+ drm_printf(p, "Size not word aligned: %zu", size);
+ if (offset & 3)
+ drm_printf(p, "Offset not word aligned: %zu", size);
+
+ line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(line_buff)) {
+ drm_printf(p, "Failed to allocate line buffer: %pe", line_buff);
+ return;
+ }
+
+ blob32 += offset / sizeof(*blob32);
+ size /= sizeof(*blob32);
+
+ if (prefix) {
+ strscpy(line_buff, prefix, DMESG_MAX_LINE_LEN - MIN_SPACE - 2);
+ line_pos = strlen(line_buff);
+
+ line_buff[line_pos++] = ':';
+ line_buff[line_pos++] = ' ';
+ }
+
+ while (size--) {
+ u32 val = *(blob32++);
+
+ strscpy(line_buff + line_pos, ascii85_encode(val, buff),
+ DMESG_MAX_LINE_LEN - line_pos);
+ line_pos += strlen(line_buff + line_pos);
+
+ if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
+ line_buff[line_pos++] = '\n';
+ line_buff[line_pos++] = 0;
+
+ drm_puts(p, line_buff);
+
+ line_pos = 0;
+
+ /* Prevent 'stuck thread' time out errors */
+ cond_resched();
+ }
+ }
+
+ if (line_pos) {
+ line_buff[line_pos++] = '\n';
+ line_buff[line_pos++] = 0;
+
+ drm_puts(p, line_buff);
+ }
+
+ kfree(line_buff);
+
+#undef MIN_SPACE
+#undef DMESG_MAX_LINE_LEN
+}
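
A minimal userspace sketch of the chunk-and-flush approach described in the kernel-doc above, assuming a simplified Ascii85 encoder in place of ascii85_encode() from <linux/ascii85.h>: each 32-bit word is encoded, appended to a line buffer, and the line is emitted in one call before it could exceed the output target's limit. Names and the line length are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_LINE_LEN 80
#define A85_BUFSZ 6             /* 5 chars + NUL; the 'z' shortcut is ignored */

/* Encode one 32-bit word as 5 Ascii85 characters (most significant first). */
static const char *a85_encode(uint32_t v, char out[A85_BUFSZ])
{
        int i;

        for (i = 4; i >= 0; i--) {
                out[i] = '!' + (v % 85);
                v /= 85;
        }
        out[5] = '\0';
        return out;
}

/* Dump a u32-aligned blob, flushing whole lines so no line exceeds the limit. */
static void print_blob(const uint32_t *blob, size_t nwords)
{
        char enc[A85_BUFSZ], line[MAX_LINE_LEN];
        size_t pos = 0;

        while (nwords--) {
                a85_encode(*blob++, enc);
                memcpy(line + pos, enc, 5);
                pos += 5;

                /* Flush before the next word could overflow the line. */
                if (pos + 5 + 1 >= MAX_LINE_LEN) {
                        line[pos] = '\0';
                        puts(line);     /* one atomic output call per line */
                        pos = 0;
                }
        }
        if (pos) {
                line[pos] = '\0';
                puts(line);
        }
}

int main(void)
{
        uint32_t data[64];
        size_t i;

        for (i = 0; i < 64; i++)
                data[i] = (uint32_t)i * 0x01010101u;

        print_blob(data, 64);
        return 0;
}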
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index e2fa65ce0932..a4eebc285fc8 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -6,6 +6,9 @@
#ifndef _XE_DEVCOREDUMP_H_
#define _XE_DEVCOREDUMP_H_
+#include <linux/types.h>
+
+struct drm_printer;
struct xe_device;
struct xe_sched_job;
@@ -23,4 +26,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe)
}
#endif
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+ const void *blob, size_t offset, size_t size);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 440d05d77a5a..3cc2f095fdfb 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -37,7 +37,8 @@ struct xe_devcoredump_snapshot {
/* GuC snapshots */
/** @ct: GuC CT snapshot */
struct xe_guc_ct_snapshot *ct;
- /** @ge: Guc Engine snapshot */
+
+ /** @ge: GuC Submission Engine snapshot */
struct xe_guc_submit_exec_queue_snapshot *ge;
/** @hwe: HW Engine snapshot array */
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index a1987b554a8d..bb85208cf1a9 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -919,6 +919,7 @@ void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
for_each_gt(gt, xe, id) {
drm_printf(p, "GT id: %u\n", id);
+ drm_printf(p, "\tTile: %u\n", gt->tile->id);
drm_printf(p, "\tType: %s\n",
gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
drm_printf(p, "\tIP ver: %u.%u.%u\n",
diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h
index a2577672f4e3..1608a55edc84 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.h
+++ b/drivers/gpu/drm/xe/xe_force_wake.h
@@ -46,4 +46,20 @@ xe_force_wake_assert_held(struct xe_force_wake *fw,
xe_gt_assert(fw->gt, fw->awake_domains & domain);
}
+/**
+ * xe_force_wake_ref_has_domain - verify that a domain is held in a forcewake reference
+ * @fw_ref: the force_wake reference
+ * @domain: forcewake domain to verify
+ *
+ * This function confirms whether the @fw_ref includes a reference to the
+ * specified @domain.
+ *
+ * Return: true if domain is refcounted.
+ */
+static inline bool
+xe_force_wake_ref_has_domain(unsigned int fw_ref, enum xe_force_wake_domains domain)
+{
+ return fw_ref & domain;
+}
+
#endif
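
To illustrate the reference-mask pattern the devcoredump changes above adopt with this helper, here is a hedged userspace sketch with hypothetical domain names: acquisition returns the mask of domains actually obtained, the helper checks a bit in that mask, and the same mask, not the requested constant, is handed back on release.

#include <stdbool.h>
#include <stdio.h>

enum fw_domain {
        FW_GT     = 1u << 0,
        FW_RENDER = 1u << 1,
        FW_MEDIA  = 1u << 2,
};

static unsigned int hw_awake;   /* stand-in for the hardware forcewake state */

/* Try to wake the requested domains; return the mask actually acquired. */
static unsigned int fw_get(unsigned int domains)
{
        unsigned int acquired = domains & ~FW_MEDIA;    /* pretend MEDIA fails */

        hw_awake |= acquired;
        return acquired;
}

/* Mirror of the bitwise check in xe_force_wake_ref_has_domain(). */
static bool fw_ref_has_domain(unsigned int fw_ref, enum fw_domain domain)
{
        return fw_ref & domain;
}

/* Release exactly what was acquired, not what was requested. */
static void fw_put(unsigned int fw_ref)
{
        hw_awake &= ~fw_ref;
}

int main(void)
{
        unsigned int fw_ref = fw_get(FW_GT | FW_RENDER | FW_MEDIA);

        if (!fw_ref_has_domain(fw_ref, FW_MEDIA))
                puts("media forcewake not held, skipping media capture");

        if (fw_ref_has_domain(fw_ref, FW_RENDER))
                puts("render forcewake held, capturing render state");

        fw_put(fw_ref);
        printf("hw_awake after put: 0x%x\n", hw_awake);
        return 0;
}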
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 0662f71c6ede..3e113422b88d 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -5,6 +5,7 @@
#include "xe_gt_topology.h"
+#include <generated/xe_wa_oob.h>
#include <linux/bitmap.h>
#include <linux/compiler.h>
@@ -12,6 +13,7 @@
#include "xe_assert.h"
#include "xe_gt.h"
#include "xe_mmio.h"
+#include "xe_wa.h"
static void
load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
@@ -129,6 +131,18 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
struct xe_device *xe = gt_to_xe(gt);
u32 fuse3 = xe_mmio_read32(gt, MIRROR_FUSE3);
+ /*
+ * PTL platforms with media version 30.00 do not provide proper values
+ * for the media GT's L3 bank registers. Skip the readout since we
+ * don't have any way to obtain real values.
+ *
+ * This may get re-described as an official workaround in the future,
+ * but there's no tracking number assigned yet so we use a custom
+ * OOB workaround descriptor.
+ */
+ if (XE_WA(gt, no_media_l3))
+ return;
+
if (GRAPHICS_VER(xe) >= 20) {
xe_l3_bank_mask_t per_node = {};
u32 meml3_en = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, fuse3);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 9c505d3517cd..cd6a5f09d631 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -907,6 +907,24 @@ retry_same_fence:
}
/*
+ * Occasionally the G2H worker is observed to start running more than a second
+ * after being queued and activated by the Linux workqueue subsystem, which leads
+ * to a G2H timeout error. The root cause of the issue lies with the scheduling
+ * latency of the Lunar Lake hybrid CPU. The issue disappears if the Lunar Lake
+ * Atom cores are disabled in the BIOS, which is beyond the xe KMD's control.
+ *
+ * TODO: Drop this change once the workqueue scheduling delay issue is fixed on LNL hybrid CPUs.
+ */
+ if (!ret) {
+ flush_work(&ct->g2h_worker);
+ if (g2h_fence.done) {
+ xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
+ g2h_fence.seqno, action[0]);
+ ret = 1;
+ }
+ }
+
+ /*
* Ensure we serialize with completion side to prevent UAF with fence going out of scope on
* the stack, since we have no clue if it will fire after the timeout before we can erase
* from the xa. Also we have some dependent loads and stores below for which we need the
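
A small userspace sketch of the flush-and-recheck fallback used above, with illustrative names and timings: when the wait times out, the waiter flushes the worker (pthread_join() standing in for flush_work()) and re-checks the done flag before treating the request as failed.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;

/* Stand-in for the G2H worker: completes the request after a long delay. */
static void *worker(void *arg)
{
        (void)arg;
        sleep(2);                       /* simulated scheduling latency */
        pthread_mutex_lock(&lock);
        done = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t thread;
        struct timespec deadline;
        bool timed_out;
        int ret = 0;

        pthread_create(&thread, NULL, worker, NULL);

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1;           /* 1s timeout, shorter than the delay */

        pthread_mutex_lock(&lock);
        while (!done && ret == 0)
                ret = pthread_cond_timedwait(&cond, &lock, &deadline);
        timed_out = !done;
        pthread_mutex_unlock(&lock);

        if (timed_out) {
                /* Flush the worker and re-check before declaring failure. */
                pthread_join(thread, NULL);     /* analogous to flush_work() */
                if (done) {
                        puts("completed after flush, not a real timeout");
                        return 0;
                }
                puts("real timeout");
                return 1;
        }

        pthread_join(thread, NULL);
        puts("completed within the timeout");
        return 0;
}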
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index a37ee3419428..be47780ec2a7 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -6,9 +6,12 @@
#include "xe_guc_log.h"
#include <drm/drm_managed.h>
+#include <linux/vmalloc.h>
#include "xe_bo.h"
+#include "xe_devcoredump.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_map.h"
#include "xe_module.h"
@@ -49,32 +52,35 @@ static size_t guc_log_size(void)
CAPTURE_BUFFER_SIZE;
}
+/**
+ * xe_guc_log_print - dump a copy of the GuC log to some useful location
+ * @log: GuC log structure
+ * @p: the printer object to output to
+ */
void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p)
{
struct xe_device *xe = log_to_xe(log);
size_t size;
- int i, j;
+ void *copy;
- xe_assert(xe, log->bo);
+ if (!log->bo) {
+ drm_puts(p, "GuC log buffer not allocated");
+ return;
+ }
size = log->bo->size;
-#define DW_PER_READ 128
- xe_assert(xe, !(size % (DW_PER_READ * sizeof(u32))));
- for (i = 0; i < size / sizeof(u32); i += DW_PER_READ) {
- u32 read[DW_PER_READ];
-
- xe_map_memcpy_from(xe, read, &log->bo->vmap, i * sizeof(u32),
- DW_PER_READ * sizeof(u32));
-#define DW_PER_PRINT 4
- for (j = 0; j < DW_PER_READ / DW_PER_PRINT; ++j) {
- u32 *print = read + j * DW_PER_PRINT;
-
- drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
- *(print + 0), *(print + 1),
- *(print + 2), *(print + 3));
- }
+ copy = vmalloc(size);
+ if (!copy) {
+ drm_printf(p, "Failed to allocate %zu", size);
+ return;
}
+
+ xe_map_memcpy_from(xe, copy, &log->bo->vmap, 0, size);
+
+ xe_print_blob_ascii85(p, "Log data", copy, 0, size);
+
+ vfree(copy);
}
int xe_guc_log_init(struct xe_guc_log *log)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 2927745d6895..fed23304e4da 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -2193,7 +2193,7 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
if (!snapshot)
return;
- drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id);
+ drm_printf(p, "GuC ID: %d\n", snapshot->guc.id);
drm_printf(p, "\tName: %s\n", snapshot->name);
drm_printf(p, "\tClass: %d\n", snapshot->class);
drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index c9c3beb3ce8d..547919e8ce9e 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -1053,7 +1053,6 @@ void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
drm_printf(p, "\tRCU_MODE: 0x%08x\n",
snapshot->reg.rcu_mode);
- drm_puts(p, "\n");
}
/**
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 5e962e72c97e..025d64943467 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -383,10 +383,12 @@ static const struct pci_device_id pciidlist[] = {
XE_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
XE_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
XE_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
+ XE_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
XE_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
XE_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
XE_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
XE_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
+ XE_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 848da8e68c7a..1c96375bd7df 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -9,6 +9,7 @@
#include <linux/sched/clock.h>
#include <drm/ttm/ttm_placement.h>
+#include <generated/xe_wa_oob.h>
#include <uapi/drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
@@ -23,6 +24,7 @@
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_wa.h"
static const u16 xe_to_user_engine_class[] = {
[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
@@ -458,12 +460,23 @@ static int query_hwconfig(struct xe_device *xe,
static size_t calc_topo_query_size(struct xe_device *xe)
{
- return xe->info.gt_count *
- (4 * sizeof(struct drm_xe_query_topology_mask) +
- sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
- sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
- sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask) +
- sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
+ struct xe_gt *gt;
+ size_t query_size = 0;
+ int id;
+
+ for_each_gt(gt, xe, id) {
+ query_size += 3 * sizeof(struct drm_xe_query_topology_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);
+
+ /* L3bank mask may not be available for some GTs */
+ if (!XE_WA(gt, no_media_l3))
+ query_size += sizeof(struct drm_xe_query_topology_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
+ }
+
+ return query_size;
}
static int copy_mask(void __user **ptr,
@@ -516,11 +529,18 @@ static int query_gt_topology(struct xe_device *xe,
if (err)
return err;
- topo.type = DRM_XE_TOPO_L3_BANK;
- err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
- sizeof(gt->fuse_topo.l3_bank_mask));
- if (err)
- return err;
+ /*
+ * If the kernel doesn't have a way to obtain a correct L3bank
+ * mask, then it's better to omit L3 from the query rather than
+ * reporting bogus or zeroed information to userspace.
+ */
+ if (!XE_WA(gt, no_media_l3)) {
+ topo.type = DRM_XE_TOPO_L3_BANK;
+ err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
+ sizeof(gt->fuse_topo.l3_bank_mask));
+ if (err)
+ return err;
+ }
topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
DRM_XE_TOPO_SIMD16_EU_PER_DSS :
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 353936a0f877..37e592b2bf06 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -251,6 +251,34 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
+ /* Xe3_LPG */
+
+ { XE_RTP_NAME("14021871409"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(UNSLCGCTL9454, LSCFE_CLKGATE_DIS))
+ },
+
+ /* Xe3_LPM */
+
+ { XE_RTP_NAME("16021867713"),
+ XE_RTP_RULES(MEDIA_VERSION(3000),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("16021865536"),
+ XE_RTP_RULES(MEDIA_VERSION(3000),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("14021486841"),
+ XE_RTP_RULES(MEDIA_VERSION(3000), MEDIA_STEP(A0, B0),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), RAMDFTUNIT_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+
{}
};
@@ -567,6 +595,13 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
+ /* Xe3_LPG */
+
+ { XE_RTP_NAME("14021402888"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
+ },
+
{}
};
@@ -742,6 +777,18 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
},
+ /* Xe3_LPG */
+ { XE_RTP_NAME("14021490052"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(FF_MODE,
+ DIS_MESH_PARTIAL_AUTOSTRIP |
+ DIS_MESH_AUTOSTRIP),
+ SET(VFLSKPD,
+ DIS_PARTIAL_AUTOSTRIP |
+ DIS_AUTOSTRIP))
+ },
+
{}
};
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 920ca5060146..264d6e116499 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -33,7 +33,9 @@
GRAPHICS_VERSION(2004)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
+ MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
22019338487_display PLATFORM(LUNARLAKE)
16023588340 GRAPHICS_VERSION(2001)
14019789679 GRAPHICS_VERSION(1255)
GRAPHICS_VERSION_RANGE(1270, 2004)
+no_media_l3 MEDIA_VERSION(3000)
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 582fd234eec7..935ccc38d129 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2674,9 +2674,10 @@ static bool hid_check_device_match(struct hid_device *hdev,
/*
* hid-generic implements .match(), so we must be dealing with a
* different HID driver here, and can simply check if
- * hid_ignore_special_drivers is set or not.
+ * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER
+ * are set or not.
*/
- return !hid_ignore_special_drivers;
+ return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
}
static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c
index d2439399fb35..9e04c6d0fcc8 100644
--- a/drivers/hid/hid-generic.c
+++ b/drivers/hid/hid-generic.c
@@ -40,6 +40,9 @@ static bool hid_generic_match(struct hid_device *hdev,
if (ignore_special_driver)
return true;
+ if (hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER)
+ return true;
+
if (hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)
return false;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 92cff3f2658c..0f23be98c56e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -94,6 +94,7 @@
#define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265
+#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC 0x0324
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f
#define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 8a73b59e0827..ec110dea8772 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -227,7 +227,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
touch_minor = tdata[4];
state = tdata[7] & TOUCH_STATE_MASK;
down = state != TOUCH_STATE_NONE;
- } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ input->id.product ==
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
id = tdata[8] & 0xf;
x = (tdata[1] << 27 | tdata[0] << 19) >> 19;
y = -((tdata[3] << 30 | tdata[2] << 22 | tdata[1] << 14) >> 19);
@@ -259,8 +261,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
/* If requested, emulate a scroll wheel by detecting small
* vertical touch motions.
*/
- if (emulate_scroll_wheel && (input->id.product !=
- USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)) {
+ if (emulate_scroll_wheel &&
+ input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
unsigned long now = jiffies;
int step_x = msc->touches[id].scroll_x - x;
int step_y = msc->touches[id].scroll_y - y;
@@ -359,7 +362,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ input->id.product ==
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC)
input_report_abs(input, ABS_MT_PRESSURE, pressure);
if (report_undeciphered) {
@@ -367,7 +372,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
input_event(input, EV_MSC, MSC_RAW, tdata[7]);
else if (input->id.product !=
- USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ input->id.product !=
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC)
input_event(input, EV_MSC, MSC_RAW, tdata[8]);
}
}
@@ -493,7 +500,9 @@ static int magicmouse_raw_event(struct hid_device *hdev,
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
- } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ input->id.product ==
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
input_mt_sync_frame(input);
input_report_key(input, BTN_MOUSE, clicks & 1);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@@ -545,7 +554,9 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(REL_WHEEL_HI_RES, input->relbit);
__set_bit(REL_HWHEEL_HI_RES, input->relbit);
}
- } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ input->id.product ==
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
/* If the trackpad has been connected to a Mac, the name is
* automatically personalized, e.g., "José Expósito's Trackpad".
* When connected through Bluetooth, the personalized name is
@@ -621,7 +632,9 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
MOUSE_RES_X);
input_abs_set_res(input, ABS_MT_POSITION_Y,
MOUSE_RES_Y);
- } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ input->id.product ==
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
input_set_abs_params(input, ABS_MT_PRESSURE, 0, 253, 0, 0);
input_set_abs_params(input, ABS_PRESSURE, 0, 253, 0, 0);
input_set_abs_params(input, ABS_MT_ORIENTATION, -3, 4, 0, 0);
@@ -660,7 +673,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
input_set_events_per_packet(input, 60);
if (report_undeciphered &&
- input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
__set_bit(EV_MSC, input->evbit);
__set_bit(MSC_RAW, input->mscbit);
}
@@ -685,7 +699,9 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
/* Magic Trackpad does not give relative data after switching to MT */
if ((hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD ||
- hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) &&
+ hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ hi->input->id.product ==
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
field->flags & HID_MAIN_ITEM_RELATIVE)
return -1;
@@ -721,7 +737,8 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev)
int ret;
int feature_size;
- if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
if (hdev->vendor == BT_VENDOR_ID_APPLE) {
feature_size = sizeof(feature_mt_trackpad2_bt);
feature = feature_mt_trackpad2_bt;
@@ -766,7 +783,8 @@ static int magicmouse_fetch_battery(struct hid_device *hdev)
if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE ||
(hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
- hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2))
+ hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC))
return -1;
report_enum = &hdev->report_enum[hdev->battery_report_type];
@@ -835,7 +853,9 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->vendor == USB_VENDOR_ID_APPLE &&
(id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
- (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && hdev->type != HID_TYPE_USBMOUSE)))
+ ((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
+ hdev->type != HID_TYPE_USBMOUSE)))
return 0;
if (!msc->input) {
@@ -850,7 +870,8 @@ static int magicmouse_probe(struct hid_device *hdev,
else if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
report = hid_register_report(hdev, HID_INPUT_REPORT,
MOUSE2_REPORT_ID, 0);
- else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
if (id->vendor == BT_VENDOR_ID_APPLE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD2_BT_REPORT_ID, 0);
@@ -920,7 +941,8 @@ static const __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
if (hdev->vendor == USB_VENDOR_ID_APPLE &&
(hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
- hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) &&
+ hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
*rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) {
hid_info(hdev,
"fixing up magicmouse battery report descriptor\n");
@@ -951,6 +973,10 @@ static const struct hid_device_id magic_mice[] = {
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 },
{ }
};
MODULE_DEVICE_TABLE(hid, magic_mice);
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 43664a24176f..4e87380d3edd 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -414,7 +414,19 @@ static int i2c_hid_set_power(struct i2c_hid *ihid, int power_state)
i2c_hid_dbg(ihid, "%s\n", __func__);
+ /*
+ * Some STM-based devices need 400µs after a rising clock edge to wake
+ * from deep sleep, in which case the first request will fail due to
+ * the address not being acknowledged. Try after a short sleep to see
+ * if the device came alive on the bus. Certain Weida Tech devices also
+ * need this.
+ */
ret = i2c_hid_set_power_command(ihid, power_state);
+ if (ret && power_state == I2C_HID_PWR_ON) {
+ usleep_range(400, 500);
+ ret = i2c_hid_set_power_command(ihid, I2C_HID_PWR_ON);
+ }
+
if (ret)
dev_err(&ihid->client->dev,
"failed to change power setting.\n");
@@ -976,14 +988,6 @@ static int i2c_hid_core_resume(struct i2c_hid *ihid)
enable_irq(client->irq);
- /* Make sure the device is awake on the bus */
- ret = i2c_hid_probe_address(ihid);
- if (ret < 0) {
- dev_err(&client->dev, "nothing at address after resume: %d\n",
- ret);
- return -ENXIO;
- }
-
/* On Goodix 27c6:0d42 wait extra time before device wakeup.
* It's not clear why but if we send wakeup too early, the device will
* never trigger input interrupts.
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 2bc45b24075c..9843b52bd017 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2241,7 +2241,8 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
if (hid_is_usb(wacom->hdev)) {
struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
struct usb_device *dev = interface_to_usbdev(intf);
- product_name = dev->product;
+ if (dev->product != NULL)
+ product_name = dev->product;
}
if (wacom->hdev->bus == BUS_I2C) {
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index 096f1daa8f2b..1218a3b449a8 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -1350,6 +1350,8 @@ static const char * const asus_msi_boards[] = {
"Pro H610M-CT D4",
"Pro H610T D4",
"Pro Q670M-C",
+ "Pro WS 600M-CL",
+ "Pro WS 665-ACE",
"Pro WS W680-ACE",
"Pro WS W680-ACE IPMI",
"Pro WS W790-ACE",
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6b3ba7e5723a..2254abda5c46 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -160,6 +160,7 @@ config I2C_I801
Meteor Lake (SOC and PCH)
Birch Stream (SOC)
Arrow Lake (SOC)
+ Panther Lake (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 299fe9d3afab..75dab01d43a7 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -81,6 +81,8 @@
* Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes
* Birch Stream (SOC) 0x5796 32 hard yes yes yes
* Arrow Lake-H (SOC) 0x7722 32 hard yes yes yes
+ * Panther Lake-H (SOC) 0xe322 32 hard yes yes yes
+ * Panther Lake-P (SOC) 0xe422 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -261,6 +263,8 @@
#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
#define PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS 0xa3a3
#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_SOC_S_SMBUS 0xae22
+#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_H_SMBUS 0xe322
+#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_P_SMBUS 0xe422
struct i801_mux_config {
char *gpio_chip;
@@ -1055,6 +1059,8 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ 0, }
};
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index da83c49223b3..42310c9a00c2 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -282,7 +282,8 @@ static int i3c_device_uevent(const struct device *dev, struct kobj_uevent_env *e
struct i3c_device_info devinfo;
u16 manuf, part, ext;
- i3c_device_get_info(i3cdev, &devinfo);
+ if (i3cdev->desc)
+ devinfo = i3cdev->desc->info;
manuf = I3C_PID_MANUF_ID(devinfo.pid);
part = I3C_PID_PART_ID(devinfo.pid);
ext = I3C_PID_EXTRA_INFO(devinfo.pid);
@@ -345,10 +346,10 @@ const struct bus_type i3c_bus_type = {
EXPORT_SYMBOL_GPL(i3c_bus_type);
static enum i3c_addr_slot_status
-i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
+i3c_bus_get_addr_slot_status_mask(struct i3c_bus *bus, u16 addr, u32 mask)
{
unsigned long status;
- int bitpos = addr * 2;
+ int bitpos = addr * I3C_ADDR_SLOT_STATUS_BITS;
if (addr > I2C_MAX_ADDR)
return I3C_ADDR_SLOT_RSVD;
@@ -356,22 +357,33 @@ i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
status = bus->addrslots[bitpos / BITS_PER_LONG];
status >>= bitpos % BITS_PER_LONG;
- return status & I3C_ADDR_SLOT_STATUS_MASK;
+ return status & mask;
}
-static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
- enum i3c_addr_slot_status status)
+static enum i3c_addr_slot_status
+i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
+{
+ return i3c_bus_get_addr_slot_status_mask(bus, addr, I3C_ADDR_SLOT_STATUS_MASK);
+}
+
+static void i3c_bus_set_addr_slot_status_mask(struct i3c_bus *bus, u16 addr,
+ enum i3c_addr_slot_status status, u32 mask)
{
- int bitpos = addr * 2;
+ int bitpos = addr * I3C_ADDR_SLOT_STATUS_BITS;
unsigned long *ptr;
if (addr > I2C_MAX_ADDR)
return;
ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
- *ptr &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK <<
- (bitpos % BITS_PER_LONG));
- *ptr |= (unsigned long)status << (bitpos % BITS_PER_LONG);
+ *ptr &= ~((unsigned long)mask << (bitpos % BITS_PER_LONG));
+ *ptr |= ((unsigned long)status & mask) << (bitpos % BITS_PER_LONG);
+}
+
+static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
+ enum i3c_addr_slot_status status)
+{
+ i3c_bus_set_addr_slot_status_mask(bus, addr, status, I3C_ADDR_SLOT_STATUS_MASK);
}
static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
@@ -383,13 +395,44 @@ static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
return status == I3C_ADDR_SLOT_FREE;
}
+/*
+ * ┌────┬─────────────┬───┬─────────┬───┐
+ * │S/Sr│ 7'h7E RnW=0 │ACK│ ENTDAA │ T ├────┐
+ * └────┴─────────────┴───┴─────────┴───┘ │
+ * ┌─────────────────────────────────────────┘
+ * │ ┌──┬─────────────┬───┬─────────────────┬────────────────┬───┬─────────┐
+ * └─►│Sr│7'h7E RnW=1 │ACK│48bit UID BCR DCR│Assign 7bit Addr│PAR│ ACK/NACK│
+ * └──┴─────────────┴───┴─────────────────┴────────────────┴───┴─────────┘
+ * Some master controllers (such as HCI) need to prepare the entire above transaction before
+ * sending it out to the I3C bus. This means that a 7-bit dynamic address needs to be allocated
+ * before knowing the target device's UID information.
+ *
+ * However, some I3C targets may request specific addresses (called "init_dyn_addr"), typically
+ * specified by the DT's assigned-address property, with lower addresses having higher IBI
+ * priority. If one is available, i3c_bus_get_free_addr() preferably returns a free address that
+ * is not in the list of desired addresses (the "init_dyn_addr"s). This allows a device with an
+ * "init_dyn_addr" to switch to that address when it hot-joins the I3C bus. Otherwise, if the
+ * "init_dyn_addr" is already in use by another I3C device, the target device will not be able
+ * to switch to its desired address.
+ *
+ * If the previous step fails, fall back to returning one of the remaining unassigned addresses,
+ * regardless of its state in the desired list.
+ */
static int i3c_bus_get_free_addr(struct i3c_bus *bus, u8 start_addr)
{
enum i3c_addr_slot_status status;
u8 addr;
for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) {
- status = i3c_bus_get_addr_slot_status(bus, addr);
+ status = i3c_bus_get_addr_slot_status_mask(bus, addr,
+ I3C_ADDR_SLOT_EXT_STATUS_MASK);
+ if (status == I3C_ADDR_SLOT_FREE)
+ return addr;
+ }
+
+ for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) {
+ status = i3c_bus_get_addr_slot_status_mask(bus, addr,
+ I3C_ADDR_SLOT_STATUS_MASK);
if (status == I3C_ADDR_SLOT_FREE)
return addr;
}
@@ -1506,16 +1549,9 @@ static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
u8 old_dyn_addr)
{
struct i3c_master_controller *master = i3c_dev_get_master(dev);
- enum i3c_addr_slot_status status;
int ret;
- if (dev->info.dyn_addr != old_dyn_addr &&
- (!dev->boardinfo ||
- dev->info.dyn_addr != dev->boardinfo->init_dyn_addr)) {
- status = i3c_bus_get_addr_slot_status(&master->bus,
- dev->info.dyn_addr);
- if (status != I3C_ADDR_SLOT_FREE)
- return -EBUSY;
+ if (dev->info.dyn_addr != old_dyn_addr) {
i3c_bus_set_addr_slot_status(&master->bus,
dev->info.dyn_addr,
I3C_ADDR_SLOT_I3C_DEV);
@@ -1918,9 +1954,11 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
goto err_rstdaa;
}
- i3c_bus_set_addr_slot_status(&master->bus,
- i3cboardinfo->init_dyn_addr,
- I3C_ADDR_SLOT_I3C_DEV);
+ /* Do not mark as occupied until a real device exists on the bus */
+ i3c_bus_set_addr_slot_status_mask(&master->bus,
+ i3cboardinfo->init_dyn_addr,
+ I3C_ADDR_SLOT_EXT_DESIRED,
+ I3C_ADDR_SLOT_EXT_STATUS_MASK);
/*
* Only try to create/attach devices that have a static
@@ -2088,7 +2126,8 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
else
expected_dyn_addr = newdev->info.dyn_addr;
- if (newdev->info.dyn_addr != expected_dyn_addr) {
+ if (newdev->info.dyn_addr != expected_dyn_addr &&
+ i3c_bus_get_addr_slot_status(&master->bus, expected_dyn_addr) == I3C_ADDR_SLOT_FREE) {
/*
* Try to apply the expected dynamic address. If it fails, keep
* the address assigned by the master.
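
A minimal userspace sketch of the two-pass search described in the comment above: the first pass skips slots that are occupied or merely desired by a not-yet-present device; only if that finds nothing does the second pass hand out a desired-but-unclaimed address. The slot encoding here is illustrative; the driver packs per-address status bits into the bus addrslots words and queries them through status masks.

#include <stdio.h>

#define MAX_ADDR 0x7f

enum slot_status { SLOT_FREE = 0, SLOT_RSVD, SLOT_I2C, SLOT_I3C, SLOT_DESIRED };

static enum slot_status slots[MAX_ADDR + 1];

/* Prefer addresses nobody has asked for; fall back to any unassigned slot. */
static int get_free_addr(int start)
{
        int addr;

        for (addr = start; addr <= MAX_ADDR; addr++)
                if (slots[addr] == SLOT_FREE)
                        return addr;    /* free and not on anyone's wish list */

        for (addr = start; addr <= MAX_ADDR; addr++)
                if (slots[addr] == SLOT_FREE || slots[addr] == SLOT_DESIRED)
                        return addr;    /* unassigned, even if desired elsewhere */

        return -1;
}

int main(void)
{
        int a;

        slots[0x08] = SLOT_I3C;         /* already assigned */
        slots[0x09] = SLOT_DESIRED;     /* requested via assigned-address */

        printf("first pick: 0x%02x\n", get_free_addr(0x08));    /* 0x0a */

        for (a = 0x0a; a <= MAX_ADDR; a++)
                slots[a] = SLOT_I3C;    /* exhaust everything else */

        printf("fallback pick: 0x%02x\n", get_free_addr(0x08)); /* 0x09 */
        return 0;
}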
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index a918e96b21fd..13adc5840094 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -159,10 +159,10 @@ static void hci_dma_cleanup(struct i3c_hci *hci)
for (i = 0; i < rings->total; i++) {
rh = &rings->headers[i];
+ rh_reg_write(INTR_SIGNAL_ENABLE, 0);
rh_reg_write(RING_CONTROL, 0);
rh_reg_write(CR_SETUP, 0);
rh_reg_write(IBI_SETUP, 0);
- rh_reg_write(INTR_SIGNAL_ENABLE, 0);
if (rh->xfer)
dma_free_coherent(&hci->master.dev,
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 7042ddfdfc03..955e9eff0099 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -1394,6 +1394,9 @@ static int ad7192_probe(struct spi_device *spi)
st->int_vref_mv = ret == -ENODEV ? avdd_mv : ret / MILLI;
st->chip_info = spi_get_device_match_data(spi);
+ if (!st->chip_info)
+ return -ENODEV;
+
indio_dev->name = st->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = st->chip_info->info;
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 8c516ede9116..640a5d3aa2c6 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1613,6 +1613,8 @@ static const struct acpi_device_id ltr_acpi_match[] = {
{ "LTER0501", ltr501 },
{ "LTER0559", ltr559 },
{ "LTER0301", ltr301 },
+ /* https://www.catalog.update.microsoft.com/Search.aspx?q=lter0303 */
+ { "LTER0303", ltr303 },
{ },
};
MODULE_DEVICE_TABLE(acpi, ltr_acpi_match);
diff --git a/drivers/iio/magnetometer/af8133j.c b/drivers/iio/magnetometer/af8133j.c
index d81d89af6283..acd291f3e792 100644
--- a/drivers/iio/magnetometer/af8133j.c
+++ b/drivers/iio/magnetometer/af8133j.c
@@ -312,10 +312,11 @@ static int af8133j_set_scale(struct af8133j_data *data,
* When suspended, just store the new range to data->range to be
* applied later during power up.
*/
- if (!pm_runtime_status_suspended(dev))
+ if (!pm_runtime_status_suspended(dev)) {
scoped_guard(mutex, &data->mutex)
ret = regmap_write(data->regmap,
AF8133J_REG_RANGE, range);
+ }
pm_runtime_enable(dev);
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index 65011a8598d3..c55a38650c0d 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -372,6 +372,7 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
u8 data[8];
u16 xy1y2[3];
s32 h[3], s[3];
+ int half_range = BIT(13);
int i, ret;
mutex_lock(&yas5xx->lock);
@@ -406,13 +407,13 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
/* The second version of YAS537 needs to include calibration coefficients */
if (yas5xx->version == YAS537_VERSION_1) {
for (i = 0; i < 3; i++)
- s[i] = xy1y2[i] - BIT(13);
- h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / BIT(13);
- h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / BIT(13);
- h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / BIT(13);
+ s[i] = xy1y2[i] - half_range;
+ h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / half_range;
+ h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / half_range;
+ h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / half_range;
for (i = 0; i < 3; i++) {
- clamp_val(h[i], -BIT(13), BIT(13) - 1);
- xy1y2[i] = h[i] + BIT(13);
+ h[i] = clamp(h[i], -half_range, half_range - 1);
+ xy1y2[i] = h[i] + half_range;
}
}
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 804b788f3f16..f3399087859f 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -118,6 +118,7 @@ static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
*/
static bool increase_address_space(struct amd_io_pgtable *pgtable,
unsigned long address,
+ unsigned int page_size_level,
gfp_t gfp)
{
struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
@@ -133,7 +134,8 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
spin_lock_irqsave(&domain->lock, flags);
- if (address <= PM_LEVEL_SIZE(pgtable->mode))
+ if (address <= PM_LEVEL_SIZE(pgtable->mode) &&
+ pgtable->mode - 1 >= page_size_level)
goto out;
ret = false;
@@ -163,18 +165,21 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
gfp_t gfp,
bool *updated)
{
+ unsigned long last_addr = address + (page_size - 1);
struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
int level, end_lvl;
u64 *pte, *page;
BUG_ON(!is_power_of_2(page_size));
- while (address > PM_LEVEL_SIZE(pgtable->mode)) {
+ while (last_addr > PM_LEVEL_SIZE(pgtable->mode) ||
+ pgtable->mode - 1 < PAGE_SIZE_LEVEL(page_size)) {
/*
* Return an error if there is no memory to update the
* page-table.
*/
- if (!increase_address_space(pgtable, address, gfp))
+ if (!increase_address_space(pgtable, last_addr,
+ PAGE_SIZE_LEVEL(page_size), gfp))
return NULL;
}
diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
index e590973ce5cf..b8393a8c0753 100644
--- a/drivers/iommu/iommufd/fault.c
+++ b/drivers/iommu/iommufd/fault.c
@@ -415,8 +415,6 @@ out_put_fdno:
put_unused_fd(fdno);
out_fput:
fput(filep);
- refcount_dec(&fault->obj.users);
- iommufd_ctx_put(fault->ictx);
out_abort:
iommufd_object_abort_and_destroy(ucmd->ictx, &fault->obj);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index d82bcab233a1..66ce15027f28 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -407,7 +407,7 @@ config PARTITION_PERCPU
config STM32MP_EXTI
tristate "STM32MP extended interrupts and event controller"
depends on (ARCH_STM32 && !ARM_SINGLE_ARMV7M) || COMPILE_TEST
- default y
+ default ARCH_STM32 && !ARM_SINGLE_ARMV7M
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_CHIP
help
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 52f625e07658..d9b6ec844cdd 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -44,6 +44,7 @@
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
#define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3)
+#define ITS_FLAGS_WORKAROUND_HISILICON_162100801 (1ULL << 4)
#define RD_LOCAL_LPI_ENABLED BIT(0)
#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
@@ -61,6 +62,7 @@ static u32 lpi_id_bits;
#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
static u8 __ro_after_init lpi_prop_prio;
+static struct its_node *find_4_1_its(void);
/*
* Collection structure - just an ID, and a redistributor address to
@@ -3797,6 +3799,20 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
}
+static void its_vpe_4_1_invall_locked(int cpu, struct its_vpe *vpe)
+{
+ void __iomem *rdbase;
+ u64 val;
+
+ val = GICR_INVALLR_V;
+ val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
+
+ guard(raw_spinlock)(&gic_data_rdist_cpu(cpu)->rd_lock);
+ rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
+ gic_write_lpir(val, rdbase + GICR_INVALLR);
+ wait_for_syncr(rdbase);
+}
+
static int its_vpe_set_affinity(struct irq_data *d,
const struct cpumask *mask_val,
bool force)
@@ -3804,6 +3820,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
unsigned int from, cpu = nr_cpu_ids;
struct cpumask *table_mask;
+ struct its_node *its;
unsigned long flags;
/*
@@ -3866,6 +3883,11 @@ static int its_vpe_set_affinity(struct irq_data *d,
vpe->col_idx = cpu;
its_send_vmovp(vpe);
+
+ its = find_4_1_its();
+ if (its && its->flags & ITS_FLAGS_WORKAROUND_HISILICON_162100801)
+ its_vpe_4_1_invall_locked(cpu, vpe);
+
its_vpe_db_proxy_move(vpe, from, cpu);
out:
@@ -4173,22 +4195,12 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
static void its_vpe_4_1_invall(struct its_vpe *vpe)
{
- void __iomem *rdbase;
unsigned long flags;
- u64 val;
int cpu;
- val = GICR_INVALLR_V;
- val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
-
/* Target the redistributor this vPE is currently known on */
cpu = vpe_to_cpuid_lock(vpe, &flags);
- raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
- rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
- gic_write_lpir(val, rdbase + GICR_INVALLR);
-
- wait_for_syncr(rdbase);
- raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ its_vpe_4_1_invall_locked(cpu, vpe);
vpe_to_cpuid_unlock(vpe, flags);
}
@@ -4781,6 +4793,14 @@ static bool its_set_non_coherent(void *data)
return true;
}
+static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data)
+{
+ struct its_node *its = data;
+
+ its->flags |= ITS_FLAGS_WORKAROUND_HISILICON_162100801;
+ return true;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -4827,6 +4847,14 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_quirk_hip07_161600802,
},
#endif
+#ifdef CONFIG_HISILICON_ERRATUM_162100801
+ {
+ .desc = "ITS: Hip09 erratum 162100801",
+ .iidr = 0x00051736,
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_hip09_162100801,
+ },
+#endif
#ifdef CONFIG_ROCKCHIP_ERRATUM_3588001
{
.desc = "ITS: Rockchip erratum RK3588001",
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 06b97fd49ad9..f69f4e928d61 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -29,11 +29,14 @@ static ssize_t brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ unsigned int brightness;
- /* no lock needed for this */
+ mutex_lock(&led_cdev->led_access);
led_update_brightness(led_cdev);
+ brightness = led_cdev->brightness;
+ mutex_unlock(&led_cdev->led_access);
- return sprintf(buf, "%u\n", led_cdev->brightness);
+ return sprintf(buf, "%u\n", brightness);
}
static ssize_t brightness_store(struct device *dev,
@@ -70,8 +73,13 @@ static ssize_t max_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ unsigned int max_brightness;
+
+ mutex_lock(&led_cdev->led_access);
+ max_brightness = led_cdev->max_brightness;
+ mutex_unlock(&led_cdev->led_access);
- return sprintf(buf, "%u\n", led_cdev->max_brightness);
+ return sprintf(buf, "%u\n", max_brightness);
}
static DEVICE_ATTR_RO(max_brightness);
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 94885e411085..82102a4c5d68 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -269,6 +269,35 @@ static bool pcc_mbox_cmd_complete_check(struct pcc_chan_info *pchan)
return !!val;
}
+static void check_and_ack(struct pcc_chan_info *pchan, struct mbox_chan *chan)
+{
+ struct acpi_pcct_ext_pcc_shared_memory pcc_hdr;
+
+ if (pchan->type != ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ return;
+ /* If the memory region has not been mapped, we cannot
+ * determine if we need to send the message, but we still
+ * need to set the cmd_update flag before returning.
+ */
+ if (pchan->chan.shmem == NULL) {
+ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+ return;
+ }
+ memcpy_fromio(&pcc_hdr, pchan->chan.shmem,
+ sizeof(struct acpi_pcct_ext_pcc_shared_memory));
+ /*
+ * The PCC slave subspace channel needs to set the command complete bit
+ * after processing the message. If the PCC_ACK_FLAG is set, it should also
+ * ring the doorbell.
+ *
+ * The PCC master subspace channel clears chan_in_use to free the channel.
+ */
+ if (le32_to_cpup(&pcc_hdr.flags) & PCC_ACK_FLAG_MASK)
+ pcc_send_data(chan, NULL);
+ else
+ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+}
+
/**
* pcc_mbox_irq - PCC mailbox interrupt handler
* @irq: interrupt number
@@ -306,14 +335,7 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
mbox_chan_received_data(chan, NULL);
- /*
- * The PCC slave subspace channel needs to set the command complete bit
- * and ring doorbell after processing message.
- *
- * The PCC master subspace channel clears chan_in_use to free channel.
- */
- if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
- pcc_send_data(chan, NULL);
+ check_and_ack(pchan, chan);
pchan->chan_in_use = false;
return IRQ_HANDLED;
@@ -365,14 +387,37 @@ EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
{
struct mbox_chan *chan = pchan->mchan;
+ struct pcc_chan_info *pchan_info;
+ struct pcc_mbox_chan *pcc_mbox_chan;
if (!chan || !chan->cl)
return;
+ pchan_info = chan->con_priv;
+ pcc_mbox_chan = &pchan_info->chan;
+ if (pcc_mbox_chan->shmem) {
+ iounmap(pcc_mbox_chan->shmem);
+ pcc_mbox_chan->shmem = NULL;
+ }
mbox_free_channel(chan);
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+int pcc_mbox_ioremap(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan_info;
+ struct pcc_mbox_chan *pcc_mbox_chan;
+
+ if (!chan || !chan->cl)
+ return -1;
+ pchan_info = chan->con_priv;
+ pcc_mbox_chan = &pchan_info->chan;
+ pcc_mbox_chan->shmem = ioremap(pcc_mbox_chan->shmem_base_addr,
+ pcc_mbox_chan->shmem_size);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcc_mbox_ioremap);
+
/**
* pcc_send_data - Called from Mailbox Controller code. Used
* here only to ring the channel doorbell. The PCC client
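
The pcc.c changes above stop the driver from unconditionally ringing the doorbell on slave-subspace interrupts and add pcc_mbox_ioremap() so clients can map the channel's shared memory on demand (it is unmapped again in pcc_mbox_free_channel()). A hedged sketch of how a hypothetical client might use the new export, based only on the declarations visible in this diff and <acpi/pcc.h>; error handling is trimmed and this is not a tested driver:

#include <acpi/pcc.h>
#include <linux/err.h>

static int example_attach_pcc(struct mbox_client *cl, int subspace_id)
{
	struct pcc_mbox_chan *pchan;
	int ret;

	pchan = pcc_mbox_request_channel(cl, subspace_id);
	if (IS_ERR(pchan))
		return PTR_ERR(pchan);

	ret = pcc_mbox_ioremap(pchan->mchan);	/* maps shmem_base_addr */
	if (ret) {
		pcc_mbox_free_channel(pchan);
		return ret;
	}

	/* shared memory is now accessible through pchan->shmem */
	return 0;
}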
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index e7abfdd77c3b..e42f1400cea9 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1718,7 +1718,7 @@ static CLOSURE_CALLBACK(cache_set_flush)
if (!IS_ERR_OR_NULL(c->gc_thread))
kthread_stop(c->gc_thread);
- if (!IS_ERR(c->root))
+ if (!IS_ERR_OR_NULL(c->root))
list_add(&c->root->list, &c->btree_cache);
/*
diff --git a/drivers/media/pci/intel/ipu6/Kconfig b/drivers/media/pci/intel/ipu6/Kconfig
index a4537818a58c..cd1c54529357 100644
--- a/drivers/media/pci/intel/ipu6/Kconfig
+++ b/drivers/media/pci/intel/ipu6/Kconfig
@@ -8,7 +8,7 @@ config VIDEO_INTEL_IPU6
select IOMMU_IOVA
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
- select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_DMA_SG
select V4L2_FWNODE
help
This is the 6th Gen Intel Image Processing Unit, found in Intel SoCs
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
index 03dbb0e0ea79..bbb66b56ee88 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
@@ -13,17 +13,48 @@
#include <media/media-entity.h>
#include <media/v4l2-subdev.h>
-#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-dma-sg.h>
#include <media/videobuf2-v4l2.h>
#include "ipu6-bus.h"
+#include "ipu6-dma.h"
#include "ipu6-fw-isys.h"
#include "ipu6-isys.h"
#include "ipu6-isys-video.h"
-static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
- unsigned int *num_planes, unsigned int sizes[],
- struct device *alloc_devs[])
+static int ipu6_isys_buf_init(struct vb2_buffer *vb)
+{
+ struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue);
+ struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0);
+ struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb);
+ struct ipu6_isys_video_buffer *ivb =
+ vb2_buffer_to_ipu6_isys_video_buffer(vvb);
+ int ret;
+
+ ret = ipu6_dma_map_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0);
+ if (ret)
+ return ret;
+
+ ivb->dma_addr = sg_dma_address(sg->sgl);
+
+ return 0;
+}
+
+static void ipu6_isys_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct ipu6_isys *isys = vb2_get_drv_priv(vb->vb2_queue);
+ struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0);
+ struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb);
+ struct ipu6_isys_video_buffer *ivb =
+ vb2_buffer_to_ipu6_isys_video_buffer(vvb);
+
+ ivb->dma_addr = 0;
+ ipu6_dma_unmap_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0);
+}
+
+static int ipu6_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
{
struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q);
struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
@@ -207,9 +238,11 @@ ipu6_isys_buf_to_fw_frame_buf_pin(struct vb2_buffer *vb,
struct ipu6_fw_isys_frame_buff_set_abi *set)
{
struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb);
+ struct ipu6_isys_video_buffer *ivb =
+ vb2_buffer_to_ipu6_isys_video_buffer(vvb);
- set->output_pins[aq->fw_output].addr =
- vb2_dma_contig_plane_dma_addr(vb, 0);
+ set->output_pins[aq->fw_output].addr = ivb->dma_addr;
set->output_pins[aq->fw_output].out_buf_id = vb->index + 1;
}
@@ -332,7 +365,7 @@ static void buf_queue(struct vb2_buffer *vb)
dev_dbg(dev, "queue buffer %u for %s\n", vb->index, av->vdev.name);
- dma = vb2_dma_contig_plane_dma_addr(vb, 0);
+ dma = ivb->dma_addr;
dev_dbg(dev, "iova: iova %pad\n", &dma);
spin_lock_irqsave(&aq->lock, flags);
@@ -724,10 +757,14 @@ void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
}
list_for_each_entry_reverse(ib, &aq->active, head) {
+ struct ipu6_isys_video_buffer *ivb;
+ struct vb2_v4l2_buffer *vvb;
dma_addr_t addr;
vb = ipu6_isys_buffer_to_vb2_buffer(ib);
- addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ vvb = to_vb2_v4l2_buffer(vb);
+ ivb = vb2_buffer_to_ipu6_isys_video_buffer(vvb);
+ addr = ivb->dma_addr;
if (info->pin.addr != addr) {
if (first)
@@ -766,10 +803,12 @@ void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
}
static const struct vb2_ops ipu6_isys_queue_ops = {
- .queue_setup = queue_setup,
+ .queue_setup = ipu6_isys_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
+ .buf_init = ipu6_isys_buf_init,
.buf_prepare = ipu6_isys_buf_prepare,
+ .buf_cleanup = ipu6_isys_buf_cleanup,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
.buf_queue = buf_queue,
@@ -779,16 +818,17 @@ int ipu6_isys_queue_init(struct ipu6_isys_queue *aq)
{
struct ipu6_isys *isys = ipu6_isys_queue_to_video(aq)->isys;
struct ipu6_isys_video *av = ipu6_isys_queue_to_video(aq);
+ struct ipu6_bus_device *adev = isys->adev;
int ret;
/* no support for userptr */
if (!aq->vbq.io_modes)
aq->vbq.io_modes = VB2_MMAP | VB2_DMABUF;
- aq->vbq.drv_priv = aq;
+ aq->vbq.drv_priv = isys;
aq->vbq.ops = &ipu6_isys_queue_ops;
aq->vbq.lock = &av->mutex;
- aq->vbq.mem_ops = &vb2_dma_contig_memops;
+ aq->vbq.mem_ops = &vb2_dma_sg_memops;
aq->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
aq->vbq.min_queued_buffers = 1;
aq->vbq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
@@ -797,8 +837,8 @@ int ipu6_isys_queue_init(struct ipu6_isys_queue *aq)
if (ret)
return ret;
- aq->dev = &isys->adev->auxdev.dev;
- aq->vbq.dev = &isys->adev->auxdev.dev;
+ aq->dev = &adev->auxdev.dev;
+ aq->vbq.dev = &adev->isp->pdev->dev;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->active);
INIT_LIST_HEAD(&aq->incoming);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
index 95cfd4869d93..fe8fc796a58f 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
@@ -38,6 +38,7 @@ struct ipu6_isys_buffer {
struct ipu6_isys_video_buffer {
struct vb2_v4l2_buffer vb_v4l2;
struct ipu6_isys_buffer ib;
+ dma_addr_t dma_addr;
};
#define IPU6_ISYS_BUFFER_LIST_FL_INCOMING BIT(0)
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
index c4aff2e2009b..c85e056cb904 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
@@ -34,6 +34,7 @@
#include "ipu6-bus.h"
#include "ipu6-cpd.h"
+#include "ipu6-dma.h"
#include "ipu6-isys.h"
#include "ipu6-isys-csi2.h"
#include "ipu6-mmu.h"
@@ -933,29 +934,27 @@ static const struct dev_pm_ops isys_pm_ops = {
static void free_fw_msg_bufs(struct ipu6_isys *isys)
{
- struct device *dev = &isys->adev->auxdev.dev;
struct isys_fw_msgs *fwmsg, *safe;
list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head)
- dma_free_attrs(dev, sizeof(struct isys_fw_msgs), fwmsg,
- fwmsg->dma_addr, 0);
+ ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), fwmsg,
+ fwmsg->dma_addr, 0);
list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head)
- dma_free_attrs(dev, sizeof(struct isys_fw_msgs), fwmsg,
- fwmsg->dma_addr, 0);
+ ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), fwmsg,
+ fwmsg->dma_addr, 0);
}
static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount)
{
- struct device *dev = &isys->adev->auxdev.dev;
struct isys_fw_msgs *addr;
dma_addr_t dma_addr;
unsigned long flags;
unsigned int i;
for (i = 0; i < amount; i++) {
- addr = dma_alloc_attrs(dev, sizeof(struct isys_fw_msgs),
- &dma_addr, GFP_KERNEL, 0);
+ addr = ipu6_dma_alloc(isys->adev, sizeof(*addr),
+ &dma_addr, GFP_KERNEL, 0);
if (!addr)
break;
addr->dma_addr = dma_addr;
@@ -974,8 +973,8 @@ static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount)
struct isys_fw_msgs, head);
list_del(&addr->head);
spin_unlock_irqrestore(&isys->listlock, flags);
- dma_free_attrs(dev, sizeof(struct isys_fw_msgs), addr,
- addr->dma_addr, 0);
+ ipu6_dma_free(isys->adev, sizeof(struct isys_fw_msgs), addr,
+ addr->dma_addr, 0);
spin_lock_irqsave(&isys->listlock, flags);
}
spin_unlock_irqrestore(&isys->listlock, flags);
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 92efe6c1f47b..bda729b42d05 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -994,6 +994,8 @@ const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
/* table of devices that work with this driver */
struct usb_device_id cx231xx_id_table[] = {
+ {USB_DEVICE(0x1D19, 0x6108),
+ .driver_info = CX231XX_BOARD_PV_XCAPTURE_USB},
{USB_DEVICE(0x1D19, 0x6109),
.driver_info = CX231XX_BOARD_PV_XCAPTURE_USB},
{USB_DEVICE(0x0572, 0x5A3C),
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 675be4858366..9f38a9b23c01 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2474,12 +2474,25 @@ static const struct uvc_device_info uvc_quirk_force_y8 = {
* The Logitech cameras listed below have their interface class set to
* VENDOR_SPEC because they don't announce themselves as UVC devices, even
* though they are compliant.
+ *
+ * Sort these by vendor/product ID.
*/
static const struct usb_device_id uvc_ids[] = {
/* Quanta ACER HD User Facing */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x0408,
+ .idProduct = 0x4033,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
+ .driver_info = (kernel_ulong_t)&(const struct uvc_device_info){
+ .uvc_version = 0x010a,
+ } },
+ /* Quanta ACER HD User Facing */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x0408,
.idProduct = 0x4035,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
@@ -3010,6 +3023,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX
| UVC_QUIRK_IGNORE_SELECTOR_UNIT) },
+ /* NXP Semiconductors IR VIDEO */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x1fc9,
+ .idProduct = 0x009b,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
/* Oculus VR Positional Tracker DK2 */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -3118,6 +3140,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) },
+ /* Intel D421 Depth Module */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x8086,
+ .idProduct = 0x1155,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) },
/* Generic USB Video Class */
{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_UNDEFINED) },
{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_15) },
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 9627294fe3e9..4c9827fe9217 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -186,6 +186,11 @@ void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word,
eeprom_93cx6_write_bits(eeprom, command,
PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+ if (has_quirk_extra_read_cycle(eeprom)) {
+ eeprom_93cx6_pulse_high(eeprom);
+ eeprom_93cx6_pulse_low(eeprom);
+ }
+
/*
* Read the requested 16 bits.
*/
@@ -252,6 +257,11 @@ void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, const u8 byte,
eeprom_93cx6_write_bits(eeprom, command,
PCI_EEPROM_WIDTH_OPCODE + eeprom->width + 1);
+ if (has_quirk_extra_read_cycle(eeprom)) {
+ eeprom_93cx6_pulse_high(eeprom);
+ eeprom_93cx6_pulse_low(eeprom);
+ }
+
/*
* Read the requested 8 bits.
*/
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ef06a4d5d65b..1d08009f2bd8 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -50,6 +50,7 @@
#include <linux/mmc/sd.h>
#include <linux/uaccess.h>
+#include <linux/unaligned.h>
#include "queue.h"
#include "block.h"
@@ -993,11 +994,12 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
int err;
u32 result;
__be32 *blocks;
+ u8 resp_sz = mmc_card_ult_capacity(card) ? 8 : 4;
+ unsigned int noio_flag;
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_data data = {};
-
struct scatterlist sg;
err = mmc_app_cmd(card->host, card);
@@ -1008,7 +1010,7 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- data.blksz = 4;
+ data.blksz = resp_sz;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
@@ -1018,15 +1020,29 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
mrq.cmd = &cmd;
mrq.data = &data;
- blocks = kmalloc(4, GFP_KERNEL);
+ noio_flag = memalloc_noio_save();
+ blocks = kmalloc(resp_sz, GFP_KERNEL);
+ memalloc_noio_restore(noio_flag);
if (!blocks)
return -ENOMEM;
- sg_init_one(&sg, blocks, 4);
+ sg_init_one(&sg, blocks, resp_sz);
mmc_wait_for_req(card->host, &mrq);
- result = ntohl(*blocks);
+ if (mmc_card_ult_capacity(card)) {
+ /*
+ * Normally, ACMD22 returns the number of written sectors as
+ * u32. SDUC, however, returns it as u64, which is not a
+ * superfluous requirement, because SDUC writes may exceed 2TB.
+ * For Linux mmc, however, the preceding write could not have
+ * exceeded the block layer limits, so just make room for a u64
+ * and cast the response back to u32.
+ */
+ result = clamp_val(get_unaligned_be64(blocks), 0, UINT_MAX);
+ } else {
+ result = ntohl(*blocks);
+ }
kfree(blocks);
if (cmd.error || data.error)
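
The block.c hunk above widens the ACMD22 response buffer for SDUC cards and clamps the 64-bit sector count back into the existing u32 interface. A standalone userspace sketch of that decode-and-clamp step (the response bytes are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Decode an 8-byte big-endian value, like the SDUC ACMD22 response. */
static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	/* Example: card reports 0x1_0000_0000 written sectors (> UINT32_MAX) */
	uint8_t resp[8] = { 0, 0, 0, 1, 0, 0, 0, 0 };
	uint64_t blocks = get_be64(resp);
	uint32_t result = blocks > UINT32_MAX ? UINT32_MAX : (uint32_t)blocks;

	printf("reported %llu, clamped to %u\n",
	       (unsigned long long)blocks, result);
	return 0;
}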
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 0ddaee0eae54..4f3a26676ccb 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -149,6 +149,8 @@ static void mmc_bus_shutdown(struct device *dev)
if (dev->driver && drv->shutdown)
drv->shutdown(card);
+ __mmc_stop_host(host);
+
if (host->bus_ops->shutdown) {
ret = host->bus_ops->shutdown(host);
if (ret)
@@ -321,7 +323,9 @@ int mmc_add_card(struct mmc_card *card)
case MMC_TYPE_SD:
type = "SD";
if (mmc_card_blockaddr(card)) {
- if (mmc_card_ext_capacity(card))
+ if (mmc_card_ult_capacity(card))
+ type = "SDUC";
+ else if (mmc_card_ext_capacity(card))
type = "SDXC";
else
type = "SDHC";
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index b7754a1b8d97..3205feb1e8ff 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -23,6 +23,7 @@
#define MMC_CARD_SDXC (1<<3) /* card is SDXC */
#define MMC_CARD_REMOVED (1<<4) /* card has been removed */
#define MMC_STATE_SUSPENDED (1<<5) /* card is suspended */
+#define MMC_CARD_SDUC (1<<6) /* card is SDUC */
#define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT)
#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
@@ -30,11 +31,13 @@
#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
+#define mmc_card_ult_capacity(c) ((c)->state & MMC_CARD_SDUC)
#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
+#define mmc_card_set_ult_capacity(c) ((c)->state |= MMC_CARD_SDUC)
#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
@@ -82,6 +85,7 @@ struct mmc_fixup {
#define CID_MANFID_SANDISK_SD 0x3
#define CID_MANFID_ATP 0x9
#define CID_MANFID_TOSHIBA 0x11
+#define CID_MANFID_GIGASTONE 0x12
#define CID_MANFID_MICRON 0x13
#define CID_MANFID_SAMSUNG 0x15
#define CID_MANFID_APACER 0x27
@@ -284,4 +288,10 @@ static inline int mmc_card_broken_cache_flush(const struct mmc_card *c)
{
return c->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH;
}
+
+static inline int mmc_card_broken_sd_poweroff_notify(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY;
+}
+
#endif
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d6c819dd68ed..327029f5c59b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2296,6 +2296,9 @@ void mmc_start_host(struct mmc_host *host)
void __mmc_stop_host(struct mmc_host *host)
{
+ if (host->rescan_disable)
+ return;
+
if (host->slot.cd_irq >= 0) {
mmc_gpio_set_cd_wake(host, false);
disable_irq(host->slot.cd_irq);
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index 92905fc46436..89b512905be1 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -25,6 +25,15 @@ static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+ /*
+ * GIGASTONE Gaming Plus microSD cards manufactured in 02/2022 never
+ * clear the Flush Cache bit nor set the Poweroff Notification Ready bit.
+ */
+ _FIXUP_EXT("ASTC", CID_MANFID_GIGASTONE, 0x3456, 2022, 2,
+ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_CACHE | MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY,
+ EXT_CSD_REV_ANY),
+
END_FIXUP
};
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 12fe282bea77..63915541c0e4 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -100,7 +100,7 @@ void mmc_decode_cid(struct mmc_card *card)
/*
* Given a 128-bit response, decode to our card CSD structure.
*/
-static int mmc_decode_csd(struct mmc_card *card)
+static int mmc_decode_csd(struct mmc_card *card, bool is_sduc)
{
struct mmc_csd *csd = &card->csd;
unsigned int e, m, csd_struct;
@@ -144,9 +144,10 @@ static int mmc_decode_csd(struct mmc_card *card)
mmc_card_set_readonly(card);
break;
case 1:
+ case 2:
/*
- * This is a block-addressed SDHC or SDXC card. Most
- * interesting fields are unused and have fixed
+ * This is a block-addressed SDHC, SDXC or SDUC card.
+ * Most interesting fields are unused and have fixed
* values. To avoid getting tripped by buggy cards,
* we assume those fixed values ourselves.
*/
@@ -159,14 +160,19 @@ static int mmc_decode_csd(struct mmc_card *card)
e = unstuff_bits(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
csd->cmdclass = unstuff_bits(resp, 84, 12);
- csd->c_size = unstuff_bits(resp, 48, 22);
- /* SDXC cards have a minimum C_SIZE of 0x00FFFF */
- if (csd->c_size >= 0xFFFF)
+ if (csd_struct == 1)
+ m = unstuff_bits(resp, 48, 22);
+ else
+ m = unstuff_bits(resp, 48, 28);
+ csd->c_size = m;
+
+ if (csd->c_size >= 0x400000 && is_sduc)
+ mmc_card_set_ult_capacity(card);
+ else if (csd->c_size >= 0xFFFF)
mmc_card_set_ext_capacity(card);
- m = unstuff_bits(resp, 48, 22);
- csd->capacity = (1 + m) << 10;
+ csd->capacity = (1 + (typeof(sector_t))m) << 10;
csd->read_blkbits = 9;
csd->read_partial = 0;
@@ -876,7 +882,7 @@ try_again:
return err;
}
-int mmc_sd_get_csd(struct mmc_card *card)
+int mmc_sd_get_csd(struct mmc_card *card, bool is_sduc)
{
int err;
@@ -887,7 +893,7 @@ int mmc_sd_get_csd(struct mmc_card *card)
if (err)
return err;
- err = mmc_decode_csd(card);
+ err = mmc_decode_csd(card, is_sduc);
if (err)
return err;
@@ -1107,7 +1113,7 @@ static int sd_parse_ext_reg_power(struct mmc_card *card, u8 fno, u8 page,
card->ext_power.rev = reg_buf[0] & 0xf;
/* Power Off Notification support at bit 4. */
- if (reg_buf[1] & BIT(4))
+ if ((reg_buf[1] & BIT(4)) && !mmc_card_broken_sd_poweroff_notify(card))
card->ext_power.feature_support |= SD_EXT_POWER_OFF_NOTIFY;
/* Power Sustenance support at bit 5. */
@@ -1442,7 +1448,7 @@ retry:
}
if (!oldcard) {
- err = mmc_sd_get_csd(card);
+ err = mmc_sd_get_csd(card, false);
if (err)
goto free_card;
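
The sd.c change above decodes a 28-bit C_SIZE for CSD structure version 2 (SDUC) instead of the 22-bit field used by SDHC/SDXC, and widens the capacity arithmetic so it no longer truncates. A userspace sketch of just the capacity math, with a hypothetical C_SIZE value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t c_size = 0x0800000;		/* hypothetical 28-bit SDUC C_SIZE */
	uint32_t cap32  = (1 + c_size) << 10;	/* truncates in 32-bit math */
	uint64_t cap64  = (1 + (uint64_t)c_size) << 10; /* sector_t-style math */

	printf("32-bit sectors: %u\n", cap32);
	printf("64-bit sectors: %llu (%llu GiB)\n",
	       (unsigned long long)cap64,
	       (unsigned long long)(cap64 / 2 / 1024 / 1024));
	return 0;
}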
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index fe6dd46927a4..7e8beface2ca 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -10,7 +10,7 @@ struct mmc_host;
struct mmc_card;
int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);
-int mmc_sd_get_csd(struct mmc_card *card);
+int mmc_sd_get_csd(struct mmc_card *card, bool is_sduc);
void mmc_decode_cid(struct mmc_card *card);
int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
bool reinit);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 4fb247fde5c0..9566837c9848 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -769,7 +769,7 @@ try_again:
* Read CSD, before selecting the card
*/
if (!oldcard && mmc_card_sd_combo(card)) {
- err = mmc_sd_get_csd(card);
+ err = mmc_sd_get_csd(card, false);
if (err)
goto remove;
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 89018b6c97b9..813bc20cfb5a 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -2736,20 +2736,18 @@ static int msdc_drv_probe(struct platform_device *pdev)
}
/* Allocate MMC host for this device */
- mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
+ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct msdc_host));
if (!mmc)
return -ENOMEM;
host = mmc_priv(mmc);
ret = mmc_of_parse(mmc);
if (ret)
- goto host_free;
+ return ret;
host->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(host->base)) {
- ret = PTR_ERR(host->base);
- goto host_free;
- }
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
@@ -2760,53 +2758,45 @@ static int msdc_drv_probe(struct platform_device *pdev)
ret = mmc_regulator_get_supply(mmc);
if (ret)
- goto host_free;
+ return ret;
ret = msdc_of_clock_parse(pdev, host);
if (ret)
- goto host_free;
+ return ret;
host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
"hrst");
- if (IS_ERR(host->reset)) {
- ret = PTR_ERR(host->reset);
- goto host_free;
- }
+ if (IS_ERR(host->reset))
+ return PTR_ERR(host->reset);
/* only eMMC has crypto property */
if (!(mmc->caps2 & MMC_CAP2_NO_MMC)) {
host->crypto_clk = devm_clk_get_optional(&pdev->dev, "crypto");
if (IS_ERR(host->crypto_clk))
- host->crypto_clk = NULL;
- else
+ return PTR_ERR(host->crypto_clk);
+ else if (host->crypto_clk)
mmc->caps2 |= MMC_CAP2_CRYPTO;
}
host->irq = platform_get_irq(pdev, 0);
- if (host->irq < 0) {
- ret = host->irq;
- goto host_free;
- }
+ if (host->irq < 0)
+ return host->irq;
host->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (IS_ERR(host->pinctrl)) {
- ret = PTR_ERR(host->pinctrl);
- dev_err(&pdev->dev, "Cannot find pinctrl!\n");
- goto host_free;
- }
+ if (IS_ERR(host->pinctrl))
+ return dev_err_probe(&pdev->dev, PTR_ERR(host->pinctrl),
+ "Cannot find pinctrl");
host->pins_default = pinctrl_lookup_state(host->pinctrl, "default");
if (IS_ERR(host->pins_default)) {
- ret = PTR_ERR(host->pins_default);
dev_err(&pdev->dev, "Cannot find pinctrl default!\n");
- goto host_free;
+ return PTR_ERR(host->pins_default);
}
host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
if (IS_ERR(host->pins_uhs)) {
- ret = PTR_ERR(host->pins_uhs);
dev_err(&pdev->dev, "Cannot find pinctrl uhs!\n");
- goto host_free;
+ return PTR_ERR(host->pins_uhs);
}
/* Support for SDIO eint irq ? */
@@ -2885,7 +2875,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
ret = msdc_ungate_clock(host);
if (ret) {
dev_err(&pdev->dev, "Cannot ungate clocks!\n");
- goto release_mem;
+ goto release_clk;
}
msdc_init_hw(host);
@@ -2895,14 +2885,14 @@ static int msdc_drv_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!host->cq_host) {
ret = -ENOMEM;
- goto host_free;
+ goto release;
}
host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
host->cq_host->mmio = host->base + 0x800;
host->cq_host->ops = &msdc_cmdq_ops;
ret = cqhci_init(host->cq_host, mmc, true);
if (ret)
- goto host_free;
+ goto release;
mmc->max_segs = 128;
/* cqhci 16bit length */
/* 0 size, means 65536 so we don't have to -1 here */
@@ -2929,9 +2919,10 @@ static int msdc_drv_probe(struct platform_device *pdev)
end:
pm_runtime_disable(host->dev);
release:
- platform_set_drvdata(pdev, NULL);
msdc_deinit_hw(host);
+release_clk:
msdc_gate_clock(host);
+ platform_set_drvdata(pdev, NULL);
release_mem:
if (host->dma.gpd)
dma_free_coherent(&pdev->dev,
@@ -2939,11 +2930,8 @@ release_mem:
host->dma.gpd, host->dma.gpd_addr);
if (host->dma.bd)
dma_free_coherent(&pdev->dev,
- MAX_BD_NUM * sizeof(struct mt_bdma_desc),
- host->dma.bd, host->dma.bd_addr);
-host_free:
- mmc_free_host(mmc);
-
+ MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+ host->dma.bd, host->dma.bd_addr);
return ret;
}
@@ -2968,9 +2956,7 @@ static void msdc_drv_remove(struct platform_device *pdev)
2 * sizeof(struct mt_gpdma_desc),
host->dma.gpd, host->dma.gpd_addr);
dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
- host->dma.bd, host->dma.bd_addr);
-
- mmc_free_host(mmc);
+ host->dma.bd, host->dma.bd_addr);
}
static void msdc_save_reg(struct msdc_host *host)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 8f0bc6dca2b0..ef3a44f2dff1 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -238,6 +238,7 @@ struct esdhc_platform_data {
struct esdhc_soc_data {
u32 flags;
+ u32 quirks;
};
static const struct esdhc_soc_data esdhc_imx25_data = {
@@ -309,10 +310,12 @@ static struct esdhc_soc_data usdhc_imx7ulp_data = {
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
+ .quirks = SDHCI_QUIRK_NO_LED,
};
static struct esdhc_soc_data usdhc_imxrt1050_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200,
+ .quirks = SDHCI_QUIRK_NO_LED,
};
static struct esdhc_soc_data usdhc_imx8qxp_data = {
@@ -321,6 +324,7 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
| ESDHC_FLAG_STATE_LOST_IN_LPMODE
| ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
+ .quirks = SDHCI_QUIRK_NO_LED,
};
static struct esdhc_soc_data usdhc_imx8mm_data = {
@@ -328,6 +332,7 @@ static struct esdhc_soc_data usdhc_imx8mm_data = {
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
+ .quirks = SDHCI_QUIRK_NO_LED,
};
struct pltfm_imx_data {
@@ -1687,6 +1692,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
imx_data->socdata = device_get_match_data(&pdev->dev);
+ host->quirks |= imx_data->socdata->quirks;
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index ed45ed0bdafd..2e2e15e2d8fb 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
@@ -1235,6 +1236,29 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
.priv_size = sizeof(struct intel_host),
};
+/* DMI quirks for devices with missing or broken CD GPIO info */
+static const struct gpiod_lookup_table vexia_edu_atla10_cd_gpios = {
+ .dev_id = "0000:00:12.0",
+ .table = {
+ GPIO_LOOKUP("INT33FC:00", 38, "cd", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static const struct dmi_system_id sdhci_intel_byt_cd_gpio_override[] = {
+ {
+ /* Vexia Edu Atla 10 tablet 9V version */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
+ },
+ .driver_data = (void *)&vexia_edu_atla10_cd_gpios,
+ },
+ { }
+};
+
static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
#ifdef CONFIG_PM_SLEEP
.resume = byt_resume,
@@ -1253,6 +1277,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
.add_host = byt_add_host,
.remove_slot = byt_remove_slot,
.ops = &sdhci_intel_byt_ops,
+ .cd_gpio_override = sdhci_intel_byt_cd_gpio_override,
.priv_size = sizeof(struct intel_host),
};
@@ -2054,6 +2079,42 @@ static const struct dev_pm_ops sdhci_pci_pm_ops = {
* *
\*****************************************************************************/
+static struct gpiod_lookup_table *sdhci_pci_add_gpio_lookup_table(
+ struct sdhci_pci_chip *chip)
+{
+ struct gpiod_lookup_table *cd_gpio_lookup_table;
+ const struct dmi_system_id *dmi_id = NULL;
+ size_t count;
+
+ if (chip->fixes && chip->fixes->cd_gpio_override)
+ dmi_id = dmi_first_match(chip->fixes->cd_gpio_override);
+
+ if (!dmi_id)
+ return NULL;
+
+ cd_gpio_lookup_table = dmi_id->driver_data;
+ for (count = 0; cd_gpio_lookup_table->table[count].key; count++)
+ ;
+
+ cd_gpio_lookup_table = kmemdup(dmi_id->driver_data,
+ /* count + 1 terminating entry */
+ struct_size(cd_gpio_lookup_table, table, count + 1),
+ GFP_KERNEL);
+ if (!cd_gpio_lookup_table)
+ return ERR_PTR(-ENOMEM);
+
+ gpiod_add_lookup_table(cd_gpio_lookup_table);
+ return cd_gpio_lookup_table;
+}
+
+static void sdhci_pci_remove_gpio_lookup_table(struct gpiod_lookup_table *lookup_table)
+{
+ if (lookup_table) {
+ gpiod_remove_lookup_table(lookup_table);
+ kfree(lookup_table);
+ }
+}
+
static struct sdhci_pci_slot *sdhci_pci_probe_slot(
struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
int slotno)
@@ -2129,8 +2190,19 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
device_init_wakeup(&pdev->dev, true);
if (slot->cd_idx >= 0) {
+ struct gpiod_lookup_table *cd_gpio_lookup_table;
+
+ cd_gpio_lookup_table = sdhci_pci_add_gpio_lookup_table(chip);
+ if (IS_ERR(cd_gpio_lookup_table)) {
+ ret = PTR_ERR(cd_gpio_lookup_table);
+ goto remove;
+ }
+
ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
slot->cd_override_level, 0);
+
+ sdhci_pci_remove_gpio_lookup_table(cd_gpio_lookup_table);
+
if (ret && ret != -EPROBE_DEFER)
ret = mmc_gpiod_request_cd(host->mmc, NULL,
slot->cd_idx,
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 153704f812ed..4973fa859217 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -156,6 +156,7 @@ struct sdhci_pci_fixes {
#endif
const struct sdhci_ops *ops;
+ const struct dmi_system_id *cd_gpio_override;
size_t priv_size;
};
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index 511615dc3341..cc371d0c9f3c 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -1014,49 +1014,57 @@ static int c_can_handle_bus_err(struct net_device *dev,
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
- if (unlikely(!skb))
- return 0;
/* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (lec_type) {
case LEC_STUFF_ERROR:
netdev_dbg(dev, "stuff error\n");
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
stats->rx_errors++;
break;
case LEC_FORM_ERROR:
netdev_dbg(dev, "form error\n");
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
stats->rx_errors++;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
stats->tx_errors++;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
stats->tx_errors++;
break;
case LEC_BIT0_ERROR:
netdev_dbg(dev, "bit0 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
stats->tx_errors++;
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
stats->rx_errors++;
break;
default:
break;
}
+ if (unlikely(!skb))
+ return 0;
+
netif_receive_skb(skb);
return 1;
}
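
The c_can hunk above, like the other CAN driver changes that follow, keeps updating the rx/tx error statistics even when alloc_can_err_skb() fails, and only fills in the error frame when the allocation succeeded. A userspace sketch of that pattern; the names are illustrative, not kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct err_frame { unsigned char data[8]; };
struct stats { unsigned long rx_errors, tx_errors; };

static struct err_frame *alloc_err_frame(bool simulate_oom)
{
	return simulate_oom ? NULL : calloc(1, sizeof(struct err_frame));
}

static int handle_bus_error(struct stats *stats, bool tx_dir, bool oom)
{
	struct err_frame *cf = alloc_err_frame(oom);

	/* Accounting must not depend on the allocation... */
	if (tx_dir)
		stats->tx_errors++;
	else
		stats->rx_errors++;

	/* ...but the frame contents obviously do. */
	if (cf)
		cf->data[2] |= 0x08;	/* e.g. a protocol error flag */

	if (!cf)
		return 0;
	free(cf);		/* stands in for delivering the frame */
	return 1;
}

int main(void)
{
	struct stats st = { 0, 0 };

	handle_bus_error(&st, true, true);	/* allocation failure, still counted */
	handle_bus_error(&st, false, false);	/* normal path */
	printf("tx_errors=%lu rx_errors=%lu\n", st.tx_errors, st.rx_errors);
	return 0;
}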
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 6792c14fd7eb..681643ab3780 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -468,7 +468,7 @@ static int can_set_termination(struct net_device *ndev, u16 term)
else
set = 0;
- gpiod_set_value(priv->termination_gpio, set);
+ gpiod_set_value_cansleep(priv->termination_gpio, set);
return 0;
}
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index d32b10900d2f..c86b57d47085 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -390,36 +390,55 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
return 0;
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
/* Propagate the error condition to the CAN stack. */
skb = alloc_can_err_skb(ndev, &cf);
- if (unlikely(!skb))
- return 0;
/* Read the error counter register and check for new errors. */
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST)
- cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST)
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) {
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) {
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) {
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST)
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
/* Reset the error counter, ack the IRQ and re-enable the counter. */
writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
@@ -427,6 +446,9 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
priv->base + IFI_CANFD_INTERRUPT);
writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
+ if (unlikely(!skb))
+ return 0;
+
netif_receive_skb(skb);
return 1;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 16e9e7d7527d..533bcb77c9f9 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -695,47 +695,60 @@ static int m_can_handle_lec_err(struct net_device *dev,
u32 timestamp = 0;
cdev->can.can_stats.bus_error++;
- stats->rx_errors++;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
- if (unlikely(!skb))
- return 0;
/* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (lec_type) {
case LEC_STUFF_ERROR:
netdev_dbg(dev, "stuff error\n");
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
case LEC_FORM_ERROR:
netdev_dbg(dev, "form error\n");
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
break;
case LEC_BIT0_ERROR:
netdev_dbg(dev, "bit0 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
break;
default:
break;
}
+ if (unlikely(!skb))
+ return 0;
+
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index ddb3247948ad..4d245857ef1c 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -416,8 +416,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
int ret = 0;
skb = alloc_can_err_skb(dev, &cf);
- if (skb == NULL)
- return -ENOMEM;
txerr = priv->read_reg(priv, SJA1000_TXERR);
rxerr = priv->read_reg(priv, SJA1000_RXERR);
@@ -425,8 +423,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+
stats->rx_over_errors++;
stats->rx_errors++;
sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
@@ -452,7 +453,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
- if (state != CAN_STATE_BUS_OFF) {
+ if (state != CAN_STATE_BUS_OFF && skb) {
cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
@@ -460,33 +461,38 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
if (isrc & IRQ_BEI) {
/* bus error interrupt */
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
ecc = priv->read_reg(priv, SJA1000_ECC);
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
-
- /* set error type */
- switch (ecc & ECC_MASK) {
- case ECC_BIT:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- break;
- case ECC_FORM:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- break;
- case ECC_STUFF:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- break;
- default:
- break;
- }
+ /* set error type */
+ switch (ecc & ECC_MASK) {
+ case ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ break;
+ }
- /* set error location */
- cf->data[3] = ecc & ECC_SEG;
+ /* set error location */
+ cf->data[3] = ecc & ECC_SEG;
+ }
/* Error occurred during transmission? */
- if ((ecc & ECC_DIR) == 0)
- cf->data[2] |= CAN_ERR_PROT_TX;
+ if ((ecc & ECC_DIR) == 0) {
+ stats->tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ } else {
+ stats->rx_errors++;
+ }
}
if (isrc & IRQ_EPI) {
/* error passive interrupt */
@@ -502,8 +508,10 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
netdev_dbg(dev, "arbitration lost interrupt\n");
alc = priv->read_reg(priv, SJA1000_ALC);
priv->can.can_stats.arbitration_lost++;
- cf->can_id |= CAN_ERR_LOSTARB;
- cf->data[0] = alc & 0x1f;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = alc & 0x1f;
+ }
}
if (state != priv->can.state) {
@@ -516,6 +524,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
can_bus_off(dev);
}
+ if (!skb)
+ return -ENOMEM;
+
netif_rx(skb);
return ret;
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 148d974ebb21..1b9501ee10de 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -671,9 +671,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
tx_state = txerr >= rxerr ? new_state : 0;
rx_state = txerr <= rxerr ? new_state : 0;
can_change_state(net, cf, tx_state, rx_state);
- netif_rx(skb);
if (new_state == CAN_STATE_BUS_OFF) {
+ netif_rx(skb);
can_bus_off(net);
if (priv->can.restart_ms == 0) {
priv->force_quit = 1;
@@ -684,6 +684,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
+ netif_rx(skb);
}
}
@@ -696,27 +697,38 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
/* Check for protocol errors */
if (eflag & HI3110_ERR_PROTOCOL_MASK) {
skb = alloc_can_err_skb(net, &cf);
- if (!skb)
- break;
+ if (skb)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
priv->can.can_stats.bus_error++;
- priv->net->stats.rx_errors++;
- if (eflag & HI3110_ERR_BITERR)
- cf->data[2] |= CAN_ERR_PROT_BIT;
- else if (eflag & HI3110_ERR_FRMERR)
- cf->data[2] |= CAN_ERR_PROT_FORM;
- else if (eflag & HI3110_ERR_STUFERR)
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- else if (eflag & HI3110_ERR_CRCERR)
- cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
- else if (eflag & HI3110_ERR_ACKERR)
- cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
-
- cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
- cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
+ if (eflag & HI3110_ERR_BITERR) {
+ priv->net->stats.tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ } else if (eflag & HI3110_ERR_FRMERR) {
+ priv->net->stats.rx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ } else if (eflag & HI3110_ERR_STUFERR) {
+ priv->net->stats.rx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ } else if (eflag & HI3110_ERR_CRCERR) {
+ priv->net->stats.rx_errors++;
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ } else if (eflag & HI3110_ERR_ACKERR) {
+ priv->net->stats.tx_errors++;
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ }
+
netdev_dbg(priv->net, "Bus Error\n");
- netif_rx(skb);
+ if (skb) {
+ cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
+ cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
+ netif_rx(skb);
+ }
}
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
index d3ac865933fd..e94321849fd7 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -21,6 +21,11 @@ static inline bool mcp251xfd_tx_fifo_sta_empty(u32 fifo_sta)
return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
}
+static inline bool mcp251xfd_tx_fifo_sta_less_than_half_full(u32 fifo_sta)
+{
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFHRFHIF;
+}
+
static inline int
mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
u8 *tef_tail)
@@ -147,7 +152,29 @@ mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));
len = (chip_tx_tail << shift) - (tail << shift);
- *len_p = len >> shift;
+ len >>= shift;
+
+ /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
+ * bits of a FIFOSTA register, here the TX-FIFO tail index,
+ * might be corrupted.
+ *
+ * However, here it seems that the bit indicating that the
+ * TX-FIFO is empty (MCP251XFD_REG_FIFOSTA_TFERFFIF) is not
+ * correct, while the TX-FIFO tail index is.
+ *
+ * We assume the TX-FIFO is empty, i.e. all pending CAN frames
+ * have been sent, if:
+ * - Chip's head and tail index are equal (len == 0).
+ * - The TX-FIFO is less than half full.
+ * (The TX-FIFO empty case has already been checked at the
+ * beginning of this function.)
+ * - There are no free buffers in the TX ring.
+ */
+ if (len == 0 && mcp251xfd_tx_fifo_sta_less_than_half_full(fifo_sta) &&
+ mcp251xfd_get_tx_free(tx_ring) == 0)
+ len = tx_ring->obj_num;
+
+ *len_p = len;
return 0;
}
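
The comment in the mcp251xfd hunk above describes how the TEF length is derived from the chip's head/tail indices and how the erratum case (indices equal although all buffers are in flight) is patched up. A userspace sketch of that computation, assuming a ring of 8 objects and 32-bit unsigned int; values and names are illustrative:

#include <stdio.h>

#define OBJ_NUM 8U	/* ring size, power of two */

static unsigned int tef_len(unsigned int chip_tail, unsigned int tail,
			    int less_than_half_full, unsigned int tx_free)
{
	/* shift so the subtraction wraps at OBJ_NUM (log2(OBJ_NUM) == 3) */
	unsigned int shift = 8 * sizeof(unsigned int) - 3;
	unsigned int len = ((chip_tail << shift) - (tail << shift)) >> shift;

	/* erratum workaround: indices equal although the ring is fully used */
	if (len == 0 && less_than_half_full && tx_free == 0)
		len = OBJ_NUM;
	return len;
}

int main(void)
{
	printf("normal case:  %u\n", tef_len(5, 2, 0, 3));	/* prints 3 */
	printf("erratum case: %u\n", tef_len(2, 2, 1, 0));	/* prints 8 */
	return 0;
}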
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 360158c295d3..4311c1f0eafd 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -579,11 +579,9 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
/* bus error interrupt */
netdev_dbg(dev, "bus error interrupt\n");
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
+ ecc = readl(priv->base + SUN4I_REG_STA_ADDR);
if (likely(skb)) {
- ecc = readl(priv->base + SUN4I_REG_STA_ADDR);
-
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (ecc & SUN4I_STA_MASK_ERR) {
@@ -601,9 +599,15 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
>> 16;
break;
}
- /* error occurred during transmission? */
- if ((ecc & SUN4I_STA_ERR_DIR) == 0)
+ }
+
+ /* error occurred during transmission? */
+ if ((ecc & SUN4I_STA_ERR_DIR) == 0) {
+ if (likely(skb))
cf->data[2] |= CAN_ERR_PROT_TX;
+ stats->tx_errors++;
+ } else {
+ stats->rx_errors++;
}
}
if (isrc & SUN4I_INT_ERR_PASSIVE) {
@@ -629,10 +633,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
tx_state = txerr >= rxerr ? state : 0;
rx_state = txerr <= rxerr ? state : 0;
- if (likely(skb))
- can_change_state(dev, cf, tx_state, rx_state);
- else
- priv->can.state = state;
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+ can_change_state(dev, cf, tx_state, rx_state);
if (state == CAN_STATE_BUS_OFF)
can_bus_off(dev);
}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 050c0b49938a..5355bac4dccb 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -335,15 +335,14 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
struct net_device_stats *stats = &dev->netdev->stats;
skb = alloc_can_err_skb(dev->netdev, &cf);
- if (skb == NULL)
- return;
if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
u8 state = msg->msg.can_state;
if (state & SJA1000_SR_BS) {
dev->can.state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
dev->can.can_stats.bus_off++;
can_bus_off(dev->netdev);
@@ -361,44 +360,53 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
/* bus error interrupt */
dev->can.can_stats.bus_error++;
- stats->rx_errors++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- switch (ecc & SJA1000_ECC_MASK) {
- case SJA1000_ECC_BIT:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- break;
- case SJA1000_ECC_FORM:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- break;
- case SJA1000_ECC_STUFF:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- break;
- default:
- cf->data[3] = ecc & SJA1000_ECC_SEG;
- break;
+ switch (ecc & SJA1000_ECC_MASK) {
+ case SJA1000_ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case SJA1000_ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case SJA1000_ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ cf->data[3] = ecc & SJA1000_ECC_SEG;
+ break;
+ }
}
/* Error occurred during transmission? */
- if ((ecc & SJA1000_ECC_DIR) == 0)
- cf->data[2] |= CAN_ERR_PROT_TX;
+ if ((ecc & SJA1000_ECC_DIR) == 0) {
+ stats->tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ } else {
+ stats->rx_errors++;
+ }
- if (dev->can.state == CAN_STATE_ERROR_WARNING ||
- dev->can.state == CAN_STATE_ERROR_PASSIVE) {
+ if (skb && (dev->can.state == CAN_STATE_ERROR_WARNING ||
+ dev->can.state == CAN_STATE_ERROR_PASSIVE)) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (txerr > rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
}
} else if (msg->type == CPC_MSG_TYPE_OVERRUN) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
stats->rx_over_errors++;
stats->rx_errors++;
}
- netif_rx(skb);
+ if (skb)
+ netif_rx(skb);
}
/*
diff --git a/drivers/net/can/usb/f81604.c b/drivers/net/can/usb/f81604.c
index bc0c8903fe77..e0cfa1460b0b 100644
--- a/drivers/net/can/usb/f81604.c
+++ b/drivers/net/can/usb/f81604.c
@@ -526,7 +526,6 @@ static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv,
netdev_dbg(netdev, "bus error interrupt\n");
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
if (skb) {
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -548,10 +547,15 @@ static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv,
/* set error location */
cf->data[3] = data->ecc & F81604_SJA1000_ECC_SEG;
+ }
- /* Error occurred during transmission? */
- if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0)
+ /* Error occurred during transmission? */
+ if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0) {
+ stats->tx_errors++;
+ if (skb)
cf->data[2] |= CAN_ERR_PROT_TX;
+ } else {
+ stats->rx_errors++;
}
set_bit(F81604_CLEAR_ECC, &priv->clear_flags);
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index bc86e9b329fd..b6f4de375df7 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -43,9 +43,6 @@
#define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0
#define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30
-#define GS_USB_ENDPOINT_IN 1
-#define GS_USB_ENDPOINT_OUT 2
-
/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
* for timer overflow (will be after ~71 minutes)
*/
@@ -336,6 +333,9 @@ struct gs_usb {
unsigned int hf_size_rx;
u8 active_channels;
+
+ unsigned int pipe_in;
+ unsigned int pipe_out;
};
/* 'allocate' a tx context.
@@ -687,7 +687,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, parent->udev,
- usb_rcvbulkpipe(parent->udev, GS_USB_ENDPOINT_IN),
+ parent->pipe_in,
hf, dev->parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
@@ -819,7 +819,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
}
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT),
+ dev->parent->pipe_out,
hf, dev->hf_size_tx,
gs_usb_xmit_callback, txc);
@@ -925,8 +925,7 @@ static int gs_can_open(struct net_device *netdev)
/* fill, anchor, and submit rx urb */
usb_fill_bulk_urb(urb,
dev->udev,
- usb_rcvbulkpipe(dev->udev,
- GS_USB_ENDPOINT_IN),
+ dev->parent->pipe_in,
buf,
dev->parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
@@ -1413,6 +1412,7 @@ static int gs_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *ep_in, *ep_out;
struct gs_host_frame *hf;
struct gs_usb *parent;
struct gs_host_config hconf = {
@@ -1422,6 +1422,13 @@ static int gs_usb_probe(struct usb_interface *intf,
unsigned int icount, i;
int rc;

+ rc = usb_find_common_endpoints(intf->cur_altsetting,
+ &ep_in, &ep_out, NULL, NULL);
+ if (rc) {
+ dev_err(&intf->dev, "Required endpoints not found\n");
+ return rc;
+ }
+
/* send host config */
rc = usb_control_msg_send(udev, 0,
GS_USB_BREQ_HOST_FORMAT,
@@ -1466,6 +1473,10 @@ static int gs_usb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, parent);
parent->udev = udev;
+ /* store the detected endpoints */
+ parent->pipe_in = usb_rcvbulkpipe(parent->udev, ep_in->bEndpointAddress);
+ parent->pipe_out = usb_sndbulkpipe(parent->udev, ep_out->bEndpointAddress);
+
for (i = 0; i < icount; i++) {
unsigned int hf_size_rx = 0;
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index f8d8c70642c4..59b4a7240b58 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -673,7 +673,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
* We therefore need to lock the MDIO bus onto which the switch is
* connected.
*/
- mutex_lock(&priv->bus->mdio_lock);
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
/* Actually start the request:
* 1. Send mdio master packet
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 20ba14eb87e0..b901ecb57f25 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1193,10 +1193,14 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
}
}
- if (fltr->base.flags & BNXT_ACT_DROP)
+ if (fltr->base.flags & BNXT_ACT_DROP) {
fs->ring_cookie = RX_CLS_FLOW_DISC;
- else
+ } else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
+ fs->flow_type |= FLOW_RSS;
+ cmd->rss_context = fltr->base.fw_vnic_id;
+ } else {
fs->ring_cookie = fltr->base.rxq;
+ }
rc = 0;
fltr_err:
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index c09370eab319..16a7908c79f7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -28,6 +28,9 @@ EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
u8 preemptible_tcs)
{
+ if (!(priv->si->hw_features & ENETC_SI_F_QBU))
+ return;
+
priv->preemptible_tcs = preemptible_tcs;
enetc_mm_commit_preemptible_tcs(priv);
}
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index 39689826cc8f..ce253aac5344 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -94,7 +94,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
goto out_free;
}
- snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start);
bus->priv = priv;
bus->parent = dev;
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index d96028f01770..fb416d60dcd7 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -24,7 +24,6 @@
/* General defines */
#define FMAN_LIODN_TBL 64 /* size of LIODN table */
-#define MAX_NUM_OF_MACS 10
#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
#define BASE_RX_PORTID 0x08
#define BASE_TX_PORTID 0x28
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index 2ea575a46675..74eb62eba0d7 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -74,6 +74,9 @@
#define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */
#define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */

+/* General defines */
+#define MAX_NUM_OF_MACS 10
+
struct fman; /* FMan data */

/* Enum for defining port types */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 11da139082e1..1916a2ac48b9 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -259,6 +259,11 @@ static int mac_probe(struct platform_device *_of_dev)
err = -EINVAL;
goto _return_dev_put;
}
+ if (val >= MAX_NUM_OF_MACS) {
+ dev_err(dev, "cell-index value is too big for %pOF\n", mac_node);
+ err = -EINVAL;
+ goto _return_dev_put;
+ }
priv->cell_index = (u8)val;
/* Get the MAC address */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 2e210a003558..249b482e32d3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -123,7 +123,7 @@ static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np)
* we get is an int, and the odds of multiple bitbang mdio buses
* is low enough that it's not worth going too crazy.
*/
- snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start);
data = of_get_property(np, "fsl,mdio-pin", &len);
if (!data || len != 4)
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 009716a12a26..f1324e25b2af 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -542,7 +542,8 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
/**
* ice_find_netlist_node
* @hw: pointer to the hw struct
- * @node_type_ctx: type of netlist node to look for
+ * @node_type: type of netlist node to look for
+ * @ctx: context of the search
* @node_part_number: node part number to look for
* @node_handle: output parameter if node found - optional
*
@@ -552,10 +553,12 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
* valid if the function returns zero, and should be ignored on any non-zero
* return value.
*
- * Returns: 0 if the node is found, -ENOENT if no handle was found, and
- * a negative error code on failure to access the AQ.
+ * Return:
+ * * 0 if the node is found,
+ * * -ENOENT if no handle was found,
+ * * negative error code on failure to access the AQ.
*/
-static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
+static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx,
u8 node_part_number, u16 *node_handle)
{
u8 idx;
@@ -566,8 +569,8 @@ static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
int status;
cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
- node_type_ctx);
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx);
cmd.addr.topo_params.index = idx;
status = ice_aq_get_netlist_node(hw, &cmd,
@@ -2726,9 +2729,11 @@ bool ice_is_pf_c827(struct ice_hw *hw)
*/
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
{
- if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
- ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
return false;
@@ -2744,6 +2749,7 @@ bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
{
if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
NULL))
return false;
@@ -2764,12 +2770,14 @@ bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
bool ice_is_cgu_in_netlist(struct ice_hw *hw)
{
if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
NULL)) {
hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
return true;
} else if (!ice_find_netlist_node(hw,
ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
NULL)) {
hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;
@@ -2788,6 +2796,7 @@ bool ice_is_cgu_in_netlist(struct ice_hw *hw)
bool ice_is_gps_in_netlist(struct ice_hw *hw)
{
if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
return false;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index b1e7727b8677..8f2e758c3942 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6361,10 +6361,12 @@ ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
int err = 0;
/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
- * if either bit is set
+ * if either bit is set. In switchdev mode Rx filtering should never be
+ * enabled.
*/
- if (features &
- (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+ if ((features &
+ (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) &&
+ !ice_is_eswitch_mode_switchdev(vsi->back))
err = vlan_ops->ena_rx_filtering(vsi);
else
err = vlan_ops->dis_rx_filtering(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index ec8db830ac73..3816e45b6ab4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -1495,7 +1495,8 @@ static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+ *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) |
+ FIELD_PREP(TS_PHY_LOW_M, lo);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 6cedc1a906af..4c8b84571344 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -663,9 +663,8 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw)
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
-#define TS_PHY_LOW_M 0xFF
-#define TS_PHY_HIGH_M 0xFFFFFFFF
-#define TS_PHY_HIGH_S 8
+#define TS_PHY_LOW_M GENMASK(7, 0)
+#define TS_PHY_HIGH_M GENMASK_ULL(39, 8)
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index d4e6f0e10487..60d15b3e6e2f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2448,6 +2448,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
* rest of the packet.
*/
tx_buf->type = LIBETH_SQE_EMPTY;
+ idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
/* Adjust the DMA offset and the remaining size of the
* fragment. On the first iteration of this loop,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f1d088168723..18284a838e24 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -637,6 +637,10 @@ static int __init igb_init_module(void)
dca_register_notify(&dca_notifier);
#endif
ret = pci_register_driver(&igb_driver);
+#ifdef CONFIG_IGB_DCA
+ if (ret)
+ dca_unregister_notify(&dca_notifier);
+#endif
return ret;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 6493abf189de..6639069ad528 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -194,6 +194,8 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
dev_err(&adapter->pdev->dev, format, ## arg)
#define e_dev_notice(format, arg...) \
dev_notice(&adapter->pdev->dev, format, ## arg)
+#define e_dbg(msglvl, format, arg...) \
+ netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_info(msglvl, format, arg...) \
netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_err(msglvl, format, arg...) \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 14aa2ca51f70..81179c60af4e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -40,7 +40,7 @@
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
-#define IXGBE_SFF_BASEBX10_CAPABLE 0x64
+#define IXGBE_SFF_BASEBX10_CAPABLE 0x40
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index e71715f5da22..20415c1238ef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1047,7 +1047,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
break;
}
- e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
+ e_dbg(drv, "VF %d requested unsupported api version %u\n", vf, api);
return -1;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index 66cf17f19408..f804b35d79c7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -629,7 +629,6 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_14:
- case ixgbe_mbox_api_15:
break;
default:
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 878cbdbf5ec8..e7e01f3298ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -5,6 +5,7 @@
#include <net/nexthop.h>
#include <net/ip_tunnels.h>
#include "tc_tun_encap.h"
+#include "fs_core.h"
#include "en_tc.h"
#include "tc_tun.h"
#include "rep/tc.h"
@@ -24,10 +25,18 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
- if (!route_dev || !netif_is_ovs_master(route_dev) ||
- attr->parse_attr->filter_dev == e->out_dev)
+ if (!route_dev || !netif_is_ovs_master(route_dev))
goto out;
+ if (priv->mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS &&
+ mlx5e_eswitch_uplink_rep(attr->parse_attr->filter_dev) &&
+ (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) {
+ mlx5_core_warn(priv->mdev,
+ "Matching on external port with encap + fwd to table actions is not allowed for firmware steering\n");
+ err = -EINVAL;
+ goto out;
+ }
+
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
MLX5E_TC_INT_PORT_EGRESS,
&attr->action, out_index);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 13a3fa8dc0cb..c14bef83d84d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2652,11 +2652,11 @@ void mlx5e_trigger_napi_sched(struct napi_struct *napi)
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
- struct mlx5e_channel_param *cparam,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
struct net_device *netdev = priv->netdev;
+ struct mlx5e_channel_param *cparam;
struct mlx5_core_dev *mdev;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
@@ -2678,8 +2678,15 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
return err;
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
- if (!c)
- return -ENOMEM;
+ cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
+ if (!c || !cparam) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ err = mlx5e_build_channel_param(mdev, params, cparam);
+ if (err)
+ goto err_free;
c->priv = priv;
c->mdev = mdev;
@@ -2713,6 +2720,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
*cp = c;
+ kvfree(cparam);
return 0;
err_close_queues:
@@ -2721,6 +2729,8 @@ err_close_queues:
err_napi_del:
netif_napi_del(&c->napi);
+err_free:
+ kvfree(cparam);
kvfree(c);
return err;
@@ -2779,20 +2789,14 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
int mlx5e_open_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs)
{
- struct mlx5e_channel_param *cparam;
int err = -ENOMEM;
int i;
chs->num = chs->params.num_channels;
chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
- cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
- if (!chs->c || !cparam)
- goto err_free;
-
- err = mlx5e_build_channel_param(priv->mdev, &chs->params, cparam);
- if (err)
- goto err_free;
+ if (!chs->c)
+ goto err_out;
for (i = 0; i < chs->num; i++) {
struct xsk_buff_pool *xsk_pool = NULL;
@@ -2800,7 +2804,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
if (chs->params.xdp_prog)
xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
- err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
+ err = mlx5e_open_channel(priv, i, &chs->params, xsk_pool, &chs->c[i]);
if (err)
goto err_close_channels;
}
@@ -2818,7 +2822,6 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
}
mlx5e_health_channels_update(priv);
- kvfree(cparam);
return 0;
err_close_ptp:
@@ -2829,9 +2832,8 @@ err_close_channels:
for (i--; i >= 0; i--)
mlx5e_close_channel(chs->c[i]);
-err_free:
kfree(chs->c);
- kvfree(cparam);
+err_out:
chs->num = 0;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 6e4f8aaf8d2f..2eabfcc247c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -3698,6 +3698,7 @@ void mlx5_fs_core_free(struct mlx5_core_dev *dev)
int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering;
+ char name[80];
int err = 0;
err = mlx5_init_fc_stats(dev);
@@ -3722,10 +3723,12 @@ int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
else
steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
- steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+ snprintf(name, sizeof(name), "%s-mlx5_fs_fgs", dev_name(dev->device));
+ steering->fgs_cache = kmem_cache_create(name,
sizeof(struct mlx5_flow_group), 0,
0, NULL);
- steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+ snprintf(name, sizeof(name), "%s-mlx5_fs_ftes", dev_name(dev->device));
+ steering->ftes_cache = kmem_cache_create(name, sizeof(struct fs_fte), 0,
0, NULL);
if (!steering->ftes_cache || !steering->fgs_cache) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
index 601fad5fc54a..ee4058bafe11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
@@ -39,6 +39,8 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
} else {
mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
}
+ } else {
+ kfree(mt->fc);
}
mlx5hws_match_template_destroy(mt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
index 6d443e6ee8d9..08be034bd1e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
@@ -990,6 +990,7 @@ static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
for (i = 0; i < bwc_queues; i++) {
mutex_init(&ctx->bwc_send_queue_locks[i]);
lockdep_register_key(ctx->bwc_lock_class_keys + i);
+ lockdep_set_class(ctx->bwc_send_queue_locks + i, ctx->bwc_lock_class_keys + i);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 947500f8ed71..7aa1a462a103 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -67,7 +67,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
for (j = 0; j < block->instances_count; j++) {
const struct mlxsw_afk_element_info *elinfo;
- struct mlxsw_afk_element_inst *elinst;
+ const struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[j];
elinfo = &mlxsw_afk_element_infos[elinst->element];
@@ -154,7 +154,7 @@ static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
for (j = 0; j < block->instances_count; j++) {
- struct mlxsw_afk_element_inst *elinst;
+ const struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[j];
if (elinst->element == element) {
@@ -386,7 +386,7 @@ mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block,
int i;
for (i = 0; i < block->instances_count; i++) {
- struct mlxsw_afk_element_inst *elinst;
+ const struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[i];
if (elinst->element == element)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 98a05598178b..5aa1afb3f2ca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -117,7 +117,7 @@ struct mlxsw_afk_element_inst { /* element instance in actual block */
struct mlxsw_afk_block {
u16 encoding; /* block ID */
- struct mlxsw_afk_element_inst *instances;
+ const struct mlxsw_afk_element_inst *instances;
unsigned int instances_count;
bool high_entropy;
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index eaad78605602..1850a975b380 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -7,7 +7,7 @@
#include "item.h"
#include "core_acl_flex_keys.h"
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
@@ -15,7 +15,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2),
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
@@ -23,27 +23,27 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2),
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
@@ -51,35 +51,35 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
};
@@ -124,90 +124,90 @@ const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
.clear_block = mlxsw_sp1_afk_clear_block,
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
MLXSW_AFK_ELEMENT_INST_U32(FDB_MISS, 0x00, 3, 1),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
MLXSW_AFK_ELEMENT_INST_U32(FDB_MISS, 0x00, 3, 1),
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 8, -1, true), /* RX_ACL_SYSTEM_PORT */
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6),
MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2),
MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = {
MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER, 0x04, 20, 11, 0, true),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_3, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_4_7, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */
MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 0, 16),
};
@@ -319,16 +319,20 @@ const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
.clear_block = mlxsw_sp2_afk_clear_block,
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 18, 12),
MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1b[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER, 0x04, 20, 12),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
};
@@ -341,7 +345,7 @@ static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = {
MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x1A, mlxsw_sp_afk_element_info_mac_5b),
MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x38, mlxsw_sp_afk_element_info_ipv4_0),
- MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x3F, mlxsw_sp_afk_element_info_ipv4_1b),
MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
MLXSW_AFK_BLOCK(0x36, mlxsw_sp_afk_element_info_ipv4_5b),
MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index c47266d1c7c2..b2d206dec70c 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -2439,6 +2439,7 @@ void mana_query_gf_stats(struct mana_port_context *apc)
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
sizeof(req), sizeof(resp));
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
STATISTICS_FLAGS_HC_RX_BYTES |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 16e6bd466143..6218d9c26855 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3314,7 +3314,9 @@ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
+ if (((rsp & FW_MSG_CODE_MASK) == FW_MSG_CODE_UNSUPPORTED))
+ rc = -EOPNOTSUPP;
+ else if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
rc = -EINVAL;
return rc;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 713a89bb21e9..5ed2818bac25 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4233,8 +4233,8 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
{
unsigned int padto = 0, len = skb->len;
- if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN &&
- rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) {
+ if (len < 128 + RTL_MIN_PATCH_LEN && rtl_skb_is_udp(skb) &&
+ skb_transport_header_was_set(skb)) {
unsigned int trans_data_len = skb_tail_pointer(skb) -
skb_transport_header(skb);
@@ -4258,9 +4258,15 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
struct sk_buff *skb)
{
- unsigned int padto;
+ unsigned int padto = 0;
- padto = rtl8125_quirk_udp_padto(tp, skb);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ padto = rtl8125_quirk_udp_padto(tp, skb);
+ break;
+ default:
+ break;
+ }
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 84fa911c78db..fe0bf1d3217a 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2502,7 +2502,7 @@ static void rocker_carrier_init(const struct rocker_port *rocker_port)
u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
bool link_up;
- link_up = link_status & (1 << rocker_port->pport);
+ link_up = link_status & (1ULL << rocker_port->pport);
if (link_up)
netif_carrier_on(rocker_port->dev);
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 93a78fd0737b..28fff6cab812 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -44,6 +44,7 @@
#define GMAC_MDIO_DATA 0x00000204
#define GMAC_GPIO_STATUS 0x0000020C
#define GMAC_ARP_ADDR 0x00000210
+#define GMAC_EXT_CFG1 0x00000238
#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
#define GMAC_L3L4_CTRL(reg) (0x900 + (reg) * 0x30)
@@ -284,6 +285,10 @@ enum power_event {
#define GMAC_HW_FEAT_DVLAN BIT(5)
#define GMAC_HW_FEAT_NRVF GENMASK(2, 0)
+/* MAC extended config 1 */
+#define GMAC_CONFIG1_SAVE_EN BIT(24)
+#define GMAC_CONFIG1_SPLM(v) FIELD_PREP(GENMASK(9, 8), v)
+
/* GMAC GPIO Status reg */
#define GMAC_GPO0 BIT(16)
#define GMAC_GPO1 BIT(17)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 77b35abc6f6f..22a044d93e17 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -534,6 +534,11 @@ static void dwmac4_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
writel(value, ioaddr + GMAC_EXT_CONFIG);
+ value = readl(ioaddr + GMAC_EXT_CFG1);
+ value |= GMAC_CONFIG1_SPLM(1); /* Split mode set to L2OFST */
+ value |= GMAC_CONFIG1_SAVE_EN; /* Enable Split AV mode */
+ writel(value, ioaddr + GMAC_EXT_CFG1);
+
value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (en)
value |= DMA_CONTROL_SPH;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 7f611c74eb62..ba15a0a4ce62 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -895,7 +895,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (geneve->cfg.df == GENEVE_DF_SET) {
df = htons(IP_DF);
} else if (geneve->cfg.df == GENEVE_DF_INHERIT) {
- struct ethhdr *eth = eth_hdr(skb);
+ struct ethhdr *eth = skb_eth_hdr(skb);
if (ntohs(eth->h_proto) == ETH_P_IPV6) {
df = htons(IP_DF);
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index d3273bc0da4a..691969a4910f 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -351,6 +351,22 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
static void lan88xx_link_change_notify(struct phy_device *phydev)
{
int temp;
+ int ret;
+
+ /* Reset PHY to ensure MII_LPA provides up-to-date information. This
+ * issue is reproducible only after parallel detection, as described
+ * in IEEE 802.3-2022, Section 28.2.3.1 ("Parallel detection function"),
+ * where the link partner does not support auto-negotiation.
+ */
+ if (phydev->state == PHY_NOLINK) {
+ ret = phy_init_hw(phydev);
+ if (ret < 0)
+ goto link_change_notify_failed;
+
+ ret = _phy_start_aneg(phydev);
+ if (ret < 0)
+ goto link_change_notify_failed;
+ }

/* At forced 100 F/H mode, chip may fail to set mode correctly
* when cable is switched between long(~50+m) and short one.
@@ -377,6 +393,11 @@ static void lan88xx_link_change_notify(struct phy_device *phydev)
temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
phy_write(phydev, LAN88XX_INT_MASK, temp);
}
+
+ return;
+
+link_change_notify_failed:
+ phydev_err(phydev, "Link change process failed %pe\n", ERR_PTR(ret));
}
/**
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index a5684ef5884b..dcec92625cf6 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -466,7 +466,8 @@ static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
static const struct sfp_quirk sfp_quirks[] = {
// Alcatel Lucent G-010S-P can operate at 2500base-X, but incorrectly
// report 2500MBd NRZ in their EEPROM
- SFP_QUIRK_M("ALCATELLUCENT", "G010SP", sfp_quirk_2500basex),
+ SFP_QUIRK("ALCATELLUCENT", "G010SP", sfp_quirk_2500basex,
+ sfp_fixup_ignore_tx_fault),
// Alcatel Lucent G-010S-A can operate at 2500base-X, but report 3.2GBd
// NRZ in their EEPROM
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 53a038fcbe99..c897afef0b41 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -946,9 +946,6 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
void *buf, *head;
dma_addr_t addr;
- if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
- return NULL;
-
head = page_address(alloc_frag->page);
if (rq->do_dma) {
@@ -2443,6 +2440,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
len = SKB_DATA_ALIGN(len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp)))
+ return -ENOMEM;
+
buf = virtnet_rq_alloc(rq, len, gfp);
if (unlikely(!buf))
return -ENOMEM;
@@ -2545,6 +2545,12 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
*/
len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
+ if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
+ return -ENOMEM;
+
+ if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size)
+ len -= sizeof(struct virtnet_rq_dma);
+
buf = virtnet_rq_alloc(rq, len + room, gfp);
if (unlikely(!buf))
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 08a6f36a6be9..6805357ee29e 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -3,7 +3,7 @@
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -2648,9 +2648,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
netif_napi_del(&ar->napi);
- ath10k_core_destroy(ar);
-
destroy_workqueue(ar_sdio->workqueue);
+
+ ath10k_core_destroy(ar);
}
static const struct sdio_device_id ath10k_sdio_devices[] = {
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 6d0784a21558..8946141aa0dc 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -8186,9 +8186,9 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
goto out;
}
- ieee80211_iterate_stations_atomic(hw,
- ath12k_mac_disable_peer_fixed_rate,
- arvif);
+ ieee80211_iterate_stations_mtx(hw,
+ ath12k_mac_disable_peer_fixed_rate,
+ arvif);
} else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask,
&single_nss)) {
rate = WMI_FIXED_RATE_NONE;
@@ -8233,16 +8233,16 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
goto out;
}
- ieee80211_iterate_stations_atomic(hw,
- ath12k_mac_disable_peer_fixed_rate,
- arvif);
+ ieee80211_iterate_stations_mtx(hw,
+ ath12k_mac_disable_peer_fixed_rate,
+ arvif);
mutex_lock(&ar->conf_mutex);
arvif->bitrate_mask = *mask;
- ieee80211_iterate_stations_atomic(hw,
- ath12k_mac_set_bitrate_mask_iter,
- arvif);
+ ieee80211_iterate_stations_mtx(hw,
+ ath12k_mac_set_bitrate_mask_iter,
+ arvif);
mutex_unlock(&ar->conf_mutex);
}
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index b51fce5ae260..f5ca2fe0d074 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -46,6 +46,8 @@ static const struct pci_device_id ath5k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
{ PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
{ PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
+ { PCI_VDEVICE(ATHEROS, 0xff16) }, /* Gigaset SX76[23] AR241[34]A */
+ { PCI_VDEVICE(ATHEROS, 0xff1a) }, /* Arcadyan ARV45XX AR2417 */
{ PCI_VDEVICE(ATHEROS, 0xff1b) }, /* AR5BXB63 */
{ 0 }
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index d35262335eaf..8a1e33764244 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -770,7 +770,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
sdiodev->settings->bus.sdio.txglomsz);
- nents += (nents >> 4) + 1;
+ nents *= 2;
WARN_ON(nents > sdiodev->max_segment_count);
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index 48d6870bbf4e..9a97ab9b89ae 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -870,8 +870,8 @@ void libipw_rx_any(struct libipw_device *ieee,
switch (ieee->iw_mode) {
case IW_MODE_ADHOC:
/* our BSS and not from/to DS */
- if (ether_addr_equal(hdr->addr3, ieee->bssid))
- if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == 0) {
+ if (ether_addr_equal(hdr->addr3, ieee->bssid) &&
+ ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == 0)) {
/* promisc: get all */
if (ieee->dev->flags & IFF_PROMISC)
is_packet_for_us = 1;
@@ -885,8 +885,8 @@ void libipw_rx_any(struct libipw_device *ieee,
break;
case IW_MODE_INFRA:
/* our BSS (== from our AP) and from DS */
- if (ether_addr_equal(hdr->addr2, ieee->bssid))
- if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS) {
+ if (ether_addr_equal(hdr->addr2, ieee->bssid) &&
+ ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS)) {
/* promisc: get all */
if (ieee->dev->flags & IFF_PROMISC)
is_packet_for_us = 1;
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index 21d0754dd7f6..b67e551fcee3 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -1297,12 +1297,12 @@ static void rtw_sdio_deinit_tx(struct rtw_dev *rtwdev)
struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
int i;

- for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
- skb_queue_purge(&rtwsdio->tx_queue[i]);
-
flush_workqueue(rtwsdio->txwq);
destroy_workqueue(rtwsdio->txwq);
kfree(rtwsdio->tx_handler_data);
+
+ for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
+ ieee80211_purge_tx_queue(rtwdev->hw, &rtwsdio->tx_queue[i]);
}
int rtw_sdio_probe(struct sdio_func *sdio_func,
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index b17a429bcd29..07695294767a 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -423,10 +423,11 @@ static void rtw_usb_tx_handler(struct work_struct *work)
static void rtw_usb_tx_queue_purge(struct rtw_usb *rtwusb)
{
+ struct rtw_dev *rtwdev = rtwusb->rtwdev;
int i;
for (i = 0; i < ARRAY_SIZE(rtwusb->tx_queue); i++)
- skb_queue_purge(&rtwusb->tx_queue[i]);
+ ieee80211_purge_tx_queue(rtwdev->hw, &rtwusb->tx_queue[i]);
}
static void rtw_usb_write_port_complete(struct urb *urb)
@@ -888,9 +889,9 @@ static void rtw_usb_deinit_tx(struct rtw_dev *rtwdev)
{
struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
- rtw_usb_tx_queue_purge(rtwusb);
flush_workqueue(rtwusb->txwq);
destroy_workqueue(rtwusb->txwq);
+ rtw_usb_tx_queue_purge(rtwusb);
}
static int rtw_usb_intf_init(struct rtw_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 13a7c39ceb6f..e6bceef691e9 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -6074,6 +6074,9 @@ static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
NULL, 0, req->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
skb_put_data(skb, ies->common_ies, ies->common_ie_len);
hdr = (struct ieee80211_hdr *)skb->data;
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 6b4922de3047..37b743acbb7b 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -106,12 +106,12 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
nvdimm_bus_lock(&ndns->dev);
nd_dax = nd_dax_alloc(nd_region);
- nd_pfn = &nd_dax->nd_pfn;
- dax_dev = nd_pfn_devinit(nd_pfn, ndns);
+ dax_dev = nd_dax_devinit(nd_dax, ndns);
nvdimm_bus_unlock(&ndns->dev);
if (!dax_dev)
return -ENOMEM;
pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ nd_pfn = &nd_dax->nd_pfn;
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, DAX_SIG);
dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 2dbb1dca17b5..5ca06e9a2d29 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -600,6 +600,13 @@ struct nd_dax *to_nd_dax(struct device *dev);
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_dax(const struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
+static inline struct device *nd_dax_devinit(struct nd_dax *nd_dax,
+ struct nd_namespace_common *ndns)
+{
+ if (!nd_dax)
+ return NULL;
+ return nd_pfn_devinit(&nd_dax->nd_pfn, ndns);
+}
#else
static inline int nd_dax_probe(struct device *dev,
struct nd_namespace_common *ndns)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f0d4c6f3cb05..249914b90dbf 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1303,9 +1303,10 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
}
-static void nvme_keep_alive_finish(struct request *rq,
- blk_status_t status, struct nvme_ctrl *ctrl)
+static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+ blk_status_t status)
{
+ struct nvme_ctrl *ctrl = rq->end_io_data;
unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
unsigned long delay = nvme_keep_alive_work_period(ctrl);
enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
@@ -1322,17 +1323,20 @@ static void nvme_keep_alive_finish(struct request *rq,
delay = 0;
}
+ blk_mq_free_request(rq);
+
if (status) {
dev_err(ctrl->device,
"failed nvme_keep_alive_end_io error=%d\n",
status);
- return;
+ return RQ_END_IO_NONE;
}
ctrl->ka_last_check_time = jiffies;
ctrl->comp_seen = false;
if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING)
queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
+ return RQ_END_IO_NONE;
}
static void nvme_keep_alive_work(struct work_struct *work)
@@ -1341,7 +1345,6 @@ static void nvme_keep_alive_work(struct work_struct *work)
struct nvme_ctrl, ka_work);
bool comp_seen = ctrl->comp_seen;
struct request *rq;
- blk_status_t status;
ctrl->ka_last_check_time = jiffies;
@@ -1364,9 +1367,9 @@ static void nvme_keep_alive_work(struct work_struct *work)
nvme_init_request(rq, &ctrl->ka_cmd);
rq->timeout = ctrl->kato * HZ;
- status = blk_execute_rq(rq, false);
- nvme_keep_alive_finish(rq, status, ctrl);
- blk_mq_free_request(rq);
+ rq->end_io = nvme_keep_alive_end_io;
+ rq->end_io_data = ctrl;
+ blk_execute_rq_nowait(rq, false);
}
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@@ -2064,7 +2067,8 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
lim->physical_block_size = min(phys_bs, atomic_bs);
lim->io_min = phys_bs;
lim->io_opt = io_opt;
- if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
+ if ((ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
+ (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM))
lim->max_write_zeroes_sectors = UINT_MAX;
else
lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
@@ -3250,8 +3254,9 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ct
}
if (!ctrl->maxcmd) {
- dev_err(ctrl->device, "Maximum outstanding commands is 0\n");
- return -EINVAL;
+ dev_warn(ctrl->device,
+ "Firmware bug: maximum outstanding commands is 0\n");
+ ctrl->maxcmd = ctrl->sqsize + 1;
}
return 0;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 24a2759798d0..913e6e5a8070 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1091,13 +1091,7 @@ destroy_io:
}
destroy_admin:
nvme_stop_keep_alive(&ctrl->ctrl);
- nvme_quiesce_admin_queue(&ctrl->ctrl);
- blk_sync_queue(ctrl->ctrl.admin_q);
- nvme_rdma_stop_queue(&ctrl->queues[0]);
- nvme_cancel_admin_tagset(&ctrl->ctrl);
- if (new)
- nvme_remove_admin_tag_set(&ctrl->ctrl);
- nvme_rdma_destroy_admin_queue(ctrl);
+ nvme_rdma_teardown_admin_queue(ctrl, new);
return ret;
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 3e416af2659f..55abfe5e1d25 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2278,7 +2278,7 @@ destroy_io:
}
destroy_admin:
nvme_stop_keep_alive(ctrl);
- nvme_tcp_teardown_admin_queue(ctrl, false);
+ nvme_tcp_teardown_admin_queue(ctrl, new);
return ret;
}
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index b5447228696d..6483e1874477 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1830,6 +1830,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c
index c9933ecf6833..0564fdce47c2 100644
--- a/drivers/pci/controller/plda/pcie-starfive.c
+++ b/drivers/pci/controller/plda/pcie-starfive.c
@@ -404,6 +404,9 @@ static int starfive_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
plda->host_ops = &sf_host_ops;
plda->num_events = PLDA_MAX_EVENT_NUM;
/* mask doorbell event */
@@ -413,11 +416,12 @@ static int starfive_pcie_probe(struct platform_device *pdev)
plda->events_bitmap <<= PLDA_NUM_DMA_EVENTS;
ret = plda_pcie_host_init(&pcie->plda, &starfive_pcie_ops,
&stf_pcie_event);
- if (ret)
+ if (ret) {
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return ret;
+ }
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
platform_set_drvdata(pdev, pcie);
return 0;
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 264a180403a0..9d9596947350 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -740,11 +740,9 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
return 0;
- pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
-
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
if (!pos)
- return 0;
+ goto out_state_change;
/*
* Skip if the max snoop LTR is non-zero, indicating BIOS has set it
@@ -752,7 +750,7 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
*/
pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg);
if (!!(ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK)))
- return 0;
+ goto out_state_change;
/*
* Set the default values to the maximum required by the platform to
@@ -764,6 +762,13 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg);
pci_info(pdev, "VMD: Default LTR value set by driver\n");
+out_state_change:
+ /*
+ * Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
+ * PCIe r6.0, sec 5.5.4.
+ */
+ pci_set_power_state_locked(pdev, PCI_D0);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
return 0;
}
@@ -1100,6 +1105,10 @@ static const struct pci_device_id vmd_ids[] = {
.driver_data = VMD_FEATS_CLIENT,},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb60b),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb06f),
+ .driver_data = VMD_FEATS_CLIENT,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 5d0f4db1cab7..3e5a117f5b5d 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -521,6 +521,31 @@ static ssize_t bus_rescan_store(struct device *dev,
static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
bus_rescan_store);
+static ssize_t reset_subordinate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus *bus = pdev->subordinate;
+ unsigned long val;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val) {
+ int ret = __pci_reset_bus(bus);
+
+ if (ret)
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(reset_subordinate);
+
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
struct device_attribute *attr,
@@ -625,6 +650,7 @@ static struct attribute *pci_dev_attrs[] = {
static struct attribute *pci_bridge_attrs[] = {
&dev_attr_subordinate_bus_number.attr,
&dev_attr_secondary_bus_number.attr,
+ &dev_attr_reset_subordinate.attr,
NULL,
};
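
The new reset_subordinate attribute is a standard write-only sysfs knob: DEVICE_ATTR_WO(name) wires up a name_store() callback that checks CAP_SYS_ADMIN, parses the input with kstrtoul() and acts only on a non-zero value; for bridge devices the file should then appear under /sys/bus/pci/devices/<bdf>/. A minimal sketch of the same pattern with hypothetical names (my_trigger, my_do_reset()):

#include <linux/device.h>
#include <linux/capability.h>
#include <linux/kstrtox.h>

static int my_do_reset(struct device *dev) { return 0; }	/* hypothetical action */

static ssize_t my_trigger_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;
	if (val) {
		int ret = my_do_reset(dev);

		if (ret)
			return ret;
	}
	return count;
}
static DEVICE_ATTR_WO(my_trigger);

static struct attribute *my_attrs[] = {
	&dev_attr_my_trigger.attr,	/* generated by DEVICE_ATTR_WO() */
	NULL,
};
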
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 08f170fd3efb..dd3c6dcb47ae 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5885,7 +5885,7 @@ EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
*
* Same as above except return -EAGAIN if the bus cannot be locked
*/
-static int __pci_reset_bus(struct pci_bus *bus)
+int __pci_reset_bus(struct pci_bus *bus)
{
int rc;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 14d00ce45bfa..1cdc2c9547a7 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -104,6 +104,7 @@ bool pci_reset_supported(struct pci_dev *dev);
void pci_init_reset_methods(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
int pci_bus_error_reset(struct pci_dev *dev);
+int __pci_reset_bus(struct pci_bus *bus);
struct pci_cap_saved_data {
u16 cap_nr;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f1615805f5b0..ebb0c1d5cae2 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1633,23 +1633,33 @@ static void set_pcie_thunderbolt(struct pci_dev *dev)
static void set_pcie_untrusted(struct pci_dev *dev)
{
- struct pci_dev *parent;
+ struct pci_dev *parent = pci_upstream_bridge(dev);
+ if (!parent)
+ return;
/*
- * If the upstream bridge is untrusted we treat this device
+ * If the upstream bridge is untrusted we treat this device as
* untrusted as well.
*/
- parent = pci_upstream_bridge(dev);
- if (parent && (parent->untrusted || parent->external_facing))
+ if (parent->untrusted) {
+ dev->untrusted = true;
+ return;
+ }
+
+ if (arch_pci_dev_is_removable(dev)) {
+ pci_dbg(dev, "marking as untrusted\n");
dev->untrusted = true;
+ }
}
static void pci_set_removable(struct pci_dev *dev)
{
struct pci_dev *parent = pci_upstream_bridge(dev);
+ if (!parent)
+ return;
/*
- * We (only) consider everything downstream from an external_facing
+ * We (only) consider everything tunneled below an external_facing
* device to be removable by the user. We're mainly concerned with
* consumer platforms with user accessible thunderbolt ports that are
* vulnerable to DMA attacks, and we expect those ports to be marked by
@@ -1659,9 +1669,15 @@ static void pci_set_removable(struct pci_dev *dev)
* accessible to user / may not be removed by end user, and thus not
* exposed as "removable" to userspace.
*/
- if (parent &&
- (parent->external_facing || dev_is_removable(&parent->dev)))
+ if (dev_is_removable(&parent->dev)) {
+ dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
+ return;
+ }
+
+ if (arch_pci_dev_is_removable(dev)) {
+ pci_dbg(dev, "marking as removable\n");
dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
+ }
}
/**
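
Both hunks key off arch_pci_dev_is_removable(), which lets an architecture flag devices as user-removable (and hence untrusted) from platform knowledge. The sketch below shows one common way such an opt-in hook is provided, a weak default that keeps today's behaviour; this is only an assumption about its shape, not necessarily how the series defines it:

#include <linux/pci.h>

/* Hypothetical fallback: architectures that know better override this. */
bool __weak arch_pci_dev_is_removable(struct pci_dev *pdev)
{
	return false;
}
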
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index dccb60c1d9cc..8103bc24a54e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4996,18 +4996,21 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
}
/*
- * Wangxun 10G/1G NICs have no ACS capability, and on multi-function
- * devices, peer-to-peer transactions are not be used between the functions.
- * So add an ACS quirk for below devices to isolate functions.
+ * Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on
+ * multi-function devices, the hardware isolates the functions by
+ * directing all peer-to-peer traffic upstream as though PCI_ACS_RR and
+ * PCI_ACS_CR were set.
* SFxxx 1G NICs(em).
* RP1000/RP2000 10G NICs(sp).
+ * FF5xxx 40G/25G/10G NICs(aml).
*/
static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags)
{
switch (dev->device) {
- case 0x0100 ... 0x010F:
- case 0x1001:
- case 0x2001:
+ case 0x0100 ... 0x010F: /* EM */
+ case 0x1001: case 0x2001: /* SP */
+ case 0x5010: case 0x5025: case 0x5040: /* AML */
+ case 0x5110: case 0x5125: case 0x5140: /* AML */
return pci_acs_ctrl_enabled(acs_flags,
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 4061890a1748..b3eec63c00ba 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -220,6 +220,9 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
/* Set owner */
pindesc->pctldev = pctldev;
+#ifdef CONFIG_PINMUX
+ mutex_init(&pindesc->mux_lock);
+#endif
/* Copy basic pin info */
if (pin->name) {
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 4e07707d2435..d6c24978e708 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -177,6 +177,7 @@ struct pin_desc {
const char *mux_owner;
const struct pinctrl_setting_mux *mux_setting;
const char *gpio_owner;
+ struct mutex mux_lock;
#endif
};
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 3b59d7189004..139bc0fb8a9d 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -20,7 +20,7 @@ config PINCTRL_IMX_SCMI
config PINCTRL_IMX_SCU
tristate
- depends on IMX_SCU
+ depends on IMX_SCU || COMPILE_TEST
select PINCTRL_IMX
config PINCTRL_IMX1_CORE
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 02033ea1c643..0743190da59e 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -14,6 +14,7 @@
#include <linux/array_size.h>
#include <linux/ctype.h>
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -93,6 +94,7 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned int pin)
if (!desc || !ops)
return true;
+ guard(mutex)(&desc->mux_lock);
if (ops->strict && desc->mux_usecount)
return false;
@@ -127,29 +129,31 @@ static int pin_request(struct pinctrl_dev *pctldev,
dev_dbg(pctldev->dev, "request pin %d (%s) for %s\n",
pin, desc->name, owner);
- if ((!gpio_range || ops->strict) &&
- desc->mux_usecount && strcmp(desc->mux_owner, owner)) {
- dev_err(pctldev->dev,
- "pin %s already requested by %s; cannot claim for %s\n",
- desc->name, desc->mux_owner, owner);
- goto out;
- }
+ scoped_guard(mutex, &desc->mux_lock) {
+ if ((!gpio_range || ops->strict) &&
+ desc->mux_usecount && strcmp(desc->mux_owner, owner)) {
+ dev_err(pctldev->dev,
+ "pin %s already requested by %s; cannot claim for %s\n",
+ desc->name, desc->mux_owner, owner);
+ goto out;
+ }
- if ((gpio_range || ops->strict) && desc->gpio_owner) {
- dev_err(pctldev->dev,
- "pin %s already requested by %s; cannot claim for %s\n",
- desc->name, desc->gpio_owner, owner);
- goto out;
- }
+ if ((gpio_range || ops->strict) && desc->gpio_owner) {
+ dev_err(pctldev->dev,
+ "pin %s already requested by %s; cannot claim for %s\n",
+ desc->name, desc->gpio_owner, owner);
+ goto out;
+ }
- if (gpio_range) {
- desc->gpio_owner = owner;
- } else {
- desc->mux_usecount++;
- if (desc->mux_usecount > 1)
- return 0;
+ if (gpio_range) {
+ desc->gpio_owner = owner;
+ } else {
+ desc->mux_usecount++;
+ if (desc->mux_usecount > 1)
+ return 0;
- desc->mux_owner = owner;
+ desc->mux_owner = owner;
+ }
}
/* Let each pin increase references to this module */
@@ -178,12 +182,14 @@ static int pin_request(struct pinctrl_dev *pctldev,
out_free_pin:
if (status) {
- if (gpio_range) {
- desc->gpio_owner = NULL;
- } else {
- desc->mux_usecount--;
- if (!desc->mux_usecount)
- desc->mux_owner = NULL;
+ scoped_guard(mutex, &desc->mux_lock) {
+ if (gpio_range) {
+ desc->gpio_owner = NULL;
+ } else {
+ desc->mux_usecount--;
+ if (!desc->mux_usecount)
+ desc->mux_owner = NULL;
+ }
}
}
out:
@@ -219,15 +225,17 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
return NULL;
}
- if (!gpio_range) {
- /*
- * A pin should not be freed more times than allocated.
- */
- if (WARN_ON(!desc->mux_usecount))
- return NULL;
- desc->mux_usecount--;
- if (desc->mux_usecount)
- return NULL;
+ scoped_guard(mutex, &desc->mux_lock) {
+ if (!gpio_range) {
+ /*
+ * A pin should not be freed more times than allocated.
+ */
+ if (WARN_ON(!desc->mux_usecount))
+ return NULL;
+ desc->mux_usecount--;
+ if (desc->mux_usecount)
+ return NULL;
+ }
}
/*
@@ -239,13 +247,15 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
else if (ops->free)
ops->free(pctldev, pin);
- if (gpio_range) {
- owner = desc->gpio_owner;
- desc->gpio_owner = NULL;
- } else {
- owner = desc->mux_owner;
- desc->mux_owner = NULL;
- desc->mux_setting = NULL;
+ scoped_guard(mutex, &desc->mux_lock) {
+ if (gpio_range) {
+ owner = desc->gpio_owner;
+ desc->gpio_owner = NULL;
+ } else {
+ owner = desc->mux_owner;
+ desc->mux_owner = NULL;
+ desc->mux_setting = NULL;
+ }
}
module_put(pctldev->owner);
@@ -458,7 +468,8 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting)
pins[i]);
continue;
}
- desc->mux_setting = &(setting->data.mux);
+ scoped_guard(mutex, &desc->mux_lock)
+ desc->mux_setting = &(setting->data.mux);
}
ret = ops->set_mux(pctldev, setting->data.mux.func,
@@ -472,8 +483,10 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting)
err_set_mux:
for (i = 0; i < num_pins; i++) {
desc = pin_desc_get(pctldev, pins[i]);
- if (desc)
- desc->mux_setting = NULL;
+ if (desc) {
+ scoped_guard(mutex, &desc->mux_lock)
+ desc->mux_setting = NULL;
+ }
}
err_pin_request:
/* On error release all taken pins */
@@ -492,6 +505,7 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting)
unsigned int num_pins = 0;
int i;
struct pin_desc *desc;
+ bool is_equal;
if (pctlops->get_group_pins)
ret = pctlops->get_group_pins(pctldev, setting->data.mux.group,
@@ -517,7 +531,10 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting)
pins[i]);
continue;
}
- if (desc->mux_setting == &(setting->data.mux)) {
+ scoped_guard(mutex, &desc->mux_lock)
+ is_equal = (desc->mux_setting == &(setting->data.mux));
+
+ if (is_equal) {
pin_free(pctldev, pins[i], NULL);
} else {
const char *gname;
@@ -608,40 +625,42 @@ static int pinmux_pins_show(struct seq_file *s, void *what)
if (desc == NULL)
continue;
- if (desc->mux_owner &&
- !strcmp(desc->mux_owner, pinctrl_dev_get_name(pctldev)))
- is_hog = true;
-
- if (pmxops->strict) {
- if (desc->mux_owner)
- seq_printf(s, "pin %d (%s): device %s%s",
- pin, desc->name, desc->mux_owner,
+ scoped_guard(mutex, &desc->mux_lock) {
+ if (desc->mux_owner &&
+ !strcmp(desc->mux_owner, pinctrl_dev_get_name(pctldev)))
+ is_hog = true;
+
+ if (pmxops->strict) {
+ if (desc->mux_owner)
+ seq_printf(s, "pin %d (%s): device %s%s",
+ pin, desc->name, desc->mux_owner,
+ is_hog ? " (HOG)" : "");
+ else if (desc->gpio_owner)
+ seq_printf(s, "pin %d (%s): GPIO %s",
+ pin, desc->name, desc->gpio_owner);
+ else
+ seq_printf(s, "pin %d (%s): UNCLAIMED",
+ pin, desc->name);
+ } else {
+ /* For non-strict controllers */
+ seq_printf(s, "pin %d (%s): %s %s%s", pin, desc->name,
+ desc->mux_owner ? desc->mux_owner
+ : "(MUX UNCLAIMED)",
+ desc->gpio_owner ? desc->gpio_owner
+ : "(GPIO UNCLAIMED)",
is_hog ? " (HOG)" : "");
- else if (desc->gpio_owner)
- seq_printf(s, "pin %d (%s): GPIO %s",
- pin, desc->name, desc->gpio_owner);
+ }
+
+ /* If mux: print function+group claiming the pin */
+ if (desc->mux_setting)
+ seq_printf(s, " function %s group %s\n",
+ pmxops->get_function_name(pctldev,
+ desc->mux_setting->func),
+ pctlops->get_group_name(pctldev,
+ desc->mux_setting->group));
else
- seq_printf(s, "pin %d (%s): UNCLAIMED",
- pin, desc->name);
- } else {
- /* For non-strict controllers */
- seq_printf(s, "pin %d (%s): %s %s%s", pin, desc->name,
- desc->mux_owner ? desc->mux_owner
- : "(MUX UNCLAIMED)",
- desc->gpio_owner ? desc->gpio_owner
- : "(GPIO UNCLAIMED)",
- is_hog ? " (HOG)" : "");
+ seq_putc(s, '\n');
}
-
- /* If mux: print function+group claiming the pin */
- if (desc->mux_setting)
- seq_printf(s, " function %s group %s\n",
- pmxops->get_function_name(pctldev,
- desc->mux_setting->func),
- pctlops->get_group_name(pctldev,
- desc->mux_setting->group));
- else
- seq_putc(s, '\n');
}
mutex_unlock(&pctldev->mutex);
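
The pinmux conversion relies on the scope-based lock guards from <linux/cleanup.h>: guard(mutex)(&m) holds the mutex until the enclosing scope ends, while scoped_guard(mutex, &m) { ... } holds it only for the braced block, so early returns (and the gotos above) can no longer leak the lock. A standalone sketch of both forms with a hypothetical lock and counter:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);
static int my_count;			/* hypothetical state guarded by my_lock */

static bool my_is_busy(void)
{
	guard(mutex)(&my_lock);		/* unlocked automatically on return */
	return my_count > 0;
}

static int my_get(void)
{
	scoped_guard(mutex, &my_lock) {
		my_count++;
		if (my_count > 1)
			return 0;	/* lock dropped before returning */
	}
	/* First user: lock already released, do any slow setup here. */
	return 0;
}
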
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index a0eb4e01b3a7..1b7eecff3ffa 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1226,6 +1226,8 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8550ve-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8550vs-gpio", .data = (void *) 6 },
{ .compatible = "qcom,pm8916-gpio", .data = (void *) 4 },
+ /* pm8937 has 8 GPIOs with holes on 3, 4 and 6 */
+ { .compatible = "qcom,pm8937-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8941-gpio", .data = (void *) 36 },
/* pm8950 has 8 GPIOs with holes on 3 */
{ .compatible = "qcom,pm8950-gpio", .data = (void *) 8 },
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index d16ece90d926..5fa04e7c1d5c 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -983,6 +983,7 @@ static const struct of_device_id pmic_mpp_of_match[] = {
{ .compatible = "qcom,pm8226-mpp", .data = (void *) 8 },
{ .compatible = "qcom,pm8841-mpp", .data = (void *) 4 },
{ .compatible = "qcom,pm8916-mpp", .data = (void *) 4 },
+ { .compatible = "qcom,pm8937-mpp", .data = (void *) 4 },
{ .compatible = "qcom,pm8941-mpp", .data = (void *) 8 },
{ .compatible = "qcom,pm8950-mpp", .data = (void *) 4 },
{ .compatible = "qcom,pmi8950-mpp", .data = (void *) 4 },
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 89f5f44857d5..1101e5b2488e 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -3696,7 +3696,6 @@ static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus)
/* Throttle thermal policy ****************************************************/
static int throttle_thermal_policy_write(struct asus_wmi *asus)
{
- u32 retval;
u8 value;
int err;
@@ -3718,8 +3717,8 @@ static int throttle_thermal_policy_write(struct asus_wmi *asus)
value = asus->throttle_thermal_policy_mode;
}
- err = asus_wmi_set_devstate(asus->throttle_thermal_policy_dev,
- value, &retval);
+ /* Some machines do not return a result code here, so ignore it */
+ err = asus_wmi_set_devstate(asus->throttle_thermal_policy_dev, value, NULL);
sysfs_notify(&asus->platform_device->dev.kobj, NULL,
"throttle_thermal_policy");
@@ -3729,12 +3728,6 @@ static int throttle_thermal_policy_write(struct asus_wmi *asus)
return err;
}
- if (retval != 1) {
- pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n",
- retval);
- return -EIO;
- }
-
/* Must set to disabled if mode is toggled */
if (asus->cpu_fan_curve_available)
asus->custom_fan_curves[FAN_CURVE_DEV_CPU].enabled = false;
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 29ad510e881c..778ff187ac59 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -2171,8 +2171,24 @@ static int genpd_alloc_data(struct generic_pm_domain *genpd)
}
genpd->gd = gd;
- return 0;
+ device_initialize(&genpd->dev);
+
+ if (!genpd_is_dev_name_fw(genpd)) {
+ dev_set_name(&genpd->dev, "%s", genpd->name);
+ } else {
+ ret = ida_alloc(&genpd_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto put;
+ genpd->device_id = ret;
+ dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
+ }
+
+ return 0;
+put:
+ put_device(&genpd->dev);
+ if (genpd->free_states == genpd_free_default_power_state)
+ kfree(genpd->states);
free:
if (genpd_is_cpu_domain(genpd))
free_cpumask_var(genpd->cpus);
@@ -2182,6 +2198,9 @@ free:
static void genpd_free_data(struct generic_pm_domain *genpd)
{
+ put_device(&genpd->dev);
+ if (genpd->device_id != -ENXIO)
+ ida_free(&genpd_ida, genpd->device_id);
if (genpd_is_cpu_domain(genpd))
free_cpumask_var(genpd->cpus);
if (genpd->free_states)
@@ -2270,20 +2289,6 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
if (ret)
return ret;
- device_initialize(&genpd->dev);
-
- if (!genpd_is_dev_name_fw(genpd)) {
- dev_set_name(&genpd->dev, "%s", genpd->name);
- } else {
- ret = ida_alloc(&genpd_ida, GFP_KERNEL);
- if (ret < 0) {
- put_device(&genpd->dev);
- return ret;
- }
- genpd->device_id = ret;
- dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
- }
-
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
@@ -2324,8 +2329,6 @@ static int genpd_remove(struct generic_pm_domain *genpd)
genpd_unlock(genpd);
genpd_debug_remove(genpd);
cancel_work_sync(&genpd->power_off_work);
- if (genpd->device_id != -ENXIO)
- ida_free(&genpd_ida, genpd->device_id);
genpd_free_data(genpd);
pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));
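
Moving device_initialize() and the IDA allocation into genpd_alloc_data() keeps allocation and release symmetric: genpd_free_data() can now always drop the device reference and return the ID. A tiny sketch of the ida_alloc()/ida_free() pairing with a hypothetical object, where a negative id means "never allocated":

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(my_ida);

struct my_obj { int id; };		/* id starts out negative (hypothetical convention) */

static int my_alloc(struct my_obj *obj)
{
	int id = ida_alloc(&my_ida, GFP_KERNEL);

	if (id < 0)
		return id;		/* -ENOMEM etc.; nothing to undo */
	obj->id = id;
	return 0;
}

static void my_free(struct my_obj *obj)
{
	if (obj->id >= 0)
		ida_free(&my_ida, obj->id);
}
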
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index 963d61c5af6d..3f0e6960f47f 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -403,7 +403,7 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
* already reaches target before udelay()
*/
regmap_read_bypassed(domain->regmap, domain->regs->hsk, &reg_val);
- udelay(5);
+ udelay(10);
}
/* Disable reset clocks for all devices in the domain */
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index c56cd0f63909..77a36e7bddd5 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -150,7 +150,8 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
if (ppb > ops->max_adj || ppb < -ops->max_adj)
return -ERANGE;
err = ops->adjfine(ops, tx->freq);
- ptp->dialed_frequency = tx->freq;
+ if (!err)
+ ptp->dialed_frequency = tx->freq;
} else if (tx->modes & ADJ_OFFSET) {
if (ops->adjphase) {
s32 max_phase_adj = ops->getmaxphase(ops);
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 6c343b4b9d15..7870722b6ee2 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -843,26 +843,15 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps520 = {
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
};
-static const struct rpmh_vreg_hw_data pmic5_ftsmps525_lv = {
+static const struct rpmh_vreg_hw_data pmic5_ftsmps525 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
.voltage_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(300000, 0, 267, 4000),
+ REGULATOR_LINEAR_RANGE(1376000, 268, 438, 8000),
},
- .n_linear_ranges = 1,
- .n_voltages = 268,
- .pmic_mode_map = pmic_mode_map_pmic5_smps,
- .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
-};
-
-static const struct rpmh_vreg_hw_data pmic5_ftsmps525_mv = {
- .regulator_type = VRM,
- .ops = &rpmh_regulator_vrm_ops,
- .voltage_ranges = (struct linear_range[]) {
- REGULATOR_LINEAR_RANGE(600000, 0, 267, 8000),
- },
- .n_linear_ranges = 1,
- .n_voltages = 268,
+ .n_linear_ranges = 2,
+ .n_voltages = 439,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
};
@@ -1190,12 +1179,12 @@ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = {
};
static const struct rpmh_vreg_init_data pm8550vs_vreg_data[] = {
- RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"),
- RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"),
- RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"),
- RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_lv, "vdd-s4"),
- RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"),
- RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_mv, "vdd-s6"),
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"),
RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"),
RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"),
RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"),
@@ -1203,14 +1192,14 @@ static const struct rpmh_vreg_init_data pm8550vs_vreg_data[] = {
};
static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = {
- RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"),
- RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"),
- RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"),
- RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"),
- RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"),
- RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"),
- RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"),
- RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"),
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525, "vdd-s8"),
RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"),
RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"),
RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"),
@@ -1218,14 +1207,14 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = {
};
static const struct rpmh_vreg_init_data pmc8380_vreg_data[] = {
- RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"),
- RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"),
- RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"),
- RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"),
- RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"),
- RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"),
- RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"),
- RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"),
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525, "vdd-s8"),
RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"),
RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"),
RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"),
@@ -1409,16 +1398,16 @@ static const struct rpmh_vreg_init_data pmx65_vreg_data[] = {
};
static const struct rpmh_vreg_init_data pmx75_vreg_data[] = {
- RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"),
- RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"),
- RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"),
- RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"),
- RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"),
- RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"),
- RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"),
- RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"),
- RPMH_VREG("smps9", "smp%s9", &pmic5_ftsmps525_lv, "vdd-s9"),
- RPMH_VREG("smps10", "smp%s10", &pmic5_ftsmps525_lv, "vdd-s10"),
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525, "vdd-s8"),
+ RPMH_VREG("smps9", "smp%s9", &pmic5_ftsmps525, "vdd-s9"),
+ RPMH_VREG("smps10", "smp%s10", &pmic5_ftsmps525, "vdd-s10"),
RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"),
RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2-18"),
RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"),
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 793b1d274be3..1a2d08ec9de9 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -1433,6 +1433,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sa8775p-cdsp1-pas", .data = &sa8775p_cdsp1_resource},
{ .compatible = "qcom,sa8775p-gpdsp0-pas", .data = &sa8775p_gpdsp0_resource},
{ .compatible = "qcom,sa8775p-gpdsp1-pas", .data = &sa8775p_gpdsp1_resource},
+ { .compatible = "qcom,sar2130p-adsp-pas", .data = &sm8350_adsp_resource},
{ .compatible = "qcom,sc7180-adsp-pas", .data = &sm8250_adsp_resource},
{ .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sc7280-adsp-pas", .data = &sm8350_adsp_resource},
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 35dca2accbb8..5849d2970bba 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -645,18 +645,17 @@ static int cmos_nvram_read(void *priv, unsigned int off, void *val,
unsigned char *buf = val;
off += NVRAM_OFFSET;
- spin_lock_irq(&rtc_lock);
- for (; count; count--, off++) {
+ for (; count; count--, off++, buf++) {
+ guard(spinlock_irq)(&rtc_lock);
if (off < 128)
- *buf++ = CMOS_READ(off);
+ *buf = CMOS_READ(off);
else if (can_bank2)
- *buf++ = cmos_read_bank2(off);
+ *buf = cmos_read_bank2(off);
else
- break;
+ return -EIO;
}
- spin_unlock_irq(&rtc_lock);
- return count ? -EIO : 0;
+ return 0;
}
static int cmos_nvram_write(void *priv, unsigned int off, void *val,
@@ -671,23 +670,23 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
* NVRAM to update, updating checksums is also part of its job.
*/
off += NVRAM_OFFSET;
- spin_lock_irq(&rtc_lock);
- for (; count; count--, off++) {
+ for (; count; count--, off++, buf++) {
/* don't trash RTC registers */
if (off == cmos->day_alrm
|| off == cmos->mon_alrm
|| off == cmos->century)
- buf++;
- else if (off < 128)
- CMOS_WRITE(*buf++, off);
+ continue;
+
+ guard(spinlock_irq)(&rtc_lock);
+ if (off < 128)
+ CMOS_WRITE(*buf, off);
else if (can_bank2)
- cmos_write_bank2(*buf++, off);
+ cmos_write_bank2(*buf, off);
else
- break;
+ return -EIO;
}
- spin_unlock_irq(&rtc_lock);
- return count ? -EIO : 0;
+ return 0;
}
/*----------------------------------------------------------------*/
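
Taking the guard inside the loop changes the locking granularity: rtc_lock is acquired and released around each byte rather than around the whole transfer, and the error cases can simply return without a manual unlock. A minimal sketch of the per-iteration form, with a hypothetical lock, limit and accessor:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

#define MY_LIMIT	128		/* hypothetical bound */

static DEFINE_SPINLOCK(my_lock);
static u8 my_read_byte(unsigned int off) { return 0; }	/* hypothetical */

static int my_read(u8 *buf, unsigned int off, size_t count)
{
	for (; count; count--, off++, buf++) {
		guard(spinlock_irq)(&my_lock);	/* held for one iteration only */
		if (off >= MY_LIMIT)
			return -EIO;		/* unlock happens automatically */
		*buf = my_read_byte(off);
	}
	return 0;
}
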
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 4cd3a3eab6f1..cd394d8c9f07 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2493,6 +2493,7 @@ static int complete_v3_hw(struct hisi_sas_cq *cq)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+ cond_resched();
return completed;
}
@@ -3550,6 +3551,11 @@ debugfs_to_reg_name_v3_hw(int off, int base_off,
return NULL;
}
+static bool debugfs_dump_is_generated_v3_hw(void *p)
+{
+ return p ? true : false;
+}
+
static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s,
const struct hisi_sas_debugfs_reg *reg)
{
@@ -3575,6 +3581,9 @@ static int debugfs_global_v3_hw_show(struct seq_file *s, void *p)
{
struct hisi_sas_debugfs_regs *global = s->private;
+ if (!debugfs_dump_is_generated_v3_hw(global->data))
+ return -EPERM;
+
debugfs_print_reg_v3_hw(global->data, s,
&debugfs_global_reg);
@@ -3586,6 +3595,9 @@ static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p)
{
struct hisi_sas_debugfs_regs *axi = s->private;
+ if (!debugfs_dump_is_generated_v3_hw(axi->data))
+ return -EPERM;
+
debugfs_print_reg_v3_hw(axi->data, s,
&debugfs_axi_reg);
@@ -3597,6 +3609,9 @@ static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p)
{
struct hisi_sas_debugfs_regs *ras = s->private;
+ if (!debugfs_dump_is_generated_v3_hw(ras->data))
+ return -EPERM;
+
debugfs_print_reg_v3_hw(ras->data, s,
&debugfs_ras_reg);
@@ -3609,6 +3624,9 @@ static int debugfs_port_v3_hw_show(struct seq_file *s, void *p)
struct hisi_sas_debugfs_port *port = s->private;
const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg;
+ if (!debugfs_dump_is_generated_v3_hw(port->data))
+ return -EPERM;
+
debugfs_print_reg_v3_hw(port->data, s, reg_port);
return 0;
@@ -3664,6 +3682,9 @@ static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p)
struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
int slot;
+ if (!debugfs_dump_is_generated_v3_hw(debugfs_cq->complete_hdr))
+ return -EPERM;
+
for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq);
@@ -3685,8 +3706,12 @@ static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot,
static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p)
{
+ struct hisi_sas_debugfs_dq *debugfs_dq = s->private;
int slot;
+ if (!debugfs_dump_is_generated_v3_hw(debugfs_dq->hdr))
+ return -EPERM;
+
for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
debugfs_dq_show_slot_v3_hw(s, slot, s->private);
@@ -3700,6 +3725,9 @@ static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p)
struct hisi_sas_iost *iost = debugfs_iost->iost;
int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
+ if (!debugfs_dump_is_generated_v3_hw(iost))
+ return -EPERM;
+
for (i = 0; i < max_command_entries; i++, iost++) {
__le64 *data = &iost->qw0;
@@ -3719,6 +3747,9 @@ static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p)
int i, tab_idx;
__le64 *iost;
+ if (!debugfs_dump_is_generated_v3_hw(iost_cache))
+ return -EPERM;
+
for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
/*
* Data struct of IOST cache:
@@ -3742,6 +3773,9 @@ static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p)
struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
struct hisi_sas_itct *itct = debugfs_itct->itct;
+ if (!debugfs_dump_is_generated_v3_hw(itct))
+ return -EPERM;
+
for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
__le64 *data = &itct->qw0;
@@ -3761,6 +3795,9 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
int i, tab_idx;
__le64 *itct;
+ if (!debugfs_dump_is_generated_v3_hw(itct_cache))
+ return -EPERM;
+
for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
/*
* Data struct of ITCT cache:
@@ -3778,10 +3815,9 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
}
DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw);
-static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
+static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
{
u64 *debugfs_timestamp;
- int dump_index = hisi_hba->debugfs_dump_index;
struct dentry *dump_dentry;
struct dentry *dentry;
char name[256];
@@ -3789,17 +3825,17 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
int c;
int d;
- snprintf(name, 256, "%d", dump_index);
+ snprintf(name, 256, "%d", index);
dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
- debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];
+ debugfs_timestamp = &hisi_hba->debugfs_timestamp[index];
debugfs_create_u64("timestamp", 0400, dump_dentry,
debugfs_timestamp);
debugfs_create_file("global", 0400, dump_dentry,
- &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
+ &hisi_hba->debugfs_regs[index][DEBUGFS_GLOBAL],
&debugfs_global_v3_hw_fops);
/* Create port dir and files */
@@ -3808,7 +3844,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
snprintf(name, 256, "%d", p);
debugfs_create_file(name, 0400, dentry,
- &hisi_hba->debugfs_port_reg[dump_index][p],
+ &hisi_hba->debugfs_port_reg[index][p],
&debugfs_port_v3_hw_fops);
}
@@ -3818,7 +3854,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
snprintf(name, 256, "%d", c);
debugfs_create_file(name, 0400, dentry,
- &hisi_hba->debugfs_cq[dump_index][c],
+ &hisi_hba->debugfs_cq[index][c],
&debugfs_cq_v3_hw_fops);
}
@@ -3828,32 +3864,32 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
snprintf(name, 256, "%d", d);
debugfs_create_file(name, 0400, dentry,
- &hisi_hba->debugfs_dq[dump_index][d],
+ &hisi_hba->debugfs_dq[index][d],
&debugfs_dq_v3_hw_fops);
}
debugfs_create_file("iost", 0400, dump_dentry,
- &hisi_hba->debugfs_iost[dump_index],
+ &hisi_hba->debugfs_iost[index],
&debugfs_iost_v3_hw_fops);
debugfs_create_file("iost_cache", 0400, dump_dentry,
- &hisi_hba->debugfs_iost_cache[dump_index],
+ &hisi_hba->debugfs_iost_cache[index],
&debugfs_iost_cache_v3_hw_fops);
debugfs_create_file("itct", 0400, dump_dentry,
- &hisi_hba->debugfs_itct[dump_index],
+ &hisi_hba->debugfs_itct[index],
&debugfs_itct_v3_hw_fops);
debugfs_create_file("itct_cache", 0400, dump_dentry,
- &hisi_hba->debugfs_itct_cache[dump_index],
+ &hisi_hba->debugfs_itct_cache[index],
&debugfs_itct_cache_v3_hw_fops);
debugfs_create_file("axi", 0400, dump_dentry,
- &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
+ &hisi_hba->debugfs_regs[index][DEBUGFS_AXI],
&debugfs_axi_v3_hw_fops);
debugfs_create_file("ras", 0400, dump_dentry,
- &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
+ &hisi_hba->debugfs_regs[index][DEBUGFS_RAS],
&debugfs_ras_v3_hw_fops);
}
@@ -4516,22 +4552,34 @@ static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
int i;
devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
+ hisi_hba->debugfs_iost_cache[dump_index].cache = NULL;
devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
+ hisi_hba->debugfs_itct_cache[dump_index].cache = NULL;
devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
+ hisi_hba->debugfs_iost[dump_index].iost = NULL;
devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
+ hisi_hba->debugfs_itct[dump_index].itct = NULL;
- for (i = 0; i < hisi_hba->queue_count; i++)
+ for (i = 0; i < hisi_hba->queue_count; i++) {
devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
+ hisi_hba->debugfs_dq[dump_index][i].hdr = NULL;
+ }
- for (i = 0; i < hisi_hba->queue_count; i++)
+ for (i = 0; i < hisi_hba->queue_count; i++) {
devm_kfree(dev,
hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
+ hisi_hba->debugfs_cq[dump_index][i].complete_hdr = NULL;
+ }
- for (i = 0; i < DEBUGFS_REGS_NUM; i++)
+ for (i = 0; i < DEBUGFS_REGS_NUM; i++) {
devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
+ hisi_hba->debugfs_regs[dump_index][i].data = NULL;
+ }
- for (i = 0; i < hisi_hba->n_phy; i++)
+ for (i = 0; i < hisi_hba->n_phy; i++) {
devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
+ hisi_hba->debugfs_port_reg[dump_index][i].data = NULL;
+ }
}
static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = {
@@ -4658,8 +4706,6 @@ static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
debugfs_snapshot_iost_reg_v3_hw(hisi_hba);
- debugfs_create_files_v3_hw(hisi_hba);
-
debugfs_snapshot_restore_v3_hw(hisi_hba);
hisi_hba->debugfs_dump_index++;
@@ -4743,6 +4789,17 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
}
+static void debugfs_dump_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ hisi_hba->debugfs_dump_dentry =
+ debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
+ debugfs_create_files_v3_hw(hisi_hba, i);
+}
+
static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
{
debugfs_remove_recursive(hisi_hba->debugfs_dir);
@@ -4763,8 +4820,7 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
/* create bist structures */
debugfs_bist_init_v3_hw(hisi_hba);
- hisi_hba->debugfs_dump_dentry =
- debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+ debugfs_dump_init_v3_hw(hisi_hba);
debugfs_phy_down_cnt_init_v3_hw(hisi_hba);
debugfs_fifo_init_v3_hw(hisi_hba);
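
With the dump directories now created once at init time, each show() handler has to cope with being opened before any dump has been captured, hence the new debugfs_dump_is_generated_v3_hw() checks returning -EPERM. The general shape of that guard, sketched with a hypothetical buffer and DEFINE_SHOW_ATTRIBUTE():

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static u32 *my_dump_data;	/* filled in later by a snapshot routine (hypothetical) */

static int my_dump_show(struct seq_file *s, void *p)
{
	/* The file exists from init time, but the dump may not be taken yet. */
	if (!my_dump_data)
		return -EPERM;

	seq_printf(s, "first word: 0x%08x\n", my_dump_data[0]);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(my_dump);	/* provides my_dump_fops for debugfs_create_file() */
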
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 134bc96dd134..ce3a1f42713d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -2226,6 +2226,11 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ulp_status, ulp_word4, latt);
if (latt || ulp_status) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
+ "0229 FDMI cmd %04x failed, latt = %d "
+ "ulp_status: (x%x/x%x), sli_flag x%x\n",
+ be16_to_cpu(fdmi_cmd), latt, ulp_status,
+ ulp_word4, phba->sli.sli_flag);
/* Look for a retryable error */
if (ulp_status == IOSTAT_LOCAL_REJECT) {
@@ -2234,8 +2239,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case IOERR_SLI_DOWN:
/* Driver aborted this IO. No retry as error
* is likely Offline->Online or some adapter
- * error. Recovery will try again.
+ * error. Recovery will try again, but if the port
+ * is not active there's no point in continuing to
+ * issue follow-up FDMI commands.
*/
+ if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
+ free_ndlp = cmdiocb->ndlp;
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
+ return;
+ }
break;
case IOERR_ABORT_IN_PROGRESS:
case IOERR_SEQUENCE_TIMEOUT:
@@ -2256,12 +2269,6 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
}
}
-
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0229 FDMI cmd %04x latt = %d "
- "ulp_status: x%x, rid x%x\n",
- be16_to_cpu(fdmi_cmd), latt, ulp_status,
- ulp_word4);
}
free_ndlp = cmdiocb->ndlp;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 9241075f72fa..6e8d8a96c54f 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -155,6 +155,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
struct lpfc_hba *phba;
struct lpfc_work_evt *evtp;
unsigned long iflags;
+ bool nvme_reg = false;
ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
if (!ndlp)
@@ -177,38 +178,49 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
/* Don't schedule a worker thread event if the vport is going down. */
if (test_bit(FC_UNLOADING, &vport->load_flag) ||
!test_bit(HBA_SETUP, &phba->hba_flag)) {
+
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
+ if (ndlp->fc4_xpt_flags & NVME_XPT_REGD)
+ nvme_reg = true;
+
/* The scsi_transport is done with the rport so lpfc cannot
- * call to unregister. Remove the scsi transport reference
- * and clean up the SCSI transport node details.
+ * call to unregister.
*/
- if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) {
+ if (ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
- /* NVME transport-registered rports need the
- * NLP_XPT_REGD flag to complete an unregister.
+ /* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node,
+ * unregister calls were made to the scsi and nvme
+ * transports and refcnt was already decremented. Clear
+ * the NLP_XPT_REGD flag only if the NVME Rport is
+ * confirmed unregistered.
*/
- if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+ if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_nlp_put(ndlp); /* may free ndlp */
+ } else {
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ }
+ } else {
spin_unlock_irqrestore(&ndlp->lock, iflags);
- lpfc_nlp_put(ndlp);
- spin_lock_irqsave(&ndlp->lock, iflags);
}
+ spin_lock_irqsave(&ndlp->lock, iflags);
+
/* Only 1 thread can drop the initial node reference. If
* another thread has set NLP_DROPPED, this thread is done.
*/
- if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) &&
- !(ndlp->nlp_flag & NLP_DROPPED)) {
- ndlp->nlp_flag |= NLP_DROPPED;
+ if (nvme_reg || (ndlp->nlp_flag & NLP_DROPPED)) {
spin_unlock_irqrestore(&ndlp->lock, iflags);
- lpfc_nlp_put(ndlp);
return;
}
+ ndlp->nlp_flag |= NLP_DROPPED;
spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_nlp_put(ndlp);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0dd451009b07..a3658ef1141b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -13518,6 +13518,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Disable FW logging to host memory */
lpfc_ras_stop_fwlog(phba);
+ lpfc_sli4_queue_unset(phba);
+
/* Reset SLI4 HBA FCoE function */
lpfc_pci_function_reset(phba);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2ec6e55771b4..6748fba48a07 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5291,6 +5291,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
"0296 Restart HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
+ lpfc_sli4_queue_unset(phba);
+
rc = lpfc_sli4_brdreset(phba);
if (rc) {
phba->link_state = LPFC_HBA_ERROR;
@@ -17625,6 +17627,9 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
if (!eq)
return -ENODEV;
+ if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
+ goto list_remove;
+
mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -17651,10 +17656,12 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
shdr_status, shdr_add_status, rc);
status = -ENXIO;
}
+ mempool_free(mbox, eq->phba->mbox_mem_pool);
+list_remove:
/* Remove eq from any list */
list_del_init(&eq->list);
- mempool_free(mbox, eq->phba->mbox_mem_pool);
+
return status;
}
@@ -17682,6 +17689,10 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
/* sanity check on queue memory */
if (!cq)
return -ENODEV;
+
+ if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
+ goto list_remove;
+
mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -17707,9 +17718,11 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
shdr_status, shdr_add_status, rc);
status = -ENXIO;
}
+ mempool_free(mbox, cq->phba->mbox_mem_pool);
+
+list_remove:
/* Remove cq from any list */
list_del_init(&cq->list);
- mempool_free(mbox, cq->phba->mbox_mem_pool);
return status;
}
@@ -17737,6 +17750,10 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
/* sanity check on queue memory */
if (!mq)
return -ENODEV;
+
+ if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
+ goto list_remove;
+
mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -17762,9 +17779,11 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
shdr_status, shdr_add_status, rc);
status = -ENXIO;
}
+ mempool_free(mbox, mq->phba->mbox_mem_pool);
+
+list_remove:
/* Remove mq from any list */
list_del_init(&mq->list);
- mempool_free(mbox, mq->phba->mbox_mem_pool);
return status;
}
@@ -17792,6 +17811,10 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
/* sanity check on queue memory */
if (!wq)
return -ENODEV;
+
+ if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
+ goto list_remove;
+
mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -17816,11 +17839,13 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
shdr_status, shdr_add_status, rc);
status = -ENXIO;
}
+ mempool_free(mbox, wq->phba->mbox_mem_pool);
+
+list_remove:
/* Remove wq from any list */
list_del_init(&wq->list);
kfree(wq->pring);
wq->pring = NULL;
- mempool_free(mbox, wq->phba->mbox_mem_pool);
return status;
}
@@ -17850,6 +17875,10 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
/* sanity check on queue memory */
if (!hrq || !drq)
return -ENODEV;
+
+ if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
+ goto list_remove;
+
mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -17890,9 +17919,11 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
shdr_status, shdr_add_status, rc);
status = -ENXIO;
}
+ mempool_free(mbox, hrq->phba->mbox_mem_pool);
+
+list_remove:
list_del_init(&hrq->list);
list_del_init(&drq->list);
- mempool_free(mbox, hrq->phba->mbox_mem_pool);
return status;
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 2810608acd96..e6ece30c4348 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -3304,6 +3304,7 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
+ .show_host_supported_speeds = 1,
.get_host_port_id = qla2x00_get_host_port_id,
.show_host_port_id = 1,
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 52dc9604f567..10431a67d202 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -24,6 +24,7 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
{
struct bsg_job *bsg_job = sp->u.bsg_job;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct completion *comp = sp->comp;
ql_dbg(ql_dbg_user, sp->vha, 0x7009,
"%s: sp hdl %x, result=%x bsg ptr %p\n",
@@ -35,6 +36,9 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
bsg_reply->result = res;
bsg_job_done(bsg_job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
+
+ if (comp)
+ complete(comp);
}
void qla2x00_bsg_sp_free(srb_t *sp)
@@ -490,16 +494,6 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
goto done;
}
- if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
- (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
- ql_log(ql_log_warn, vha, 0x7011,
- "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
- "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
- req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
- rval = -EAGAIN;
- goto done_unmap_sg;
- }
-
if (!vha->flags.online) {
ql_log(ql_log_warn, vha, 0x7012,
"Host is not online.\n");
@@ -3061,7 +3055,7 @@ skip_chip_chk:
static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
{
- bool found = false;
+ bool found, do_bsg_done;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
struct qla_hw_data *ha = vha->hw;
@@ -3069,6 +3063,11 @@ static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
int cnt;
unsigned long flags;
struct req_que *req;
+ int rval;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ uint32_t ratov_j;
+
+ found = do_bsg_done = false;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
req = qpair->req;
@@ -3080,42 +3079,104 @@ static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
sp->type == SRB_ELS_CMD_HST ||
sp->type == SRB_ELS_CMD_HST_NOLOGIN) &&
sp->u.bsg_job == bsg_job) {
- req->outstanding_cmds[cnt] = NULL;
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-
- if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
- ql_log(ql_log_warn, vha, 0x7089,
- "mbx abort_command failed.\n");
- bsg_reply->result = -EIO;
- } else {
- ql_dbg(ql_dbg_user, vha, 0x708a,
- "mbx abort_command success.\n");
- bsg_reply->result = 0;
- }
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
found = true;
- goto done;
+ sp->comp = &comp;
+ break;
}
}
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-done:
- return found;
+ if (!found)
+ return false;
+
+ if (ha->flags.eeh_busy) {
+ /* skip over abort. EEH handling will return the bsg. Wait for it */
+ rval = QLA_SUCCESS;
+ ql_dbg(ql_dbg_user, vha, 0x802c,
+ "eeh encounter. bsg %p sp=%p handle=%x \n",
+ bsg_job, sp, sp->handle);
+ } else {
+ rval = ha->isp_ops->abort_command(sp);
+ ql_dbg(ql_dbg_user, vha, 0x802c,
+ "Aborting bsg %p sp=%p handle=%x rval=%x\n",
+ bsg_job, sp, sp->handle, rval);
+ }
+
+ switch (rval) {
+ case QLA_SUCCESS:
+ /* Wait for the command completion. */
+ ratov_j = ha->r_a_tov / 10 * 4 * 1000;
+ ratov_j = msecs_to_jiffies(ratov_j);
+
+ if (!wait_for_completion_timeout(&comp, ratov_j)) {
+ ql_log(ql_log_info, vha, 0x7089,
+ "bsg abort timeout. bsg=%p sp=%p handle %#x .\n",
+ bsg_job, sp, sp->handle);
+
+ do_bsg_done = true;
+ } else {
+ /* fw had returned the bsg */
+ ql_dbg(ql_dbg_user, vha, 0x708a,
+ "bsg abort success. bsg %p sp=%p handle=%#x\n",
+ bsg_job, sp, sp->handle);
+ do_bsg_done = false;
+ }
+ break;
+ default:
+ ql_log(ql_log_info, vha, 0x704f,
+ "bsg abort fail. bsg=%p sp=%p rval=%x.\n",
+ bsg_job, sp, rval);
+
+ do_bsg_done = true;
+ break;
+ }
+
+ if (!do_bsg_done)
+ return true;
+
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ /*
+ * recheck to make sure it's still the same bsg_job, because
+ * qp_lock_ptr was released earlier.
+ */
+ if (req->outstanding_cmds[cnt] &&
+ req->outstanding_cmds[cnt]->u.bsg_job != bsg_job) {
+ /* fw had returned the bsg */
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ return true;
+ }
+ req->outstanding_cmds[cnt] = NULL;
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ /* ref: INIT */
+ sp->comp = NULL;
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ bsg_reply->result = -ENXIO;
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ ql_dbg(ql_dbg_user, vha, 0x7051,
+ "%s bsg_job_done : bsg %p result %#x sp %p.\n",
+ __func__, bsg_job, bsg_reply->result, sp);
+
+ bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+ return true;
}
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
- struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
struct qla_hw_data *ha = vha->hw;
int i;
struct qla_qpair *qpair;
- ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
- __func__, bsg_job);
+ ql_log(ql_log_info, vha, 0x708b,
+ "%s CMD timeout. bsg ptr %p msgcode %x vendor cmd %x\n",
+ __func__, bsg_job, bsg_request->msgcode,
+ bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x9007,
@@ -3136,7 +3197,6 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
}
ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
- bsg_reply->result = -ENXIO;
done:
return 0;
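
The reworked qla_bsg_found() no longer completes the bsg job from the abort path unconditionally: it attaches a stack completion to the SRB, issues the abort, and waits a bounded time (derived from R_A_TOV) for the normal completion callback; only on timeout or abort failure does it reclaim and complete the job itself. The generic shape of that pattern, with hypothetical names:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct my_cmd { struct completion *comp; };	/* hypothetical in-flight command */

static int my_hw_abort(struct my_cmd *cmd) { return 0; }	/* hypothetical */

static int my_abort_and_wait(struct my_cmd *cmd, unsigned int tov_ms)
{
	DECLARE_COMPLETION_ONSTACK(comp);

	cmd->comp = &comp;		/* the completion callback will complete() this */

	if (my_hw_abort(cmd))
		return -EIO;		/* abort failed: caller reclaims the command */

	if (!wait_for_completion_timeout(&comp, msecs_to_jiffies(tov_ms)))
		return -ETIMEDOUT;	/* timed out: caller reclaims the command */

	return 0;			/* completion path already finished the command */
}
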
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 76703f2706b8..79879c4743e6 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -506,6 +506,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
return(NULL);
}
+ vha->irq_offset = QLA_BASE_VECTORS;
host = vha->host;
fc_vport->dd_data = vha;
/* New host info */
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7f980e6141c2..7ab717ed7232 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -6902,12 +6902,15 @@ qla2x00_do_dpc(void *data)
set_user_nice(current, MIN_NICE);
set_current_state(TASK_INTERRUPTIBLE);
- while (!kthread_should_stop()) {
+ while (1) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
"DPC handler sleeping.\n");
schedule();
+ if (kthread_should_stop())
+ break;
+
if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags))
qla_pci_set_eeh_busy(base_vha);
@@ -6920,15 +6923,16 @@ qla2x00_do_dpc(void *data)
goto end_loop;
}
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ /* don't do any work. Wait to be terminated by kthread_stop */
+ goto end_loop;
+
ha->dpc_active = 1;
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
"DPC handler waking up, dpc_flags=0x%lx.\n",
base_vha->dpc_flags);
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- break;
-
if (IS_P3P_TYPE(ha)) {
if (IS_QLA8044(ha)) {
if (test_and_clear_bit(ISP_UNRECOVERABLE,
@@ -7241,9 +7245,6 @@ end_loop:
*/
ha->dpc_active = 0;
- /* Cleanup any residual CTX SRBs. */
- qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
-
return 0;
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index b52513eeeafa..680ba180a672 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6447,7 +6447,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
sd_dp = &sqcp->sd_dp;
- if (polled)
+ if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
ns_from_boot = ktime_get_boottime_ns();
/* one of the resp_*() response functions is called here */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 84334ab39c81..94127868bedf 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -386,7 +386,6 @@ sg_release(struct inode *inode, struct file *filp)
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
mutex_lock(&sdp->open_rel_lock);
- kref_put(&sfp->f_ref, sg_remove_sfp);
sdp->open_cnt--;
/* possibly many open()s waiting on exlude clearing, start many;
@@ -398,6 +397,7 @@ sg_release(struct inode *inode, struct file *filp)
wake_up_interruptible(&sdp->open_wait);
}
mutex_unlock(&sdp->open_rel_lock);
+ kref_put(&sfp->f_ref, sg_remove_sfp);
return 0;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index beb88f25dbb9..c9038284bc89 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3506,6 +3506,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
int i, cmd_nr, cmd_type, bt;
int retval = 0;
unsigned int blk;
+ bool cmd_mtiocget;
struct scsi_tape *STp = file->private_data;
struct st_modedef *STm;
struct st_partstat *STps;
@@ -3619,6 +3620,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
*/
if (mtc.mt_op != MTREW &&
mtc.mt_op != MTOFFL &&
+ mtc.mt_op != MTLOAD &&
mtc.mt_op != MTRETEN &&
mtc.mt_op != MTERASE &&
mtc.mt_op != MTSEEK &&
@@ -3732,17 +3734,28 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
goto out;
}
+ cmd_mtiocget = cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET);
+
if ((i = flush_buffer(STp, 0)) < 0) {
- retval = i;
- goto out;
- }
- if (STp->can_partitions &&
- (i = switch_partition(STp)) < 0) {
- retval = i;
- goto out;
+ if (cmd_mtiocget && STp->pos_unknown) {
+ /* flush fails -> modify status accordingly */
+ reset_state(STp);
+ STp->pos_unknown = 1;
+ } else { /* return error */
+ retval = i;
+ goto out;
+ }
+ } else { /* flush_buffer succeeds */
+ if (STp->can_partitions) {
+ i = switch_partition(STp);
+ if (i < 0) {
+ retval = i;
+ goto out;
+ }
+ }
}
- if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) {
+ if (cmd_mtiocget) {
struct mtget mt_status;
if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) {
@@ -3756,7 +3769,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK);
mt_status.mt_blkno = STps->drv_block;
mt_status.mt_fileno = STps->drv_file;
- if (STp->block_size != 0) {
+ if (STp->block_size != 0 && mt_status.mt_blkno >= 0) {
if (STps->rw == ST_WRITING)
mt_status.mt_blkno +=
(STp->buffer)->buffer_bytes / STp->block_size;
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index fe111bae38c8..5ea8887828c0 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -30,7 +30,7 @@
struct imx8_soc_data {
char *name;
- u32 (*soc_revision)(void);
+ int (*soc_revision)(u32 *socrev);
};
static u64 soc_uid;
@@ -51,24 +51,29 @@ static u32 imx8mq_soc_revision_from_atf(void)
static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
#endif
-static u32 __init imx8mq_soc_revision(void)
+static int imx8mq_soc_revision(u32 *socrev)
{
struct device_node *np;
void __iomem *ocotp_base;
u32 magic;
u32 rev;
struct clk *clk;
+ int ret;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
if (!np)
- return 0;
+ return -EINVAL;
ocotp_base = of_iomap(np, 0);
- WARN_ON(!ocotp_base);
+ if (!ocotp_base) {
+ ret = -EINVAL;
+ goto err_iomap;
+ }
+
clk = of_clk_get_by_name(np, NULL);
if (IS_ERR(clk)) {
- WARN_ON(IS_ERR(clk));
- return 0;
+ ret = PTR_ERR(clk);
+ goto err_clk;
}
clk_prepare_enable(clk);
@@ -88,32 +93,45 @@ static u32 __init imx8mq_soc_revision(void)
soc_uid <<= 32;
soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+ *socrev = rev;
+
clk_disable_unprepare(clk);
clk_put(clk);
iounmap(ocotp_base);
of_node_put(np);
- return rev;
+ return 0;
+
+err_clk:
+ iounmap(ocotp_base);
+err_iomap:
+ of_node_put(np);
+ return ret;
}
-static void __init imx8mm_soc_uid(void)
+static int imx8mm_soc_uid(void)
{
void __iomem *ocotp_base;
struct device_node *np;
struct clk *clk;
+ int ret = 0;
u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
IMX8MP_OCOTP_UID_OFFSET : 0;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
if (!np)
- return;
+ return -EINVAL;
ocotp_base = of_iomap(np, 0);
- WARN_ON(!ocotp_base);
+ if (!ocotp_base) {
+ ret = -EINVAL;
+ goto err_iomap;
+ }
+
clk = of_clk_get_by_name(np, NULL);
if (IS_ERR(clk)) {
- WARN_ON(IS_ERR(clk));
- return;
+ ret = PTR_ERR(clk);
+ goto err_clk;
}
clk_prepare_enable(clk);
@@ -124,31 +142,41 @@ static void __init imx8mm_soc_uid(void)
clk_disable_unprepare(clk);
clk_put(clk);
+
+err_clk:
iounmap(ocotp_base);
+err_iomap:
of_node_put(np);
+
+ return ret;
}
-static u32 __init imx8mm_soc_revision(void)
+static int imx8mm_soc_revision(u32 *socrev)
{
struct device_node *np;
void __iomem *anatop_base;
- u32 rev;
+ int ret;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
if (!np)
- return 0;
+ return -EINVAL;
anatop_base = of_iomap(np, 0);
- WARN_ON(!anatop_base);
+ if (!anatop_base) {
+ ret = -EINVAL;
+ goto err_iomap;
+ }
- rev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
+ *socrev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
iounmap(anatop_base);
of_node_put(np);
- imx8mm_soc_uid();
+ return imx8mm_soc_uid();
- return rev;
+err_iomap:
+ of_node_put(np);
+ return ret;
}
static const struct imx8_soc_data imx8mq_soc_data = {
@@ -184,7 +212,7 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = {
kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \
"unknown"
-static int __init imx8_soc_init(void)
+static int imx8m_soc_probe(struct platform_device *pdev)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
@@ -212,8 +240,11 @@ static int __init imx8_soc_init(void)
data = id->data;
if (data) {
soc_dev_attr->soc_id = data->name;
- if (data->soc_revision)
- soc_rev = data->soc_revision();
+ if (data->soc_revision) {
+ ret = data->soc_revision(&soc_rev);
+ if (ret)
+ goto free_soc;
+ }
}
soc_dev_attr->revision = imx8_revision(soc_rev);
@@ -251,6 +282,38 @@ free_soc:
kfree(soc_dev_attr);
return ret;
}
+
+static struct platform_driver imx8m_soc_driver = {
+ .probe = imx8m_soc_probe,
+ .driver = {
+ .name = "imx8m-soc",
+ },
+};
+
+static int __init imx8_soc_init(void)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ /* No match means this is non-i.MX8M hardware, do nothing. */
+ if (!of_match_node(imx8_soc_match, of_root))
+ return 0;
+
+ ret = platform_driver_register(&imx8m_soc_driver);
+ if (ret) {
+ pr_err("Failed to register imx8m-soc platform driver: %d\n", ret);
+ return ret;
+ }
+
+ pdev = platform_device_register_simple("imx8m-soc", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ pr_err("Failed to register imx8m-soc platform device: %ld\n", PTR_ERR(pdev));
+ platform_driver_unregister(&imx8m_soc_driver);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
device_initcall(imx8_soc_init);
MODULE_DESCRIPTION("NXP i.MX8M SoC driver");
MODULE_LICENSE("GPL");
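The soc-imx8m.c rework replaces the WARN_ON() calls on lookup failures with real error returns unwound through goto labels, hands the revision back through an output parameter instead of the return value, and moves probing from a bare initcall into a registered platform driver plus platform device. The sketch below shows only the goto-based unwind shape in isolation, with placeholder acquire/release helpers rather than the driver's of_*/clk_* calls (everything named here is hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Placeholder resources standing in for the mapped OCOTP region and clock. */
static void *acquire_a(void) { return malloc(1); }
static void *acquire_b(void) { return malloc(1); }
static void release_a(void *a) { free(a); }
static void release_b(void *b) { free(b); }

static int read_revision(unsigned int *rev)
{
	void *a, *b;
	int ret;

	a = acquire_a();
	if (!a)
		return -1;

	b = acquire_b();
	if (!b) {
		ret = -1;
		goto err_b;		/* unwind only what was acquired */
	}

	*rev = 0x21;			/* pretend register read; result via out-param */
	ret = 0;

	release_b(b);
err_b:
	release_a(a);
	return ret;
}

int main(void)
{
	unsigned int rev;

	if (read_revision(&rev) == 0)
		printf("rev %u.%u\n", (rev >> 4) & 0xf, rev & 0xf);
	return 0;
}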
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 28bcc65e91be..a470285f54a8 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
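The llcc-qcom.c change that follows is mechanical: every SoC's slice table is rewritten from positional initializers, where each entry had to carry a long tail of zeros, to C99 designated initializers, so only the fields that differ from zero/false are spelled out. A trimmed-down illustration of the two forms (the struct and field names below are stand-ins, not the driver's struct llcc_slice_config):

#include <stdbool.h>
#include <stdio.h>

/* Cut-down stand-in for a slice config entry, with illustrative fields only. */
struct slice_cfg {
	int		id;
	int		max_cap;
	int		priority;
	bool		fixed_size;
	unsigned int	bonus_ways;
	bool		retain_on_pc;
};

/* Positional form: every field must be supplied in declaration order. */
static const struct slice_cfg old_style = { 1, 2048, 1, false, 0xff, true };

/* Designated form: only meaningful fields are named; the rest default to 0/false. */
static const struct slice_cfg new_style = {
	.id		= 1,
	.max_cap	= 2048,
	.priority	= 1,
	.bonus_ways	= 0xff,
	.retain_on_pc	= true,
};

int main(void)
{
	printf("%d %d\n", old_style.max_cap, new_style.max_cap);
	return 0;
}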
@@ -153,325 +153,2431 @@ enum llcc_reg_offset {
};
static const struct llcc_slice_config sa8775p_data[] = {
- {LLCC_CPUSS, 1, 2048, 1, 0, 0x00FF, 0x0, 0, 0, 0, 1, 1, 0, 0},
- {LLCC_VIDSC0, 2, 512, 3, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
- {LLCC_CPUSS1, 3, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
- {LLCC_CPUHWT, 5, 512, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
- {LLCC_AUDIO, 6, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CMPT, 10, 4096, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
- {LLCC_GPUHTW, 11, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
- {LLCC_GPU, 12, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 1, 0},
- {LLCC_MMUHWT, 13, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 1, 0, 0},
- {LLCC_CMPTDMA, 15, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
- {LLCC_DISP, 16, 4096, 2, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
- {LLCC_VIDFW, 17, 3072, 1, 0, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
- {LLCC_AUDHW, 22, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CVP, 28, 256, 3, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0},
- {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0},
- {LLCC_WRCACHE, 31, 512, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 1, 0, 0},
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 2048,
+ .priority = 1,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CPUSS1,
+ .slice_id = 3,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CPUHWT,
+ .slice_id = 5,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CMPTDMA,
+ .slice_id = 15,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 4096,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDFW,
+ .slice_id = 17,
+ .max_cap = 3072,
+ .priority = 1,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 28,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0xf0,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
};
static const struct llcc_slice_config sc7180_data[] = {
- { LLCC_CPUSS, 1, 256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 },
- { LLCC_MDM, 8, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPUHTW, 11, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPU, 12, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 256,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 128,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 128,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 128,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ },
};
static const struct llcc_slice_config sc7280_data[] = {
- { LLCC_CPUSS, 1, 768, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 1, 0},
- { LLCC_MDMHPGRW, 7, 512, 2, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_CMPT, 10, 768, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_GPUHTW, 11, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_GPU, 12, 512, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_MMUHWT, 13, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 0, 1, 0},
- { LLCC_MDMPNG, 21, 768, 0, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_WLHW, 24, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_MODPE, 29, 64, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 768,
+ .priority = 1,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 768,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WLHW,
+ .slice_id = 24,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 64,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3f,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ },
};
static const struct llcc_slice_config sc8180x_data[] = {
- { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1 },
- { LLCC_VIDSC0, 2, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_VIDSC1, 3, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MDMHPGRW, 7, 3072, 1, 1, 0x3ff, 0xc00, 0, 0, 0, 1, 0 },
- { LLCC_MDM, 8, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPU, 12, 5120, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1 },
- { LLCC_CMPTDMA, 15, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_VIDFW, 17, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MDMPNG, 21, 1024, 0, 1, 0xc, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_NPU, 23, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_WLHW, 24, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MODPE, 29, 512, 1, 1, 0xc, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_APTCM, 30, 512, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
- { LLCC_WRCACHE, 31, 128, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDSC1,
+ .slice_id = 3,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3ff,
+ .res_ways = 0xc00,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 5120,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CMPTDMA,
+ .slice_id = 15,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDFW,
+ .slice_id = 17,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 20,
+ .max_cap = 1024,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0xc,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_NPU,
+ .slice_id = 23,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WLHW,
+ .slice_id = 24,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xc,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0x1,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 128,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ },
};
static const struct llcc_slice_config sc8280xp_data[] = {
- { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
- { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
- { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
- { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_GPU, 12, 4096, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
- { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_AUDHW, 22, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_ECC, 26, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0, 0 },
- { LLCC_WRCACHE, 31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_CPUSS1, 3, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_ECC,
+ .slice_id = 26,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 28,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0x1,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CVPFW,
+ .slice_id = 17,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CPUSS1,
+ .slice_id = 3,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CPUHWT,
+ .slice_id = 5,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
};
-static const struct llcc_slice_config sdm845_data[] = {
- { LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1 },
- { LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
- { LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
- { LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0 },
- { LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0 },
- { LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0 },
- { LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1 },
- { LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
- { LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0 },
- { LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0 },
- { LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+static const struct llcc_slice_config sdm845_data[] = {{
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .res_ways = 0xf0,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDSC1,
+ .slice_id = 3,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .res_ways = 0xf0,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_ROTATOR,
+ .slice_id = 4,
+ .max_cap = 563,
+ .priority = 2,
+ .fixed_size = true,
+ .res_ways = 0xe,
+ .cache_mode = 2,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VOICE,
+ .slice_id = 5,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 1024,
+ .priority = 2,
+ .bonus_ways = 0xfc,
+ .res_ways = 0xf00,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xc,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 2304,
+ .priority = 1,
+ .bonus_ways = 0xff0,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 256,
+ .priority = 2,
+ .res_ways = 0x1,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CMPTDMA,
+ .slice_id = 15,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDFW,
+ .slice_id = 17,
+ .max_cap = 2816,
+ .priority = 1,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 20,
+ .max_cap = 1024,
+ .priority = 2,
+ .fixed_size = true,
+ .res_ways = 0xf00,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0x1e,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .res_ways = 0x2,
+ .cache_mode = 0,
+ .dis_cap_alloc = true,
+ .retain_on_pc = true,
+ },
};
static const struct llcc_slice_config sm6350_data[] = {
- { LLCC_CPUSS, 1, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1 },
- { LLCC_MDM, 8, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_GPUHTW, 11, 256, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_GPU, 12, 512, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_MDMPNG, 21, 768, 0, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_NPU, 23, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_MODPE, 29, 64, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 768,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 512,
+ .priority = 2,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 256,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 768,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_NPU,
+ .slice_id = 23,
+ .max_cap = 768,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 64,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
};
static const struct llcc_slice_config sm7150_data[] = {
- { LLCC_CPUSS, 1, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 1 },
- { LLCC_MDM, 8, 128, 2, 0, 0xF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPUHTW, 11, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPU, 12, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_NPU, 23, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 128,
+ .priority = 2,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_NPU,
+ .slice_id = 23,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ },
};
static const struct llcc_slice_config sm8150_data[] = {
- { LLCC_CPUSS, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1 },
- { LLCC_VIDSC0, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_VIDSC1, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_AUDIO, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MDMHPGRW, 7, 3072, 1, 0, 0xFF, 0xF00, 0, 0, 0, 1, 0 },
- { LLCC_MDM, 8, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MODHW, 9, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_CMPT, 10, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPUHTW , 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_GPU, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MMUHWT, 13, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1 },
- { LLCC_CMPTDMA, 15, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_DISP, 16, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MDMHPFX, 21, 1024, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_NPU, 23, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_WLHW, 24, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_MODPE, 29, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
- { LLCC_APTCM, 30, 256, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
- { LLCC_WRCACHE, 31, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDSC1,
+ .slice_id = 3,
+ .max_cap = 512,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 3072,
+ .priority = 1,
+ .bonus_ways = 0xff,
+ .res_ways = 0xf00,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDM,
+ .slice_id = 8,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 2560,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CMPTDMA,
+ .slice_id = 15,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 20,
+ .max_cap = 1024,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 21,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_NPU,
+ .slice_id = 23,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WLHW,
+ .slice_id = 24,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0x1,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 128,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ },
};
static const struct llcc_slice_config sm8250_data[] = {
- { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
- { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_AUDIO, 6, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
- { LLCC_CMPT, 10, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
- { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
- { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CMPTDMA, 15, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_DISP, 16, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_VIDFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_NPU, 23, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_WLHW, 24, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_CVP, 28, 256, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_APTCM, 30, 128, 3, 0, 0x0, 0x3, 1, 0, 0, 1, 0, 0 },
- { LLCC_WRCACHE, 31, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 1024,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 1024,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CMPTDMA,
+ .slice_id = 15,
+ .max_cap = 1024,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_VIDFW,
+ .slice_id = 17,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_NPU,
+ .slice_id = 23,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WLHW,
+ .slice_id = 24,
+ .max_cap = 1024,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 28,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 128,
+ .priority = 3,
+ .res_ways = 0x3,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
};
static const struct llcc_slice_config sm8350_data[] = {
- { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 1 },
- { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
- { LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CMPT, 10, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
- { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
- { LLCC_DISP, 16, 3072, 2, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_MDMPNG, 21, 1024, 0, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_MODPE, 29, 256, 1, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 0, 1, 0 },
- { LLCC_WRCACHE, 31, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
- { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CPUSS1, 3, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 1024,
+ .priority = 3,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 1024,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 3072,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 28,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0x1,
+ .cache_mode = 1,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_CVPFW,
+ .slice_id = 17,
+ .max_cap = 512,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CPUSS1,
+ .slice_id = 3,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CPUHWT,
+ .slice_id = 5,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .write_scid_en = true,
+ },
};
static const struct llcc_slice_config sm8450_data[] = {
- {LLCC_CPUSS, 1, 3072, 1, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 },
- {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
- {LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_MODHW, 9, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_GPU, 12, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 1, 0 },
- {LLCC_MMUHWT, 13, 768, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
- {LLCC_DISP, 16, 4096, 2, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_MDMPNG, 21, 1024, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
- {LLCC_CVP, 28, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_MODPE, 29, 64, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0 },
- {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
- {LLCC_CVPFW, 17, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_CPUSS1, 3, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_CAMEXP0, 4, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_CPUMTE, 23, 256, 1, 1, 0x0FFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
- {LLCC_CPUHWT, 5, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 },
- {LLCC_CAMEXP1, 27, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- {LLCC_AENPU, 8, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 3072,
+ .priority = 1,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 1024,
+ .priority = 3,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 12,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 13,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 4096,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf000,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 28,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 64,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf000,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0xf0,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CVPFW,
+ .slice_id = 17,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CPUSS1,
+ .slice_id = 3,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CAMEXP0,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_CPUMTE,
+ .slice_id = 23,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CPUHWT,
+ .slice_id = 5,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CAMEXP1,
+ .slice_id = 27,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 8,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffff,
+ .cache_mode = 0,
+ },
};
static const struct llcc_slice_config sm8550_data[] = {
- {LLCC_CPUSS, 1, 5120, 1, 0, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_VIDSC0, 2, 512, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_MDMHPGRW, 25, 1024, 4, 0, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_MODHW, 26, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_GPU, 9, 3096, 1, 0, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_MMUHWT, 18, 768, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_DISP, 16, 6144, 1, 1, 0xFFFFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_MDMPNG, 27, 1024, 0, 1, 0xF00000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CVP, 8, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_MODPE, 29, 64, 1, 1, 0xF00000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, },
- {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CAMEXP0, 4, 256, 4, 1, 0xF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CPUHWT, 5, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CAMEXP1, 7, 3200, 3, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CMPTHCP, 17, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_LCPDARE, 30, 128, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, },
- {LLCC_AENPU, 3, 3072, 1, 1, 0xFE01FF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_ISLAND1, 12, 1792, 7, 1, 0xFE00, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_ISLAND4, 15, 256, 7, 1, 0x10000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CAMEXP2, 19, 3200, 3, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CAMEXP3, 20, 3200, 2, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_CAMEXP4, 21, 3200, 2, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_DISP_WB, 23, 1024, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_DISP_1, 24, 6144, 1, 1, 0xFFFFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- {LLCC_VIDVSP, 28, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 5120,
+ .priority = 1,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 25,
+ .max_cap = 1024,
+ .priority = 4,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 26,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 9,
+ .max_cap = 3096,
+ .priority = 1,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .write_scid_en = true,
+ .write_scid_cacheable_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 18,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 27,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0xf00000,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 8,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 64,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf00000,
+ .cache_mode = 0,
+ .alloc_oneway_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CAMEXP0,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CPUHWT,
+ .slice_id = 5,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CAMEXP1,
+ .slice_id = 7,
+ .max_cap = 3200,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfffff0,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_CMPTHCP,
+ .slice_id = 17,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_LCPDARE,
+ .slice_id = 30,
+ .max_cap = 128,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .alloc_oneway_en = true,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 3,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfe01ff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_ISLAND1,
+ .slice_id = 12,
+ .max_cap = 1792,
+ .priority = 7,
+ .fixed_size = true,
+ .bonus_ways = 0xfe00,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_ISLAND4,
+ .slice_id = 15,
+ .max_cap = 256,
+ .priority = 7,
+ .fixed_size = true,
+ .bonus_ways = 0x10000,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CAMEXP2,
+ .slice_id = 19,
+ .max_cap = 3200,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfffff0,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_CAMEXP3,
+ .slice_id = 20,
+ .max_cap = 3200,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfffff0,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_CAMEXP4,
+ .slice_id = 21,
+ .max_cap = 3200,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xfffff0,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_DISP_WB,
+ .slice_id = 23,
+ .max_cap = 1024,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_DISP_1,
+ .slice_id = 24,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_VIDVSP,
+ .slice_id = 28,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ },
};
static const struct llcc_slice_config sm8650_data[] = {
- {LLCC_CPUSS, 1, 5120, 1, 0, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_AUDIO, 6, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_MDMHPGRW, 25, 1024, 3, 0, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_MODHW, 26, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_GPU, 9, 3096, 1, 0, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_MMUHWT, 18, 768, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_DISP, 16, 6144, 1, 1, 0xFFFFFF, 0x0, 2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_MDMHPFX, 24, 1024, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_MDMPNG, 27, 1024, 0, 1, 0x000000, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CVP, 8, 256, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_MODPE, 29, 128, 1, 1, 0xF00000, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
- {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CAMEXP0, 4, 256, 3, 1, 0xF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CAMEXP1, 7, 3200, 3, 1, 0xFFFFF0, 0x0, 2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CMPTHCP, 17, 256, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_LCPDARE, 30, 128, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
- {LLCC_AENPU, 3, 3072, 1, 1, 0xFFFFFF, 0x0, 2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_ISLAND1, 12, 5888, 7, 1, 0x0, 0x7FFFFF, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_DISP_WB, 23, 1024, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_VIDVSP, 28, 256, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 5120,
+ .priority = 1,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .stale_en = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 25,
+ .max_cap = 1024,
+ .priority = 3,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 26,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 9,
+ .max_cap = 3096,
+ .priority = 1,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .write_scid_en = true,
+ .write_scid_cacheable_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 18,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 24,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 27,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 8,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 128,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf00000,
+ .cache_mode = 0,
+ .alloc_oneway_en = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CAMEXP0,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xf,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CAMEXP1,
+ .slice_id = 7,
+ .max_cap = 3200,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfffff0,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_CMPTHCP,
+ .slice_id = 17,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_LCPDARE,
+ .slice_id = 30,
+ .max_cap = 128,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .alloc_oneway_en = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 3,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_ISLAND1,
+ .slice_id = 12,
+ .max_cap = 5888,
+ .priority = 7,
+ .fixed_size = true,
+ .res_ways = 0x7fffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_DISP_WB,
+ .slice_id = 23,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_VIDVSP,
+ .slice_id = 28,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffffff,
+ .cache_mode = 0,
+ },
};
static const struct llcc_slice_config qdu1000_data_2ch[] = {
- { LLCC_MDMHPGRW, 7, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- { LLCC_MODHW, 9, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- { LLCC_MDMPNG, 21, 256, 0, 1, 0x3, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- { LLCC_ECC, 26, 512, 3, 1, 0xffc, 0x0, 0, 0, 0, 0, 1, 0, 0 },
- { LLCC_MODPE, 29, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- { LLCC_APTCM, 30, 256, 3, 1, 0x0, 0xc, 1, 0, 0, 1, 0, 0, 0 },
- { LLCC_WRCACHE, 31, 128, 1, 1, 0x3, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+ {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 256,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_ECC,
+ .slice_id = 26,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 256,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0xc,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 128,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
};
static const struct llcc_slice_config qdu1000_data_4ch[] = {
- { LLCC_MDMHPGRW, 7, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- { LLCC_MODHW, 9, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- { LLCC_MDMPNG, 21, 512, 0, 1, 0x3, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- { LLCC_ECC, 26, 1024, 3, 1, 0xffc, 0x0, 0, 0, 0, 0, 1, 0, 0 },
- { LLCC_MODPE, 29, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- { LLCC_APTCM, 30, 512, 3, 1, 0x0, 0xc, 1, 0, 0, 1, 0, 0, 0 },
- { LLCC_WRCACHE, 31, 256, 1, 1, 0x3, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+ {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 512,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_ECC,
+ .slice_id = 26,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0xc,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
};
static const struct llcc_slice_config qdu1000_data_8ch[] = {
- { LLCC_MDMHPGRW, 7, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- { LLCC_MDMPNG, 21, 1024, 0, 1, 0x3, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- { LLCC_ECC, 26, 2048, 3, 1, 0xffc, 0x0, 0, 0, 0, 0, 1, 0, 0 },
- { LLCC_MODPE, 29, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
- { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xc, 1, 0, 0, 1, 0, 0, 0 },
- { LLCC_WRCACHE, 31, 512, 1, 1, 0x3, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+ {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 7,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 9,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 21,
+ .max_cap = 1024,
+ .priority = 0,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_ECC,
+ .slice_id = 26,
+ .max_cap = 2048,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_APTCM,
+ .slice_id = 30,
+ .max_cap = 1024,
+ .priority = 3,
+ .fixed_size = true,
+ .res_ways = 0xc,
+ .cache_mode = 1,
+ .retain_on_pc = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ },
};
static const struct llcc_slice_config x1e80100_data[] = {
- {LLCC_CPUSS, 1, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_VIDSC0, 2, 512, 4, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CMPT, 10, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_GPU, 9, 4608, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_MMUHWT, 18, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CVP, 8, 512, 4, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_WRCACHE, 31, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CAMEXP0, 4, 256, 4, 1, 0x3, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CAMEXP1, 7, 3072, 3, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_LCPDARE, 30, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
- {LLCC_AENPU, 3, 3072, 1, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_ISLAND1, 12, 2048, 7, 1, 0x0, 0xF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CAMEXP2, 19, 3072, 3, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CAMEXP3, 20, 3072, 2, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CAMEXP4, 21, 3072, 2, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 6,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 10,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 9,
+ .max_cap = 4608,
+ .priority = 1,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .write_scid_en = true,
+ .write_scid_cacheable_en = true,
+ .stale_en = true,
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 18,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 8,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CAMEXP0,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0x3,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CAMEXP1,
+ .slice_id = 7,
+ .max_cap = 3072,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_LCPDARE,
+ .slice_id = 30,
+ .max_cap = 512,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 0,
+ .activate_on_init = true,
+ .alloc_oneway_en = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 3,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xfff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_ISLAND1,
+ .slice_id = 12,
+ .max_cap = 2048,
+ .priority = 7,
+ .fixed_size = true,
+ .res_ways = 0xf,
+ .cache_mode = 0,
+ }, {
+ .usecase_id = LLCC_CAMEXP2,
+ .slice_id = 19,
+ .max_cap = 3072,
+ .priority = 3,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_CAMEXP3,
+ .slice_id = 20,
+ .max_cap = 3072,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_CAMEXP4,
+ .slice_id = 21,
+ .max_cap = 3072,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xffc,
+ .cache_mode = 2,
+ },
};
static const struct llcc_edac_reg_offset llcc_v1_edac_reg_offset = {
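The qcom LLCC tables above are converted from positional initializers to designated initializers, so unset fields default to zero and each value is self-describing. A minimal sketch of the idea, using a reduced, hypothetical subset of the real llcc_slice_config fields (the actual struct has many more members):

#include <stdbool.h>
#include <stdint.h>

/* Reduced, hypothetical subset of llcc_slice_config, for illustration only;
 * the real structure has many more members.
 */
struct llcc_slice_cfg_demo {
	uint32_t usecase_id;
	uint32_t slice_id;
	uint32_t max_cap;
	uint32_t priority;
	bool fixed_size;
	uint32_t bonus_ways;
	uint32_t cache_mode;
	bool activate_on_init;
};

/* Positional form: every field must be written out in declaration order. */
static const struct llcc_slice_cfg_demo old_style = {
	1, 1, 5120, 1, false, 0xffffff, 0, true,
};

/* Designated form: only the meaningful fields are named; the rest stay 0. */
static const struct llcc_slice_cfg_demo new_style = {
	.usecase_id       = 1,
	.slice_id         = 1,
	.max_cap          = 5120,
	.priority         = 1,
	.bonus_ways       = 0xffffff,
	.activate_on_init = true,
};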
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index c940f4da28ed..6e30f08761aa 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -540,6 +540,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,msm8996", .data = msm8996_domains, },
{ .compatible = "qcom,msm8998", .data = msm8998_domains, },
{ .compatible = "qcom,qcm2290", .data = qcm2290_domains, },
+ { .compatible = "qcom,qcm6490", .data = sc7280_domains, },
{ .compatible = "qcom,qcs404", .data = qcs404_domains, },
{ .compatible = "qcom,sc7180", .data = sc7180_domains, },
{ .compatible = "qcom,sc7280", .data = sc7280_domains, },
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 9573b8fa4fbf..29b9676fe43d 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -315,9 +315,10 @@ static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
{
struct lpspi_config config = fsl_lpspi->config;
- unsigned int perclk_rate, scldiv, div;
+ unsigned int perclk_rate, div;
u8 prescale_max;
u8 prescale;
+ int scldiv;
perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
prescale_max = fsl_lpspi->devtype_data->prescale_max;
@@ -338,13 +339,13 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
for (prescale = 0; prescale <= prescale_max; prescale++) {
scldiv = div / (1 << prescale) - 2;
- if (scldiv < 256) {
+ if (scldiv >= 0 && scldiv < 256) {
fsl_lpspi->config.prescale = prescale;
break;
}
}
- if (scldiv >= 256)
+ if (scldiv < 0 || scldiv >= 256)
return -EINVAL;
writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
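The lpspi hunk above makes scldiv a signed int so the range check can reject a negative divider explicitly instead of relying on unsigned wrap-around. A standalone sketch (not the driver code) of why the signedness matters for the comparison:

#include <stdio.h>

/* Sketch (not the driver code): a divider of the form
 * div / (1 << prescale) - 2 goes negative for small div values.
 * Stored in an unsigned variable it wraps to a huge number, so the
 * caller can no longer tell "negative result" apart from "divider
 * too large"; keeping it signed preserves that distinction.
 */
int main(void)
{
	unsigned int div = 1, prescale = 0;

	unsigned int scldiv_u = div / (1u << prescale) - 2;
	int scldiv_s = (int)(div / (1u << prescale)) - 2;

	printf("unsigned result: %u\n", scldiv_u);	/* wraps to UINT_MAX */
	printf("signed result:   %d\n", scldiv_s);	/* -1 */

	if (scldiv_s >= 0 && scldiv_s < 256)
		printf("signed check: usable divider\n");
	else
		printf("signed check: reject (%s)\n",
		       scldiv_s < 0 ? "negative" : "too large");
	return 0;
}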
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index d5ac60c135c2..159f359d7501 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -520,6 +520,7 @@ static void mpc52xx_spi_remove(struct platform_device *op)
struct mpc52xx_spi *ms = spi_controller_get_devdata(host);
int i;
+ cancel_work_sync(&ms->work);
free_irq(ms->irq0, ms);
free_irq(ms->irq1, ms);
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index dc1c4ae2d8b0..1a7874676f68 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -162,28 +162,35 @@ struct tsens_plat_data data_tsens_v1 = {
.fields = tsens_v1_regfields,
};
-static const struct tsens_ops ops_8956 = {
- .init = init_8956,
+static const struct tsens_ops ops_common = {
+ .init = init_common,
.calibrate = tsens_calibrate_common,
.get_temp = get_temp_tsens_valid,
};
-struct tsens_plat_data data_8956 = {
+struct tsens_plat_data data_8937 = {
.num_sensors = 11,
- .ops = &ops_8956,
+ .ops = &ops_common,
.feat = &tsens_v1_feat,
.fields = tsens_v1_regfields,
};
-static const struct tsens_ops ops_8976 = {
- .init = init_common,
+static const struct tsens_ops ops_8956 = {
+ .init = init_8956,
.calibrate = tsens_calibrate_common,
.get_temp = get_temp_tsens_valid,
};
+struct tsens_plat_data data_8956 = {
+ .num_sensors = 11,
+ .ops = &ops_8956,
+ .feat = &tsens_v1_feat,
+ .fields = tsens_v1_regfields,
+};
+
struct tsens_plat_data data_8976 = {
.num_sensors = 11,
- .ops = &ops_8976,
+ .ops = &ops_common,
.feat = &tsens_v1_feat,
.fields = tsens_v1_regfields,
};
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 0b4421bf4785..d2db804692f0 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -1120,6 +1120,9 @@ static const struct of_device_id tsens_table[] = {
.compatible = "qcom,msm8916-tsens",
.data = &data_8916,
}, {
+ .compatible = "qcom,msm8937-tsens",
+ .data = &data_8937,
+ }, {
.compatible = "qcom,msm8939-tsens",
.data = &data_8939,
}, {
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index cab39de045b1..7b36a0318fa6 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -647,7 +647,7 @@ extern struct tsens_plat_data data_8960;
extern struct tsens_plat_data data_8226, data_8909, data_8916, data_8939, data_8974, data_9607;
/* TSENS v1 targets */
-extern struct tsens_plat_data data_tsens_v1, data_8976, data_8956;
+extern struct tsens_plat_data data_tsens_v1, data_8937, data_8976, data_8956;
/* TSENS v2 targets */
extern struct tsens_plat_data data_8996, data_ipq8074, data_tsens_v2;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index ab9e7f204260..51894c93c8a3 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -750,7 +750,7 @@ static const struct dw8250_platform_data dw8250_renesas_rzn1_data = {
.quirks = DW_UART_QUIRK_CPR_VALUE | DW_UART_QUIRK_IS_DMA_FC,
};
-static const struct dw8250_platform_data dw8250_starfive_jh7100_data = {
+static const struct dw8250_platform_data dw8250_skip_set_rate_data = {
.usr_reg = DW_UART_USR,
.quirks = DW_UART_QUIRK_SKIP_SET_RATE,
};
@@ -760,7 +760,8 @@ static const struct of_device_id dw8250_of_match[] = {
{ .compatible = "cavium,octeon-3860-uart", .data = &dw8250_octeon_3860_data },
{ .compatible = "marvell,armada-38x-uart", .data = &dw8250_armada_38x_data },
{ .compatible = "renesas,rzn1-uart", .data = &dw8250_renesas_rzn1_data },
- { .compatible = "starfive,jh7100-uart", .data = &dw8250_starfive_jh7100_data },
+ { .compatible = "sophgo,sg2044-uart", .data = &dw8250_skip_set_rate_data },
+ { .compatible = "starfive,jh7100-uart", .data = &dw8250_skip_set_rate_data },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dw8250_of_match);
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 265f21133b63..796e37a1d859 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -670,6 +670,9 @@ static ssize_t read_req_latency_avg_show(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_hba_monitor *m = &hba->monitor;
+ if (!m->nr_req[READ])
+ return sysfs_emit(buf, "0\n");
+
return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
m->nr_req[READ]));
}
@@ -737,6 +740,9 @@ static ssize_t write_req_latency_avg_show(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_hba_monitor *m = &hba->monitor;
+ if (!m->nr_req[WRITE])
+ return sysfs_emit(buf, "0\n");
+
return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
m->nr_req[WRITE]));
}
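The two sysfs hunks above guard the average-latency computation against a zero request count. A hedged sketch of the same pattern as a plain helper (names are illustrative, not part of the driver):

#include <stdint.h>

/* Hedged sketch of the pattern above: report an average of 0 instead of
 * dividing by zero while no requests have been monitored yet. Names are
 * illustrative and not part of the driver.
 */
uint64_t avg_latency_us(uint64_t lat_sum_us, uint32_t nr_req)
{
	if (!nr_req)
		return 0;
	return lat_sum_us / nr_req;
}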
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 433d0480391e..6c09d97ae006 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -170,7 +170,7 @@ static int ufs_bsg_request(struct bsg_job *job)
break;
case UPIU_TRANSACTION_UIC_CMD:
memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
- ret = ufshcd_send_uic_cmd(hba, &uc);
+ ret = ufshcd_send_bsg_uic_cmd(hba, &uc);
if (ret)
dev_err(hba->dev, "send uic cmd: error code %d\n", ret);
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 7aea8fbaeee8..9ffd94ddf8c7 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -84,6 +84,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
u8 **buf, bool ascii);
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
+int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index abbe7135a977..cfebe4a1af9e 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -2411,8 +2411,6 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
int err;
hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
- hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
/* nutrs and nutmrs are 0 based values */
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
@@ -2551,13 +2549,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
* @hba: per adapter instance
* @uic_cmd: UIC command
- * @completion: initialize the completion only if this is set to true
*
* Return: 0 only if success.
*/
static int
-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
- bool completion)
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
lockdep_assert_held(&hba->uic_cmd_mutex);
@@ -2567,8 +2563,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
return -EIO;
}
- if (completion)
- init_completion(&uic_cmd->done);
+ init_completion(&uic_cmd->done);
uic_cmd->cmd_active = 1;
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
@@ -2594,7 +2589,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
@@ -4288,7 +4283,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
reenable_intr = true;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ ret = __ufshcd_send_uic_cmd(hba, cmd);
if (ret) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
@@ -4344,6 +4339,42 @@ out_unlock:
}
/**
+ * ufshcd_send_bsg_uic_cmd - Send UIC commands requested via BSG layer and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Return: 0 only if success.
+ */
+int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ int ret;
+
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
+ return 0;
+
+ ufshcd_hold(hba);
+
+ if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) &&
+ uic_cmd->command == UIC_CMD_DME_SET) {
+ ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
+ goto out;
+ }
+
+ mutex_lock(&hba->uic_cmd_mutex);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+ if (!ret)
+ ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+out:
+ ufshcd_release(hba);
+ return ret;
+}
+
+/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
* using DME_SET primitives.
* @hba: per adapter instance
@@ -4651,9 +4682,6 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
dev_err(hba->dev,
"%s: power mode change failed %d\n", __func__, ret);
} else {
- ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
- pwr_mode);
-
memcpy(&hba->pwr_info, pwr_mode,
sizeof(struct ufs_pa_layer_attr));
}
@@ -4682,6 +4710,10 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba,
ret = ufshcd_change_power_mode(hba, &final_params);
+ if (!ret)
+ ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
+ &final_params);
+
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
@@ -10231,6 +10263,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufs_hwmon_remove(hba);
ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
+ cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
@@ -10309,6 +10342,8 @@ EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
*/
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
+ if (hba->vops && hba->vops->set_dma_mask)
+ return hba->vops->set_dma_mask(hba);
if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
return 0;
diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
index 66811d8d1929..b31aa8411151 100644
--- a/drivers/ufs/host/cdns-pltfrm.c
+++ b/drivers/ufs/host/cdns-pltfrm.c
@@ -307,9 +307,7 @@ static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
*/
static void cdns_ufs_pltfrm_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- ufshcd_remove(hba);
+ ufshcd_pltfrm_remove(pdev);
}
static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
diff --git a/drivers/ufs/host/tc-dwc-g210-pltfrm.c b/drivers/ufs/host/tc-dwc-g210-pltfrm.c
index a3877592604d..c6f8565ede21 100644
--- a/drivers/ufs/host/tc-dwc-g210-pltfrm.c
+++ b/drivers/ufs/host/tc-dwc-g210-pltfrm.c
@@ -76,10 +76,7 @@ static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev)
*/
static void tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
+ ufshcd_pltfrm_remove(pdev);
}
static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index fb550a7c16b3..98505c68103d 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1963,8 +1963,7 @@ static void exynos_ufs_remove(struct platform_device *pdev)
struct ufs_hba *hba = platform_get_drvdata(pdev);
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
+ ufshcd_pltfrm_remove(pdev);
phy_power_off(ufs->phy);
phy_exit(ufs->phy);
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 5ee73ff05251..501609521b26 100644
--- a/drivers/ufs/host/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -576,9 +576,7 @@ static int ufs_hisi_probe(struct platform_device *pdev)
static void ufs_hisi_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- ufshcd_remove(hba);
+ ufshcd_pltfrm_remove(pdev);
}
static const struct dev_pm_ops ufs_hisi_pm_ops = {
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 9a5919434c4e..c834d38921b6 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -1869,10 +1869,7 @@ out:
*/
static void ufs_mtk_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
+ ufshcd_pltfrm_remove(pdev);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index ecdfff2456e3..91127fb17186 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1843,10 +1843,11 @@ static int ufs_qcom_probe(struct platform_device *pdev)
static void ufs_qcom_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
- platform_device_msi_free_irqs_all(hba->dev);
+ ufshcd_pltfrm_remove(pdev);
+ if (host->esi_enabled)
+ platform_device_msi_free_irqs_all(hba->dev);
}
static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
index 8711e5cbc968..21a64b34397d 100644
--- a/drivers/ufs/host/ufs-renesas.c
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -364,14 +365,20 @@ static int ufs_renesas_init(struct ufs_hba *hba)
return -ENOMEM;
ufshcd_set_variant(hba, priv);
- hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO;
+ hba->quirks |= UFSHCD_QUIRK_HIBERN_FASTAUTO;
return 0;
}
+static int ufs_renesas_set_dma_mask(struct ufs_hba *hba)
+{
+ return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
+}
+
static const struct ufs_hba_variant_ops ufs_renesas_vops = {
.name = "renesas",
.init = ufs_renesas_init,
+ .set_dma_mask = ufs_renesas_set_dma_mask,
.setup_clocks = ufs_renesas_setup_clocks,
.hce_enable_notify = ufs_renesas_hce_enable_notify,
.dbg_register_dump = ufs_renesas_dbg_register_dump,
@@ -390,9 +397,7 @@ static int ufs_renesas_probe(struct platform_device *pdev)
static void ufs_renesas_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- ufshcd_remove(hba);
+ ufshcd_pltfrm_remove(pdev);
}
static struct platform_driver ufs_renesas_platform = {
diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c
index d8b165908809..d220978c2d8c 100644
--- a/drivers/ufs/host/ufs-sprd.c
+++ b/drivers/ufs/host/ufs-sprd.c
@@ -427,10 +427,7 @@ static int ufs_sprd_probe(struct platform_device *pdev)
static void ufs_sprd_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
+ ufshcd_pltfrm_remove(pdev);
}
static const struct dev_pm_ops ufs_sprd_pm_ops = {
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index 1f4f30d6cb42..505572d4fa87 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -524,6 +524,22 @@ out:
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
+/**
+ * ufshcd_pltfrm_remove - Remove ufshcd platform
+ * @pdev: pointer to Platform device handle
+ */
+void ufshcd_pltfrm_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+ ufshcd_remove(hba);
+ ufshcd_dealloc_host(hba);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+}
+EXPORT_SYMBOL_GPL(ufshcd_pltfrm_remove);
+
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
index df387be5216b..3017f8e8f93c 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.h
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -31,6 +31,7 @@ int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
void ufshcd_init_host_params(struct ufs_host_params *host_params);
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
+void ufshcd_pltfrm_remove(struct platform_device *pdev);
int ufshcd_populate_vreg(struct device *dev, const char *name,
struct ufs_vreg **out_vreg, bool skip_current);
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 2a38e1eb6546..97437de52ef6 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -25,6 +25,7 @@
#define TD_PAGE_COUNT 5
#define CI_HDRC_PAGE_SIZE 4096ul /* page size for TD's */
#define ENDPT_MAX 32
+#define CI_MAX_REQ_SIZE (4 * CI_HDRC_PAGE_SIZE)
#define CI_MAX_BUF_SIZE (TD_PAGE_COUNT * CI_HDRC_PAGE_SIZE)
/******************************************************************************
@@ -260,6 +261,7 @@ struct ci_hdrc {
bool b_sess_valid_event;
bool imx28_write_fix;
bool has_portsc_pec_bug;
+ bool has_short_pkt_limit;
bool supports_runtime_pm;
bool in_lpm;
bool wakeup_int;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index c64ab0e07ea0..17b3ac2ac8a1 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -342,6 +342,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
struct ci_hdrc_platform_data pdata = {
.name = dev_name(&pdev->dev),
.capoffset = DEF_CAPOFFSET,
+ .flags = CI_HDRC_HAS_SHORT_PKT_LIMIT,
.notify_event = ci_hdrc_imx_notify_event,
};
int ret;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 835bf2428dc6..5aa16dbfc289 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1076,6 +1076,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
CI_HDRC_SUPPORTS_RUNTIME_PM);
ci->has_portsc_pec_bug = !!(ci->platdata->flags &
CI_HDRC_HAS_PORTSC_PEC_MISSED);
+ ci->has_short_pkt_limit = !!(ci->platdata->flags &
+ CI_HDRC_HAS_SHORT_PKT_LIMIT);
platform_set_drvdata(pdev, ci);
ret = hw_device_init(ci, base);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 69ef3cd8d4f8..fd6032874bf3 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
+#include <linux/dma-direct.h>
#include <linux/err.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
@@ -540,6 +541,126 @@ static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
return ret;
}
+/*
+ * Walk the scatterlist and validate each sg entry.
+ * Return the index of the first invalid entry; a return value equal to
+ * req->num_sgs means every entry is valid.
+ */
+static int sglist_get_invalid_entry(struct device *dma_dev, u8 dir,
+ struct usb_request *req)
+{
+ int i;
+ struct scatterlist *s = req->sg;
+
+ if (req->num_sgs == 1)
+ return 1;
+
+ dir = dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ for (i = 0; i < req->num_sgs; i++, s = sg_next(s)) {
+ /* Only a small sg (generally the last one) may be bounced. If
+ * that happens, we can't ensure the address is page-aligned
+ * after the DMA mapping.
+ */
+ if (dma_kmalloc_needs_bounce(dma_dev, s->length, dir))
+ break;
+
+ /* Make sure each sg start address (except first sg) is
+ * page-aligned and end address (except last sg) is also
+ * page-aligned.
+ */
+ if (i == 0) {
+ if (!IS_ALIGNED(s->offset + s->length,
+ CI_HDRC_PAGE_SIZE))
+ break;
+ } else {
+ if (s->offset)
+ break;
+ if (!sg_is_last(s) && !IS_ALIGNED(s->length,
+ CI_HDRC_PAGE_SIZE))
+ break;
+ }
+ }
+
+ return i;
+}
+
+static int sglist_do_bounce(struct ci_hw_req *hwreq, int index,
+ bool copy, unsigned int *bounced)
+{
+ void *buf;
+ int i, ret, nents, num_sgs;
+ unsigned int rest, rounded;
+ struct scatterlist *sg, *src, *dst;
+
+ nents = index + 1;
+ ret = sg_alloc_table(&hwreq->sgt, nents, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ sg = src = hwreq->req.sg;
+ num_sgs = hwreq->req.num_sgs;
+ rest = hwreq->req.length;
+ dst = hwreq->sgt.sgl;
+
+ for (i = 0; i < index; i++) {
+ memcpy(dst, src, sizeof(*src));
+ rest -= src->length;
+ src = sg_next(src);
+ dst = sg_next(dst);
+ }
+
+ /* create one bounce buffer */
+ rounded = round_up(rest, CI_HDRC_PAGE_SIZE);
+ buf = kmalloc(rounded, GFP_KERNEL);
+ if (!buf) {
+ sg_free_table(&hwreq->sgt);
+ return -ENOMEM;
+ }
+
+ sg_set_buf(dst, buf, rounded);
+
+ hwreq->req.sg = hwreq->sgt.sgl;
+ hwreq->req.num_sgs = nents;
+ hwreq->sgt.sgl = sg;
+ hwreq->sgt.nents = num_sgs;
+
+ if (copy)
+ sg_copy_to_buffer(src, num_sgs - index, buf, rest);
+
+ *bounced = rest;
+
+ return 0;
+}
+
+static void sglist_do_debounce(struct ci_hw_req *hwreq, bool copy)
+{
+ void *buf;
+ int i, nents, num_sgs;
+ struct scatterlist *sg, *src, *dst;
+
+ sg = hwreq->req.sg;
+ num_sgs = hwreq->req.num_sgs;
+ src = sg_last(sg, num_sgs);
+ buf = sg_virt(src);
+
+ if (copy) {
+ dst = hwreq->sgt.sgl;
+ for (i = 0; i < num_sgs - 1; i++)
+ dst = sg_next(dst);
+
+ nents = hwreq->sgt.nents - num_sgs + 1;
+ sg_copy_from_buffer(dst, nents, buf, sg_dma_len(src));
+ }
+
+ hwreq->req.sg = hwreq->sgt.sgl;
+ hwreq->req.num_sgs = hwreq->sgt.nents;
+ hwreq->sgt.sgl = sg;
+ hwreq->sgt.nents = num_sgs;
+
+ kfree(buf);
+ sg_free_table(&hwreq->sgt);
+}
+
/**
* _hardware_enqueue: configures a request at hardware level
* @hwep: endpoint
@@ -552,6 +673,8 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
struct ci_hdrc *ci = hwep->ci;
int ret = 0;
struct td_node *firstnode, *lastnode;
+ unsigned int bounced_size;
+ struct scatterlist *sg;
/* don't queue twice */
if (hwreq->req.status == -EALREADY)
@@ -559,11 +682,29 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
hwreq->req.status = -EALREADY;
+ if (hwreq->req.num_sgs && hwreq->req.length &&
+ ci->has_short_pkt_limit) {
+ ret = sglist_get_invalid_entry(ci->dev->parent, hwep->dir,
+ &hwreq->req);
+ if (ret < hwreq->req.num_sgs) {
+ ret = sglist_do_bounce(hwreq, ret, hwep->dir == TX,
+ &bounced_size);
+ if (ret)
+ return ret;
+ }
+ }
+
ret = usb_gadget_map_request_by_dev(ci->dev->parent,
&hwreq->req, hwep->dir);
if (ret)
return ret;
+ if (hwreq->sgt.sgl) {
+ /* We've mapped a bigger buffer, now recover the actual size */
+ sg = sg_last(hwreq->req.sg, hwreq->req.num_sgs);
+ sg_dma_len(sg) = min(sg_dma_len(sg), bounced_size);
+ }
+
if (hwreq->req.num_mapped_sgs)
ret = prepare_td_for_sg(hwep, hwreq);
else
@@ -733,6 +874,10 @@ static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
&hwreq->req, hwep->dir);
+ /* sglist bounced */
+ if (hwreq->sgt.sgl)
+ sglist_do_debounce(hwreq, hwep->dir == RX);
+
hwreq->req.actual += actual;
if (hwreq->req.status)
@@ -960,6 +1105,12 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
return -EMSGSIZE;
}
+ if (ci->has_short_pkt_limit &&
+ hwreq->req.length > CI_MAX_REQ_SIZE) {
+ dev_err(hwep->ci->dev, "request length too big (max 16KB)\n");
+ return -EMSGSIZE;
+ }
+
/* first nuke then test link, e.g. previous status has not sent */
if (!list_empty(&hwreq->queue)) {
dev_err(hwep->ci->dev, "request already in queue\n");
@@ -1574,6 +1725,9 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
+ if (hwreq->sgt.sgl)
+ sglist_do_debounce(hwreq, false);
+
req->status = -ECONNRESET;
if (hwreq->req.complete != NULL) {
@@ -2063,7 +2217,7 @@ static irqreturn_t udc_irq(struct ci_hdrc *ci)
}
}
- if (USBi_UI & intr)
+ if ((USBi_UI | USBi_UEI) & intr)
isr_tr_complete_handler(ci);
if ((USBi_SLI & intr) && !(ci->suspended)) {
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index 5193df1e18c7..c8a47389a46b 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -69,11 +69,13 @@ struct td_node {
* @req: request structure for gadget drivers
* @queue: link to QH list
* @tds: link to TD list
+ * @sgt: holds the original sglist while the request's sglist is bounced
*/
struct ci_hw_req {
struct usb_request req;
struct list_head queue;
struct list_head tds;
+ struct sg_table sgt;
};
#ifdef CONFIG_USB_CHIPIDEA_UDC
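The chipidea changes above bounce the tail of a scatterlist whose entries are not page-aligned. As a hedged illustration of the alignment rule that sglist_get_invalid_entry() enforces (the constant and helper name below are illustrative only):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096u

/* Hedged sketch of the rule sglist_get_invalid_entry() enforces: in a
 * multi-entry scatterlist every entry except the first must start on a
 * page boundary, and every entry except the last must also end on one,
 * because the controller builds page-sized transfer descriptors.
 */
bool sg_entry_ok(bool first, bool last, uint32_t offset, uint32_t length)
{
	if (first)
		return last || (offset + length) % DEMO_PAGE_SIZE == 0;
	if (offset)
		return false;
	return last || length % DEMO_PAGE_SIZE == 0;
}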
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 7a5dff8d9cc6..accf15ff1306 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -61,9 +61,11 @@ static int ucsi_acpi_read_cci(struct ucsi *ucsi, u32 *cci)
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
- if (ret)
- return ret;
+ if (UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) {
+ ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (ret)
+ return ret;
+ }
memcpy(cci, ua->base + UCSI_CCI, sizeof(*cci));
@@ -73,11 +75,6 @@ static int ucsi_acpi_read_cci(struct ucsi *ucsi, u32 *cci)
static int ucsi_acpi_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- int ret;
-
- ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
- if (ret)
- return ret;
memcpy(val, ua->base + UCSI_MESSAGE_IN, val_len);
@@ -102,42 +99,6 @@ static const struct ucsi_operations ucsi_acpi_ops = {
.async_control = ucsi_acpi_async_control
};
-static int
-ucsi_zenbook_read_cci(struct ucsi *ucsi, u32 *cci)
-{
- struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- int ret;
-
- if (UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) {
- ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
- if (ret)
- return ret;
- }
-
- memcpy(cci, ua->base + UCSI_CCI, sizeof(*cci));
-
- return 0;
-}
-
-static int
-ucsi_zenbook_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
-{
- struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
-
- /* UCSI_MESSAGE_IN is never read for PPM_RESET, return stored data */
- memcpy(val, ua->base + UCSI_MESSAGE_IN, val_len);
-
- return 0;
-}
-
-static const struct ucsi_operations ucsi_zenbook_ops = {
- .read_version = ucsi_acpi_read_version,
- .read_cci = ucsi_zenbook_read_cci,
- .read_message_in = ucsi_zenbook_read_message_in,
- .sync_control = ucsi_sync_control_common,
- .async_control = ucsi_acpi_async_control
-};
-
static int ucsi_gram_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
u16 bogus_change = UCSI_CONSTAT_POWER_LEVEL_CHANGE |
@@ -192,13 +153,6 @@ static const struct ucsi_operations ucsi_gram_ops = {
static const struct dmi_system_id ucsi_acpi_quirks[] = {
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
- },
- .driver_data = (void *)&ucsi_zenbook_ops,
- },
- {
- .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "LG gram PC"),
DMI_MATCH(DMI_PRODUCT_NAME, "90Q"),
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index f7000d383a4e..9b6cb76e6328 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -172,12 +172,12 @@ static int pmic_glink_ucsi_async_control(struct ucsi *__ucsi, u64 command)
static void pmic_glink_ucsi_update_connector(struct ucsi_connector *con)
{
struct pmic_glink_ucsi *ucsi = ucsi_get_drvdata(con->ucsi);
- int i;
- for (i = 0; i < PMIC_GLINK_MAX_PORTS; i++) {
- if (ucsi->port_orientation[i])
- con->typec_cap.orientation_aware = true;
- }
+ if (con->num > PMIC_GLINK_MAX_PORTS ||
+ !ucsi->port_orientation[con->num - 1])
+ return;
+
+ con->typec_cap.orientation_aware = true;
}
static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 7527e277c898..eb7387ee6ebd 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -1517,7 +1517,8 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev,
struct mlx5_vhca_qp *host_qp;
struct mlx5_vhca_qp *fw_qp;
struct mlx5_core_dev *mdev;
- u32 max_msg_size = PAGE_SIZE;
+ u32 log_max_msg_size;
+ u32 max_msg_size;
u64 rq_size = SZ_2M;
u32 max_recv_wr;
int err;
@@ -1534,6 +1535,12 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev,
}
mdev = mvdev->mdev;
+ log_max_msg_size = MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_msg_size);
+ max_msg_size = (1ULL << log_max_msg_size);
+ /* The RQ must hold at least 4 WQEs/messages for successful QP creation */
+ if (rq_size < 4 * max_msg_size)
+ rq_size = 4 * max_msg_size;
+
memset(tracker, 0, sizeof(*tracker));
tracker->uar = mlx5_get_uars_page(mdev);
if (IS_ERR(tracker->uar)) {
@@ -1623,25 +1630,41 @@ set_report_output(u32 size, int index, struct mlx5_vhca_qp *qp,
{
u32 entry_size = MLX5_ST_SZ_BYTES(page_track_report_entry);
u32 nent = size / entry_size;
+ u32 nent_in_page;
+ u32 nent_to_set;
struct page *page;
+ u32 page_offset;
+ u32 page_index;
+ u32 buf_offset;
+ void *kaddr;
u64 addr;
u64 *buf;
int i;
- if (WARN_ON(index >= qp->recv_buf.npages ||
+ buf_offset = index * qp->max_msg_size;
+ if (WARN_ON(buf_offset + size >= qp->recv_buf.npages * PAGE_SIZE ||
(nent > qp->max_msg_size / entry_size)))
return;
- page = qp->recv_buf.page_list[index];
- buf = kmap_local_page(page);
- for (i = 0; i < nent; i++) {
- addr = MLX5_GET(page_track_report_entry, buf + i,
- dirty_address_low);
- addr |= (u64)MLX5_GET(page_track_report_entry, buf + i,
- dirty_address_high) << 32;
- iova_bitmap_set(dirty, addr, qp->tracked_page_size);
- }
- kunmap_local(buf);
+ do {
+ page_index = buf_offset / PAGE_SIZE;
+ page_offset = buf_offset % PAGE_SIZE;
+ nent_in_page = (PAGE_SIZE - page_offset) / entry_size;
+ page = qp->recv_buf.page_list[page_index];
+ kaddr = kmap_local_page(page);
+ buf = kaddr + page_offset;
+ nent_to_set = min(nent, nent_in_page);
+ for (i = 0; i < nent_to_set; i++) {
+ addr = MLX5_GET(page_track_report_entry, buf + i,
+ dirty_address_low);
+ addr |= (u64)MLX5_GET(page_track_report_entry, buf + i,
+ dirty_address_high) << 32;
+ iova_bitmap_set(dirty, addr, qp->tracked_page_size);
+ }
+ kunmap_local(kaddr);
+ buf_offset += (nent_to_set * entry_size);
+ nent -= nent_to_set;
+ } while (nent);
}
static void
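The mlx5 tracker hunk above walks a report that may span several pages of the receive buffer. A hedged sketch of the chunking scheme in isolation (the callback and constant are illustrative only):

#include <stdint.h>

#define DEMO_PAGE_SIZE 4096u

/* Hedged sketch of the chunked walk above: a linear offset into a
 * page-list backed buffer is split into (page index, in-page offset),
 * and each round consumes at most the entries that still fit in the
 * current page.
 */
void walk_entries(uint32_t buf_offset, uint32_t nent, uint32_t entry_size,
		  void (*handle)(uint32_t page_index, uint32_t page_offset,
				 uint32_t count))
{
	while (nent) {
		uint32_t page_index = buf_offset / DEMO_PAGE_SIZE;
		uint32_t page_offset = buf_offset % DEMO_PAGE_SIZE;
		uint32_t in_page = (DEMO_PAGE_SIZE - page_offset) / entry_size;
		uint32_t n = nent < in_page ? nent : in_page;

		if (!n)	/* an entry would straddle a page; not handled here */
			break;

		handle(page_index, page_offset, n);
		buf_offset += n * entry_size;
		nent -= n;
	}
}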
diff --git a/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c b/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
index 56a3859dda8a..4230b817a80b 100644
--- a/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
+++ b/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
@@ -87,12 +87,8 @@ static int mmio_guard_ioremap_hook(phys_addr_t phys, size_t size,
while (phys < end) {
const int func_id = ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_FUNC_ID;
- int err;
-
- err = arm_smccc_do_one_page(func_id, phys);
- if (err)
- return err;
+ WARN_ON_ONCE(arm_smccc_do_one_page(func_id, phys));
phys += PAGE_SIZE;
}
diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c
index d4f739932f0b..62dabf223d90 100644
--- a/drivers/watchdog/apple_wdt.c
+++ b/drivers/watchdog/apple_wdt.c
@@ -130,7 +130,7 @@ static int apple_wdt_restart(struct watchdog_device *wdd, unsigned long mode,
* can take up to ~20-25ms until the SoC is actually reset. Just wait
* 50ms here to be safe.
*/
- (void)readl_relaxed(wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+ (void)readl(wdt->regs + APPLE_WDT_WD1_CUR_TIME);
mdelay(50);
return 0;
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 35b358bcf94c..f01ed38aba67 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -82,6 +82,13 @@
#define TCO2_CNT(p) (TCOBASE(p) + 0x0a) /* TCO2 Control Register */
#define TCOv2_TMR(p) (TCOBASE(p) + 0x12) /* TCOv2 Timer Initial Value*/
+/*
+ * NMI_NOW is bit 8 of TCO1_CNT register
+ * Read/Write
+ * This bit is implemented as RW but has no effect on HW.
+ */
+#define NMI_NOW BIT(8)
+
/* internal variables */
struct iTCO_wdt_private {
struct watchdog_device wddev;
@@ -219,13 +226,23 @@ static int update_no_reboot_bit_cnt(void *priv, bool set)
struct iTCO_wdt_private *p = priv;
u16 val, newval;
- val = inw(TCO1_CNT(p));
+ /*
+ * Writing 1 back to the NMI_NOW bit of the TCO1_CNT register
+ * inverts the bit, which makes a plain read-back comparison of
+ * the register value unreliable.
+ *
+ * Masking NMI_NOW out of the TCO1_CNT values avoids spurious
+ * NMI_NOW inversions on the following write operation.
+ */
+ val = inw(TCO1_CNT(p)) & ~NMI_NOW;
if (set)
val |= BIT(0);
else
val &= ~BIT(0);
outw(val, TCO1_CNT(p));
- newval = inw(TCO1_CNT(p));
+ newval = inw(TCO1_CNT(p)) & ~NMI_NOW;
/* make sure the update is successful */
return val != newval ? -EIO : 0;
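The iTCO hunk above masks NMI_NOW out of both the written value and the read-back value so the verification only compares the bit being updated. A self-contained sketch of that pattern (the accessors are stand-ins, not the driver's port I/O helpers):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_NMI_NOW	(1u << 8)
#define DEMO_NO_REBOOT	(1u << 0)

/* Hedged sketch of the masked read-modify-write-verify pattern: a bit that
 * may toggle on write-back (NMI_NOW here) is excluded from both the value
 * that is written and the value that is read back, so the verification only
 * compares the bit being updated.
 */
int update_bit_verified(uint16_t (*rd)(void), void (*wr)(uint16_t), bool set)
{
	uint16_t val, newval;

	val = rd() & ~DEMO_NMI_NOW;
	if (set)
		val |= DEMO_NO_REBOOT;
	else
		val &= ~DEMO_NO_REBOOT;
	wr(val);
	newval = rd() & ~DEMO_NMI_NOW;

	return val != newval ? -1 : 0;
}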
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index c35f85ce8d69..e2d7a57d6ea2 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -225,9 +225,15 @@ static int mtk_wdt_restart(struct watchdog_device *wdt_dev,
{
struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
void __iomem *wdt_base;
+ u32 reg;
wdt_base = mtk_wdt->wdt_base;
+ /* Enable reset in order to issue a system reset instead of an IRQ */
+ reg = readl(wdt_base + WDT_MODE);
+ reg &= ~WDT_MODE_IRQ_EN;
+ writel(reg | WDT_MODE_KEY, wdt_base + WDT_MODE);
+
while (1) {
writel(WDT_SWRST_KEY, wdt_base + WDT_SWRST);
mdelay(5);
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index 4895a69015a8..563d842014df 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -61,7 +61,7 @@
#define MAX_HW_ERROR 250
-static int heartbeat = DEFAULT_HEARTBEAT;
+static int heartbeat;
/*
* struct to hold data for each WDT device
@@ -252,6 +252,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
wdd->min_timeout = 1;
wdd->max_hw_heartbeat_ms = (WDT_PRELOAD_MAX << WDT_PRELOAD_SHIFT) /
wdt->freq * 1000;
+ wdd->timeout = DEFAULT_HEARTBEAT;
wdd->parent = dev;
watchdog_set_drvdata(wdd, wdt);
diff --git a/drivers/watchdog/xilinx_wwdt.c b/drivers/watchdog/xilinx_wwdt.c
index d271e2e8d6e2..3d2a156f7180 100644
--- a/drivers/watchdog/xilinx_wwdt.c
+++ b/drivers/watchdog/xilinx_wwdt.c
@@ -2,7 +2,7 @@
/*
* Window watchdog device driver for Xilinx Versal WWDT
*
- * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
+ * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc.
*/
#include <linux/clk.h>
@@ -36,6 +36,12 @@
#define XWWDT_CLOSE_WINDOW_PERCENT 50
+/* Maximum count value of each 32 bit window */
+#define XWWDT_MAX_COUNT_WINDOW GENMASK(31, 0)
+
+/* Maximum count value of closed and open window combined */
+#define XWWDT_MAX_COUNT_WINDOW_COMBINED GENMASK_ULL(32, 1)
+
static int wwdt_timeout;
static int closed_window_percent;
@@ -54,6 +60,8 @@ MODULE_PARM_DESC(closed_window_percent,
* @xilinx_wwdt_wdd: watchdog device structure
* @freq: source clock frequency of WWDT
* @close_percent: Closed window percent
+ * @closed_timeout: Closed window timeout in ticks
+ * @open_timeout: Open window timeout in ticks
*/
struct xwwdt_device {
void __iomem *base;
@@ -61,27 +69,22 @@ struct xwwdt_device {
struct watchdog_device xilinx_wwdt_wdd;
unsigned long freq;
u32 close_percent;
+ u64 closed_timeout;
+ u64 open_timeout;
};
static int xilinx_wwdt_start(struct watchdog_device *wdd)
{
struct xwwdt_device *xdev = watchdog_get_drvdata(wdd);
struct watchdog_device *xilinx_wwdt_wdd = &xdev->xilinx_wwdt_wdd;
- u64 time_out, closed_timeout, open_timeout;
u32 control_status_reg;
- /* Calculate timeout count */
- time_out = xdev->freq * wdd->timeout;
- closed_timeout = div_u64(time_out * xdev->close_percent, 100);
- open_timeout = time_out - closed_timeout;
- wdd->min_hw_heartbeat_ms = xdev->close_percent * 10 * wdd->timeout;
-
spin_lock(&xdev->spinlock);
iowrite32(XWWDT_MWR_MASK, xdev->base + XWWDT_MWR_OFFSET);
iowrite32(~(u32)XWWDT_ESR_WEN_MASK, xdev->base + XWWDT_ESR_OFFSET);
- iowrite32((u32)closed_timeout, xdev->base + XWWDT_FWR_OFFSET);
- iowrite32((u32)open_timeout, xdev->base + XWWDT_SWR_OFFSET);
+ iowrite32((u32)xdev->closed_timeout, xdev->base + XWWDT_FWR_OFFSET);
+ iowrite32((u32)xdev->open_timeout, xdev->base + XWWDT_SWR_OFFSET);
/* Enable the window watchdog timer */
control_status_reg = ioread32(xdev->base + XWWDT_ESR_OFFSET);
@@ -133,7 +136,12 @@ static int xwwdt_probe(struct platform_device *pdev)
struct watchdog_device *xilinx_wwdt_wdd;
struct device *dev = &pdev->dev;
struct xwwdt_device *xdev;
+ u64 max_per_window_ms;
+ u64 min_per_window_ms;
+ u64 timeout_count;
struct clk *clk;
+ u32 timeout_ms;
+ u64 ms_count;
int ret;
xdev = devm_kzalloc(dev, sizeof(*xdev), GFP_KERNEL);
@@ -154,12 +162,13 @@ static int xwwdt_probe(struct platform_device *pdev)
return PTR_ERR(clk);
xdev->freq = clk_get_rate(clk);
- if (!xdev->freq)
+ if (xdev->freq < 1000000)
return -EINVAL;
xilinx_wwdt_wdd->min_timeout = XWWDT_MIN_TIMEOUT;
xilinx_wwdt_wdd->timeout = XWWDT_DEFAULT_TIMEOUT;
- xilinx_wwdt_wdd->max_hw_heartbeat_ms = 1000 * xilinx_wwdt_wdd->timeout;
+ xilinx_wwdt_wdd->max_hw_heartbeat_ms =
+ div64_u64(XWWDT_MAX_COUNT_WINDOW_COMBINED, xdev->freq) * 1000;
if (closed_window_percent == 0 || closed_window_percent >= 100)
xdev->close_percent = XWWDT_CLOSE_WINDOW_PERCENT;
@@ -167,6 +176,48 @@ static int xwwdt_probe(struct platform_device *pdev)
xdev->close_percent = closed_window_percent;
watchdog_init_timeout(xilinx_wwdt_wdd, wwdt_timeout, &pdev->dev);
+
+ /* Calculate the number of timer ticks in one millisecond */
+ ms_count = div_u64(xdev->freq, 1000);
+ timeout_ms = xilinx_wwdt_wdd->timeout * 1000;
+ timeout_count = timeout_ms * ms_count;
+
+ if (timeout_ms > xilinx_wwdt_wdd->max_hw_heartbeat_ms) {
+ /*
+ * The requested timeout exceeds what both hardware windows can
+ * represent, so rely solely on the open window and set the
+ * minimum hardware heartbeat to 0 so pings are never restricted.
+ */
+ xdev->closed_timeout = 0;
+ xdev->open_timeout = XWWDT_MAX_COUNT_WINDOW;
+ xilinx_wwdt_wdd->min_hw_heartbeat_ms = 0;
+ xilinx_wwdt_wdd->max_hw_heartbeat_ms = xilinx_wwdt_wdd->max_hw_heartbeat_ms / 2;
+ } else {
+ xdev->closed_timeout = div64_u64(timeout_count * xdev->close_percent, 100);
+ xilinx_wwdt_wdd->min_hw_heartbeat_ms =
+ div64_u64(timeout_ms * xdev->close_percent, 100);
+
+ if (timeout_ms > xilinx_wwdt_wdd->max_hw_heartbeat_ms / 2) {
+ max_per_window_ms = xilinx_wwdt_wdd->max_hw_heartbeat_ms / 2;
+ min_per_window_ms = timeout_ms - max_per_window_ms;
+
+ if (xilinx_wwdt_wdd->min_hw_heartbeat_ms > max_per_window_ms) {
+ dev_info(xilinx_wwdt_wdd->parent,
+ "Closed window cannot be set to %d%%. Using maximum supported value.\n",
+ xdev->close_percent);
+ xdev->closed_timeout = max_per_window_ms * ms_count;
+ xilinx_wwdt_wdd->min_hw_heartbeat_ms = max_per_window_ms;
+ } else if (xilinx_wwdt_wdd->min_hw_heartbeat_ms < min_per_window_ms) {
+ dev_info(xilinx_wwdt_wdd->parent,
+ "Closed window cannot be set to %d%%. Using minimum supported value.\n",
+ xdev->close_percent);
+ xdev->closed_timeout = min_per_window_ms * ms_count;
+ xilinx_wwdt_wdd->min_hw_heartbeat_ms = min_per_window_ms;
+ }
+ }
+ xdev->open_timeout = timeout_count - xdev->closed_timeout;
+ }
+
spin_lock_init(&xdev->spinlock);
watchdog_set_drvdata(xilinx_wwdt_wdd, xdev);
watchdog_set_nowayout(xilinx_wwdt_wdd, 1);